From 64c785082c21a88d3c25c2b95f16fe29eb5ee862 Mon Sep 17 00:00:00 2001 From: Yu Zhang Date: Thu, 30 Sep 2021 01:51:53 +0800 Subject: KVM: nVMX: Use INVALID_GPA for pointers used in nVMX. Clean up nested.c and vmx.c by using INVALID_GPA instead of "-1ull", to denote an invalid address in nested VMX. Affected addresses are the ones of VMXON region, current VMCS, VMCS link pointer, virtual- APIC page, ENCLS-exiting bitmap, and IO bitmap etc. Suggested-by: Sean Christopherson Signed-off-by: Yu Zhang Message-Id: <20210929175154.11396-2-yu.c.zhang@linux.intel.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/nested.c | 60 +++++++++++++++++++++++------------------------ arch/x86/kvm/vmx/vmx.c | 4 ++-- 2 files changed, 32 insertions(+), 32 deletions(-) diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index eedcebf58004..371b1dee17c1 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -191,7 +191,7 @@ static int nested_vmx_fail(struct kvm_vcpu *vcpu, u32 vm_instruction_error) * failValid writes the error number to the current VMCS, which * can't be done if there isn't a current VMCS. */ - if (vmx->nested.current_vmptr == -1ull && + if (vmx->nested.current_vmptr == INVALID_GPA && !evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) return nested_vmx_failInvalid(vcpu); @@ -218,7 +218,7 @@ static inline u64 vmx_control_msr(u32 low, u32 high) static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx) { secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_SHADOW_VMCS); - vmcs_write64(VMCS_LINK_POINTER, -1ull); + vmcs_write64(VMCS_LINK_POINTER, INVALID_GPA); vmx->nested.need_vmcs12_to_shadow_sync = false; } @@ -292,7 +292,7 @@ static void free_nested(struct kvm_vcpu *vcpu) vmx->nested.smm.vmxon = false; free_vpid(vmx->nested.vpid02); vmx->nested.posted_intr_nv = -1; - vmx->nested.current_vmptr = -1ull; + vmx->nested.current_vmptr = INVALID_GPA; if (enable_shadow_vmcs) { vmx_disable_shadow_vmcs(vmx); vmcs_clear(vmx->vmcs01.shadow_vmcs); @@ -709,7 +709,7 @@ static void nested_cache_shadow_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *shadow; if (!nested_cpu_has_shadow_vmcs(vmcs12) || - vmcs12->vmcs_link_pointer == -1ull) + vmcs12->vmcs_link_pointer == INVALID_GPA) return; shadow = get_shadow_vmcs12(vcpu); @@ -727,7 +727,7 @@ static void nested_flush_cached_shadow_vmcs12(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx = to_vmx(vcpu); if (!nested_cpu_has_shadow_vmcs(vmcs12) || - vmcs12->vmcs_link_pointer == -1ull) + vmcs12->vmcs_link_pointer == INVALID_GPA) return; kvm_write_guest(vmx->vcpu.kvm, vmcs12->vmcs_link_pointer, @@ -1994,7 +1994,7 @@ static enum nested_evmptrld_status nested_vmx_handle_enlightened_vmptrld( } if (unlikely(evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) { - vmx->nested.current_vmptr = -1ull; + vmx->nested.current_vmptr = INVALID_GPA; nested_release_evmcs(vcpu); @@ -2178,7 +2178,7 @@ static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx) } if (cpu_has_vmx_encls_vmexit()) - vmcs_write64(ENCLS_EXITING_BITMAP, -1ull); + vmcs_write64(ENCLS_EXITING_BITMAP, INVALID_GPA); /* * Set the MSR load/store lists to match L0's settings. 
Only the @@ -2197,7 +2197,7 @@ static void prepare_vmcs02_early_rare(struct vcpu_vmx *vmx, { prepare_vmcs02_constant_state(vmx); - vmcs_write64(VMCS_LINK_POINTER, -1ull); + vmcs_write64(VMCS_LINK_POINTER, INVALID_GPA); if (enable_vpid) { if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02) @@ -2949,7 +2949,7 @@ static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu, struct vmcs12 *shadow; struct kvm_host_map map; - if (vmcs12->vmcs_link_pointer == -1ull) + if (vmcs12->vmcs_link_pointer == INVALID_GPA) return 0; if (CC(!page_address_valid(vcpu, vmcs12->vmcs_link_pointer))) @@ -3216,7 +3216,7 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu) * Write an illegal value to VIRTUAL_APIC_PAGE_ADDR to * force VM-Entry to fail. */ - vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, -1ull); + vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, INVALID_GPA); } } @@ -3527,7 +3527,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) } if (CC(!evmptr_is_valid(vmx->nested.hv_evmcs_vmptr) && - vmx->nested.current_vmptr == -1ull)) + vmx->nested.current_vmptr == INVALID_GPA)) return nested_vmx_failInvalid(vcpu); vmcs12 = get_vmcs12(vcpu); @@ -4975,7 +4975,7 @@ static inline void nested_release_vmcs12(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); - if (vmx->nested.current_vmptr == -1ull) + if (vmx->nested.current_vmptr == INVALID_GPA) return; copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu)); @@ -4995,7 +4995,7 @@ static inline void nested_release_vmcs12(struct kvm_vcpu *vcpu) kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL); - vmx->nested.current_vmptr = -1ull; + vmx->nested.current_vmptr = INVALID_GPA; } /* Emulate the VMXOFF instruction */ @@ -5090,12 +5090,12 @@ static int handle_vmread(struct kvm_vcpu *vcpu) return 1; /* - * In VMX non-root operation, when the VMCS-link pointer is -1ull, + * In VMX non-root operation, when the VMCS-link pointer is INVALID_GPA, * any VMREAD sets the ALU flags for VMfailInvalid. */ - if (vmx->nested.current_vmptr == -1ull || + if (vmx->nested.current_vmptr == INVALID_GPA || (is_guest_mode(vcpu) && - get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)) + get_vmcs12(vcpu)->vmcs_link_pointer == INVALID_GPA)) return nested_vmx_failInvalid(vcpu); /* Decode instruction info and find the field to read */ @@ -5182,12 +5182,12 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu) return 1; /* - * In VMX non-root operation, when the VMCS-link pointer is -1ull, + * In VMX non-root operation, when the VMCS-link pointer is INVALID_GPA, * any VMWRITE sets the ALU flags for VMfailInvalid. 
*/ - if (vmx->nested.current_vmptr == -1ull || + if (vmx->nested.current_vmptr == INVALID_GPA || (is_guest_mode(vcpu) && - get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)) + get_vmcs12(vcpu)->vmcs_link_pointer == INVALID_GPA)) return nested_vmx_failInvalid(vcpu); if (instr_info & BIT(10)) @@ -5630,7 +5630,7 @@ bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port, gpa_t bitmap, last_bitmap; u8 b; - last_bitmap = (gpa_t)-1; + last_bitmap = INVALID_GPA; b = -1; while (size > 0) { @@ -6106,8 +6106,8 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu, .format = KVM_STATE_NESTED_FORMAT_VMX, .size = sizeof(kvm_state), .hdr.vmx.flags = 0, - .hdr.vmx.vmxon_pa = -1ull, - .hdr.vmx.vmcs12_pa = -1ull, + .hdr.vmx.vmxon_pa = INVALID_GPA, + .hdr.vmx.vmcs12_pa = INVALID_GPA, .hdr.vmx.preemption_timer_deadline = 0, }; struct kvm_vmx_nested_state_data __user *user_vmx_nested_state = @@ -6133,7 +6133,7 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu, if (is_guest_mode(vcpu) && nested_cpu_has_shadow_vmcs(vmcs12) && - vmcs12->vmcs_link_pointer != -1ull) + vmcs12->vmcs_link_pointer != INVALID_GPA) kvm_state.size += sizeof(user_vmx_nested_state->shadow_vmcs12); } @@ -6209,7 +6209,7 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu, return -EFAULT; if (nested_cpu_has_shadow_vmcs(vmcs12) && - vmcs12->vmcs_link_pointer != -1ull) { + vmcs12->vmcs_link_pointer != INVALID_GPA) { if (copy_to_user(user_vmx_nested_state->shadow_vmcs12, get_shadow_vmcs12(vcpu), VMCS12_SIZE)) return -EFAULT; @@ -6244,11 +6244,11 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu, if (kvm_state->format != KVM_STATE_NESTED_FORMAT_VMX) return -EINVAL; - if (kvm_state->hdr.vmx.vmxon_pa == -1ull) { + if (kvm_state->hdr.vmx.vmxon_pa == INVALID_GPA) { if (kvm_state->hdr.vmx.smm.flags) return -EINVAL; - if (kvm_state->hdr.vmx.vmcs12_pa != -1ull) + if (kvm_state->hdr.vmx.vmcs12_pa != INVALID_GPA) return -EINVAL; /* @@ -6302,7 +6302,7 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu, vmx_leave_nested(vcpu); - if (kvm_state->hdr.vmx.vmxon_pa == -1ull) + if (kvm_state->hdr.vmx.vmxon_pa == INVALID_GPA) return 0; vmx->nested.vmxon_ptr = kvm_state->hdr.vmx.vmxon_pa; @@ -6315,13 +6315,13 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu, /* See vmx_has_valid_vmcs12. 
*/ if ((kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE) || (kvm_state->flags & KVM_STATE_NESTED_EVMCS) || - (kvm_state->hdr.vmx.vmcs12_pa != -1ull)) + (kvm_state->hdr.vmx.vmcs12_pa != INVALID_GPA)) return -EINVAL; else return 0; } - if (kvm_state->hdr.vmx.vmcs12_pa != -1ull) { + if (kvm_state->hdr.vmx.vmcs12_pa != INVALID_GPA) { if (kvm_state->hdr.vmx.vmcs12_pa == kvm_state->hdr.vmx.vmxon_pa || !page_address_valid(vcpu, kvm_state->hdr.vmx.vmcs12_pa)) return -EINVAL; @@ -6366,7 +6366,7 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu, ret = -EINVAL; if (nested_cpu_has_shadow_vmcs(vmcs12) && - vmcs12->vmcs_link_pointer != -1ull) { + vmcs12->vmcs_link_pointer != INVALID_GPA) { struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu); if (kvm_state->size < diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 116b08904ac3..4d1514f6f36c 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -4340,7 +4340,7 @@ static void init_vmcs(struct vcpu_vmx *vmx) if (cpu_has_vmx_msr_bitmap()) vmcs_write64(MSR_BITMAP, __pa(vmx->vmcs01.msr_bitmap)); - vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */ + vmcs_write64(VMCS_LINK_POINTER, INVALID_GPA); /* 22.3.1.5 */ /* Control */ pin_controls_set(vmx, vmx_pin_based_exec_ctrl(vmx)); @@ -6902,7 +6902,7 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu) vcpu_setup_sgx_lepubkeyhash(vcpu); vmx->nested.posted_intr_nv = -1; - vmx->nested.current_vmptr = -1ull; + vmx->nested.current_vmptr = INVALID_GPA; vmx->nested.hv_evmcs_vmptr = EVMPTR_INVALID; vcpu->arch.microcode_version = 0x100000000ULL; -- cgit v1.2.3 From feb3162f9debbbeee5b00ad5a4e776f826dd9161 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Thu, 30 Sep 2021 01:51:54 +0800 Subject: KVM: nVMX: Reset vmxon_ptr upon VMXOFF emulation. Currently, 'vmx->nested.vmxon_ptr' is not reset upon VMXOFF emulation. This is not a problem per se as we never access it when !vmx->nested.vmxon. But this should be done to avoid any issue in the future. Also, initialize the vmxon_ptr when vcpu is created. Signed-off-by: Vitaly Kuznetsov Signed-off-by: Yu Zhang Message-Id: <20210929175154.11396-3-yu.c.zhang@linux.intel.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/nested.c | 1 + arch/x86/kvm/vmx/vmx.c | 1 + 2 files changed, 2 insertions(+) diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 371b1dee17c1..af1bbb73430a 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -290,6 +290,7 @@ static void free_nested(struct kvm_vcpu *vcpu) vmx->nested.vmxon = false; vmx->nested.smm.vmxon = false; + vmx->nested.vmxon_ptr = INVALID_GPA; free_vpid(vmx->nested.vpid02); vmx->nested.posted_intr_nv = -1; vmx->nested.current_vmptr = INVALID_GPA; diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 4d1514f6f36c..6323ed6a0746 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -6902,6 +6902,7 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu) vcpu_setup_sgx_lepubkeyhash(vcpu); vmx->nested.posted_intr_nv = -1; + vmx->nested.vmxon_ptr = INVALID_GPA; vmx->nested.current_vmptr = INVALID_GPA; vmx->nested.hv_evmcs_vmptr = EVMPTR_INVALID; -- cgit v1.2.3 From 11476d277e06bbd7e1ba3315e0cfc78f529be9e2 Mon Sep 17 00:00:00 2001 From: Yang Li Date: Wed, 29 Sep 2021 15:28:46 +0800 Subject: KVM: use vma_pages() helper Use vma_pages function on vma object instead of explicit computation. 
Fix the following coccicheck warning: ./virt/kvm/kvm_main.c:3526:29-35: WARNING: Consider using vma_pages helper on vma Reported-by: Abaci Robot Signed-off-by: Yang Li Message-Id: <1632900526-119643-1-git-send-email-yang.lee@linux.alibaba.com> Signed-off-by: Paolo Bonzini --- virt/kvm/kvm_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 7851f3a1b5f7..8f0e9ea11280 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -3523,7 +3523,7 @@ static const struct vm_operations_struct kvm_vcpu_vm_ops = { static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma) { struct kvm_vcpu *vcpu = file->private_data; - unsigned long pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; + unsigned long pages = vma_pages(vma); if ((kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff) || kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff + pages - 1)) && -- cgit v1.2.3 From 6470accc7ba948b0b3aca22b273fe84ec638a116 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Fri, 3 Sep 2021 09:51:36 +0200 Subject: KVM: x86: hyper-v: Avoid calling kvm_make_vcpus_request_mask() with vcpu_mask==NULL In preparation to making kvm_make_vcpus_request_mask() use for_each_set_bit() switch kvm_hv_flush_tlb() to calling kvm_make_all_cpus_request() for 'all cpus' case. Note: kvm_make_all_cpus_request() (unlike kvm_make_vcpus_request_mask()) currently dynamically allocates cpumask on each call and this is suboptimal. Both kvm_make_all_cpus_request() and kvm_make_vcpus_request_mask() are going to be switched to using pre-allocated per-cpu masks. Reviewed-by: Sean Christopherson Signed-off-by: Vitaly Kuznetsov Signed-off-by: Paolo Bonzini Message-Id: <20210903075141.403071-4-vkuznets@redhat.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/hyperv.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index d5124b520f76..192062f65c97 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -1838,16 +1838,19 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool cpumask_clear(&hv_vcpu->tlb_flush); - vcpu_mask = all_cpus ? NULL : - sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask, - vp_bitmap, vcpu_bitmap); - /* * vcpu->arch.cr3 may not be up-to-date for running vCPUs so we can't * analyze it here, flush TLB regardless of the specified address space. */ - kvm_make_vcpus_request_mask(kvm, KVM_REQ_TLB_FLUSH_GUEST, - NULL, vcpu_mask, &hv_vcpu->tlb_flush); + if (all_cpus) { + kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH_GUEST); + } else { + vcpu_mask = sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask, + vp_bitmap, vcpu_bitmap); + + kvm_make_vcpus_request_mask(kvm, KVM_REQ_TLB_FLUSH_GUEST, + NULL, vcpu_mask, &hv_vcpu->tlb_flush); + } ret_success: /* We always do full TLB flush, set 'Reps completed' = 'Rep Count' */ -- cgit v1.2.3 From ae0946cd3601752dc58f86d84258e5361e9c8cd4 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Fri, 3 Sep 2021 09:51:37 +0200 Subject: KVM: Optimize kvm_make_vcpus_request_mask() a bit Iterating over set bits in 'vcpu_bitmap' should be faster than going through all vCPUs, especially when just a few bits are set. Drop kvm_make_vcpus_request_mask() call from kvm_make_all_cpus_request_except() to avoid handling the special case when 'vcpu_bitmap' is NULL, move the code to kvm_make_all_cpus_request_except() itself. 
Signed-off-by: Vitaly Kuznetsov Reviewed-by: Sean Christopherson Signed-off-by: Paolo Bonzini Message-Id: <20210903075141.403071-5-vkuznets@redhat.com> Signed-off-by: Paolo Bonzini --- virt/kvm/kvm_main.c | 88 ++++++++++++++++++++++++++++++++--------------------- 1 file changed, 53 insertions(+), 35 deletions(-) diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 8f0e9ea11280..61101e7932f7 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -251,50 +251,57 @@ static inline bool kvm_kick_many_cpus(cpumask_var_t tmp, bool wait) return true; } +static void kvm_make_vcpu_request(struct kvm *kvm, struct kvm_vcpu *vcpu, + unsigned int req, cpumask_var_t tmp, + int current_cpu) +{ + int cpu; + + kvm_make_request(req, vcpu); + + if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu)) + return; + + /* + * tmp can be "unavailable" if cpumasks are allocated off stack as + * allocation of the mask is deliberately not fatal and is handled by + * falling back to kicking all online CPUs. + */ + if (!cpumask_available(tmp)) + return; + + /* + * Note, the vCPU could get migrated to a different pCPU at any point + * after kvm_request_needs_ipi(), which could result in sending an IPI + * to the previous pCPU. But, that's OK because the purpose of the IPI + * is to ensure the vCPU returns to OUTSIDE_GUEST_MODE, which is + * satisfied if the vCPU migrates. Entering READING_SHADOW_PAGE_TABLES + * after this point is also OK, as the requirement is only that KVM wait + * for vCPUs that were reading SPTEs _before_ any changes were + * finalized. See kvm_vcpu_kick() for more details on handling requests. + */ + if (kvm_request_needs_ipi(vcpu, req)) { + cpu = READ_ONCE(vcpu->cpu); + if (cpu != -1 && cpu != current_cpu) + __cpumask_set_cpu(cpu, tmp); + } +} + bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req, struct kvm_vcpu *except, unsigned long *vcpu_bitmap, cpumask_var_t tmp) { - int i, cpu, me; struct kvm_vcpu *vcpu; + int i, me; bool called; me = get_cpu(); - kvm_for_each_vcpu(i, vcpu, kvm) { - if ((vcpu_bitmap && !test_bit(i, vcpu_bitmap)) || - vcpu == except) + for_each_set_bit(i, vcpu_bitmap, KVM_MAX_VCPUS) { + vcpu = kvm_get_vcpu(kvm, i); + if (!vcpu || vcpu == except) continue; - - kvm_make_request(req, vcpu); - - if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu)) - continue; - - /* - * tmp can be "unavailable" if cpumasks are allocated off stack - * as allocation of the mask is deliberately not fatal and is - * handled by falling back to kicking all online CPUs. - */ - if (!cpumask_available(tmp)) - continue; - - /* - * Note, the vCPU could get migrated to a different pCPU at any - * point after kvm_request_needs_ipi(), which could result in - * sending an IPI to the previous pCPU. But, that's ok because - * the purpose of the IPI is to ensure the vCPU returns to - * OUTSIDE_GUEST_MODE, which is satisfied if the vCPU migrates. - * Entering READING_SHADOW_PAGE_TABLES after this point is also - * ok, as the requirement is only that KVM wait for vCPUs that - * were reading SPTEs _before_ any changes were finalized. See - * kvm_vcpu_kick() for more details on handling requests. 
- */ - if (kvm_request_needs_ipi(vcpu, req)) { - cpu = READ_ONCE(vcpu->cpu); - if (cpu != -1 && cpu != me) - __cpumask_set_cpu(cpu, tmp); - } + kvm_make_vcpu_request(kvm, vcpu, req, tmp, me); } called = kvm_kick_many_cpus(tmp, !!(req & KVM_REQUEST_WAIT)); @@ -306,12 +313,23 @@ bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req, bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req, struct kvm_vcpu *except) { + struct kvm_vcpu *vcpu; cpumask_var_t cpus; bool called; + int i, me; zalloc_cpumask_var(&cpus, GFP_ATOMIC); - called = kvm_make_vcpus_request_mask(kvm, req, except, NULL, cpus); + me = get_cpu(); + + kvm_for_each_vcpu(i, vcpu, kvm) { + if (vcpu == except) + continue; + kvm_make_vcpu_request(kvm, vcpu, req, cpus, me); + } + + called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT)); + put_cpu(); free_cpumask_var(cpus); return called; -- cgit v1.2.3 From 381cecc5d7b777ada7cdf12f5b0bf4caf43bf7aa Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Fri, 3 Sep 2021 09:51:38 +0200 Subject: KVM: Drop 'except' parameter from kvm_make_vcpus_request_mask() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Both remaining callers of kvm_make_vcpus_request_mask() pass 'NULL' for 'except' parameter so it can just be dropped. No functional change intended ©. Suggested-by: Sean Christopherson Reviewed-by: Sean Christopherson Signed-off-by: Vitaly Kuznetsov Signed-off-by: Paolo Bonzini Message-Id: <20210903075141.403071-6-vkuznets@redhat.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/hyperv.c | 2 +- arch/x86/kvm/x86.c | 2 +- include/linux/kvm_host.h | 1 - virt/kvm/kvm_main.c | 3 +-- 4 files changed, 3 insertions(+), 5 deletions(-) diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index 192062f65c97..b4b432a164ae 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -1849,7 +1849,7 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool vp_bitmap, vcpu_bitmap); kvm_make_vcpus_request_mask(kvm, KVM_REQ_TLB_FLUSH_GUEST, - NULL, vcpu_mask, &hv_vcpu->tlb_flush); + vcpu_mask, &hv_vcpu->tlb_flush); } ret_success: diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index aabd3a2ec1bc..7212ba654ba2 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -9247,7 +9247,7 @@ void kvm_make_scan_ioapic_request_mask(struct kvm *kvm, zalloc_cpumask_var(&cpus, GFP_ATOMIC); kvm_make_vcpus_request_mask(kvm, KVM_REQ_SCAN_IOAPIC, - NULL, vcpu_bitmap, cpus); + vcpu_bitmap, cpus); free_cpumask_var(cpus); } diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 0f18df7fe874..89e1a0069833 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -160,7 +160,6 @@ static inline bool is_error_page(struct page *page) #define KVM_ARCH_REQ(nr) KVM_ARCH_REQ_FLAGS(nr, 0) bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req, - struct kvm_vcpu *except, unsigned long *vcpu_bitmap, cpumask_var_t tmp); bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req); bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req, diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 61101e7932f7..bcadbc0a70f2 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -288,7 +288,6 @@ static void kvm_make_vcpu_request(struct kvm *kvm, struct kvm_vcpu *vcpu, } bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req, - struct kvm_vcpu *except, unsigned long *vcpu_bitmap, cpumask_var_t tmp) { struct kvm_vcpu *vcpu; @@ -299,7 +298,7 
@@ bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req, for_each_set_bit(i, vcpu_bitmap, KVM_MAX_VCPUS) { vcpu = kvm_get_vcpu(kvm, i); - if (!vcpu || vcpu == except) + if (!vcpu) continue; kvm_make_vcpu_request(kvm, vcpu, req, tmp, me); } -- cgit v1.2.3 From baff59ccdc657d290be51b95b38ebe5de40036b4 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Fri, 3 Sep 2021 09:51:40 +0200 Subject: KVM: Pre-allocate cpumasks for kvm_make_all_cpus_request_except() Allocating cpumask dynamically in zalloc_cpumask_var() is not ideal. Allocation is somewhat slow and can (in theory and when CPUMASK_OFFSTACK) fail. kvm_make_all_cpus_request_except() already disables preemption so we can use pre-allocated per-cpu cpumasks instead. Signed-off-by: Vitaly Kuznetsov Reviewed-by: Sean Christopherson Signed-off-by: Paolo Bonzini Message-Id: <20210903075141.403071-8-vkuznets@redhat.com> Signed-off-by: Paolo Bonzini --- virt/kvm/kvm_main.c | 29 +++++++++++++++++++++++------ 1 file changed, 23 insertions(+), 6 deletions(-) diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index bcadbc0a70f2..9cb0fd1723e6 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -155,6 +155,8 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm); static unsigned long long kvm_createvm_count; static unsigned long long kvm_active_vms; +static DEFINE_PER_CPU(cpumask_var_t, cpu_kick_mask); + __weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, unsigned long start, unsigned long end) { @@ -313,14 +315,15 @@ bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req, struct kvm_vcpu *except) { struct kvm_vcpu *vcpu; - cpumask_var_t cpus; + struct cpumask *cpus; bool called; int i, me; - zalloc_cpumask_var(&cpus, GFP_ATOMIC); - me = get_cpu(); + cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask); + cpumask_clear(cpus); + kvm_for_each_vcpu(i, vcpu, kvm) { if (vcpu == except) continue; @@ -330,7 +333,6 @@ bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req, called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT)); put_cpu(); - free_cpumask_var(cpus); return called; } @@ -5560,9 +5562,17 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, goto out_free_3; } + for_each_possible_cpu(cpu) { + if (!alloc_cpumask_var_node(&per_cpu(cpu_kick_mask, cpu), + GFP_KERNEL, cpu_to_node(cpu))) { + r = -ENOMEM; + goto out_free_4; + } + } + r = kvm_async_pf_init(); if (r) - goto out_free; + goto out_free_5; kvm_chardev_ops.owner = module; kvm_vm_fops.owner = module; @@ -5588,7 +5598,10 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, out_unreg: kvm_async_pf_deinit(); -out_free: +out_free_5: + for_each_possible_cpu(cpu) + free_cpumask_var(per_cpu(cpu_kick_mask, cpu)); +out_free_4: kmem_cache_destroy(kvm_vcpu_cache); out_free_3: unregister_reboot_notifier(&kvm_reboot_notifier); @@ -5608,8 +5621,12 @@ EXPORT_SYMBOL_GPL(kvm_init); void kvm_exit(void) { + int cpu; + debugfs_remove_recursive(kvm_debugfs_dir); misc_deregister(&kvm_dev); + for_each_possible_cpu(cpu) + free_cpumask_var(per_cpu(cpu_kick_mask, cpu)); kmem_cache_destroy(kvm_vcpu_cache); kvm_async_pf_deinit(); unregister_syscore_ops(&kvm_syscore_ops); -- cgit v1.2.3 From 620b2438abf98f09e19802cbc3bc2e98179cdbe2 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Fri, 3 Sep 2021 09:51:41 +0200 Subject: KVM: Make kvm_make_vcpus_request_mask() use pre-allocated cpu_kick_mask kvm_make_vcpus_request_mask() already disables preemption so just like 
kvm_make_all_cpus_request_except() it can be switched to using pre-allocated per-cpu cpumasks. This allows for improvements for both users of the function: in Hyper-V emulation code 'tlb_flush' can now be dropped from 'struct kvm_vcpu_hv' and kvm_make_scan_ioapic_request_mask() gets rid of dynamic allocation. cpumask_available() checks in kvm_make_vcpu_request() and kvm_kick_many_cpus() can now be dropped as they checks for an impossible condition: kvm_init() makes sure per-cpu masks are allocated. Signed-off-by: Vitaly Kuznetsov Reviewed-by: Sean Christopherson Signed-off-by: Paolo Bonzini Message-Id: <20210903075141.403071-9-vkuznets@redhat.com> Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 1 - arch/x86/kvm/hyperv.c | 5 +---- arch/x86/kvm/x86.c | 9 +-------- include/linux/kvm_host.h | 2 +- virt/kvm/kvm_main.c | 29 +++++++++-------------------- 5 files changed, 12 insertions(+), 34 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index f8f48a7ec577..120ac07e4094 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -581,7 +581,6 @@ struct kvm_vcpu_hv { struct kvm_hyperv_exit exit; struct kvm_vcpu_hv_stimer stimer[HV_SYNIC_STIMER_COUNT]; DECLARE_BITMAP(stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT); - cpumask_t tlb_flush; bool enforce_cpuid; struct { u32 features_eax; /* HYPERV_CPUID_FEATURES.EAX */ diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index b4b432a164ae..6f11cda2bfa4 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -1754,7 +1754,6 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool int i; gpa_t gpa; struct kvm *kvm = vcpu->kvm; - struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); struct hv_tlb_flush_ex flush_ex; struct hv_tlb_flush flush; u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS]; @@ -1836,8 +1835,6 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool } } - cpumask_clear(&hv_vcpu->tlb_flush); - /* * vcpu->arch.cr3 may not be up-to-date for running vCPUs so we can't * analyze it here, flush TLB regardless of the specified address space. 
@@ -1849,7 +1846,7 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool vp_bitmap, vcpu_bitmap); kvm_make_vcpus_request_mask(kvm, KVM_REQ_TLB_FLUSH_GUEST, - vcpu_mask, &hv_vcpu->tlb_flush); + vcpu_mask); } ret_success: diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 7212ba654ba2..03568cbbe8bd 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -9242,14 +9242,7 @@ static void process_smi(struct kvm_vcpu *vcpu) void kvm_make_scan_ioapic_request_mask(struct kvm *kvm, unsigned long *vcpu_bitmap) { - cpumask_var_t cpus; - - zalloc_cpumask_var(&cpus, GFP_ATOMIC); - - kvm_make_vcpus_request_mask(kvm, KVM_REQ_SCAN_IOAPIC, - vcpu_bitmap, cpus); - - free_cpumask_var(cpus); + kvm_make_vcpus_request_mask(kvm, KVM_REQ_SCAN_IOAPIC, vcpu_bitmap); } void kvm_make_scan_ioapic_request(struct kvm *kvm) diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 89e1a0069833..f1b96a2ebaa7 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -160,7 +160,7 @@ static inline bool is_error_page(struct page *page) #define KVM_ARCH_REQ(nr) KVM_ARCH_REQ_FLAGS(nr, 0) bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req, - unsigned long *vcpu_bitmap, cpumask_var_t tmp); + unsigned long *vcpu_bitmap); bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req); bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req, struct kvm_vcpu *except); diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 9cb0fd1723e6..18d245fe2118 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -237,15 +237,8 @@ static void ack_flush(void *_completed) { } -static inline bool kvm_kick_many_cpus(cpumask_var_t tmp, bool wait) +static inline bool kvm_kick_many_cpus(struct cpumask *cpus, bool wait) { - const struct cpumask *cpus; - - if (likely(cpumask_available(tmp))) - cpus = tmp; - else - cpus = cpu_online_mask; - if (cpumask_empty(cpus)) return false; @@ -254,7 +247,7 @@ static inline bool kvm_kick_many_cpus(cpumask_var_t tmp, bool wait) } static void kvm_make_vcpu_request(struct kvm *kvm, struct kvm_vcpu *vcpu, - unsigned int req, cpumask_var_t tmp, + unsigned int req, struct cpumask *tmp, int current_cpu) { int cpu; @@ -264,14 +257,6 @@ static void kvm_make_vcpu_request(struct kvm *kvm, struct kvm_vcpu *vcpu, if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu)) return; - /* - * tmp can be "unavailable" if cpumasks are allocated off stack as - * allocation of the mask is deliberately not fatal and is handled by - * falling back to kicking all online CPUs. 
- */ - if (!cpumask_available(tmp)) - return; - /* * Note, the vCPU could get migrated to a different pCPU at any point * after kvm_request_needs_ipi(), which could result in sending an IPI @@ -290,22 +275,26 @@ static void kvm_make_vcpu_request(struct kvm *kvm, struct kvm_vcpu *vcpu, } bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req, - unsigned long *vcpu_bitmap, cpumask_var_t tmp) + unsigned long *vcpu_bitmap) { struct kvm_vcpu *vcpu; + struct cpumask *cpus; int i, me; bool called; me = get_cpu(); + cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask); + cpumask_clear(cpus); + for_each_set_bit(i, vcpu_bitmap, KVM_MAX_VCPUS) { vcpu = kvm_get_vcpu(kvm, i); if (!vcpu) continue; - kvm_make_vcpu_request(kvm, vcpu, req, tmp, me); + kvm_make_vcpu_request(kvm, vcpu, req, cpus, me); } - called = kvm_kick_many_cpus(tmp, !!(req & KVM_REQUEST_WAIT)); + called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT)); put_cpu(); return called; -- cgit v1.2.3 From 1e254d0d86a0f2efd4190a89d5204b37c18c6381 Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Mon, 13 Sep 2021 15:57:43 +0200 Subject: Revert "x86/kvm: fix vcpu-id indexed array sizes" This reverts commit 76b4f357d0e7d8f6f0013c733e6cba1773c266d3. The commit has the wrong reasoning, as KVM_MAX_VCPU_ID is not defining the maximum allowed vcpu-id as its name suggests, but the number of vcpu-ids. So revert this patch again. Suggested-by: Eduardo Habkost Signed-off-by: Juergen Gross Signed-off-by: Paolo Bonzini Message-Id: <20210913135745.13944-2-jgross@suse.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/ioapic.c | 2 +- arch/x86/kvm/ioapic.h | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c index 8c065da73f8e..4e0f52660842 100644 --- a/arch/x86/kvm/ioapic.c +++ b/arch/x86/kvm/ioapic.c @@ -96,7 +96,7 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic, static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic) { ioapic->rtc_status.pending_eoi = 0; - bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_ID + 1); + bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_ID); } static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic); diff --git a/arch/x86/kvm/ioapic.h b/arch/x86/kvm/ioapic.h index bbd4a5d18b5d..27e61ff3ac3e 100644 --- a/arch/x86/kvm/ioapic.h +++ b/arch/x86/kvm/ioapic.h @@ -39,13 +39,13 @@ struct kvm_vcpu; struct dest_map { /* vcpu bitmap where IRQ has been sent */ - DECLARE_BITMAP(map, KVM_MAX_VCPU_ID + 1); + DECLARE_BITMAP(map, KVM_MAX_VCPU_ID); /* * Vector sent to a given vcpu, only valid when * the vcpu's bit in map is set */ - u8 vectors[KVM_MAX_VCPU_ID + 1]; + u8 vectors[KVM_MAX_VCPU_ID]; }; -- cgit v1.2.3 From a1c42ddedf35dbf5f25ea0982ed6e226eef7a78c Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Mon, 13 Sep 2021 15:57:44 +0200 Subject: kvm: rename KVM_MAX_VCPU_ID to KVM_MAX_VCPU_IDS KVM_MAX_VCPU_ID is not specifying the highest allowed vcpu-id, but the number of allowed vcpu-ids. 
This has already led to confusion, so rename KVM_MAX_VCPU_ID to KVM_MAX_VCPU_IDS to make its semantics more clear Suggested-by: Eduardo Habkost Signed-off-by: Juergen Gross Signed-off-by: Paolo Bonzini Message-Id: <20210913135745.13944-3-jgross@suse.com> Signed-off-by: Paolo Bonzini --- Documentation/virt/kvm/devices/xics.rst | 2 +- Documentation/virt/kvm/devices/xive.rst | 2 +- arch/mips/kvm/mips.c | 2 +- arch/powerpc/include/asm/kvm_book3s.h | 2 +- arch/powerpc/include/asm/kvm_host.h | 4 ++-- arch/powerpc/kvm/book3s_xive.c | 2 +- arch/powerpc/kvm/powerpc.c | 2 +- arch/x86/include/asm/kvm_host.h | 2 +- arch/x86/kvm/ioapic.c | 2 +- arch/x86/kvm/ioapic.h | 4 ++-- arch/x86/kvm/x86.c | 2 +- include/linux/kvm_host.h | 4 ++-- tools/testing/selftests/kvm/kvm_create_max_vcpus.c | 2 +- virt/kvm/kvm_main.c | 2 +- 14 files changed, 17 insertions(+), 17 deletions(-) diff --git a/Documentation/virt/kvm/devices/xics.rst b/Documentation/virt/kvm/devices/xics.rst index 2d6927e0b776..bf32c77174ab 100644 --- a/Documentation/virt/kvm/devices/xics.rst +++ b/Documentation/virt/kvm/devices/xics.rst @@ -22,7 +22,7 @@ Groups: Errors: ======= ========================================== - -EINVAL Value greater than KVM_MAX_VCPU_ID. + -EINVAL Value greater than KVM_MAX_VCPU_IDS. -EFAULT Invalid user pointer for attr->addr. -EBUSY A vcpu is already connected to the device. ======= ========================================== diff --git a/Documentation/virt/kvm/devices/xive.rst b/Documentation/virt/kvm/devices/xive.rst index 8bdf3dc38f01..8b5e7b40bdf8 100644 --- a/Documentation/virt/kvm/devices/xive.rst +++ b/Documentation/virt/kvm/devices/xive.rst @@ -91,7 +91,7 @@ the legacy interrupt mode, referred as XICS (POWER7/8). Errors: ======= ========================================== - -EINVAL Value greater than KVM_MAX_VCPU_ID. + -EINVAL Value greater than KVM_MAX_VCPU_IDS. -EFAULT Invalid user pointer for attr->addr. -EBUSY A vCPU is already connected to the device. ======= ========================================== diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index 75c6f264c626..562aa878b266 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -1073,7 +1073,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) r = KVM_MAX_VCPUS; break; case KVM_CAP_MAX_VCPU_ID: - r = KVM_MAX_VCPU_ID; + r = KVM_MAX_VCPU_IDS; break; case KVM_CAP_MIPS_FPU: /* We don't handle systems with inconsistent cpu_has_fpu */ diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index caaa0f592d8e..3d31f2c59e43 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h @@ -434,7 +434,7 @@ extern int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu); #define SPLIT_HACK_OFFS 0xfb000000 /* - * This packs a VCPU ID from the [0..KVM_MAX_VCPU_ID) space down to the + * This packs a VCPU ID from the [0..KVM_MAX_VCPU_IDS) space down to the * [0..KVM_MAX_VCPUS) space, using knowledge of the guest's core stride * (but not its actual threading mode, which is not available) to avoid * collisions. 
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 080a7feb7731..59cb38b04ede 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -33,11 +33,11 @@ #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE #include /* for MAX_SMT_THREADS */ -#define KVM_MAX_VCPU_ID (MAX_SMT_THREADS * KVM_MAX_VCORES) +#define KVM_MAX_VCPU_IDS (MAX_SMT_THREADS * KVM_MAX_VCORES) #define KVM_MAX_NESTED_GUESTS KVMPPC_NR_LPIDS #else -#define KVM_MAX_VCPU_ID KVM_MAX_VCPUS +#define KVM_MAX_VCPU_IDS KVM_MAX_VCPUS #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */ #define __KVM_HAVE_ARCH_INTC_INITIALIZED diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c index a18db9e16ea4..225008882958 100644 --- a/arch/powerpc/kvm/book3s_xive.c +++ b/arch/powerpc/kvm/book3s_xive.c @@ -1928,7 +1928,7 @@ int kvmppc_xive_set_nr_servers(struct kvmppc_xive *xive, u64 addr) pr_devel("%s nr_servers=%u\n", __func__, nr_servers); - if (!nr_servers || nr_servers > KVM_MAX_VCPU_ID) + if (!nr_servers || nr_servers > KVM_MAX_VCPU_IDS) return -EINVAL; mutex_lock(&xive->lock); diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index b4e6f70b97b9..8ab90ce8738f 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -649,7 +649,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) r = KVM_MAX_VCPUS; break; case KVM_CAP_MAX_VCPU_ID: - r = KVM_MAX_VCPU_ID; + r = KVM_MAX_VCPU_IDS; break; #ifdef CONFIG_PPC_BOOK3S_64 case KVM_CAP_PPC_GET_SMMU_INFO: diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 120ac07e4094..09c18e54e0a1 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -50,7 +50,7 @@ * so ratio of 4 should be enough. 
*/ #define KVM_VCPU_ID_RATIO 4 -#define KVM_MAX_VCPU_ID (KVM_MAX_VCPUS * KVM_VCPU_ID_RATIO) +#define KVM_MAX_VCPU_IDS (KVM_MAX_VCPUS * KVM_VCPU_ID_RATIO) /* memory slots that are not exposed to userspace */ #define KVM_PRIVATE_MEM_SLOTS 3 diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c index 4e0f52660842..816a82515dcd 100644 --- a/arch/x86/kvm/ioapic.c +++ b/arch/x86/kvm/ioapic.c @@ -96,7 +96,7 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic, static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic) { ioapic->rtc_status.pending_eoi = 0; - bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_ID); + bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_IDS); } static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic); diff --git a/arch/x86/kvm/ioapic.h b/arch/x86/kvm/ioapic.h index 27e61ff3ac3e..e66e620c3bed 100644 --- a/arch/x86/kvm/ioapic.h +++ b/arch/x86/kvm/ioapic.h @@ -39,13 +39,13 @@ struct kvm_vcpu; struct dest_map { /* vcpu bitmap where IRQ has been sent */ - DECLARE_BITMAP(map, KVM_MAX_VCPU_ID); + DECLARE_BITMAP(map, KVM_MAX_VCPU_IDS); /* * Vector sent to a given vcpu, only valid when * the vcpu's bit in map is set */ - u8 vectors[KVM_MAX_VCPU_ID]; + u8 vectors[KVM_MAX_VCPU_IDS]; }; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 03568cbbe8bd..6ad2f55c78a5 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -4077,7 +4077,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) r = KVM_MAX_VCPUS; break; case KVM_CAP_MAX_VCPU_ID: - r = KVM_MAX_VCPU_ID; + r = KVM_MAX_VCPU_IDS; break; case KVM_CAP_PV_MMU: /* obsolete */ r = 0; diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index f1b96a2ebaa7..1f9e80ce4723 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -39,8 +39,8 @@ #include #include -#ifndef KVM_MAX_VCPU_ID -#define KVM_MAX_VCPU_ID KVM_MAX_VCPUS +#ifndef KVM_MAX_VCPU_IDS +#define KVM_MAX_VCPU_IDS KVM_MAX_VCPUS #endif /* diff --git a/tools/testing/selftests/kvm/kvm_create_max_vcpus.c b/tools/testing/selftests/kvm/kvm_create_max_vcpus.c index 0299cd81b8ba..f968dfd4ee88 100644 --- a/tools/testing/selftests/kvm/kvm_create_max_vcpus.c +++ b/tools/testing/selftests/kvm/kvm_create_max_vcpus.c @@ -53,7 +53,7 @@ int main(int argc, char *argv[]) kvm_max_vcpu_id = kvm_max_vcpus; TEST_ASSERT(kvm_max_vcpu_id >= kvm_max_vcpus, - "KVM_MAX_VCPU_ID (%d) must be at least as large as KVM_MAX_VCPUS (%d).", + "KVM_MAX_VCPU_IDS (%d) must be at least as large as KVM_MAX_VCPUS (%d).", kvm_max_vcpu_id, kvm_max_vcpus); test_vcpu_creation(0, kvm_max_vcpus); diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 18d245fe2118..3f6d450355f0 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -3595,7 +3595,7 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id) struct kvm_vcpu *vcpu; struct page *page; - if (id >= KVM_MAX_VCPU_ID) + if (id >= KVM_MAX_VCPU_IDS) return -EINVAL; mutex_lock(&kvm->lock); -- cgit v1.2.3 From 15cabbc259f254e7e3fe0243dfd9dfc9aeb85c06 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 31 Aug 2021 09:42:23 -0700 Subject: KVM: x86: Subsume nested GPA read helper into load_pdptrs() Open code the call to mmu->translate_gpa() when loading nested PDPTRs and kill off the existing helper, kvm_read_guest_page_mmu(), to discourage incorrect use. 
Reading guest memory straight from an L2 GPA is extremely rare (as evidenced by the lack of users), as very few constructs in x86 specify physical addresses, even fewer are virtualized by KVM, and even fewer yet require emulation of L2 by L0 KVM. No functional change intended. Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini Message-Id: <20210831164224.1119728-3-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 3 --- arch/x86/kvm/x86.c | 56 +++++++++++++---------------------------- 2 files changed, 18 insertions(+), 41 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 09c18e54e0a1..0cb35ef26ab3 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1714,9 +1714,6 @@ void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code) void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault); bool kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault); -int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, - gfn_t gfn, void *data, int offset, int len, - u32 access); bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl); bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 6ad2f55c78a5..bc8572e29b26 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -790,30 +790,6 @@ bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr) } EXPORT_SYMBOL_GPL(kvm_require_dr); -/* - * This function will be used to read from the physical memory of the currently - * running guest. The difference to kvm_vcpu_read_guest_page is that this function - * can read from guest physical or from the guest's guest physical memory. - */ -int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, - gfn_t ngfn, void *data, int offset, int len, - u32 access) -{ - struct x86_exception exception; - gfn_t real_gfn; - gpa_t ngpa; - - ngpa = gfn_to_gpa(ngfn); - real_gfn = mmu->translate_gpa(vcpu, ngpa, access, &exception); - if (real_gfn == UNMAPPED_GVA) - return -EFAULT; - - real_gfn = gpa_to_gfn(real_gfn); - - return kvm_vcpu_read_guest_page(vcpu, real_gfn, data, offset, len); -} -EXPORT_SYMBOL_GPL(kvm_read_guest_page_mmu); - static inline u64 pdptr_rsvd_bits(struct kvm_vcpu *vcpu) { return vcpu->arch.reserved_gpa_bits | rsvd_bits(5, 8) | rsvd_bits(1, 2); @@ -825,34 +801,38 @@ static inline u64 pdptr_rsvd_bits(struct kvm_vcpu *vcpu) int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3) { gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT; - unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2; + unsigned offset = (((cr3 & (PAGE_SIZE-1)) >> 5) << 2) * sizeof(u64); + gpa_t real_gpa; int i; int ret; u64 pdpte[ARRAY_SIZE(mmu->pdptrs)]; - ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte, - offset * sizeof(u64), sizeof(pdpte), - PFERR_USER_MASK|PFERR_WRITE_MASK); - if (ret < 0) { - ret = 0; - goto out; - } + /* + * If the MMU is nested, CR3 holds an L2 GPA and needs to be translated + * to an L1 GPA. 
+ */ + real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(pdpt_gfn), + PFERR_USER_MASK | PFERR_WRITE_MASK, NULL); + if (real_gpa == UNMAPPED_GVA) + return 0; + + ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(real_gpa), pdpte, + offset, sizeof(pdpte)); + if (ret < 0) + return 0; + for (i = 0; i < ARRAY_SIZE(pdpte); ++i) { if ((pdpte[i] & PT_PRESENT_MASK) && (pdpte[i] & pdptr_rsvd_bits(vcpu))) { - ret = 0; - goto out; + return 0; } } - ret = 1; memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs)); kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR); vcpu->arch.pdptrs_from_userspace = false; -out: - - return ret; + return 1; } EXPORT_SYMBOL_GPL(load_pdptrs); -- cgit v1.2.3 From 94c641ba7a897f19f1b6e5d932c5629b93d8096c Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 31 Aug 2021 09:42:24 -0700 Subject: KVM: x86: Simplify retrieving the page offset when loading PDTPRs Replace impressively complex "logic" for computing the page offset from CR3 when loading PDPTRs. Unlike other paging modes, the address held in CR3 for PAE paging is 32-byte aligned, i.e. occupies bits 31:5, thus bits 11:5 need to be used as the offset from the gfn when reading PDPTRs. The existing calculation originated in commit 1342d3536d6a ("[PATCH] KVM: MMU: Load the pae pdptrs on cr3 change like the processor does"), which read the PDPTRs from guest memory as individual 8-byte loads. At the time, the so called "offset" was the base index of PDPTR0 as a _u64_, not a byte offset. Naming aside, the computation was useful and arguably simplified the overall flow. Unfortunately, when commit 195aefde9cc2 ("KVM: Add general accessors to read and write guest memory") added accessors with offsets at byte granularity, the cleverness of the original code was lost and KVM was left with convoluted code for a simple operation. No functional change intended. Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini Message-Id: <20210831164224.1119728-4-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index bc8572e29b26..c25913294f61 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -801,7 +801,6 @@ static inline u64 pdptr_rsvd_bits(struct kvm_vcpu *vcpu) int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3) { gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT; - unsigned offset = (((cr3 & (PAGE_SIZE-1)) >> 5) << 2) * sizeof(u64); gpa_t real_gpa; int i; int ret; @@ -816,8 +815,9 @@ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3) if (real_gpa == UNMAPPED_GVA) return 0; + /* Note the offset, PDPTRs are 32 byte aligned when using PAE paging. */ ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(real_gpa), pdpte, - offset, sizeof(pdpte)); + cr3 & GENMASK(11, 5), sizeof(pdpte)); if (ret < 0) return 0; -- cgit v1.2.3 From ff8828c84f9376a8c3040da16a88e2c39aa3a527 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Mon, 20 Sep 2021 17:02:56 -0700 Subject: KVM: x86: Do not mark all registers as avail/dirty during RESET/INIT Do not blindly mark all registers as available+dirty at RESET/INIT, and instead rely on writes to registers to go through the proper mutators or to explicitly mark registers as dirty. INIT in particular does not blindly overwrite all registers, e.g. select bits in CR0 are preserved across INIT, thus marking registers available+dirty without first reading the register from hardware is incorrect. 
In practice this is a benign bug as KVM doesn't let the guest control CR0 bits that are preserved across INIT, and all other true registers are explicitly written during the RESET/INIT flows. The PDPTRs and EX_INFO "registers" are not explicitly written, but accessing those values during RESET/INIT is nonsensical and would be a KVM bug regardless of register caching. Fixes: 66f7b72e1171 ("KVM: x86: Make register state after reset conform to specification") [sean: !!! NOT FOR STABLE !!!] Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini Message-Id: <20210921000303.400537-4-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/vmx.c | 1 + arch/x86/kvm/x86.c | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 6323ed6a0746..7c496fd0bb96 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -4449,6 +4449,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) kvm_set_cr8(vcpu, 0); vmx_segment_cache_clear(vmx); + kvm_register_mark_available(vcpu, VCPU_EXREG_SEGMENTS); seg_setup(VCPU_SREG_CS); vmcs_write16(GUEST_CS_SELECTOR, 0xf000); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index c25913294f61..629442e3847b 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -10859,9 +10859,9 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) vcpu->arch.xcr0 = XFEATURE_MASK_FP; } + /* All GPRs except RDX (handled below) are zeroed on RESET/INIT. */ memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs)); - vcpu->arch.regs_avail = ~0; - vcpu->arch.regs_dirty = ~0; + kvm_register_mark_dirty(vcpu, VCPU_REGS_RSP); /* * Fall back to KVM's default Family/Model/Stepping of 0x600 (P6/Athlon) -- cgit v1.2.3 From 5ebbc470d7f33e8fba6bdb2ac8e62c49cec99108 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Mon, 20 Sep 2021 17:02:57 -0700 Subject: KVM: x86: Remove defunct setting of CR0.ET for guests during vCPU create Drop code to set CR0.ET for the guest during initialization of the guest FPU. The code was added as a misguided bug fix by commit 380102c8e431 ("KVM Set the ET flag in CR0 after initializing FX") to resolve an issue where vcpu->cr0 (now vcpu->arch.cr0) was not correctly initialized on SVM systems. While init_vmcb() did set CR0.ET, it only did so in the VMCB, and subtly did not update vcpu->cr0. Stuffing CR0.ET worked around the immediate problem, but did not fix the real bug of vcpu->cr0 and the VMCB being out of sync. That underlying bug was eventually remedied by commit 18fa000ae453 ("KVM: SVM: Reset cr0 properly on vcpu reset"). No functional change intended. Signed-off-by: Sean Christopherson Reviewed-by: Vitaly Kuznetsov Signed-off-by: Paolo Bonzini Message-Id: <20210921000303.400537-5-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 629442e3847b..69d656125103 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -10611,8 +10611,6 @@ static void fx_init(struct kvm_vcpu *vcpu) * Ensure guest xcr0 is valid for loading */ vcpu->arch.xcr0 = XFEATURE_MASK_FP; - - vcpu->arch.cr0 |= X86_CR0_ET; } void kvm_free_guest_fpu(struct kvm_vcpu *vcpu) -- cgit v1.2.3 From e8f65b9bb4832028cdbd5927ddb67f66c6ccdd27 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Mon, 20 Sep 2021 17:02:58 -0700 Subject: KVM: x86: Remove defunct setting of XCR0 for guest during vCPU create Drop code to initialize XCR0 during fx_init(), a.k.a. 
vCPU creation, as XCR0 has been initialized during kvm_vcpu_reset() (for RESET) since commit a554d207dc46 ("KVM: X86: Processor States following Reset or INIT"). Back when XCR0 support was added by commit 2acf923e38fb ("KVM: VMX: Enable XSAVE/XRSTOR for guest"), KVM didn't differentiate between RESET and INIT. Ignoring the fact that calling fx_init() for INIT is obviously wrong, e.g. FPU state after INIT is not the same as after RESET, setting XCR0 in fx_init() was correct. Eventually fx_init() got moved to kvm_arch_vcpu_init(), a.k.a. vCPU creation (ignore the terrible name) by commit 0ee6a5172573 ("x86/fpu, kvm: Simplify fx_init()"). Finally, commit 95a0d01eef7a ("KVM: x86: Move all vcpu init code into kvm_arch_vcpu_create()") killed off kvm_arch_vcpu_init(), leaving behind the oddity of redundant setting of guest state during vCPU creation. No functional change intended. Signed-off-by: Sean Christopherson Reviewed-by: Vitaly Kuznetsov Signed-off-by: Paolo Bonzini Message-Id: <20210921000303.400537-6-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 69d656125103..da0d48d10769 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -973,7 +973,7 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) /* * Do not allow the guest to set bits that we do not support * saving. However, xcr0 bit 0 is always set, even if the - * emulated CPU does not support XSAVE (see fx_init). + * emulated CPU does not support XSAVE (see kvm_vcpu_reset()). */ valid_bits = vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FP; if (xcr0 & ~valid_bits) @@ -10606,11 +10606,6 @@ static void fx_init(struct kvm_vcpu *vcpu) if (boot_cpu_has(X86_FEATURE_XSAVES)) vcpu->arch.guest_fpu->state.xsave.header.xcomp_bv = host_xcr0 | XSTATE_COMPACTION_ENABLED; - - /* - * Ensure guest xcr0 is valid for loading - */ - vcpu->arch.xcr0 = XFEATURE_MASK_FP; } void kvm_free_guest_fpu(struct kvm_vcpu *vcpu) -- cgit v1.2.3 From 583d369b36a90753d8b169f041b39078ac4e1633 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Mon, 20 Sep 2021 17:02:59 -0700 Subject: KVM: x86: Fold fx_init() into kvm_arch_vcpu_create() Move the few bits of relevant fx_init() code into kvm_arch_vcpu_create(), dropping the superfluous check on vcpu->arch.guest_fpu that was blindly and wrongly added by commit ed02b213098a ("KVM: SVM: Guest FPU state save/restore not needed for SEV-ES guest"). Note, KVM currently allocates and then frees FPU state for SEV-ES guests, rather than avoid the allocation in the first place. While that approach is inarguably inefficient and unnecessary, it's a cleanup for the future. No functional change intended. 
Signed-off-by: Sean Christopherson Reviewed-by: Vitaly Kuznetsov Signed-off-by: Paolo Bonzini Message-Id: <20210921000303.400537-7-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index da0d48d10769..1ab0caa97f30 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -10597,17 +10597,6 @@ static int sync_regs(struct kvm_vcpu *vcpu) return 0; } -static void fx_init(struct kvm_vcpu *vcpu) -{ - if (!vcpu->arch.guest_fpu) - return; - - fpstate_init(&vcpu->arch.guest_fpu->state); - if (boot_cpu_has(X86_FEATURE_XSAVES)) - vcpu->arch.guest_fpu->state.xsave.header.xcomp_bv = - host_xcr0 | XSTATE_COMPACTION_ENABLED; -} - void kvm_free_guest_fpu(struct kvm_vcpu *vcpu) { if (vcpu->arch.guest_fpu) { @@ -10686,7 +10675,10 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) pr_err("kvm: failed to allocate vcpu's fpu\n"); goto free_user_fpu; } - fx_init(vcpu); + fpstate_init(&vcpu->arch.guest_fpu->state); + if (boot_cpu_has(X86_FEATURE_XSAVES)) + vcpu->arch.guest_fpu->state.xsave.header.xcomp_bv = + host_xcr0 | XSTATE_COMPACTION_ENABLED; vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu); vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu); -- cgit v1.2.3 From d06567353e129b460978353cbe2210c23467d6f8 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Mon, 20 Sep 2021 17:03:00 -0700 Subject: KVM: VMX: Drop explicit zeroing of MSR guest values at vCPU creation Don't zero out user return and nested MSRs during vCPU creation, and instead rely on vcpu_vmx being zero-allocated. Explicitly zeroing MSRs is not wrong, and is in fact necessary if KVM ever emulates vCPU RESET outside of vCPU creation, but zeroing only a subset of MSRs is confusing. Poking directly into KVM's backing is also undesirable in that it doesn't scale and is error prone. Ideally KVM would have a common RESET path for all MSRs, e.g. by expanding kvm_set_msr(), which would obviate the need for this out-of-band code (to support standalone RESET). No functional change intended. Signed-off-by: Sean Christopherson Reviewed-by: Vitaly Kuznetsov Signed-off-by: Paolo Bonzini Message-Id: <20210921000303.400537-8-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/vmx.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 7c496fd0bb96..65be36166989 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -6837,10 +6837,8 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu) goto free_vpid; } - for (i = 0; i < kvm_nr_uret_msrs; ++i) { - vmx->guest_uret_msrs[i].data = 0; + for (i = 0; i < kvm_nr_uret_msrs; ++i) vmx->guest_uret_msrs[i].mask = -1ull; - } if (boot_cpu_has(X86_FEATURE_RTM)) { /* * TSX_CTRL_CPUID_CLEAR is handled in the CPUID interception. @@ -6897,8 +6895,6 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu) if (nested) memcpy(&vmx->nested.msrs, &vmcs_config.nested, sizeof(vmx->nested.msrs)); - else - memset(&vmx->nested.msrs, 0, sizeof(vmx->nested.msrs)); vcpu_setup_sgx_lepubkeyhash(vcpu); -- cgit v1.2.3 From 06692e4b8055cc0c6b136fa7df77221ae9639e97 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Mon, 20 Sep 2021 17:03:01 -0700 Subject: KVM: VMX: Move RESET emulation to vmx_vcpu_reset() Move vCPU RESET emulation, including initialization of select VMCS state, to vmx_vcpu_reset().
Drop the open coded "vCPU load" sequence, as ->vcpu_reset() is invoked while the vCPU is properly loaded (which is kind of the point of ->vcpu_reset()...). Hopefully KVM will someday expose a dedicated RESET ioctl(), and in the meantime separating "create" from "RESET" is a nice cleanup. Deferring VMCS initialization is effectively a nop as it's impossible to safely access the VMCS between the current call site and its new home, as both the vCPU and the pCPU are put immediately after init_vmcs(), i.e. the VMCS isn't guaranteed to be loaded. Note, task preemption is not a problem as vmx_sched_in() _can't_ touch the VMCS as ->sched_in() is invoked before the vCPU, and thus VMCS, is reloaded. I.e. the preemption path also can't consume VMCS state. Cc: Reiji Watanabe Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini Message-Id: <20210921000303.400537-9-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/vmx.c | 63 +++++++++++++++++++++++++------------------------- 1 file changed, 32 insertions(+), 31 deletions(-) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 65be36166989..b8bfb46d58f4 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -4328,10 +4328,6 @@ static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx) #define VMX_XSS_EXIT_BITMAP 0 -/* - * Noting that the initialization of Guest-state Area of VMCS is in - * vmx_vcpu_reset(). - */ static void init_vmcs(struct vcpu_vmx *vmx) { if (nested) @@ -4436,10 +4432,40 @@ static void init_vmcs(struct vcpu_vmx *vmx) vmx_setup_uret_msrs(vmx); } +static void __vmx_vcpu_reset(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + + init_vmcs(vmx); + + if (nested) + memcpy(&vmx->nested.msrs, &vmcs_config.nested, sizeof(vmx->nested.msrs)); + + vcpu_setup_sgx_lepubkeyhash(vcpu); + + vmx->nested.posted_intr_nv = -1; + vmx->nested.vmxon_ptr = INVALID_GPA; + vmx->nested.current_vmptr = INVALID_GPA; + vmx->nested.hv_evmcs_vmptr = EVMPTR_INVALID; + + vcpu->arch.microcode_version = 0x100000000ULL; + vmx->msr_ia32_feature_control_valid_bits = FEAT_CTL_LOCKED; + + /* + * Enforce invariant: pi_desc.nv is always either POSTED_INTR_VECTOR + * or POSTED_INTR_WAKEUP_VECTOR. 
+ */ + vmx->pi_desc.nv = POSTED_INTR_VECTOR; + vmx->pi_desc.sn = 1; +} + static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) { struct vcpu_vmx *vmx = to_vmx(vcpu); + if (!init_event) + __vmx_vcpu_reset(vcpu); + vmx->rmode.vm86_active = 0; vmx->spec_ctrl = 0; @@ -6816,7 +6842,7 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu) { struct vmx_uret_msr *tsx_ctrl; struct vcpu_vmx *vmx; - int i, cpu, err; + int i, err; BUILD_BUG_ON(offsetof(struct vcpu_vmx, vcpu) != 0); vmx = to_vmx(vcpu); @@ -6875,12 +6901,7 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu) } vmx->loaded_vmcs = &vmx->vmcs01; - cpu = get_cpu(); - vmx_vcpu_load(vcpu, cpu); - vcpu->cpu = cpu; - init_vmcs(vmx); - vmx_vcpu_put(vcpu); - put_cpu(); + if (cpu_need_virtualize_apic_accesses(vcpu)) { err = alloc_apic_access_page(vcpu->kvm); if (err) @@ -6893,26 +6914,6 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu) goto free_vmcs; } - if (nested) - memcpy(&vmx->nested.msrs, &vmcs_config.nested, sizeof(vmx->nested.msrs)); - - vcpu_setup_sgx_lepubkeyhash(vcpu); - - vmx->nested.posted_intr_nv = -1; - vmx->nested.vmxon_ptr = INVALID_GPA; - vmx->nested.current_vmptr = INVALID_GPA; - vmx->nested.hv_evmcs_vmptr = EVMPTR_INVALID; - - vcpu->arch.microcode_version = 0x100000000ULL; - vmx->msr_ia32_feature_control_valid_bits = FEAT_CTL_LOCKED; - - /* - * Enforce invariant: pi_desc.nv is always either POSTED_INTR_VECTOR - * or POSTED_INTR_WAKEUP_VECTOR. - */ - vmx->pi_desc.nv = POSTED_INTR_VECTOR; - vmx->pi_desc.sn = 1; - return 0; free_vmcs: -- cgit v1.2.3 From 9ebe530b9f5da89f9628923348db767e5d497e7b Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Mon, 20 Sep 2021 17:03:02 -0700 Subject: KVM: SVM: Move RESET emulation to svm_vcpu_reset() Move RESET emulation for SVM vCPUs to svm_vcpu_reset(), and drop an extra init_vmcb() from svm_create_vcpu() in the process. Hopefully KVM will someday expose a dedicated RESET ioctl(), and in the meantime separating "create" from "RESET" is a nice cleanup. Keep the call to svm_switch_vmcb() so that misuse of svm->vmcb at worst breaks the guest, e.g. premature accesses doesn't cause a NULL pointer dereference. Cc: Reiji Watanabe Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini Message-Id: <20210921000303.400537-10-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/sev.c | 6 +++--- arch/x86/kvm/svm/svm.c | 29 +++++++++++++++++------------ arch/x86/kvm/svm/svm.h | 2 +- 3 files changed, 21 insertions(+), 16 deletions(-) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index c36b5fe4c27c..1e8b26b93b4f 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -2631,11 +2631,11 @@ void sev_es_init_vmcb(struct vcpu_svm *svm) set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1); } -void sev_es_create_vcpu(struct vcpu_svm *svm) +void sev_es_vcpu_reset(struct vcpu_svm *svm) { /* - * Set the GHCB MSR value as per the GHCB specification when creating - * a vCPU for an SEV-ES guest. + * Set the GHCB MSR value as per the GHCB specification when emulating + * vCPU RESET for an SEV-ES guest. 
*/ set_ghcb_msr(svm, GHCB_MSR_SEV_INFO(GHCB_VERSION_MAX, GHCB_VERSION_MIN, diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 989685098b3e..c770dce55ba8 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -1303,6 +1303,19 @@ static void init_vmcb(struct kvm_vcpu *vcpu) } +static void __svm_vcpu_reset(struct kvm_vcpu *vcpu) +{ + struct vcpu_svm *svm = to_svm(vcpu); + + svm_vcpu_init_msrpm(vcpu, svm->msrpm); + + svm_init_osvw(vcpu); + vcpu->arch.microcode_version = 0x01000065; + + if (sev_es_guest(vcpu->kvm)) + sev_es_vcpu_reset(svm); +} + static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) { struct vcpu_svm *svm = to_svm(vcpu); @@ -1311,6 +1324,9 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) svm->virt_spec_ctrl = 0; init_vmcb(vcpu); + + if (!init_event) + __svm_vcpu_reset(vcpu); } void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb) @@ -1370,24 +1386,13 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu) svm->vmcb01.ptr = page_address(vmcb01_page); svm->vmcb01.pa = __sme_set(page_to_pfn(vmcb01_page) << PAGE_SHIFT); + svm_switch_vmcb(svm, &svm->vmcb01); if (vmsa_page) svm->vmsa = page_address(vmsa_page); svm->guest_state_loaded = false; - svm_switch_vmcb(svm, &svm->vmcb01); - init_vmcb(vcpu); - - svm_vcpu_init_msrpm(vcpu, svm->msrpm); - - svm_init_osvw(vcpu); - vcpu->arch.microcode_version = 0x01000065; - - if (sev_es_guest(vcpu->kvm)) - /* Perform SEV-ES specific VMCB creation updates */ - sev_es_create_vcpu(svm); - return 0; error_free_vmsa_page: diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index 128a54b1fbf1..c19859e62dec 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -562,7 +562,7 @@ void sev_free_vcpu(struct kvm_vcpu *vcpu); int sev_handle_vmgexit(struct kvm_vcpu *vcpu); int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in); void sev_es_init_vmcb(struct vcpu_svm *svm); -void sev_es_create_vcpu(struct vcpu_svm *svm); +void sev_es_vcpu_reset(struct vcpu_svm *svm); void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector); void sev_es_prepare_guest_switch(struct vcpu_svm *svm, unsigned int cpu); void sev_es_unmap_ghcb(struct vcpu_svm *svm); -- cgit v1.2.3 From 62dd57dd67d74ff5bfdfc28260a35cc4a31babb3 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Mon, 20 Sep 2021 17:03:03 -0700 Subject: KVM: x86: WARN on non-zero CRs at RESET to detect improper initalization WARN if CR0, CR3, or CR4 are non-zero at RESET, which given the current KVM implementation, really means WARN if they're not zeroed at vCPU creation. VMX in particular has several ->set_*() flows that read other registers to handle side effects, and because those flows are common to RESET and INIT, KVM subtly relies on emulated/virtualized registers to be zeroed at vCPU creation in order to do the right thing at RESET. Use CRs as a sentinel because they are most likely to be written as side effects, and because KVM specifically needs CR0.PG and CR0.PE to be '0' to correctly reflect the state of the vCPU's MMU. CRs are also loaded and stored from/to the VMCS, and so adds some level of coverage to verify that KVM doesn't conflate zero-allocating the VMCS with properly initializing the VMCS with VMWRITEs. Note, '0' is somewhat arbitrary, vCPU creation can technically stuff any value for a register so long as it's coherent with respect to the current vCPU state. In practice, '0' works for all registers and is convenient. 
Suggested-by: Vitaly Kuznetsov Signed-off-by: Sean Christopherson Reviewed-by: Vitaly Kuznetsov Signed-off-by: Paolo Bonzini Message-Id: <20210921000303.400537-11-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 1ab0caa97f30..833300fc78e3 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -10783,6 +10783,16 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) unsigned long new_cr0; u32 eax, dummy; + /* + * Several of the "set" flows, e.g. ->set_cr0(), read other registers + * to handle side effects. RESET emulation hits those flows and relies + * on emulated/virtualized registers, including those that are loaded + * into hardware, to be zeroed at vCPU creation. Use CRs as a sentinel + * to detect improper or missing initialization. + */ + WARN_ON_ONCE(!init_event && + (old_cr0 || kvm_read_cr3(vcpu) || kvm_read_cr4(vcpu))); + kvm_lapic_reset(vcpu, init_event); vcpu->arch.hflags = 0; -- cgit v1.2.3 From 25b9784586a41f1fccc4d2cf7f210252b9df149c Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 29 Sep 2021 15:24:26 -0700 Subject: KVM: x86: Manually retrieve CPUID.0x1 when getting FMS for RESET/INIT Manually look for a CPUID.0x1 entry instead of bouncing through kvm_cpuid() when retrieving the Family-Model-Stepping information for vCPU RESET/INIT. This fixes a potential undefined behavior bug due to kvm_cpuid() using the uninitialized "dummy" param as the ECX _input_, a.k.a. the index. A more minimal fix would be to simply zero "dummy", but the extra work in kvm_cpuid() is wasteful, and KVM should be treating the FMS retrieval as an out-of-band access, e.g. same as how KVM computes guest.MAXPHYADDR. Both Intel's SDM and AMD's APM describe the RDX value at RESET/INIT as holding the CPU's FMS information, not as holding CPUID.0x1.EAX. KVM's usage of CPUID entries to get FMS is simply a pragmatic approach to avoid having yet another way for userspace to provide inconsistent data. No functional change intended. Signed-off-by: Sean Christopherson Reviewed-by: Jim Mattson Message-Id: <20210929222426.1855730-3-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 833300fc78e3..25bfc12c0d08 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -10779,9 +10779,9 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) { + struct kvm_cpuid_entry2 *cpuid_0x1; unsigned long old_cr0 = kvm_read_cr0(vcpu); unsigned long new_cr0; - u32 eax, dummy; /* * Several of the "set" flows, e.g. ->set_cr0(), read other registers @@ -10862,13 +10862,11 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) * Fall back to KVM's default Family/Model/Stepping of 0x600 (P6/Athlon) * if no CPUID match is found. Note, it's impossible to get a match at * RESET since KVM emulates RESET before exposing the vCPU to userspace, - * i.e. it'simpossible for kvm_cpuid() to find a valid entry on RESET. - * But, go through the motions in case that's ever remedied. + * i.e. it's impossible for kvm_find_cpuid_entry() to find a valid entry + * on RESET. But, go through the motions in case that's ever remedied. 
*/ - eax = 1; - if (!kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy, true)) - eax = 0x600; - kvm_rdx_write(vcpu, eax); + cpuid_0x1 = kvm_find_cpuid_entry(vcpu, 1, 0); + kvm_rdx_write(vcpu, cpuid_0x1 ? cpuid_0x1->eax : 0x600); vcpu->arch.ia32_xss = 0; -- cgit v1.2.3 From d22869aff4dc8fbbb694c6fe9a4b5db2d570a138 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 26 Aug 2021 13:07:52 +0100 Subject: kvm: selftests: Fix spelling mistake "missmatch" -> "mismatch" There is a spelling mistake in an error message. Fix it. Signed-off-by: Colin Ian King Message-Id: <20210826120752.12633-1-colin.king@canonical.com> Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/lib/sparsebit.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/kvm/lib/sparsebit.c b/tools/testing/selftests/kvm/lib/sparsebit.c index a0d0c83d83de..50e0cf41a7dd 100644 --- a/tools/testing/selftests/kvm/lib/sparsebit.c +++ b/tools/testing/selftests/kvm/lib/sparsebit.c @@ -1866,7 +1866,7 @@ void sparsebit_validate_internal(struct sparsebit *s) * of total bits set. */ if (s->num_set != total_bits_set) { - fprintf(stderr, "Number of bits set missmatch,\n" + fprintf(stderr, "Number of bits set mismatch,\n" " s->num_set: 0x%lx total_bits_set: 0x%lx", s->num_set, total_bits_set); -- cgit v1.2.3 From 22d7108ce47290d47e1ea83a28fbfc85e0ecf97e Mon Sep 17 00:00:00 2001 From: Thomas Huth Date: Thu, 26 Aug 2021 09:49:28 +0200 Subject: KVM: selftests: Fix kvm_vm_free() in cr4_cpuid_sync and vmx_tsc_adjust tests The kvm_vm_free() statement here is currently dead code, since the loop in front of it can only be left with the "goto done" that jumps right after the kvm_vm_free(). Fix it by swapping the locations of the "done" label and the kvm_vm_free(). Signed-off-by: Thomas Huth Message-Id: <20210826074928.240942-1-thuth@redhat.com> Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c | 3 +-- tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c b/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c index f40fd097cb35..6f6fd189dda3 100644 --- a/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c +++ b/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c @@ -109,8 +109,7 @@ int main(int argc, char *argv[]) } } - kvm_vm_free(vm); - done: + kvm_vm_free(vm); return 0; } diff --git a/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c b/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c index 7e33a350b053..e683d0ac3e45 100644 --- a/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c +++ b/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c @@ -161,7 +161,7 @@ int main(int argc, char *argv[]) } } - kvm_vm_free(vm); done: + kvm_vm_free(vm); return 0; } -- cgit v1.2.3 From c6cecc4b9324b97775d7002a13460c247f586e8e Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 18 Aug 2021 23:56:15 +0000 Subject: KVM: x86/mmu: Complete prefetch for trailing SPTEs for direct, legacy MMU Make a final call to direct_pte_prefetch_many() if there are "trailing" SPTEs to prefetch, i.e. SPTEs for GFNs following the faulting GFN. The call to direct_pte_prefetch_many() in the loop only handles the case where there are !PRESENT SPTEs preceding a PRESENT SPTE. E.g. 
if the faulting GFN is a multiple of 8 (the prefetch size) and all SPTEs for the following GFNs are !PRESENT, the loop will terminate with "start = sptep+1" and not prefetch any SPTEs. Prefetching trailing SPTEs as intended can drastically reduce the number of guest page faults, e.g. accessing the first byte of every 4kb page in a 6gb chunk of virtual memory, in a VM with 8gb of preallocated memory, the number of pf_fixed events observed in L0 drops from ~1.75M to <0.27M. Note, this only affects memory that is backed by 4kb pages as KVM doesn't prefetch when installing hugepages. Shadow paging prefetching is not affected as it does not batch the prefetches due to the need to process the corresponding guest PTE. The TDP MMU is not affected because it doesn't have prefetching, yet... Fixes: 957ed9effd80 ("KVM: MMU: prefetch ptes when intercepted guest #PF") Cc: Sergey Senozhatsky Cc: Ben Gardon Signed-off-by: Sean Christopherson Message-Id: <20210818235615.2047588-1-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 1a64ba5b9437..4238fe3e91c2 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -2842,11 +2842,13 @@ static void __direct_pte_prefetch(struct kvm_vcpu *vcpu, if (!start) continue; if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0) - break; + return; start = NULL; } else if (!start) start = spte; } + if (start) + direct_pte_prefetch_many(vcpu, sp, start, spte); } static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep) -- cgit v1.2.3 From bd047e54408910b5184e27fbfc880ad74c260c90 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Sat, 18 Sep 2021 08:56:30 +0800 Subject: KVM: X86: Don't flush current tlb on shadow page modification After any shadow page modification, flushing the tlb only on the current VCPU is weird because other VCPUs' tlbs might still be stale. In other words, if there is any mandatory tlb-flushing after shadow page modification, SET_SPTE_NEED_REMOTE_TLB_FLUSH or remote_flush should be set and the tlbs of all VCPUs should be flushed. There is no point in flushing only the current tlb except when the request comes from the vCPU's or pCPU's own activities. If there were any bug where mandatory tlb-flushing is required but SET_SPTE_NEED_REMOTE_TLB_FLUSH/remote_flush fails to be set, this patch would expose the bug in a more destructive way. The related code paths have been checked and no missing SET_SPTE_NEED_REMOTE_TLB_FLUSH has been found yet. Currently, there is no optional tlb-flushing left after the sync-page related code was changed to flush the tlb in a timely manner. So we can just remove this local flushing code.
Signed-off-by: Lai Jiangshan Signed-off-by: Paolo Bonzini Message-Id: <20210918005636.3675-5-jiangshanlai@gmail.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 5 ----- arch/x86/kvm/mmu/tdp_mmu.c | 1 - 2 files changed, 6 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 4238fe3e91c2..c031daa49638 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -1937,9 +1937,6 @@ static void kvm_mmu_flush_or_zap(struct kvm_vcpu *vcpu, { if (kvm_mmu_remote_flush_or_zap(vcpu->kvm, invalid_list, remote_flush)) return; - - if (local_flush) - kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); } #ifdef CONFIG_KVM_MMU_AUDIT @@ -2149,7 +2146,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, break; WARN_ON(!list_empty(&invalid_list)); - kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); } __clear_sp_write_flooding_count(sp); @@ -2757,7 +2753,6 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, if (set_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) { if (write_fault) ret = RET_PF_EMULATE; - kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); } if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH || flush) diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index 64ccfc1fa553..7a5a24ca50e4 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -959,7 +959,6 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, int write, if (make_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) { if (write) ret = RET_PF_EMULATE; - kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); } /* If a MMIO SPTE is installed, the MMIO will need to be emulated. */ -- cgit v1.2.3 From 06152b2dec3e264e9c8d2150d075d50faead8110 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Sat, 18 Sep 2021 08:56:31 +0800 Subject: KVM: X86: Remove kvm_mmu_flush_or_zap() Because local_flush is useless, kvm_mmu_flush_or_zap() can be removed and kvm_mmu_remote_flush_or_zap is used instead. 
Signed-off-by: Lai Jiangshan Signed-off-by: Paolo Bonzini Message-Id: <20210918005636.3675-6-jiangshanlai@gmail.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 26 ++++++-------------------- 1 file changed, 6 insertions(+), 20 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index c031daa49638..443b67d94784 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -1931,14 +1931,6 @@ static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm, return true; } -static void kvm_mmu_flush_or_zap(struct kvm_vcpu *vcpu, - struct list_head *invalid_list, - bool remote_flush, bool local_flush) -{ - if (kvm_mmu_remote_flush_or_zap(vcpu->kvm, invalid_list, remote_flush)) - return; -} - #ifdef CONFIG_KVM_MMU_AUDIT #include "mmu_audit.c" #else @@ -2032,7 +2024,6 @@ static int mmu_sync_children(struct kvm_vcpu *vcpu, struct mmu_page_path parents; struct kvm_mmu_pages pages; LIST_HEAD(invalid_list); - bool flush = false; while (mmu_unsync_walk(parent, &pages)) { bool protected = false; @@ -2042,27 +2033,25 @@ static int mmu_sync_children(struct kvm_vcpu *vcpu, if (protected) { kvm_flush_remote_tlbs(vcpu->kvm); - flush = false; } for_each_sp(pages, sp, parents, i) { kvm_unlink_unsync_page(vcpu->kvm, sp); - flush |= kvm_sync_page(vcpu, sp, &invalid_list); + kvm_sync_page(vcpu, sp, &invalid_list); mmu_pages_clear_parents(&parents); } if (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock)) { - kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush); + kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, false); if (!can_yield) { kvm_make_request(KVM_REQ_MMU_SYNC, vcpu); return -EINTR; } cond_resched_rwlock_write(&vcpu->kvm->mmu_lock); - flush = false; } } - kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush); + kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, false); return 0; } @@ -5209,7 +5198,7 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, LIST_HEAD(invalid_list); u64 entry, gentry, *spte; int npte; - bool remote_flush, local_flush; + bool flush = false; /* * If we don't have indirect shadow pages, it means no page is @@ -5218,8 +5207,6 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, if (!READ_ONCE(vcpu->kvm->arch.indirect_shadow_pages)) return; - remote_flush = local_flush = false; - pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes); /* @@ -5248,18 +5235,17 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, if (!spte) continue; - local_flush = true; while (npte--) { entry = *spte; mmu_page_zap_pte(vcpu->kvm, sp, spte, NULL); if (gentry && sp->role.level != PG_LEVEL_4K) ++vcpu->kvm->stat.mmu_pde_zapped; if (need_remote_flush(entry, *spte)) - remote_flush = true; + flush = true; ++spte; } } - kvm_mmu_flush_or_zap(vcpu, &invalid_list, remote_flush, local_flush); + kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, flush); kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE); write_unlock(&vcpu->kvm->mmu_lock); } -- cgit v1.2.3 From c3e5e415bc1e6c36315edf616c3329816e9f7393 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Sat, 18 Sep 2021 08:56:32 +0800 Subject: KVM: X86: Change kvm_sync_page() to return true when remote flush is needed Currently kvm_sync_page() returns true when there is any present spte. But the return value is ignored in the callers. Changing kvm_sync_page() to return true when remote flush is needed and changing mmu->sync_page() not to directly flush can combine and reduce remote flush requests. 
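To make the "combine and reduce" idea concrete, the following is a minimal userspace sketch, not KVM code; the page states and the flush counter are invented for illustration. Each sync reports whether a remote flush is needed, the caller ORs the results together, and an entire batch of synced pages then costs at most one remote flush.

#include <stdbool.h>
#include <stdio.h>

static int remote_flushes;

/* Mimics the new convention: <0 zap the page, 0 no flush, >0 flush needed. */
static int sync_one_page(int state)
{
	return state;
}

int main(void)
{
	int pages[] = { 1, 0, 1, 1, 0 };	/* three pages want a TLB flush */
	bool flush = false;

	for (unsigned int i = 0; i < sizeof(pages) / sizeof(pages[0]); i++) {
		int ret = sync_one_page(pages[i]);

		if (ret < 0)
			continue;		/* page would be zapped instead */
		flush |= ret > 0;
	}

	if (flush)
		remote_flushes++;		/* one flush for the whole batch */

	printf("remote flushes issued: %d\n", remote_flushes);
	return 0;
}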
Signed-off-by: Lai Jiangshan Signed-off-by: Paolo Bonzini Message-Id: <20210918005636.3675-7-jiangshanlai@gmail.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 21 +++++++++++++-------- arch/x86/kvm/mmu/paging_tmpl.h | 21 ++++++++++----------- 2 files changed, 23 insertions(+), 19 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 443b67d94784..34de5a8ea7dc 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -1795,7 +1795,7 @@ static void mark_unsync(u64 *spte) static int nonpaging_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) { - return 0; + return -1; } #define KVM_PAGE_ARRAY_NR 16 @@ -1909,12 +1909,14 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm, static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, struct list_head *invalid_list) { - if (vcpu->arch.mmu->sync_page(vcpu, sp) == 0) { + int ret = vcpu->arch.mmu->sync_page(vcpu, sp); + + if (ret < 0) { kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list); return false; } - return true; + return !!ret; } static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm, @@ -2024,6 +2026,7 @@ static int mmu_sync_children(struct kvm_vcpu *vcpu, struct mmu_page_path parents; struct kvm_mmu_pages pages; LIST_HEAD(invalid_list); + bool flush = false; while (mmu_unsync_walk(parent, &pages)) { bool protected = false; @@ -2033,25 +2036,27 @@ static int mmu_sync_children(struct kvm_vcpu *vcpu, if (protected) { kvm_flush_remote_tlbs(vcpu->kvm); + flush = false; } for_each_sp(pages, sp, parents, i) { kvm_unlink_unsync_page(vcpu->kvm, sp); - kvm_sync_page(vcpu, sp, &invalid_list); + flush |= kvm_sync_page(vcpu, sp, &invalid_list); mmu_pages_clear_parents(&parents); } if (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock)) { - kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, false); + kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, flush); if (!can_yield) { kvm_make_request(KVM_REQ_MMU_SYNC, vcpu); return -EINTR; } cond_resched_rwlock_write(&vcpu->kvm->mmu_lock); + flush = false; } } - kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, false); + kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, flush); return 0; } @@ -2135,6 +2140,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, break; WARN_ON(!list_empty(&invalid_list)); + kvm_flush_remote_tlbs(vcpu->kvm); } __clear_sp_write_flooding_count(sp); @@ -4191,7 +4197,7 @@ static unsigned long get_cr3(struct kvm_vcpu *vcpu) } static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn, - unsigned int access, int *nr_present) + unsigned int access) { if (unlikely(is_mmio_spte(*sptep))) { if (gfn != get_mmio_spte_gfn(*sptep)) { @@ -4199,7 +4205,6 @@ static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn, return true; } - (*nr_present)++; mark_mmio_spte(vcpu, sptep, gfn, access); return true; } diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index 913d52a7923e..aca40993096e 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -1066,11 +1066,16 @@ static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gpa_t vaddr, * Using the cached information from sp->gfns is safe because: * - The spte has a reference to the struct page, so the pfn for a given gfn * can't change unless all sptes pointing to it are nuked first. 
+ * + * Returns + * < 0: the sp should be zapped + * 0: the sp is synced and no tlb flushing is required + * > 0: the sp is synced and tlb flushing is required */ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) { union kvm_mmu_page_role mmu_role = vcpu->arch.mmu->mmu_role.base; - int i, nr_present = 0; + int i; bool host_writable; gpa_t first_pte_gpa; int set_spte_ret = 0; @@ -1098,7 +1103,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) */ if (WARN_ON_ONCE(sp->role.direct || (sp->role.word ^ mmu_role.word) & ~sync_role_ign.word)) - return 0; + return -1; first_pte_gpa = FNAME(get_level1_sp_gpa)(sp); @@ -1115,7 +1120,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte, sizeof(pt_element_t))) - return 0; + return -1; if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) { set_spte_ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH; @@ -1127,8 +1132,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) pte_access &= FNAME(gpte_access)(gpte); FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte); - if (sync_mmio_spte(vcpu, &sp->spt[i], gfn, pte_access, - &nr_present)) + if (sync_mmio_spte(vcpu, &sp->spt[i], gfn, pte_access)) continue; if (gfn != sp->gfns[i]) { @@ -1137,8 +1141,6 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) continue; } - nr_present++; - host_writable = sp->spt[i] & shadow_host_writable_mask; set_spte_ret |= set_spte(vcpu, &sp->spt[i], @@ -1147,10 +1149,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) true, false, host_writable); } - if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH) - kvm_flush_remote_tlbs(vcpu->kvm); - - return nr_present; + return set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH; } #undef pt_element_t -- cgit v1.2.3 From 5591c0694d85b768c28e9d04c0138b8ff202bff6 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Sat, 18 Sep 2021 08:56:33 +0800 Subject: KVM: X86: Zap the invalid list after remote tlb flushing In mmu_sync_children(), it can zap the invalid list after remote tlb flushing. Emptifying the invalid list ASAP might help reduce a remote tlb flushing in some cases. Signed-off-by: Lai Jiangshan Signed-off-by: Paolo Bonzini Message-Id: <20210918005636.3675-8-jiangshanlai@gmail.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 34de5a8ea7dc..ccfcdc02d214 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -2035,7 +2035,7 @@ static int mmu_sync_children(struct kvm_vcpu *vcpu, protected |= rmap_write_protect(vcpu, sp->gfn); if (protected) { - kvm_flush_remote_tlbs(vcpu->kvm); + kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, true); flush = false; } -- cgit v1.2.3 From cc2a8e66bbcd4b55e9012cb0447e9386dcb6367e Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Sat, 18 Sep 2021 08:56:34 +0800 Subject: KVM: X86: Remove FNAME(update_pte) Its solo caller is changed to use FNAME(prefetch_gpte) directly. 
Signed-off-by: Lai Jiangshan Signed-off-by: Paolo Bonzini Message-Id: <20210918005636.3675-9-jiangshanlai@gmail.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/paging_tmpl.h | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index aca40993096e..08f466ac36ff 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -589,14 +589,6 @@ FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, return true; } -static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, - u64 *spte, const void *pte) -{ - pt_element_t gpte = *(const pt_element_t *)pte; - - FNAME(prefetch_gpte)(vcpu, sp, spte, gpte, false); -} - static bool FNAME(gpte_changed)(struct kvm_vcpu *vcpu, struct guest_walker *gw, int level) { @@ -1007,7 +999,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa) sizeof(pt_element_t))) break; - FNAME(update_pte)(vcpu, sp, sptep, &gpte); + FNAME(prefetch_gpte)(vcpu, sp, sptep, gpte, false); } if (!is_shadow_present_pte(*sptep) || !sp->unsync_children) -- cgit v1.2.3 From f1c4a88c41ea04a7036409a37e17cf22a8dbe9e2 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Sat, 18 Sep 2021 08:56:35 +0800 Subject: KVM: X86: Don't unsync pagetables when speculative We'd better only unsync the pagetable when there just was a really write fault on a level-1 pagetable. Signed-off-by: Lai Jiangshan Signed-off-by: Paolo Bonzini Message-Id: <20210918005636.3675-10-jiangshanlai@gmail.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 6 +++++- arch/x86/kvm/mmu/mmu_internal.h | 3 ++- arch/x86/kvm/mmu/spte.c | 2 +- 3 files changed, 8 insertions(+), 3 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index ccfcdc02d214..73aa15e89311 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -2582,7 +2582,8 @@ static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) * were marked unsync (or if there is no shadow page), -EPERM if the SPTE must * be write-protected. */ -int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync) +int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync, + bool speculative) { struct kvm_mmu_page *sp; bool locked = false; @@ -2608,6 +2609,9 @@ int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync) if (sp->unsync) continue; + if (speculative) + return -EEXIST; + /* * TDP MMU page faults require an additional spinlock as they * run with mmu_lock held for read, not write, and the unsync diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h index bf2bdbf333c2..2ba12ef46cb0 100644 --- a/arch/x86/kvm/mmu/mmu_internal.h +++ b/arch/x86/kvm/mmu/mmu_internal.h @@ -124,7 +124,8 @@ static inline bool is_nx_huge_page_enabled(void) return READ_ONCE(nx_huge_pages); } -int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync); +int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync, + bool speculative); void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn); void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn); diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c index 3e97cdb13eb7..b68a580f3510 100644 --- a/arch/x86/kvm/mmu/spte.c +++ b/arch/x86/kvm/mmu/spte.c @@ -159,7 +159,7 @@ int make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level, * e.g. 
it's write-tracked (upper-level SPs) or has one or more * shadow pages and unsync'ing pages is not allowed. */ - if (mmu_try_to_unsync_pages(vcpu, gfn, can_unsync)) { + if (mmu_try_to_unsync_pages(vcpu, gfn, can_unsync, speculative)) { pgprintk("%s: found shadow page for %llx, marking ro\n", __func__, gfn); ret |= SET_SPTE_WRITE_PROTECTED_PT; -- cgit v1.2.3 From 8b8f9d753b84c243bf0b1004b515c53b7ec7e138 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Sat, 18 Sep 2021 08:56:36 +0800 Subject: KVM: X86: Don't check unsync if the original spte is writable If the original spte is writable, the target gfn should not be the gfn of a synchronized shadow page, and the spte can continue to be writable. When !can_unsync, speculative must be false. So when the check of "!can_unsync" is removed, we need to move the label of "out" up. Signed-off-by: Lai Jiangshan Signed-off-by: Paolo Bonzini Message-Id: <20210918005636.3675-11-jiangshanlai@gmail.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/spte.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c index b68a580f3510..a33c581aabd6 100644 --- a/arch/x86/kvm/mmu/spte.c +++ b/arch/x86/kvm/mmu/spte.c @@ -150,7 +150,7 @@ int make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level, * is responsibility of kvm_mmu_get_page / kvm_mmu_sync_roots. * Same reasoning can be applied to dirty page accounting. */ - if (!can_unsync && is_writable_pte(old_spte)) + if (is_writable_pte(old_spte)) goto out; /* @@ -171,10 +171,10 @@ int make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level, if (pte_access & ACC_WRITE_MASK) spte |= spte_shadow_dirty_mask(spte); +out: if (speculative) spte = mark_spte_for_access_track(spte); -out: WARN_ONCE(is_rsvd_spte(&vcpu->arch.mmu->shadow_zero_check, spte, level), "spte = 0x%llx, level = %d, rsvd bits = 0x%llx", spte, level, get_rsvd_bits(&vcpu->arch.mmu->shadow_zero_check, spte, level)); -- cgit v1.2.3 From 515a0c79e7963cc4556ca61516cc09d39e592712 Mon Sep 17 00:00:00 2001 From: "Longpeng(Mike)" Date: Fri, 27 Aug 2021 16:00:03 +0800 Subject: kvm: irqfd: avoid updating unmodified entries of the routing All of the irqfds would be updated when the irq routing is updated, which is too expensive if there are too many irqfds. However, we can reduce the cost by avoiding some unnecessary updates: for irqs of MSI type on X86, the update can be skipped if the MSI values have not changed. The vfio migration benefits from this optimization. The test VM has 128 vcpus and 8 VFs (with 65 vectors enabled), so the VM has more than 520 irqfds. We measured the cost of vfio_msix_enable (in QEMU, it sets the routing for each irqfd) for each VF, and the total cost is significantly reduced (per-VF cost in ms):

        Origin   Apply this Patch
1st     8        4
2nd     15       5
3rd     22       6
4th     24       6
5th     36       7
6th     44       7
7th     51       8
8th     58       8
Total   258ms    51ms

We're also trying to optimize the QEMU part [1], but it's still worthwhile to optimize KVM to gain more benefits.
[1] https://lists.gnu.org/archive/html/qemu-devel/2021-08/msg04215.html Signed-off-by: Longpeng(Mike) Message-Id: <20210827080003.2689-1-longpeng2@huawei.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 9 +++++++++ include/linux/kvm_host.h | 2 ++ virt/kvm/eventfd.c | 15 ++++++++++++++- 3 files changed, 25 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 25bfc12c0d08..ee1b9e168d46 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -12062,6 +12062,15 @@ int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq, return static_call(kvm_x86_update_pi_irte)(kvm, host_irq, guest_irq, set); } +bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *old, + struct kvm_kernel_irq_routing_entry *new) +{ + if (new->type != KVM_IRQ_ROUTING_MSI) + return true; + + return !!memcmp(&old->msi, &new->msi, sizeof(new->msi)); +} + bool kvm_vector_hashing_enabled(void) { return vector_hashing; diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 1f9e80ce4723..3f87d6ad20bf 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -1764,6 +1764,8 @@ void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *); void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *); int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq, uint32_t guest_irq, bool set); +bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *, + struct kvm_kernel_irq_routing_entry *); #endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */ #ifdef CONFIG_HAVE_KVM_INVALID_WAKEUPS diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c index e996989cd580..2ad013b8bde9 100644 --- a/virt/kvm/eventfd.c +++ b/virt/kvm/eventfd.c @@ -281,6 +281,13 @@ int __attribute__((weak)) kvm_arch_update_irqfd_routing( { return 0; } + +bool __attribute__((weak)) kvm_arch_irqfd_route_changed( + struct kvm_kernel_irq_routing_entry *old, + struct kvm_kernel_irq_routing_entry *new) +{ + return true; +} #endif static int @@ -615,10 +622,16 @@ void kvm_irq_routing_update(struct kvm *kvm) spin_lock_irq(&kvm->irqfds.lock); list_for_each_entry(irqfd, &kvm->irqfds.items, list) { +#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS + /* Under irqfds.lock, so can read irq_entry safely */ + struct kvm_kernel_irq_routing_entry old = irqfd->irq_entry; +#endif + irqfd_update(kvm, irqfd); #ifdef CONFIG_HAVE_KVM_IRQ_BYPASS - if (irqfd->producer) { + if (irqfd->producer && + kvm_arch_irqfd_route_changed(&old, &irqfd->irq_entry)) { int ret = kvm_arch_update_irqfd_routing( irqfd->kvm, irqfd->producer->irq, irqfd->gsi, 1); -- cgit v1.2.3 From 0226a45c468f72fe5e085d1cad571c42bff06cad Mon Sep 17 00:00:00 2001 From: Maxim Levitsky Date: Tue, 14 Sep 2021 18:48:15 +0300 Subject: KVM: x86: nSVM: don't copy pause related settings According to the SDM, the CPU never modifies these settings. It loads them on VM entry and updates an internal copy instead. Also don't load them from the vmcb12 as we don't expose these features to the nested guest yet. 
Signed-off-by: Maxim Levitsky Message-Id: <20210914154825.104886-5-mlevitsk@redhat.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/nested.c | 8 -------- 1 file changed, 8 deletions(-) diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index 510b833cbd39..ec3e926b20dd 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -550,9 +550,6 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm) svm->vmcb->control.event_inj = svm->nested.ctl.event_inj; svm->vmcb->control.event_inj_err = svm->nested.ctl.event_inj_err; - svm->vmcb->control.pause_filter_count = svm->nested.ctl.pause_filter_count; - svm->vmcb->control.pause_filter_thresh = svm->nested.ctl.pause_filter_thresh; - nested_svm_transition_tlb_flush(vcpu); /* Enter Guest-Mode */ @@ -810,11 +807,6 @@ int nested_svm_vmexit(struct vcpu_svm *svm) vmcb12->control.event_inj = svm->nested.ctl.event_inj; vmcb12->control.event_inj_err = svm->nested.ctl.event_inj_err; - vmcb12->control.pause_filter_count = - svm->vmcb->control.pause_filter_count; - vmcb12->control.pause_filter_thresh = - svm->vmcb->control.pause_filter_thresh; - nested_svm_copy_common_state(svm->nested.vmcb02.ptr, svm->vmcb01.ptr); svm_switch_vmcb(svm, &svm->vmcb01); -- cgit v1.2.3 From 4c84926e229e0efdafa2756d7e6c4ae2fb0b7945 Mon Sep 17 00:00:00 2001 From: Maxim Levitsky Date: Tue, 14 Sep 2021 18:48:19 +0300 Subject: KVM: x86: SVM: add module param to control LBR virtualization This is useful for debug and also makes it consistent with the rest of the SVM optional features. Signed-off-by: Maxim Levitsky Message-Id: <20210914154825.104886-9-mlevitsk@redhat.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/svm.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index c770dce55ba8..270a091d57b0 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -186,6 +186,10 @@ module_param(vls, int, 0444); static int vgif = true; module_param(vgif, int, 0444); +/* enable/disable LBR virtualization */ +static int lbrv = true; +module_param(lbrv, int, 0444); + /* * enable / disable AVIC. Because the defaults differ for APICv * support between VMX and SVM we cannot use module_param_named. @@ -1059,6 +1063,13 @@ static __init int svm_hardware_setup(void) pr_info("Virtual GIF supported\n"); } + if (lbrv) { + if (!boot_cpu_has(X86_FEATURE_LBRV)) + lbrv = false; + else + pr_info("LBR virtualization supported\n"); + } + svm_set_cpu_caps(); /* @@ -2923,7 +2934,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) svm->tsc_aux = data; break; case MSR_IA32_DEBUGCTLMSR: - if (!boot_cpu_has(X86_FEATURE_LBRV)) { + if (!lbrv) { vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n", __func__, data); break; -- cgit v1.2.3 From 36e8194dcd749c2455d0fcbad43e719699691a11 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Thu, 23 Sep 2021 12:46:07 -0400 Subject: KVM: x86: SVM: don't set VMLOAD/VMSAVE intercepts on vCPU reset Commit adc2a23734ac ("KVM: nSVM: improve SYSENTER emulation on AMD"), made init_vmcb set vmload/vmsave intercepts unconditionally, and relied on svm_vcpu_after_set_cpuid to clear them when possible. However init_vmcb is also called when the vCPU is reset, and it is not followed by another call to svm_vcpu_after_set_cpuid because the CPUID is already set. This mistake makes the VMSAVE/VMLOAD intercept to be set when it is not needed, and harms performance of the nested guest. 
Extract the relevant parts of svm_vcpu_after_set_cpuid so that they can be called again on reset. Fixes: adc2a23734ac ("KVM: nSVM: improve SYSENTER emulation on AMD") Signed-off-by: Maxim Levitsky Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/svm.c | 61 ++++++++++++++++++++++++++++---------------------- 1 file changed, 34 insertions(+), 27 deletions(-) diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 270a091d57b0..53d593016be5 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -1161,6 +1161,38 @@ static void svm_recalc_instruction_intercepts(struct kvm_vcpu *vcpu, } } +static inline void init_vmcb_after_set_cpuid(struct kvm_vcpu *vcpu) +{ + struct vcpu_svm *svm = to_svm(vcpu); + + if (guest_cpuid_is_intel(vcpu)) { + /* + * We must intercept SYSENTER_EIP and SYSENTER_ESP + * accesses because the processor only stores 32 bits. + * For the same reason we cannot use virtual VMLOAD/VMSAVE. + */ + svm_set_intercept(svm, INTERCEPT_VMLOAD); + svm_set_intercept(svm, INTERCEPT_VMSAVE); + svm->vmcb->control.virt_ext &= ~VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK; + + set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_EIP, 0, 0); + set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_ESP, 0, 0); + } else { + /* + * If hardware supports Virtual VMLOAD VMSAVE then enable it + * in VMCB and clear intercepts to avoid #VMEXIT. + */ + if (vls) { + svm_clr_intercept(svm, INTERCEPT_VMLOAD); + svm_clr_intercept(svm, INTERCEPT_VMSAVE); + svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK; + } + /* No need to intercept these MSRs */ + set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_EIP, 1, 1); + set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_ESP, 1, 1); + } +} + static void init_vmcb(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); @@ -1307,6 +1339,7 @@ static void init_vmcb(struct kvm_vcpu *vcpu) } svm_hv_init_vmcb(svm->vmcb); + init_vmcb_after_set_cpuid(vcpu); vmcb_mark_all_dirty(svm->vmcb); @@ -4043,33 +4076,7 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) kvm_request_apicv_update(vcpu->kvm, false, APICV_INHIBIT_REASON_NESTED); } - - if (guest_cpuid_is_intel(vcpu)) { - /* - * We must intercept SYSENTER_EIP and SYSENTER_ESP - * accesses because the processor only stores 32 bits. - * For the same reason we cannot use virtual VMLOAD/VMSAVE. - */ - svm_set_intercept(svm, INTERCEPT_VMLOAD); - svm_set_intercept(svm, INTERCEPT_VMSAVE); - svm->vmcb->control.virt_ext &= ~VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK; - - set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_EIP, 0, 0); - set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_ESP, 0, 0); - } else { - /* - * If hardware supports Virtual VMLOAD VMSAVE then enable it - * in VMCB and clear intercepts to avoid #VMEXIT. - */ - if (vls) { - svm_clr_intercept(svm, INTERCEPT_VMLOAD); - svm_clr_intercept(svm, INTERCEPT_VMSAVE); - svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK; - } - /* No need to intercept these MSRs */ - set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_EIP, 1, 1); - set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_ESP, 1, 1); - } + init_vmcb_after_set_cpuid(vcpu); } static bool svm_has_wbinvd_exit(void) -- cgit v1.2.3 From f800650a4ed2f4e99d3333b1f5b50c591f559ed0 Mon Sep 17 00:00:00 2001 From: Maxim Levitsky Date: Tue, 14 Sep 2021 18:48:23 +0300 Subject: KVM: x86: SVM: add module param to control TSC scaling This allows to easily simulate a CPU without this feature. 
Signed-off-by: Maxim Levitsky Message-Id: <20210914154825.104886-13-mlevitsk@redhat.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/svm.c | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 53d593016be5..1ff904653a3e 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -190,6 +190,9 @@ module_param(vgif, int, 0444); static int lbrv = true; module_param(lbrv, int, 0444); +static int tsc_scaling = true; +module_param(tsc_scaling, int, 0444); + /* * enable / disable AVIC. Because the defaults differ for APICv * support between VMX and SVM we cannot use module_param_named. @@ -470,7 +473,7 @@ static int has_svm(void) static void svm_hardware_disable(void) { /* Make sure we clean up behind us */ - if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) + if (tsc_scaling) wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT); cpu_svm_disable(); @@ -513,6 +516,10 @@ static int svm_hardware_enable(void) wrmsrl(MSR_VM_HSAVE_PA, __sme_page_pa(sd->save_area)); if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) { + /* + * Set the default value, even if we don't use TSC scaling + * to avoid having stale value in the msr + */ wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT); __this_cpu_write(current_tsc_ratio, TSC_RATIO_DEFAULT); } @@ -980,10 +987,15 @@ static __init int svm_hardware_setup(void) if (boot_cpu_has(X86_FEATURE_FXSR_OPT)) kvm_enable_efer_bits(EFER_FFXSR); - if (boot_cpu_has(X86_FEATURE_TSCRATEMSR)) { - kvm_has_tsc_control = true; - kvm_max_tsc_scaling_ratio = TSC_RATIO_MAX; - kvm_tsc_scaling_ratio_frac_bits = 32; + if (tsc_scaling) { + if (!boot_cpu_has(X86_FEATURE_TSCRATEMSR)) { + tsc_scaling = false; + } else { + pr_info("TSC scaling supported\n"); + kvm_has_tsc_control = true; + kvm_max_tsc_scaling_ratio = TSC_RATIO_MAX; + kvm_tsc_scaling_ratio_frac_bits = 32; + } } tsc_aux_uret_slot = kvm_add_user_return_msr(MSR_TSC_AUX); @@ -1496,7 +1508,7 @@ static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu) vmsave(__sme_page_pa(sd->save_area)); } - if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) { + if (tsc_scaling) { u64 tsc_ratio = vcpu->arch.tsc_scaling_ratio; if (tsc_ratio != __this_cpu_read(current_tsc_ratio)) { __this_cpu_write(current_tsc_ratio, tsc_ratio); -- cgit v1.2.3 From 5228eb96a4875f8cf5d61d486e3795ac14df8904 Mon Sep 17 00:00:00 2001 From: Maxim Levitsky Date: Tue, 14 Sep 2021 18:48:24 +0300 Subject: KVM: x86: nSVM: implement nested TSC scaling This was tested by booting a nested guest with TSC=1Ghz, observing the clocks, and doing about 100 cycles of migration. Note that qemu patch is needed to support migration because of a new MSR that needs to be placed in the migration state. The patch will be sent to the qemu mailing list soon. 
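As a rough illustration of the ratio arithmetic involved, the sketch below is a minimal userspace model, not KVM code: it assumes the 8.32 fixed-point layout of MSR_AMD64_TSC_RATIO and the 32 fractional bits configured for SVM in the hunk above, and the helper name and example ratios are invented for illustration.

#include <stdint.h>
#include <stdio.h>

#define TSC_FRAC_BITS 32	/* matches the 32 fractional bits set up for SVM */

/* Compose an L1 ratio with an L2 ratio, both kept in 8.32 fixed point. */
static uint64_t compose_tsc_ratio(uint64_t l1_ratio, uint64_t l2_ratio)
{
	return (uint64_t)(((unsigned __int128)l1_ratio * l2_ratio) >> TSC_FRAC_BITS);
}

int main(void)
{
	uint64_t one  = 1ULL << TSC_FRAC_BITS;	/* ratio 1.0, the default    */
	uint64_t half = one / 2;		/* L1 gives L2 half its rate */

	/* prints 0x80000000, i.e. an effective L0->L2 ratio of 0.5 */
	printf("composed ratio: %#llx\n",
	       (unsigned long long)compose_tsc_ratio(one, half));
	return 0;
}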
Signed-off-by: Maxim Levitsky Message-Id: <20210914154825.104886-14-mlevitsk@redhat.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/nested.c | 29 +++++++++++++++++++++++++++-- arch/x86/kvm/svm/svm.c | 31 ++++++++++++++++++++++++++++--- arch/x86/kvm/svm/svm.h | 7 ++++++- arch/x86/kvm/vmx/vmx.c | 1 + arch/x86/kvm/x86.c | 1 + 5 files changed, 63 insertions(+), 6 deletions(-) diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index ec3e926b20dd..31fd4bd334c2 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -538,8 +538,17 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm) if (nested_npt_enabled(svm)) nested_svm_init_mmu_context(vcpu); - svm->vmcb->control.tsc_offset = vcpu->arch.tsc_offset = - vcpu->arch.l1_tsc_offset + svm->nested.ctl.tsc_offset; + vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset( + vcpu->arch.l1_tsc_offset, + svm->nested.ctl.tsc_offset, + svm->tsc_ratio_msr); + + svm->vmcb->control.tsc_offset = vcpu->arch.tsc_offset; + + if (svm->tsc_ratio_msr != kvm_default_tsc_scaling_ratio) { + WARN_ON(!svm->tsc_scaling_enabled); + nested_svm_update_tsc_ratio_msr(vcpu); + } svm->vmcb->control.int_ctl = (svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) | @@ -824,6 +833,12 @@ int nested_svm_vmexit(struct vcpu_svm *svm) vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS); } + if (svm->tsc_ratio_msr != kvm_default_tsc_scaling_ratio) { + WARN_ON(!svm->tsc_scaling_enabled); + vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio; + svm_write_tsc_multiplier(vcpu, vcpu->arch.tsc_scaling_ratio); + } + svm->nested.ctl.nested_cr3 = 0; /* @@ -1211,6 +1226,16 @@ int nested_svm_exit_special(struct vcpu_svm *svm) return NESTED_EXIT_CONTINUE; } +void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu) +{ + struct vcpu_svm *svm = to_svm(vcpu); + + vcpu->arch.tsc_scaling_ratio = + kvm_calc_nested_tsc_multiplier(vcpu->arch.l1_tsc_scaling_ratio, + svm->tsc_ratio_msr); + svm_write_tsc_multiplier(vcpu, vcpu->arch.tsc_scaling_ratio); +} + static int svm_get_nested_state(struct kvm_vcpu *vcpu, struct kvm_nested_state __user *user_kvm_nested_state, u32 user_data_size) diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 1ff904653a3e..89077160d463 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -940,6 +940,9 @@ static __init void svm_set_cpu_caps(void) if (npt_enabled) kvm_cpu_cap_set(X86_FEATURE_NPT); + if (tsc_scaling) + kvm_cpu_cap_set(X86_FEATURE_TSCRATEMSR); + /* Nested VM can receive #VMEXIT instead of triggering #GP */ kvm_cpu_cap_set(X86_FEATURE_SVME_ADDR_CHK); } @@ -1132,7 +1135,9 @@ static u64 svm_get_l2_tsc_offset(struct kvm_vcpu *vcpu) static u64 svm_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu) { - return kvm_default_tsc_scaling_ratio; + struct vcpu_svm *svm = to_svm(vcpu); + + return svm->tsc_ratio_msr; } static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) @@ -1144,7 +1149,7 @@ static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS); } -static void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier) +void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier) { wrmsrl(MSR_AMD64_TSC_RATIO, multiplier); } @@ -1356,7 +1361,6 @@ static void init_vmcb(struct kvm_vcpu *vcpu) vmcb_mark_all_dirty(svm->vmcb); enable_gif(svm); - } static void __svm_vcpu_reset(struct kvm_vcpu *vcpu) @@ -1367,6 +1371,7 @@ static void __svm_vcpu_reset(struct kvm_vcpu *vcpu) svm_init_osvw(vcpu); vcpu->arch.microcode_version = 0x01000065; + 
svm->tsc_ratio_msr = kvm_default_tsc_scaling_ratio; if (sev_es_guest(vcpu->kvm)) sev_es_vcpu_reset(svm); @@ -2718,6 +2723,11 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) struct vcpu_svm *svm = to_svm(vcpu); switch (msr_info->index) { + case MSR_AMD64_TSC_RATIO: + if (!msr_info->host_initiated && !svm->tsc_scaling_enabled) + return 1; + msr_info->data = svm->tsc_ratio_msr; + break; case MSR_STAR: msr_info->data = svm->vmcb01.ptr->save.star; break; @@ -2867,6 +2877,19 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) u32 ecx = msr->index; u64 data = msr->data; switch (ecx) { + case MSR_AMD64_TSC_RATIO: + if (!msr->host_initiated && !svm->tsc_scaling_enabled) + return 1; + + if (data & TSC_RATIO_RSVD) + return 1; + + svm->tsc_ratio_msr = data; + + if (svm->tsc_scaling_enabled && is_guest_mode(vcpu)) + nested_svm_update_tsc_ratio_msr(vcpu); + + break; case MSR_IA32_CR_PAT: if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data)) return 1; @@ -4062,6 +4085,8 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) svm->nrips_enabled = kvm_cpu_cap_has(X86_FEATURE_NRIPS) && guest_cpuid_has(vcpu, X86_FEATURE_NRIPS); + svm->tsc_scaling_enabled = tsc_scaling && guest_cpuid_has(vcpu, X86_FEATURE_TSCRATEMSR); + svm_recalc_instruction_intercepts(vcpu, svm); /* For sev guests, the memory encryption bit is not reserved in CR3. */ diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index c19859e62dec..0d7bbe548ac3 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -140,6 +140,8 @@ struct vcpu_svm { u64 next_rip; u64 spec_ctrl; + + u64 tsc_ratio_msr; /* * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be * translated into the appropriate L2_CFG bits on the host to @@ -160,7 +162,8 @@ struct vcpu_svm { unsigned long int3_rip; /* cached guest cpuid flags for faster access */ - bool nrips_enabled : 1; + bool nrips_enabled : 1; + bool tsc_scaling_enabled : 1; u32 ldr_reg; u32 dfr_reg; @@ -483,6 +486,8 @@ int nested_svm_check_permissions(struct kvm_vcpu *vcpu); int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr, bool has_error_code, u32 error_code); int nested_svm_exit_special(struct vcpu_svm *svm); +void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu); +void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier); void nested_load_control_from_vmcb12(struct vcpu_svm *svm, struct vmcb_control_area *control); void nested_sync_control_from_vmcb02(struct vcpu_svm *svm); diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index b8bfb46d58f4..1c8b2b6e7ed9 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -6435,6 +6435,7 @@ static bool vmx_has_emulated_msr(struct kvm *kvm, u32 index) case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC: return nested; case MSR_AMD64_VIRT_SPEC_CTRL: + case MSR_AMD64_TSC_RATIO: /* This is AMD only. 
*/ return false; default: diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index ee1b9e168d46..c028ff7d6fb9 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1361,6 +1361,7 @@ static const u32 emulated_msrs_all[] = { MSR_PLATFORM_INFO, MSR_MISC_FEATURES_ENABLES, MSR_AMD64_VIRT_SPEC_CTRL, + MSR_AMD64_TSC_RATIO, MSR_IA32_POWER_CTL, MSR_IA32_UCODE_REV, -- cgit v1.2.3 From 3e44dce4d0aeb3582b87c7bb8a79d69fa406eee5 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Mon, 6 Sep 2021 20:25:47 +0800 Subject: KVM: X86: Move PTE present check from loop body to __shadow_walk_next() So far, the loop bodies already ensure the PTE is present before calling __shadow_walk_next(): Some loop bodies simply exit with a !PRESENT directly and some other loop bodies, i.e. FNAME(fetch) and __direct_map() do not currently guard their walks with is_shadow_present_pte, but only because they install present non-leaf SPTEs in the loop itself. But checking pte present in __shadow_walk_next() (which is called from shadow_walk_okay()) is more prudent; walking past a !PRESENT SPTE would lead to attempting to read a the next level SPTE from a garbage iter->shadow_addr. It also allows to remove the is_shadow_present_pte() checks from the loop bodies. Reviewed-by: Sean Christopherson Signed-off-by: Lai Jiangshan Message-Id: <20210906122547.263316-2-jiangshanlai@gmail.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 13 ++----------- arch/x86/kvm/mmu/paging_tmpl.h | 2 +- 2 files changed, 3 insertions(+), 12 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 73aa15e89311..7ef9c001d1b6 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -2220,7 +2220,7 @@ static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator) static void __shadow_walk_next(struct kvm_shadow_walk_iterator *iterator, u64 spte) { - if (is_last_spte(spte, iterator->level)) { + if (!is_shadow_present_pte(spte) || is_last_spte(spte, iterator->level)) { iterator->level = 0; return; } @@ -3189,9 +3189,6 @@ static u64 *fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, gpa_t gpa, u64 *spte) for_each_shadow_entry_lockless(vcpu, gpa, iterator, old_spte) { sptep = iterator.sptep; *spte = old_spte; - - if (!is_shadow_present_pte(old_spte)) - break; } return sptep; @@ -3759,9 +3756,6 @@ static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level spte = mmu_spte_get_lockless(iterator.sptep); sptes[leaf] = spte; - - if (!is_shadow_present_pte(spte)) - break; } return leaf; @@ -3877,11 +3871,8 @@ static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr) u64 spte; walk_shadow_page_lockless_begin(vcpu); - for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) { + for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) clear_sp_write_flooding_count(iterator.sptep); - if (!is_shadow_present_pte(spte)) - break; - } walk_shadow_page_lockless_end(vcpu); } diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index 08f466ac36ff..b908d2ff6d4c 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -1002,7 +1002,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa) FNAME(prefetch_gpte)(vcpu, sp, sptep, gpte, false); } - if (!is_shadow_present_pte(*sptep) || !sp->unsync_children) + if (!sp->unsync_children) break; } write_unlock(&vcpu->kvm->mmu_lock); -- cgit v1.2.3 From 6b6fcd2804a2dcc86068859188708ee57c7ca19f Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Thu, 16 Sep 2021 
18:15:32 +0000 Subject: kvm: x86: abstract locking around pvclock_update_vm_gtod_copy Updates to the kvmclock parameters needs to do a complicated dance of KVM_REQ_MCLOCK_INPROGRESS and KVM_REQ_CLOCK_UPDATE in addition to taking pvclock_gtod_sync_lock. Place that in two functions that can be called on all of master clock update, KVM_SET_CLOCK, and Hyper-V reenlightenment. Reviewed-by: Marcelo Tosatti Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 1 - arch/x86/kvm/x86.c | 62 +++++++++++++++++++---------------------- 2 files changed, 29 insertions(+), 34 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 0cb35ef26ab3..069e75288514 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1862,7 +1862,6 @@ u64 kvm_calc_nested_tsc_multiplier(u64 l1_multiplier, u64 l2_multiplier); unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu); bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip); -void kvm_make_mclock_inprogress_request(struct kvm *kvm); void kvm_make_scan_ioapic_request(struct kvm *kvm); void kvm_make_scan_ioapic_request_mask(struct kvm *kvm, unsigned long *vcpu_bitmap); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index c028ff7d6fb9..759ba849e10b 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2743,35 +2743,42 @@ static void pvclock_update_vm_gtod_copy(struct kvm *kvm) #endif } -void kvm_make_mclock_inprogress_request(struct kvm *kvm) +static void kvm_make_mclock_inprogress_request(struct kvm *kvm) { kvm_make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS); } -static void kvm_gen_update_masterclock(struct kvm *kvm) +static void kvm_start_pvclock_update(struct kvm *kvm) { -#ifdef CONFIG_X86_64 - int i; - struct kvm_vcpu *vcpu; struct kvm_arch *ka = &kvm->arch; - unsigned long flags; - - kvm_hv_invalidate_tsc_page(kvm); kvm_make_mclock_inprogress_request(kvm); /* no guest entries from this point */ - spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags); - pvclock_update_vm_gtod_copy(kvm); - spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags); + spin_lock_irq(&ka->pvclock_gtod_sync_lock); +} +static void kvm_end_pvclock_update(struct kvm *kvm) +{ + struct kvm_arch *ka = &kvm->arch; + struct kvm_vcpu *vcpu; + int i; + + spin_unlock_irq(&ka->pvclock_gtod_sync_lock); kvm_for_each_vcpu(i, vcpu, kvm) kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); /* guest entries allowed */ kvm_for_each_vcpu(i, vcpu, kvm) kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu); -#endif +} + +static void kvm_update_masterclock(struct kvm *kvm) +{ + kvm_hv_invalidate_tsc_page(kvm); + kvm_start_pvclock_update(kvm); + pvclock_update_vm_gtod_copy(kvm); + kvm_end_pvclock_update(kvm); } u64 get_kvmclock_ns(struct kvm *kvm) @@ -6067,12 +6074,10 @@ set_pit2_out: goto out; r = 0; - /* - * TODO: userspace has to take care of races with VCPU_RUN, so - * kvm_gen_update_masterclock() can be cut down to locked - * pvclock_update_vm_gtod_copy(). - */ - kvm_gen_update_masterclock(kvm); + + kvm_hv_invalidate_tsc_page(kvm); + kvm_start_pvclock_update(kvm); + pvclock_update_vm_gtod_copy(kvm); /* * This pairs with kvm_guest_time_update(): when masterclock is @@ -6081,15 +6086,12 @@ set_pit2_out: * is slightly ahead) here we risk going negative on unsigned * 'system_time' when 'user_ns.clock' is very small. 
*/ - spin_lock_irq(&ka->pvclock_gtod_sync_lock); if (kvm->arch.use_master_clock) now_ns = ka->master_kernel_ns; else now_ns = get_kvmclock_base_ns(); ka->kvmclock_offset = user_ns.clock - now_ns; - spin_unlock_irq(&ka->pvclock_gtod_sync_lock); - - kvm_make_all_cpus_request(kvm, KVM_REQ_CLOCK_UPDATE); + kvm_end_pvclock_update(kvm); break; } case KVM_GET_CLOCK: { @@ -8102,14 +8104,13 @@ static void tsc_khz_changed(void *data) static void kvm_hyperv_tsc_notifier(void) { struct kvm *kvm; - struct kvm_vcpu *vcpu; int cpu; - unsigned long flags; mutex_lock(&kvm_lock); list_for_each_entry(kvm, &vm_list, vm_list) kvm_make_mclock_inprogress_request(kvm); + /* no guest entries from this point */ hyperv_stop_tsc_emulation(); /* TSC frequency always matches when on Hyper-V */ @@ -8120,16 +8121,11 @@ static void kvm_hyperv_tsc_notifier(void) list_for_each_entry(kvm, &vm_list, vm_list) { struct kvm_arch *ka = &kvm->arch; - spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags); + spin_lock_irq(&ka->pvclock_gtod_sync_lock); pvclock_update_vm_gtod_copy(kvm); - spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags); - - kvm_for_each_vcpu(cpu, vcpu, kvm) - kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); - - kvm_for_each_vcpu(cpu, vcpu, kvm) - kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu); + kvm_end_pvclock_update(kvm); } + mutex_unlock(&kvm_lock); } #endif @@ -9406,7 +9402,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu)) __kvm_migrate_timers(vcpu); if (kvm_check_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu)) - kvm_gen_update_masterclock(vcpu->kvm); + kvm_update_masterclock(vcpu->kvm); if (kvm_check_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu)) kvm_gen_kvmclock_update(vcpu); if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) { -- cgit v1.2.3 From 45e6c2fac097b4a3f72db339714a4dd6d789b81b Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Thu, 16 Sep 2021 18:15:33 +0000 Subject: KVM: x86: extract KVM_GET_CLOCK/KVM_SET_CLOCK to separate functions No functional change intended. Reviewed-by: Marcelo Tosatti Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 99 ++++++++++++++++++++++++++++-------------------------- 1 file changed, 52 insertions(+), 47 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 759ba849e10b..ed86e437d707 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -5817,6 +5817,54 @@ int kvm_arch_pm_notifier(struct kvm *kvm, unsigned long state) } #endif /* CONFIG_HAVE_KVM_PM_NOTIFIER */ +static int kvm_vm_ioctl_get_clock(struct kvm *kvm, void __user *argp) +{ + struct kvm_clock_data data; + u64 now_ns; + + now_ns = get_kvmclock_ns(kvm); + user_ns.clock = now_ns; + user_ns.flags = kvm->arch.use_master_clock ? 
KVM_CLOCK_TSC_STABLE : 0; + memset(&user_ns.pad, 0, sizeof(user_ns.pad)); + + if (copy_to_user(argp, &data, sizeof(data))) + return -EFAULT; + + return 0; +} + +static int kvm_vm_ioctl_set_clock(struct kvm *kvm, void __user *argp) +{ + struct kvm_arch *ka = &kvm->arch; + struct kvm_clock_data data; + u64 now_ns; + + if (copy_from_user(&data, argp, sizeof(data))) + return -EFAULT; + + if (data.flags) + return -EINVAL; + + kvm_hv_invalidate_tsc_page(kvm); + kvm_start_pvclock_update(kvm); + pvclock_update_vm_gtod_copy(kvm); + + /* + * This pairs with kvm_guest_time_update(): when masterclock is + * in use, we use master_kernel_ns + kvmclock_offset to set + * unsigned 'system_time' so if we use get_kvmclock_ns() (which + * is slightly ahead) here we risk going negative on unsigned + * 'system_time' when 'data.clock' is very small. + */ + if (kvm->arch.use_master_clock) + now_ns = ka->master_kernel_ns; + else + now_ns = get_kvmclock_base_ns(); + ka->kvmclock_offset = data.clock - now_ns; + kvm_end_pvclock_update(kvm); + return 0; +} + long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { @@ -6060,55 +6108,12 @@ set_pit2_out: break; } #endif - case KVM_SET_CLOCK: { - struct kvm_arch *ka = &kvm->arch; - struct kvm_clock_data user_ns; - u64 now_ns; - - r = -EFAULT; - if (copy_from_user(&user_ns, argp, sizeof(user_ns))) - goto out; - - r = -EINVAL; - if (user_ns.flags) - goto out; - - r = 0; - - kvm_hv_invalidate_tsc_page(kvm); - kvm_start_pvclock_update(kvm); - pvclock_update_vm_gtod_copy(kvm); - - /* - * This pairs with kvm_guest_time_update(): when masterclock is - * in use, we use master_kernel_ns + kvmclock_offset to set - * unsigned 'system_time' so if we use get_kvmclock_ns() (which - * is slightly ahead) here we risk going negative on unsigned - * 'system_time' when 'user_ns.clock' is very small. - */ - if (kvm->arch.use_master_clock) - now_ns = ka->master_kernel_ns; - else - now_ns = get_kvmclock_base_ns(); - ka->kvmclock_offset = user_ns.clock - now_ns; - kvm_end_pvclock_update(kvm); + case KVM_SET_CLOCK: + r = kvm_vm_ioctl_set_clock(kvm, argp); break; - } - case KVM_GET_CLOCK: { - struct kvm_clock_data user_ns; - u64 now_ns; - - now_ns = get_kvmclock_ns(kvm); - user_ns.clock = now_ns; - user_ns.flags = kvm->arch.use_master_clock ? KVM_CLOCK_TSC_STABLE : 0; - memset(&user_ns.pad, 0, sizeof(user_ns.pad)); - - r = -EFAULT; - if (copy_to_user(argp, &user_ns, sizeof(user_ns))) - goto out; - r = 0; + case KVM_GET_CLOCK: + r = kvm_vm_ioctl_get_clock(kvm, argp); break; - } case KVM_MEMORY_ENCRYPT_OP: { r = -ENOTTY; if (kvm_x86_ops.mem_enc_op) -- cgit v1.2.3 From 55c0cefbdbdaca7347e20a2b91320b418abc617e Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Thu, 16 Sep 2021 18:15:34 +0000 Subject: KVM: x86: Fix potential race in KVM_GET_CLOCK Sean noticed that KVM_GET_CLOCK was checking kvm_arch.use_master_clock outside of the pvclock sync lock. This is problematic, as the clock value written to the user may or may not actually correspond to a stable TSC. Fix the race by populating the entire kvm_clock_data structure behind the pvclock_gtod_sync_lock. 
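The pattern is the usual one for returning a multi-field snapshot: every field that must stay mutually consistent has to be captured inside the same critical section. A minimal user-space sketch of the before/after shape (illustrative only, not kernel code; the struct, field names and pthread locking below are invented for the example):

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct clock_state {
        pthread_mutex_t lock;
        bool tsc_stable;
        uint64_t clock_ns;
};

/* Racy: the flag is sampled under the lock, the value is read after it. */
static void get_clock_racy(struct clock_state *s, uint64_t *clock, bool *stable)
{
        pthread_mutex_lock(&s->lock);
        *stable = s->tsc_stable;
        pthread_mutex_unlock(&s->lock);
        /* a concurrent update here can make *stable and *clock disagree */
        *clock = s->clock_ns;
}

/* Fixed: the whole snapshot is taken inside one critical section. */
static void get_clock_fixed(struct clock_state *s, uint64_t *clock, bool *stable)
{
        pthread_mutex_lock(&s->lock);
        *stable = s->tsc_stable;
        *clock = s->clock_ns;
        pthread_mutex_unlock(&s->lock);
}

int main(void)
{
        struct clock_state s = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .tsc_stable = true,
                .clock_ns = 123456789,
        };
        uint64_t clock;
        bool stable;

        get_clock_racy(&s, &clock, &stable);
        get_clock_fixed(&s, &clock, &stable);
        printf("clock=%llu stable=%d\n", (unsigned long long)clock, stable);
        return 0;
}

In the racy variant a concurrent writer can flip tsc_stable between the unlock and the read of clock_ns, which is the same kind of mismatch the fix closes by filling in kvm_clock_data while pvclock_gtod_sync_lock is held.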
Suggested-by: Sean Christopherson Signed-off-by: Oliver Upton Message-Id: <20210916181538.968978-4-oupton@google.com> Reviewed-by: Marcelo Tosatti Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 36 +++++++++++++++++++++++------------- 1 file changed, 23 insertions(+), 13 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index ed86e437d707..79535fe83a04 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2781,19 +2781,20 @@ static void kvm_update_masterclock(struct kvm *kvm) kvm_end_pvclock_update(kvm); } -u64 get_kvmclock_ns(struct kvm *kvm) +static void get_kvmclock(struct kvm *kvm, struct kvm_clock_data *data) { struct kvm_arch *ka = &kvm->arch; struct pvclock_vcpu_time_info hv_clock; unsigned long flags; - u64 ret; spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags); if (!ka->use_master_clock) { spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags); - return get_kvmclock_base_ns() + ka->kvmclock_offset; + data->clock = get_kvmclock_base_ns() + ka->kvmclock_offset; + return; } + data->flags |= KVM_CLOCK_TSC_STABLE; hv_clock.tsc_timestamp = ka->master_cycle_now; hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset; spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags); @@ -2805,13 +2806,26 @@ u64 get_kvmclock_ns(struct kvm *kvm) kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL, &hv_clock.tsc_shift, &hv_clock.tsc_to_system_mul); - ret = __pvclock_read_cycles(&hv_clock, rdtsc()); - } else - ret = get_kvmclock_base_ns() + ka->kvmclock_offset; + data->clock = __pvclock_read_cycles(&hv_clock, rdtsc()); + } else { + data->clock = get_kvmclock_base_ns() + ka->kvmclock_offset; + } put_cpu(); +} - return ret; +u64 get_kvmclock_ns(struct kvm *kvm) +{ + struct kvm_clock_data data; + + /* + * Zero flags as it's accessed RMW, leave everything else uninitialized + * as clock is always written and no other fields are consumed. + */ + data.flags = 0; + + get_kvmclock(kvm, &data); + return data.clock; } static void kvm_setup_pvclock_page(struct kvm_vcpu *v, @@ -5820,13 +5834,9 @@ int kvm_arch_pm_notifier(struct kvm *kvm, unsigned long state) static int kvm_vm_ioctl_get_clock(struct kvm *kvm, void __user *argp) { struct kvm_clock_data data; - u64 now_ns; - - now_ns = get_kvmclock_ns(kvm); - user_ns.clock = now_ns; - user_ns.flags = kvm->arch.use_master_clock ? KVM_CLOCK_TSC_STABLE : 0; - memset(&user_ns.pad, 0, sizeof(user_ns.pad)); + memset(&data, 0, sizeof(data)); + get_kvmclock(kvm, &data); if (copy_to_user(argp, &data, sizeof(data))) return -EFAULT; -- cgit v1.2.3 From d055f028a5334114563ce31c4fe21f24f7ee72c3 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 6 Aug 2021 05:21:17 -0400 Subject: KVM: MMU: pass unadulterated gpa to direct_page_fault Do not bother removing the low bits of the gpa. This masking dates back to the very first commit of KVM but it is unnecessary, as exemplified by the other call in kvm_tdp_page_fault. Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 7ef9c001d1b6..376e90f4f413 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -4018,7 +4018,7 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, pgprintk("%s: gva %lx error %x\n", __func__, gpa, error_code); /* This path builds a PAE pagetable, we can map 2mb pages at maximum. 
*/ - return direct_page_fault(vcpu, gpa & PAGE_MASK, error_code, prefault, + return direct_page_fault(vcpu, gpa, error_code, prefault, PG_LEVEL_2M, false); } -- cgit v1.2.3 From 6defd9bb178cc18bd9a45a3aec9c8ef8ffc417ad Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 6 Aug 2021 03:52:18 -0400 Subject: KVM: MMU: Introduce struct kvm_page_fault Create a single structure for arguments that are passed from kvm_mmu_do_page_fault to the page fault handlers. Later the structure will grow to include various output parameters that are passed back to the next steps in the page fault handling. Suggested-by: Isaku Yamahata Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu.h | 34 +++++++++++++++++++++++++++++++--- 1 file changed, 31 insertions(+), 3 deletions(-) diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index e9688a9f7b57..0553ef92946e 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -114,17 +114,45 @@ static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu) vcpu->arch.mmu->shadow_root_level); } +struct kvm_page_fault { + /* arguments to kvm_mmu_do_page_fault. */ + const gpa_t addr; + const u32 error_code; + const bool prefault; + + /* Derived from error_code. */ + const bool exec; + const bool write; + const bool present; + const bool rsvd; + const bool user; + + /* Derived from mmu. */ + const bool is_tdp; +}; + int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code, bool prefault); static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u32 err, bool prefault) { + struct kvm_page_fault fault = { + .addr = cr2_or_gpa, + .error_code = err, + .exec = err & PFERR_FETCH_MASK, + .write = err & PFERR_WRITE_MASK, + .present = err & PFERR_PRESENT_MASK, + .rsvd = err & PFERR_RSVD_MASK, + .user = err & PFERR_USER_MASK, + .prefault = prefault, + .is_tdp = likely(vcpu->arch.mmu->page_fault == kvm_tdp_page_fault), + }; #ifdef CONFIG_RETPOLINE - if (likely(vcpu->arch.mmu->page_fault == kvm_tdp_page_fault)) - return kvm_tdp_page_fault(vcpu, cr2_or_gpa, err, prefault); + if (fault.is_tdp) + return kvm_tdp_page_fault(vcpu, fault.addr, fault.error_code, fault.prefault); #endif - return vcpu->arch.mmu->page_fault(vcpu, cr2_or_gpa, err, prefault); + return vcpu->arch.mmu->page_fault(vcpu, fault.addr, fault.error_code, fault.prefault); } /* -- cgit v1.2.3 From c501040abc420977ea1998d0abc994435879f7d6 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 6 Aug 2021 04:35:50 -0400 Subject: KVM: MMU: change mmu->page_fault() arguments to kvm_page_fault Pass struct kvm_page_fault to mmu->page_fault() instead of extracting the arguments from the struct. FNAME(page_fault) can use the precomputed bools from the error code. 
Suggested-by: Isaku Yamahata Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 4 ++-- arch/x86/kvm/mmu.h | 7 +++---- arch/x86/kvm/mmu/mmu.c | 15 ++++++++------- arch/x86/kvm/mmu/paging_tmpl.h | 22 +++++++++++----------- 4 files changed, 24 insertions(+), 24 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 069e75288514..8470d4673068 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -407,6 +407,7 @@ struct kvm_mmu_root_info { #define KVM_HAVE_MMU_RWLOCK struct kvm_mmu_page; +struct kvm_page_fault; /* * x86 supports 4 paging modes (5-level 64-bit, 4-level 64-bit, 3-level 32-bit, @@ -416,8 +417,7 @@ struct kvm_mmu_page; struct kvm_mmu { unsigned long (*get_guest_pgd)(struct kvm_vcpu *vcpu); u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index); - int (*page_fault)(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u32 err, - bool prefault); + int (*page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault); void (*inject_page_fault)(struct kvm_vcpu *vcpu, struct x86_exception *fault); gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gpa_t gva_or_gpa, diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index 0553ef92946e..ee58177bc282 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -131,8 +131,7 @@ struct kvm_page_fault { const bool is_tdp; }; -int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code, - bool prefault); +int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault); static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u32 err, bool prefault) @@ -150,9 +149,9 @@ static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, }; #ifdef CONFIG_RETPOLINE if (fault.is_tdp) - return kvm_tdp_page_fault(vcpu, fault.addr, fault.error_code, fault.prefault); + return kvm_tdp_page_fault(vcpu, &fault); #endif - return vcpu->arch.mmu->page_fault(vcpu, fault.addr, fault.error_code, fault.prefault); + return vcpu->arch.mmu->page_fault(vcpu, &fault); } /* diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 376e90f4f413..3ca4b1c69e03 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -4012,13 +4012,14 @@ out_unlock: return r; } -static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, - u32 error_code, bool prefault) +static int nonpaging_page_fault(struct kvm_vcpu *vcpu, + struct kvm_page_fault *fault) { pgprintk("%s: gva %lx error %x\n", __func__, gpa, error_code); /* This path builds a PAE pagetable, we can map 2mb pages at maximum. 
*/ - return direct_page_fault(vcpu, gpa, error_code, prefault, + return direct_page_fault(vcpu, fault->addr, + fault->error_code, fault->prefault, PG_LEVEL_2M, false); } @@ -4055,10 +4056,10 @@ int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code, } EXPORT_SYMBOL_GPL(kvm_handle_page_fault); -int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code, - bool prefault) +int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) { int max_level; + gpa_t gpa = fault->addr; for (max_level = KVM_MAX_HUGEPAGE_LEVEL; max_level > PG_LEVEL_4K; @@ -4070,8 +4071,8 @@ int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code, break; } - return direct_page_fault(vcpu, gpa, error_code, prefault, - max_level, true); + return direct_page_fault(vcpu, gpa, fault->error_code, + fault->prefault, max_level, true); } static void nonpaging_init_context(struct kvm_mmu *context) diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index b908d2ff6d4c..8eee1200117a 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -833,11 +833,10 @@ FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu, * Returns: 1 if we need to emulate the instruction, 0 otherwise, or * a negative value on error. */ -static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code, - bool prefault) +static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) { - bool write_fault = error_code & PFERR_WRITE_MASK; - bool user_fault = error_code & PFERR_USER_MASK; + gpa_t addr = fault->addr; + u32 error_code = fault->error_code; struct guest_walker walker; int r; kvm_pfn_t pfn; @@ -847,6 +846,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code, int max_level; pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code); + WARN_ON_ONCE(fault->is_tdp); /* * If PFEC.RSVD is set, this is a shadow page fault. @@ -864,7 +864,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code, */ if (!r) { pgprintk("%s: guest page fault\n", __func__); - if (!prefault) + if (!fault->prefault) kvm_inject_emulated_page_fault(vcpu, &walker.fault); return RET_PF_RETRY; @@ -882,7 +882,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code, vcpu->arch.write_fault_to_shadow_pgtable = false; is_self_change_mapping = FNAME(is_self_change_mapping)(vcpu, - &walker, user_fault, &vcpu->arch.write_fault_to_shadow_pgtable); + &walker, fault->user, &vcpu->arch.write_fault_to_shadow_pgtable); if (is_self_change_mapping) max_level = PG_LEVEL_4K; @@ -892,8 +892,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code, mmu_seq = vcpu->kvm->mmu_notifier_seq; smp_rmb(); - if (kvm_faultin_pfn(vcpu, prefault, walker.gfn, addr, &pfn, &hva, - write_fault, &map_writable, &r)) + if (kvm_faultin_pfn(vcpu, fault->prefault, walker.gfn, addr, &pfn, &hva, + fault->write, &map_writable, &r)) return r; if (handle_abnormal_pfn(vcpu, addr, walker.gfn, pfn, walker.pte_access, &r)) @@ -903,8 +903,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code, * Do not change pte_access if the pfn is a mmio page, otherwise * we will cache the incorrect access into mmio spte. 
*/ - if (write_fault && !(walker.pte_access & ACC_WRITE_MASK) && - !is_cr0_wp(vcpu->arch.mmu) && !user_fault && !is_noslot_pfn(pfn)) { + if (fault->write && !(walker.pte_access & ACC_WRITE_MASK) && + !is_cr0_wp(vcpu->arch.mmu) && !fault->user && !is_noslot_pfn(pfn)) { walker.pte_access |= ACC_WRITE_MASK; walker.pte_access &= ~ACC_USER_MASK; @@ -928,7 +928,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code, if (r) goto out_unlock; r = FNAME(fetch)(vcpu, addr, &walker, error_code, max_level, pfn, - map_writable, prefault); + map_writable, fault->prefault); kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT); out_unlock: -- cgit v1.2.3 From 4326e57ef40ae81a4db057b611666a432b7c996b Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 6 Aug 2021 04:21:58 -0400 Subject: KVM: MMU: change direct_page_fault() arguments to kvm_page_fault Add fields to struct kvm_page_fault corresponding to the arguments of direct_page_fault(). The fields are initialized in the callers, and direct_page_fault() receives a struct kvm_page_fault instead of having to extract the arguments out of it. Also adjust FNAME(page_fault) to store the max_level in struct kvm_page_fault, to keep it similar to the direct map path. Suggested-by: Isaku Yamahata Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu.h | 5 +++++ arch/x86/kvm/mmu/mmu.c | 43 +++++++++++++++++++----------------------- arch/x86/kvm/mmu/paging_tmpl.h | 7 +++---- 3 files changed, 27 insertions(+), 28 deletions(-) diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index ee58177bc282..8d001b56f7b5 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -129,6 +129,9 @@ struct kvm_page_fault { /* Derived from mmu. */ const bool is_tdp; + + /* Input to FNAME(fetch), __direct_map and kvm_tdp_mmu_map. */ + u8 max_level; }; int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault); @@ -146,6 +149,8 @@ static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, .user = err & PFERR_USER_MASK, .prefault = prefault, .is_tdp = likely(vcpu->arch.mmu->page_fault == kvm_tdp_page_fault), + + .max_level = KVM_MAX_HUGEPAGE_LEVEL, }; #ifdef CONFIG_RETPOLINE if (fault.is_tdp) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 3ca4b1c69e03..7685b4270d8c 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -3949,11 +3949,11 @@ out_retry: return true; } -static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code, - bool prefault, int max_level, bool is_tdp) +static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) { + gpa_t gpa = fault->addr; + u32 error_code = fault->error_code; bool is_tdp_mmu_fault = is_tdp_mmu(vcpu->arch.mmu); - bool write = error_code & PFERR_WRITE_MASK; bool map_writable; gfn_t gfn = gpa >> PAGE_SHIFT; @@ -3976,11 +3976,11 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code, mmu_seq = vcpu->kvm->mmu_notifier_seq; smp_rmb(); - if (kvm_faultin_pfn(vcpu, prefault, gfn, gpa, &pfn, &hva, - write, &map_writable, &r)) + if (kvm_faultin_pfn(vcpu, fault->prefault, gfn, gpa, &pfn, &hva, + fault->write, &map_writable, &r)) return r; - if (handle_abnormal_pfn(vcpu, is_tdp ? 0 : gpa, gfn, pfn, ACC_ALL, &r)) + if (handle_abnormal_pfn(vcpu, fault->is_tdp ? 
0 : gpa, gfn, pfn, ACC_ALL, &r)) return r; r = RET_PF_RETRY; @@ -3997,11 +3997,11 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code, goto out_unlock; if (is_tdp_mmu_fault) - r = kvm_tdp_mmu_map(vcpu, gpa, error_code, map_writable, max_level, - pfn, prefault); + r = kvm_tdp_mmu_map(vcpu, gpa, error_code, map_writable, fault->max_level, + pfn, fault->prefault); else - r = __direct_map(vcpu, gpa, error_code, map_writable, max_level, pfn, - prefault, is_tdp); + r = __direct_map(vcpu, gpa, error_code, map_writable, fault->max_level, pfn, + fault->prefault, fault->is_tdp); out_unlock: if (is_tdp_mmu_fault) @@ -4015,12 +4015,11 @@ out_unlock: static int nonpaging_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) { - pgprintk("%s: gva %lx error %x\n", __func__, gpa, error_code); + pgprintk("%s: gva %lx error %x\n", __func__, fault->addr, fault->error_code); /* This path builds a PAE pagetable, we can map 2mb pages at maximum. */ - return direct_page_fault(vcpu, fault->addr, - fault->error_code, fault->prefault, - PG_LEVEL_2M, false); + fault->max_level = PG_LEVEL_2M; + return direct_page_fault(vcpu, fault); } int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code, @@ -4058,21 +4057,17 @@ EXPORT_SYMBOL_GPL(kvm_handle_page_fault); int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) { - int max_level; - gpa_t gpa = fault->addr; - - for (max_level = KVM_MAX_HUGEPAGE_LEVEL; - max_level > PG_LEVEL_4K; - max_level--) { - int page_num = KVM_PAGES_PER_HPAGE(max_level); - gfn_t base = (gpa >> PAGE_SHIFT) & ~(page_num - 1); + while (fault->max_level > PG_LEVEL_4K) { + int page_num = KVM_PAGES_PER_HPAGE(fault->max_level); + gfn_t base = (fault->addr >> PAGE_SHIFT) & ~(page_num - 1); if (kvm_mtrr_check_gfn_range_consistency(vcpu, base, page_num)) break; + + --fault->max_level; } - return direct_page_fault(vcpu, gpa, fault->error_code, - fault->prefault, max_level, true); + return direct_page_fault(vcpu, fault); } static void nonpaging_init_context(struct kvm_mmu *context) diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index 8eee1200117a..a39881a8ba78 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -843,7 +843,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault hva_t hva; unsigned long mmu_seq; bool map_writable, is_self_change_mapping; - int max_level; pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code); WARN_ON_ONCE(fault->is_tdp); @@ -885,9 +884,9 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault &walker, fault->user, &vcpu->arch.write_fault_to_shadow_pgtable); if (is_self_change_mapping) - max_level = PG_LEVEL_4K; + fault->max_level = PG_LEVEL_4K; else - max_level = walker.level; + fault->max_level = walker.level; mmu_seq = vcpu->kvm->mmu_notifier_seq; smp_rmb(); @@ -927,7 +926,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault r = make_mmu_pages_available(vcpu); if (r) goto out_unlock; - r = FNAME(fetch)(vcpu, addr, &walker, error_code, max_level, pfn, + r = FNAME(fetch)(vcpu, addr, &walker, error_code, fault->max_level, pfn, map_writable, fault->prefault); kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT); -- cgit v1.2.3 From b8a5d551151537278fbb45d2c3dcc739b00c797b Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 6 Aug 2021 04:21:58 -0400 Subject: KVM: MMU: change page_fault_handle_page_track() arguments to kvm_page_fault Add fields to struct 
kvm_page_fault corresponding to the arguments of page_fault_handle_page_track(). The fields are initialized in the callers, and page_fault_handle_page_track() receives a struct kvm_page_fault instead of having to extract the arguments out of it. Suggested-by: Isaku Yamahata Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu.h | 3 +++ arch/x86/kvm/mmu/mmu.c | 18 +++++++++--------- arch/x86/kvm/mmu/paging_tmpl.h | 7 ++++--- 3 files changed, 16 insertions(+), 12 deletions(-) diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index 8d001b56f7b5..a5c2d4069964 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -132,6 +132,9 @@ struct kvm_page_fault { /* Input to FNAME(fetch), __direct_map and kvm_tdp_mmu_map. */ u8 max_level; + + /* Shifted addr, or result of guest page table walk if addr is a gva. */ + gfn_t gfn; }; int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault); diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 7685b4270d8c..41dc6796b80b 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -3846,20 +3846,19 @@ static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct) } static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu, - u32 error_code, gfn_t gfn) + struct kvm_page_fault *fault) { - if (unlikely(error_code & PFERR_RSVD_MASK)) + if (unlikely(fault->rsvd)) return false; - if (!(error_code & PFERR_PRESENT_MASK) || - !(error_code & PFERR_WRITE_MASK)) + if (!fault->present || !fault->write) return false; /* * guest is writing the page which is write tracked which can * not be fixed by page fault handler. */ - if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE)) + if (kvm_page_track_is_active(vcpu, fault->gfn, KVM_PAGE_TRACK_WRITE)) return true; return false; @@ -3956,13 +3955,13 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault bool is_tdp_mmu_fault = is_tdp_mmu(vcpu->arch.mmu); bool map_writable; - gfn_t gfn = gpa >> PAGE_SHIFT; unsigned long mmu_seq; kvm_pfn_t pfn; hva_t hva; int r; - if (page_fault_handle_page_track(vcpu, error_code, gfn)) + fault->gfn = gpa >> PAGE_SHIFT; + if (page_fault_handle_page_track(vcpu, fault)) return RET_PF_EMULATE; r = fast_page_fault(vcpu, gpa, error_code); @@ -3976,11 +3975,12 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault mmu_seq = vcpu->kvm->mmu_notifier_seq; smp_rmb(); - if (kvm_faultin_pfn(vcpu, fault->prefault, gfn, gpa, &pfn, &hva, + if (kvm_faultin_pfn(vcpu, fault->prefault, fault->gfn, gpa, &pfn, &hva, fault->write, &map_writable, &r)) return r; - if (handle_abnormal_pfn(vcpu, fault->is_tdp ? 0 : gpa, gfn, pfn, ACC_ALL, &r)) + if (handle_abnormal_pfn(vcpu, fault->is_tdp ? 
0 : gpa, + fault->gfn, pfn, ACC_ALL, &r)) return r; r = RET_PF_RETRY; diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index a39881a8ba78..44a19dde5e70 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -869,7 +869,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault return RET_PF_RETRY; } - if (page_fault_handle_page_track(vcpu, error_code, walker.gfn)) { + fault->gfn = walker.gfn; + if (page_fault_handle_page_track(vcpu, fault)) { shadow_page_table_clear_flood(vcpu, addr); return RET_PF_EMULATE; } @@ -891,11 +892,11 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault mmu_seq = vcpu->kvm->mmu_notifier_seq; smp_rmb(); - if (kvm_faultin_pfn(vcpu, fault->prefault, walker.gfn, addr, &pfn, &hva, + if (kvm_faultin_pfn(vcpu, fault->prefault, fault->gfn, addr, &pfn, &hva, fault->write, &map_writable, &r)) return r; - if (handle_abnormal_pfn(vcpu, addr, walker.gfn, pfn, walker.pte_access, &r)) + if (handle_abnormal_pfn(vcpu, addr, fault->gfn, pfn, walker.pte_access, &r)) return r; /* -- cgit v1.2.3 From 3647cd04b7d07c0c47d4dd11900a3d7bed8b9797 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Sat, 7 Aug 2021 08:57:34 -0400 Subject: KVM: MMU: change kvm_faultin_pfn() arguments to kvm_page_fault Add fields to struct kvm_page_fault corresponding to outputs of kvm_faultin_pfn(). For now they have to be extracted again from struct kvm_page_fault in the subsequent steps, but this is temporary until other functions in the chain are switched over as well. Suggested-by: Isaku Yamahata Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu.h | 5 +++++ arch/x86/kvm/mmu/mmu.c | 50 +++++++++++++++++++----------------------- arch/x86/kvm/mmu/paging_tmpl.h | 19 +++++++--------- 3 files changed, 36 insertions(+), 38 deletions(-) diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index a5c2d4069964..6697571197a5 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -135,6 +135,11 @@ struct kvm_page_fault { /* Shifted addr, or result of guest page table walk if addr is a gva. */ gfn_t gfn; + + /* Outputs of kvm_faultin_pfn. */ + kvm_pfn_t pfn; + hva_t hva; + bool map_writable; }; int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault); diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 41dc6796b80b..c2d2d019634b 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -3889,11 +3889,9 @@ static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch); } -static bool kvm_faultin_pfn(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn, - gpa_t cr2_or_gpa, kvm_pfn_t *pfn, hva_t *hva, - bool write, bool *writable, int *r) +static bool kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, int *r) { - struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); + struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, fault->gfn); bool async; /* @@ -3907,8 +3905,8 @@ static bool kvm_faultin_pfn(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn, if (!kvm_is_visible_memslot(slot)) { /* Don't expose private memslots to L2. 
*/ if (is_guest_mode(vcpu)) { - *pfn = KVM_PFN_NOSLOT; - *writable = false; + fault->pfn = KVM_PFN_NOSLOT; + fault->map_writable = false; return false; } /* @@ -3925,23 +3923,25 @@ static bool kvm_faultin_pfn(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn, } async = false; - *pfn = __gfn_to_pfn_memslot(slot, gfn, false, &async, - write, writable, hva); + fault->pfn = __gfn_to_pfn_memslot(slot, fault->gfn, false, &async, + fault->write, &fault->map_writable, + &fault->hva); if (!async) return false; /* *pfn has correct page already */ - if (!prefault && kvm_can_do_async_pf(vcpu)) { - trace_kvm_try_async_get_page(cr2_or_gpa, gfn); - if (kvm_find_async_pf_gfn(vcpu, gfn)) { - trace_kvm_async_pf_doublefault(cr2_or_gpa, gfn); + if (!fault->prefault && kvm_can_do_async_pf(vcpu)) { + trace_kvm_try_async_get_page(fault->addr, fault->gfn); + if (kvm_find_async_pf_gfn(vcpu, fault->gfn)) { + trace_kvm_async_pf_doublefault(fault->addr, fault->gfn); kvm_make_request(KVM_REQ_APF_HALT, vcpu); goto out_retry; - } else if (kvm_arch_setup_async_pf(vcpu, cr2_or_gpa, gfn)) + } else if (kvm_arch_setup_async_pf(vcpu, fault->addr, fault->gfn)) goto out_retry; } - *pfn = __gfn_to_pfn_memslot(slot, gfn, false, NULL, - write, writable, hva); + fault->pfn = __gfn_to_pfn_memslot(slot, fault->gfn, false, NULL, + fault->write, &fault->map_writable, + &fault->hva); out_retry: *r = RET_PF_RETRY; @@ -3953,11 +3953,8 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault gpa_t gpa = fault->addr; u32 error_code = fault->error_code; bool is_tdp_mmu_fault = is_tdp_mmu(vcpu->arch.mmu); - bool map_writable; unsigned long mmu_seq; - kvm_pfn_t pfn; - hva_t hva; int r; fault->gfn = gpa >> PAGE_SHIFT; @@ -3975,12 +3972,11 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault mmu_seq = vcpu->kvm->mmu_notifier_seq; smp_rmb(); - if (kvm_faultin_pfn(vcpu, fault->prefault, fault->gfn, gpa, &pfn, &hva, - fault->write, &map_writable, &r)) + if (kvm_faultin_pfn(vcpu, fault, &r)) return r; if (handle_abnormal_pfn(vcpu, fault->is_tdp ? 
0 : gpa, - fault->gfn, pfn, ACC_ALL, &r)) + fault->gfn, fault->pfn, ACC_ALL, &r)) return r; r = RET_PF_RETRY; @@ -3990,25 +3986,25 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault else write_lock(&vcpu->kvm->mmu_lock); - if (!is_noslot_pfn(pfn) && mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, hva)) + if (!is_noslot_pfn(fault->pfn) && mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, fault->hva)) goto out_unlock; r = make_mmu_pages_available(vcpu); if (r) goto out_unlock; if (is_tdp_mmu_fault) - r = kvm_tdp_mmu_map(vcpu, gpa, error_code, map_writable, fault->max_level, - pfn, fault->prefault); + r = kvm_tdp_mmu_map(vcpu, gpa, error_code, fault->map_writable, fault->max_level, + fault->pfn, fault->prefault); else - r = __direct_map(vcpu, gpa, error_code, map_writable, fault->max_level, pfn, - fault->prefault, fault->is_tdp); + r = __direct_map(vcpu, gpa, error_code, fault->map_writable, fault->max_level, + fault->pfn, fault->prefault, fault->is_tdp); out_unlock: if (is_tdp_mmu_fault) read_unlock(&vcpu->kvm->mmu_lock); else write_unlock(&vcpu->kvm->mmu_lock); - kvm_release_pfn_clean(pfn); + kvm_release_pfn_clean(fault->pfn); return r; } diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index 44a19dde5e70..72f0b415be63 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -839,10 +839,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault u32 error_code = fault->error_code; struct guest_walker walker; int r; - kvm_pfn_t pfn; - hva_t hva; unsigned long mmu_seq; - bool map_writable, is_self_change_mapping; + bool is_self_change_mapping; pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code); WARN_ON_ONCE(fault->is_tdp); @@ -892,11 +890,10 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault mmu_seq = vcpu->kvm->mmu_notifier_seq; smp_rmb(); - if (kvm_faultin_pfn(vcpu, fault->prefault, fault->gfn, addr, &pfn, &hva, - fault->write, &map_writable, &r)) + if (kvm_faultin_pfn(vcpu, fault, &r)) return r; - if (handle_abnormal_pfn(vcpu, addr, fault->gfn, pfn, walker.pte_access, &r)) + if (handle_abnormal_pfn(vcpu, addr, fault->gfn, fault->pfn, walker.pte_access, &r)) return r; /* @@ -904,7 +901,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault * we will cache the incorrect access into mmio spte. 
*/ if (fault->write && !(walker.pte_access & ACC_WRITE_MASK) && - !is_cr0_wp(vcpu->arch.mmu) && !fault->user && !is_noslot_pfn(pfn)) { + !is_cr0_wp(vcpu->arch.mmu) && !fault->user && !is_noslot_pfn(fault->pfn)) { walker.pte_access |= ACC_WRITE_MASK; walker.pte_access &= ~ACC_USER_MASK; @@ -920,20 +917,20 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault r = RET_PF_RETRY; write_lock(&vcpu->kvm->mmu_lock); - if (!is_noslot_pfn(pfn) && mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, hva)) + if (!is_noslot_pfn(fault->pfn) && mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, fault->hva)) goto out_unlock; kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT); r = make_mmu_pages_available(vcpu); if (r) goto out_unlock; - r = FNAME(fetch)(vcpu, addr, &walker, error_code, fault->max_level, pfn, - map_writable, fault->prefault); + r = FNAME(fetch)(vcpu, addr, &walker, error_code, fault->max_level, fault->pfn, + fault->map_writable, fault->prefault); kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT); out_unlock: write_unlock(&vcpu->kvm->mmu_lock); - kvm_release_pfn_clean(pfn); + kvm_release_pfn_clean(fault->pfn); return r; } -- cgit v1.2.3 From 3a13f4fea3c156dcb8aecf9f1637d9a80a31c29d Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 6 Aug 2021 04:35:50 -0400 Subject: KVM: MMU: change handle_abnormal_pfn() arguments to kvm_page_fault Pass struct kvm_page_fault to handle_abnormal_pfn() instead of extracting the arguments from the struct. Suggested-by: Isaku Yamahata Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 18 +++++++++--------- arch/x86/kvm/mmu/paging_tmpl.h | 2 +- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index c2d2d019634b..6821d05c0557 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -3060,18 +3060,19 @@ static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn) return -EFAULT; } -static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn, - kvm_pfn_t pfn, unsigned int access, - int *ret_val) +static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, + unsigned int access, int *ret_val) { /* The pfn is invalid, report the error! */ - if (unlikely(is_error_pfn(pfn))) { - *ret_val = kvm_handle_bad_page(vcpu, gfn, pfn); + if (unlikely(is_error_pfn(fault->pfn))) { + *ret_val = kvm_handle_bad_page(vcpu, fault->gfn, fault->pfn); return true; } - if (unlikely(is_noslot_pfn(pfn))) { - vcpu_cache_mmio_info(vcpu, gva, gfn, + if (unlikely(is_noslot_pfn(fault->pfn))) { + gva_t gva = fault->is_tdp ? 0 : fault->addr; + + vcpu_cache_mmio_info(vcpu, gva, fault->gfn, access & shadow_mmio_access_mask); /* * If MMIO caching is disabled, emulate immediately without @@ -3975,8 +3976,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault if (kvm_faultin_pfn(vcpu, fault, &r)) return r; - if (handle_abnormal_pfn(vcpu, fault->is_tdp ? 
0 : gpa, - fault->gfn, fault->pfn, ACC_ALL, &r)) + if (handle_abnormal_pfn(vcpu, fault, ACC_ALL, &r)) return r; r = RET_PF_RETRY; diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index 72f0b415be63..0fa7a678b907 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -893,7 +893,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault if (kvm_faultin_pfn(vcpu, fault, &r)) return r; - if (handle_abnormal_pfn(vcpu, addr, fault->gfn, fault->pfn, walker.pte_access, &r)) + if (handle_abnormal_pfn(vcpu, fault, walker.pte_access, &r)) return r; /* -- cgit v1.2.3 From 43b74355ef8ba815b6065bf15d3661b2685337b8 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 6 Aug 2021 04:35:50 -0400 Subject: KVM: MMU: change __direct_map() arguments to kvm_page_fault Pass struct kvm_page_fault to __direct_map() instead of extracting the arguments from the struct. Suggested-by: Isaku Yamahata Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 32 +++++++++++++------------------- 1 file changed, 13 insertions(+), 19 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 6821d05c0557..c84e978d76b0 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -2982,34 +2982,29 @@ void disallowed_hugepage_adjust(u64 spte, gfn_t gfn, int cur_level, } } -static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code, - int map_writable, int max_level, kvm_pfn_t pfn, - bool prefault, bool is_tdp) +static int __direct_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) { bool nx_huge_page_workaround_enabled = is_nx_huge_page_enabled(); - bool write = error_code & PFERR_WRITE_MASK; - bool exec = error_code & PFERR_FETCH_MASK; - bool huge_page_disallowed = exec && nx_huge_page_workaround_enabled; + bool huge_page_disallowed = fault->exec && nx_huge_page_workaround_enabled; struct kvm_shadow_walk_iterator it; struct kvm_mmu_page *sp; int level, req_level, ret; - gfn_t gfn = gpa >> PAGE_SHIFT; - gfn_t base_gfn = gfn; + gfn_t base_gfn = fault->gfn; - level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn, + level = kvm_mmu_hugepage_adjust(vcpu, fault->gfn, fault->max_level, &fault->pfn, huge_page_disallowed, &req_level); - trace_kvm_mmu_spte_requested(gpa, level, pfn); - for_each_shadow_entry(vcpu, gpa, it) { + trace_kvm_mmu_spte_requested(fault->addr, level, fault->pfn); + for_each_shadow_entry(vcpu, fault->addr, it) { /* * We cannot overwrite existing page tables with an NX * large page, as the leaf could be executable. 
*/ if (nx_huge_page_workaround_enabled) - disallowed_hugepage_adjust(*it.sptep, gfn, it.level, - &pfn, &level); + disallowed_hugepage_adjust(*it.sptep, fault->gfn, it.level, + &fault->pfn, &level); - base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1); + base_gfn = fault->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1); if (it.level == level) break; @@ -3021,14 +3016,14 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code, it.level - 1, true, ACC_ALL); link_shadow_page(vcpu, it.sptep, sp); - if (is_tdp && huge_page_disallowed && + if (fault->is_tdp && huge_page_disallowed && req_level >= it.level) account_huge_nx_page(vcpu->kvm, sp); } ret = mmu_set_spte(vcpu, it.sptep, ACC_ALL, - write, level, base_gfn, pfn, prefault, - map_writable); + fault->write, level, base_gfn, fault->pfn, + fault->prefault, fault->map_writable); if (ret == RET_PF_SPURIOUS) return ret; @@ -3996,8 +3991,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault r = kvm_tdp_mmu_map(vcpu, gpa, error_code, fault->map_writable, fault->max_level, fault->pfn, fault->prefault); else - r = __direct_map(vcpu, gpa, error_code, fault->map_writable, fault->max_level, - fault->pfn, fault->prefault, fault->is_tdp); + r = __direct_map(vcpu, fault); out_unlock: if (is_tdp_mmu_fault) -- cgit v1.2.3 From 9c03b1821a89c1f18983a385c6542c267dbd8459 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 6 Aug 2021 04:35:50 -0400 Subject: KVM: MMU: change FNAME(fetch)() arguments to kvm_page_fault Pass struct kvm_page_fault to FNAME(fetch)() instead of extracting the arguments from the struct. Suggested-by: Isaku Yamahata Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/paging_tmpl.h | 52 ++++++++++++++++++------------------------ 1 file changed, 22 insertions(+), 30 deletions(-) diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index 0fa7a678b907..afd2ad8c5173 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -655,21 +655,18 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw, * If the guest tries to write a write-protected page, we need to * emulate this operation, return 1 to indicate this case. 
*/ -static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr, - struct guest_walker *gw, u32 error_code, - int max_level, kvm_pfn_t pfn, bool map_writable, - bool prefault) +static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, + struct guest_walker *gw) { bool nx_huge_page_workaround_enabled = is_nx_huge_page_enabled(); - bool write_fault = error_code & PFERR_WRITE_MASK; - bool exec = error_code & PFERR_FETCH_MASK; - bool huge_page_disallowed = exec && nx_huge_page_workaround_enabled; + bool huge_page_disallowed = fault->exec && nx_huge_page_workaround_enabled; struct kvm_mmu_page *sp = NULL; struct kvm_shadow_walk_iterator it; unsigned int direct_access, access; int top_level, level, req_level, ret; - gfn_t base_gfn = gw->gfn; + gfn_t base_gfn = fault->gfn; + WARN_ON_ONCE(gw->gfn != base_gfn); direct_access = gw->pte_access; top_level = vcpu->arch.mmu->root_level; @@ -687,7 +684,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr, if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa))) goto out_gpte_changed; - for (shadow_walk_init(&it, vcpu, addr); + for (shadow_walk_init(&it, vcpu, fault->addr); shadow_walk_okay(&it) && it.level > gw->level; shadow_walk_next(&it)) { gfn_t table_gfn; @@ -699,7 +696,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr, if (!is_shadow_present_pte(*it.sptep)) { table_gfn = gw->table_gfn[it.level - 2]; access = gw->pt_access[it.level - 2]; - sp = kvm_mmu_get_page(vcpu, table_gfn, addr, + sp = kvm_mmu_get_page(vcpu, table_gfn, fault->addr, it.level-1, false, access); /* * We must synchronize the pagetable before linking it @@ -733,10 +730,10 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr, link_shadow_page(vcpu, it.sptep, sp); } - level = kvm_mmu_hugepage_adjust(vcpu, gw->gfn, max_level, &pfn, + level = kvm_mmu_hugepage_adjust(vcpu, gw->gfn, fault->max_level, &fault->pfn, huge_page_disallowed, &req_level); - trace_kvm_mmu_spte_requested(addr, gw->level, pfn); + trace_kvm_mmu_spte_requested(fault->addr, gw->level, fault->pfn); for (; shadow_walk_okay(&it); shadow_walk_next(&it)) { clear_sp_write_flooding_count(it.sptep); @@ -746,10 +743,10 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr, * large page, as the leaf could be executable. 
*/ if (nx_huge_page_workaround_enabled) - disallowed_hugepage_adjust(*it.sptep, gw->gfn, it.level, - &pfn, &level); + disallowed_hugepage_adjust(*it.sptep, fault->gfn, it.level, + &fault->pfn, &level); - base_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1); + base_gfn = fault->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1); if (it.level == level) break; @@ -758,7 +755,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr, drop_large_spte(vcpu, it.sptep); if (!is_shadow_present_pte(*it.sptep)) { - sp = kvm_mmu_get_page(vcpu, base_gfn, addr, + sp = kvm_mmu_get_page(vcpu, base_gfn, fault->addr, it.level - 1, true, direct_access); link_shadow_page(vcpu, it.sptep, sp); if (huge_page_disallowed && req_level >= it.level) @@ -766,8 +763,9 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr, } } - ret = mmu_set_spte(vcpu, it.sptep, gw->pte_access, write_fault, - it.level, base_gfn, pfn, prefault, map_writable); + ret = mmu_set_spte(vcpu, it.sptep, gw->pte_access, fault->write, + it.level, base_gfn, fault->pfn, fault->prefault, + fault->map_writable); if (ret == RET_PF_SPURIOUS) return ret; @@ -835,26 +833,21 @@ FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu, */ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) { - gpa_t addr = fault->addr; - u32 error_code = fault->error_code; struct guest_walker walker; int r; unsigned long mmu_seq; bool is_self_change_mapping; - pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code); + pgprintk("%s: addr %lx err %x\n", __func__, fault->addr, fault->error_code); WARN_ON_ONCE(fault->is_tdp); /* + * Look up the guest pte for the faulting address. * If PFEC.RSVD is set, this is a shadow page fault. * The bit needs to be cleared before walking guest page tables. */ - error_code &= ~PFERR_RSVD_MASK; - - /* - * Look up the guest pte for the faulting address. - */ - r = FNAME(walk_addr)(&walker, vcpu, addr, error_code); + r = FNAME(walk_addr)(&walker, vcpu, fault->addr, + fault->error_code & ~PFERR_RSVD_MASK); /* * The page is not mapped by the guest. Let the guest handle it. @@ -869,7 +862,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault fault->gfn = walker.gfn; if (page_fault_handle_page_track(vcpu, fault)) { - shadow_page_table_clear_flood(vcpu, addr); + shadow_page_table_clear_flood(vcpu, fault->addr); return RET_PF_EMULATE; } @@ -924,8 +917,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault r = make_mmu_pages_available(vcpu); if (r) goto out_unlock; - r = FNAME(fetch)(vcpu, addr, &walker, error_code, fault->max_level, fault->pfn, - fault->map_writable, fault->prefault); + r = FNAME(fetch)(vcpu, fault, &walker); kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT); out_unlock: -- cgit v1.2.3 From 2f6305dd567695a334599e30f69ae44b95083fea Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 6 Aug 2021 04:35:50 -0400 Subject: KVM: MMU: change kvm_tdp_mmu_map() arguments to kvm_page_fault Pass struct kvm_page_fault to kvm_tdp_mmu_map() instead of extracting the arguments from the struct. 
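As with the earlier conversions, the struct also lets intermediate results (for example an adjusted mapping level) be written back and picked up by later stages without widening any signature. A rough, self-contained sketch of that flow with invented names (not the kernel code):

#include <stdint.h>
#include <stdio.h>

struct fault_ctx {
        uint64_t addr;
        uint64_t gfn;
        uint8_t max_level;      /* input: largest mapping level allowed */
        uint8_t goal_level;     /* output: level the mapping step will use */
};

/* stage 1: derive the frame number from the faulting address */
static void stage_resolve(struct fault_ctx *f)
{
        f->gfn = f->addr >> 12;
}

/* stage 2: clamp the level and store the result back into the context */
static void stage_level_adjust(struct fault_ctx *f, uint8_t host_level)
{
        f->goal_level = host_level < f->max_level ? host_level : f->max_level;
}

/* stage 3: consume the accumulated fields */
static void stage_map(const struct fault_ctx *f)
{
        printf("map gfn %#llx at level %u\n",
               (unsigned long long)f->gfn, (unsigned)f->goal_level);
}

int main(void)
{
        struct fault_ctx f = { .addr = 0x7f0000123000ull, .max_level = 3 };

        stage_resolve(&f);
        stage_level_adjust(&f, 2);
        stage_map(&f);
        return 0;
}

Each stage reads and updates the same context, which mirrors how fields such as max_level, goal_level and pfn travel through the page fault path after this series.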
Suggested-by: Isaku Yamahata Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 3 +-- arch/x86/kvm/mmu/tdp_mmu.c | 23 +++++++++-------------- arch/x86/kvm/mmu/tdp_mmu.h | 4 +--- 3 files changed, 11 insertions(+), 19 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index c84e978d76b0..b2020b481db2 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -3988,8 +3988,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault goto out_unlock; if (is_tdp_mmu_fault) - r = kvm_tdp_mmu_map(vcpu, gpa, error_code, fault->map_writable, fault->max_level, - fault->pfn, fault->prefault); + r = kvm_tdp_mmu_map(vcpu, fault); else r = __direct_map(vcpu, fault); diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index 7a5a24ca50e4..4a5bb0b5b639 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -985,35 +985,30 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, int write, * Handle a TDP page fault (NPT/EPT violation/misconfiguration) by installing * page tables and SPTEs to translate the faulting guest physical address. */ -int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code, - int map_writable, int max_level, kvm_pfn_t pfn, - bool prefault) +int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) { bool nx_huge_page_workaround_enabled = is_nx_huge_page_enabled(); - bool write = error_code & PFERR_WRITE_MASK; - bool exec = error_code & PFERR_FETCH_MASK; - bool huge_page_disallowed = exec && nx_huge_page_workaround_enabled; + bool huge_page_disallowed = fault->exec && nx_huge_page_workaround_enabled; struct kvm_mmu *mmu = vcpu->arch.mmu; struct tdp_iter iter; struct kvm_mmu_page *sp; u64 *child_pt; u64 new_spte; int ret; - gfn_t gfn = gpa >> PAGE_SHIFT; int level; int req_level; - level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn, + level = kvm_mmu_hugepage_adjust(vcpu, fault->gfn, fault->max_level, &fault->pfn, huge_page_disallowed, &req_level); - trace_kvm_mmu_spte_requested(gpa, level, pfn); + trace_kvm_mmu_spte_requested(fault->addr, level, fault->pfn); rcu_read_lock(); - tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) { + tdp_mmu_for_each_pte(iter, mmu, fault->gfn, fault->gfn + 1) { if (nx_huge_page_workaround_enabled) - disallowed_hugepage_adjust(iter.old_spte, gfn, - iter.level, &pfn, &level); + disallowed_hugepage_adjust(iter.old_spte, fault->gfn, + iter.level, &fault->pfn, &level); if (iter.level == level) break; @@ -1069,8 +1064,8 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code, return RET_PF_RETRY; } - ret = tdp_mmu_map_handle_target_level(vcpu, write, map_writable, &iter, - pfn, prefault); + ret = tdp_mmu_map_handle_target_level(vcpu, fault->write, fault->map_writable, &iter, + fault->pfn, fault->prefault); rcu_read_unlock(); return ret; diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h index 358f447d4012..ceaf7ff3ca7c 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.h +++ b/arch/x86/kvm/mmu/tdp_mmu.h @@ -48,9 +48,7 @@ void kvm_tdp_mmu_zap_all(struct kvm *kvm); void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm); void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm); -int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code, - int map_writable, int max_level, kvm_pfn_t pfn, - bool prefault); +int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault); bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range, bool flush); -- cgit v1.2.3 From 
cdc47767a03922a6497ff3ca81f4066991aa2fd1 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 6 Aug 2021 04:35:50 -0400 Subject: KVM: MMU: change tdp_mmu_map_handle_target_level() arguments to kvm_page_fault Pass struct kvm_page_fault to tdp_mmu_map_handle_target_level() instead of extracting the arguments from the struct. Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/tdp_mmu.c | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index 4a5bb0b5b639..6cfba8c28ea2 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -929,21 +929,20 @@ void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm) * Installs a last-level SPTE to handle a TDP page fault. * (NPT/EPT violation/misconfiguration) */ -static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, int write, - int map_writable, - struct tdp_iter *iter, - kvm_pfn_t pfn, bool prefault) +static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, + struct kvm_page_fault *fault, + struct tdp_iter *iter) { u64 new_spte; int ret = RET_PF_FIXED; int make_spte_ret = 0; - if (unlikely(is_noslot_pfn(pfn))) + if (unlikely(is_noslot_pfn(fault->pfn))) new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL); else make_spte_ret = make_spte(vcpu, ACC_ALL, iter->level, iter->gfn, - pfn, iter->old_spte, prefault, true, - map_writable, !shadow_accessed_mask, + fault->pfn, iter->old_spte, fault->prefault, true, + fault->map_writable, !shadow_accessed_mask, &new_spte); if (new_spte == iter->old_spte) @@ -957,7 +956,7 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, int write, * the vCPU would have the same fault again. */ if (make_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) { - if (write) + if (fault->write) ret = RET_PF_EMULATE; } @@ -1064,8 +1063,7 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) return RET_PF_RETRY; } - ret = tdp_mmu_map_handle_target_level(vcpu, fault->write, fault->map_writable, &iter, - fault->pfn, fault->prefault); + ret = tdp_mmu_map_handle_target_level(vcpu, fault, &iter); rcu_read_unlock(); return ret; -- cgit v1.2.3 From 3c8ad5a675d9aaf6b8f99bf8b2879bab75af26c3 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 6 Aug 2021 04:35:50 -0400 Subject: KVM: MMU: change fast_page_fault() arguments to kvm_page_fault Pass struct kvm_page_fault to fast_page_fault() instead of extracting the arguments from the struct. Suggested-by: Isaku Yamahata Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 39 +++++++++++++++++---------------------- 1 file changed, 17 insertions(+), 22 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index b2020b481db2..36cbe5cba085 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -3083,18 +3083,17 @@ static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fa return false; } -static bool page_fault_can_be_fast(u32 error_code) +static bool page_fault_can_be_fast(struct kvm_page_fault *fault) { /* * Do not fix the mmio spte with invalid generation number which * need to be updated by slow page fault path. 
*/ - if (unlikely(error_code & PFERR_RSVD_MASK)) + if (fault->rsvd) return false; /* See if the page fault is due to an NX violation */ - if (unlikely(((error_code & (PFERR_FETCH_MASK | PFERR_PRESENT_MASK)) - == (PFERR_FETCH_MASK | PFERR_PRESENT_MASK)))) + if (unlikely(fault->exec && fault->present)) return false; /* @@ -3111,9 +3110,7 @@ static bool page_fault_can_be_fast(u32 error_code) * accesses to a present page. */ - return shadow_acc_track_mask != 0 || - ((error_code & (PFERR_WRITE_MASK | PFERR_PRESENT_MASK)) - == (PFERR_WRITE_MASK | PFERR_PRESENT_MASK)); + return shadow_acc_track_mask != 0 || (fault->write && fault->present); } /* @@ -3155,12 +3152,12 @@ fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, return true; } -static bool is_access_allowed(u32 fault_err_code, u64 spte) +static bool is_access_allowed(struct kvm_page_fault *fault, u64 spte) { - if (fault_err_code & PFERR_FETCH_MASK) + if (fault->exec) return is_executable_pte(spte); - if (fault_err_code & PFERR_WRITE_MASK) + if (fault->write) return is_writable_pte(spte); /* Fault was on Read access */ @@ -3193,7 +3190,7 @@ static u64 *fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, gpa_t gpa, u64 *spte) /* * Returns one of RET_PF_INVALID, RET_PF_FIXED or RET_PF_SPURIOUS. */ -static int fast_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code) +static int fast_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) { struct kvm_mmu_page *sp; int ret = RET_PF_INVALID; @@ -3201,7 +3198,7 @@ static int fast_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code) u64 *sptep = NULL; uint retry_count = 0; - if (!page_fault_can_be_fast(error_code)) + if (!page_fault_can_be_fast(fault)) return ret; walk_shadow_page_lockless_begin(vcpu); @@ -3210,9 +3207,9 @@ static int fast_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code) u64 new_spte; if (is_tdp_mmu(vcpu->arch.mmu)) - sptep = kvm_tdp_mmu_fast_pf_get_last_sptep(vcpu, gpa, &spte); + sptep = kvm_tdp_mmu_fast_pf_get_last_sptep(vcpu, fault->addr, &spte); else - sptep = fast_pf_get_last_sptep(vcpu, gpa, &spte); + sptep = fast_pf_get_last_sptep(vcpu, fault->addr, &spte); if (!is_shadow_present_pte(spte)) break; @@ -3231,7 +3228,7 @@ static int fast_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code) * Need not check the access of upper level table entries since * they are always ACC_ALL. */ - if (is_access_allowed(error_code, spte)) { + if (is_access_allowed(fault, spte)) { ret = RET_PF_SPURIOUS; break; } @@ -3246,7 +3243,7 @@ static int fast_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code) * be removed in the fast path only if the SPTE was * write-protected for dirty-logging or access tracking. 
*/ - if ((error_code & PFERR_WRITE_MASK) && + if (fault->write && spte_can_locklessly_be_made_writable(spte)) { new_spte |= PT_WRITABLE_MASK; @@ -3267,7 +3264,7 @@ static int fast_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code) /* Verify that the fault can be handled in the fast path */ if (new_spte == spte || - !is_access_allowed(error_code, new_spte)) + !is_access_allowed(fault, new_spte)) break; /* @@ -3288,7 +3285,7 @@ static int fast_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code) } while (true); - trace_fast_page_fault(vcpu, gpa, error_code, sptep, spte, ret); + trace_fast_page_fault(vcpu, fault->addr, fault->error_code, sptep, spte, ret); walk_shadow_page_lockless_end(vcpu); return ret; @@ -3946,18 +3943,16 @@ out_retry: static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) { - gpa_t gpa = fault->addr; - u32 error_code = fault->error_code; bool is_tdp_mmu_fault = is_tdp_mmu(vcpu->arch.mmu); unsigned long mmu_seq; int r; - fault->gfn = gpa >> PAGE_SHIFT; + fault->gfn = fault->addr >> PAGE_SHIFT; if (page_fault_handle_page_track(vcpu, fault)) return RET_PF_EMULATE; - r = fast_page_fault(vcpu, gpa, error_code); + r = fast_page_fault(vcpu, fault); if (r != RET_PF_INVALID) return r; -- cgit v1.2.3 From 73a3c659478a2eae331b63ce1d61fd0a43fe7d8c Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Sat, 7 Aug 2021 09:21:53 -0400 Subject: KVM: MMU: change kvm_mmu_hugepage_adjust() arguments to kvm_page_fault Pass struct kvm_page_fault to kvm_mmu_hugepage_adjust() instead of extracting the arguments from the struct; the results are also stored in the struct, so the callers are adjusted consequently. Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu.h | 35 ++++++++++++++++++++++-- arch/x86/kvm/mmu/mmu.c | 60 +++++++++++++++++++---------------------- arch/x86/kvm/mmu/mmu_internal.h | 12 ++------- arch/x86/kvm/mmu/paging_tmpl.h | 16 +++++------ arch/x86/kvm/mmu/tdp_mmu.c | 21 ++++++--------- 5 files changed, 77 insertions(+), 67 deletions(-) diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index 6697571197a5..01a4d1bc5053 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -127,12 +127,34 @@ struct kvm_page_fault { const bool rsvd; const bool user; - /* Derived from mmu. */ + /* Derived from mmu and global state. */ const bool is_tdp; + const bool nx_huge_page_workaround_enabled; - /* Input to FNAME(fetch), __direct_map and kvm_tdp_mmu_map. */ + /* + * Whether a >4KB mapping can be created or is forbidden due to NX + * hugepages. + */ + bool huge_page_disallowed; + + /* + * Maximum page size that can be created for this fault; input to + * FNAME(fetch), __direct_map and kvm_tdp_mmu_map. + */ u8 max_level; + /* + * Page size that can be created based on the max_level and the + * page size used by the host mapping. + */ + u8 req_level; + + /* + * Page size that will be created based on the req_level and + * huge_page_disallowed. + */ + u8 goal_level; + /* Shifted addr, or result of guest page table walk if addr is a gva. 
*/ gfn_t gfn; @@ -144,6 +166,12 @@ struct kvm_page_fault { int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault); +extern int nx_huge_pages; +static inline bool is_nx_huge_page_enabled(void) +{ + return READ_ONCE(nx_huge_pages); +} + static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u32 err, bool prefault) { @@ -157,8 +185,11 @@ static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, .user = err & PFERR_USER_MASK, .prefault = prefault, .is_tdp = likely(vcpu->arch.mmu->page_fault == kvm_tdp_page_fault), + .nx_huge_page_workaround_enabled = is_nx_huge_page_enabled(), .max_level = KVM_MAX_HUGEPAGE_LEVEL, + .req_level = PG_LEVEL_4K, + .goal_level = PG_LEVEL_4K, }; #ifdef CONFIG_RETPOLINE if (fault.is_tdp) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 36cbe5cba085..877d0bda0f5e 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -2920,48 +2920,45 @@ int kvm_mmu_max_mapping_level(struct kvm *kvm, return min(host_level, max_level); } -int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn, - int max_level, kvm_pfn_t *pfnp, - bool huge_page_disallowed, int *req_level) +void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) { struct kvm_memory_slot *slot; - kvm_pfn_t pfn = *pfnp; kvm_pfn_t mask; - int level; - *req_level = PG_LEVEL_4K; + fault->huge_page_disallowed = fault->exec && fault->nx_huge_page_workaround_enabled; - if (unlikely(max_level == PG_LEVEL_4K)) - return PG_LEVEL_4K; + if (unlikely(fault->max_level == PG_LEVEL_4K)) + return; - if (is_error_noslot_pfn(pfn) || kvm_is_reserved_pfn(pfn)) - return PG_LEVEL_4K; + if (is_error_noslot_pfn(fault->pfn) || kvm_is_reserved_pfn(fault->pfn)) + return; - slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, true); + slot = gfn_to_memslot_dirty_bitmap(vcpu, fault->gfn, true); if (!slot) - return PG_LEVEL_4K; + return; /* * Enforce the iTLB multihit workaround after capturing the requested * level, which will be used to do precise, accurate accounting. */ - *req_level = level = kvm_mmu_max_mapping_level(vcpu->kvm, slot, gfn, pfn, max_level); - if (level == PG_LEVEL_4K || huge_page_disallowed) - return PG_LEVEL_4K; + fault->req_level = kvm_mmu_max_mapping_level(vcpu->kvm, slot, + fault->gfn, fault->pfn, + fault->max_level); + if (fault->req_level == PG_LEVEL_4K || fault->huge_page_disallowed) + return; /* * mmu_notifier_retry() was successful and mmu_lock is held, so * the pmd can't be split from under us. 
*/ - mask = KVM_PAGES_PER_HPAGE(level) - 1; - VM_BUG_ON((gfn & mask) != (pfn & mask)); - *pfnp = pfn & ~mask; - - return level; + fault->goal_level = fault->req_level; + mask = KVM_PAGES_PER_HPAGE(fault->goal_level) - 1; + VM_BUG_ON((fault->gfn & mask) != (fault->pfn & mask)); + fault->pfn &= ~mask; } void disallowed_hugepage_adjust(u64 spte, gfn_t gfn, int cur_level, - kvm_pfn_t *pfnp, int *goal_levelp) + kvm_pfn_t *pfnp, u8 *goal_levelp) { int level = *goal_levelp; @@ -2984,28 +2981,25 @@ void disallowed_hugepage_adjust(u64 spte, gfn_t gfn, int cur_level, static int __direct_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) { - bool nx_huge_page_workaround_enabled = is_nx_huge_page_enabled(); - bool huge_page_disallowed = fault->exec && nx_huge_page_workaround_enabled; struct kvm_shadow_walk_iterator it; struct kvm_mmu_page *sp; - int level, req_level, ret; + int ret; gfn_t base_gfn = fault->gfn; - level = kvm_mmu_hugepage_adjust(vcpu, fault->gfn, fault->max_level, &fault->pfn, - huge_page_disallowed, &req_level); + kvm_mmu_hugepage_adjust(vcpu, fault); - trace_kvm_mmu_spte_requested(fault->addr, level, fault->pfn); + trace_kvm_mmu_spte_requested(fault->addr, fault->goal_level, fault->pfn); for_each_shadow_entry(vcpu, fault->addr, it) { /* * We cannot overwrite existing page tables with an NX * large page, as the leaf could be executable. */ - if (nx_huge_page_workaround_enabled) + if (fault->nx_huge_page_workaround_enabled) disallowed_hugepage_adjust(*it.sptep, fault->gfn, it.level, - &fault->pfn, &level); + &fault->pfn, &fault->goal_level); base_gfn = fault->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1); - if (it.level == level) + if (it.level == fault->goal_level) break; drop_large_spte(vcpu, it.sptep); @@ -3016,13 +3010,13 @@ static int __direct_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) it.level - 1, true, ACC_ALL); link_shadow_page(vcpu, it.sptep, sp); - if (fault->is_tdp && huge_page_disallowed && - req_level >= it.level) + if (fault->is_tdp && fault->huge_page_disallowed && + fault->req_level >= it.level) account_huge_nx_page(vcpu->kvm, sp); } ret = mmu_set_spte(vcpu, it.sptep, ACC_ALL, - fault->write, level, base_gfn, fault->pfn, + fault->write, fault->goal_level, base_gfn, fault->pfn, fault->prefault, fault->map_writable); if (ret == RET_PF_SPURIOUS) return ret; diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h index 2ba12ef46cb0..ae0c7bc3b19b 100644 --- a/arch/x86/kvm/mmu/mmu_internal.h +++ b/arch/x86/kvm/mmu/mmu_internal.h @@ -118,12 +118,6 @@ static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu) kvm_x86_ops.cpu_dirty_log_size; } -extern int nx_huge_pages; -static inline bool is_nx_huge_page_enabled(void) -{ - return READ_ONCE(nx_huge_pages); -} - int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync, bool speculative); @@ -164,11 +158,9 @@ enum { int kvm_mmu_max_mapping_level(struct kvm *kvm, const struct kvm_memory_slot *slot, gfn_t gfn, kvm_pfn_t pfn, int max_level); -int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn, - int max_level, kvm_pfn_t *pfnp, - bool huge_page_disallowed, int *req_level); +void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault); void disallowed_hugepage_adjust(u64 spte, gfn_t gfn, int cur_level, - kvm_pfn_t *pfnp, int *goal_levelp); + kvm_pfn_t *pfnp, u8 *goal_levelp); void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc); diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index 
afd2ad8c5173..20f616963ff4 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -658,12 +658,10 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw, static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, struct guest_walker *gw) { - bool nx_huge_page_workaround_enabled = is_nx_huge_page_enabled(); - bool huge_page_disallowed = fault->exec && nx_huge_page_workaround_enabled; struct kvm_mmu_page *sp = NULL; struct kvm_shadow_walk_iterator it; unsigned int direct_access, access; - int top_level, level, req_level, ret; + int top_level, ret; gfn_t base_gfn = fault->gfn; WARN_ON_ONCE(gw->gfn != base_gfn); @@ -730,8 +728,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, link_shadow_page(vcpu, it.sptep, sp); } - level = kvm_mmu_hugepage_adjust(vcpu, gw->gfn, fault->max_level, &fault->pfn, - huge_page_disallowed, &req_level); + kvm_mmu_hugepage_adjust(vcpu, fault); trace_kvm_mmu_spte_requested(fault->addr, gw->level, fault->pfn); @@ -742,12 +739,12 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, * We cannot overwrite existing page tables with an NX * large page, as the leaf could be executable. */ - if (nx_huge_page_workaround_enabled) + if (fault->nx_huge_page_workaround_enabled) disallowed_hugepage_adjust(*it.sptep, fault->gfn, it.level, - &fault->pfn, &level); + &fault->pfn, &fault->goal_level); base_gfn = fault->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1); - if (it.level == level) + if (it.level == fault->goal_level) break; validate_direct_spte(vcpu, it.sptep, direct_access); @@ -758,7 +755,8 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, sp = kvm_mmu_get_page(vcpu, base_gfn, fault->addr, it.level - 1, true, direct_access); link_shadow_page(vcpu, it.sptep, sp); - if (huge_page_disallowed && req_level >= it.level) + if (fault->huge_page_disallowed && + fault->req_level >= it.level) account_huge_nx_page(vcpu->kvm, sp); } } diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index 6cfba8c28ea2..b48256b88930 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -986,30 +986,25 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, */ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) { - bool nx_huge_page_workaround_enabled = is_nx_huge_page_enabled(); - bool huge_page_disallowed = fault->exec && nx_huge_page_workaround_enabled; struct kvm_mmu *mmu = vcpu->arch.mmu; struct tdp_iter iter; struct kvm_mmu_page *sp; u64 *child_pt; u64 new_spte; int ret; - int level; - int req_level; - level = kvm_mmu_hugepage_adjust(vcpu, fault->gfn, fault->max_level, &fault->pfn, - huge_page_disallowed, &req_level); + kvm_mmu_hugepage_adjust(vcpu, fault); - trace_kvm_mmu_spte_requested(fault->addr, level, fault->pfn); + trace_kvm_mmu_spte_requested(fault->addr, fault->goal_level, fault->pfn); rcu_read_lock(); tdp_mmu_for_each_pte(iter, mmu, fault->gfn, fault->gfn + 1) { - if (nx_huge_page_workaround_enabled) + if (fault->nx_huge_page_workaround_enabled) disallowed_hugepage_adjust(iter.old_spte, fault->gfn, - iter.level, &fault->pfn, &level); + iter.level, &fault->pfn, &fault->goal_level); - if (iter.level == level) + if (iter.level == fault->goal_level) break; /* @@ -1047,8 +1042,8 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) if (tdp_mmu_set_spte_atomic_no_dirty_log(vcpu->kvm, &iter, new_spte)) { tdp_mmu_link_page(vcpu->kvm, sp, - 
huge_page_disallowed && - req_level >= iter.level); + fault->huge_page_disallowed && + fault->req_level >= iter.level); trace_kvm_mmu_get_page(sp, true); } else { @@ -1058,7 +1053,7 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) } } - if (iter.level != level) { + if (iter.level != fault->goal_level) { rcu_read_unlock(); return RET_PF_RETRY; } -- cgit v1.2.3 From 536f0e6ace95aa8d7b6d5522f0d56ae34e9fc39c Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 6 Aug 2021 04:35:50 -0400 Subject: KVM: MMU: change disallowed_hugepage_adjust() arguments to kvm_page_fault Pass struct kvm_page_fault to disallowed_hugepage_adjust() instead of extracting the arguments from the struct. Tweak a bit the conditions to avoid long lines. Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 19 ++++++++----------- arch/x86/kvm/mmu/mmu_internal.h | 3 +-- arch/x86/kvm/mmu/paging_tmpl.h | 3 +-- arch/x86/kvm/mmu/tdp_mmu.c | 3 +-- 4 files changed, 11 insertions(+), 17 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 877d0bda0f5e..7491dc685842 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -2957,12 +2957,10 @@ void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault fault->pfn &= ~mask; } -void disallowed_hugepage_adjust(u64 spte, gfn_t gfn, int cur_level, - kvm_pfn_t *pfnp, u8 *goal_levelp) +void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level) { - int level = *goal_levelp; - - if (cur_level == level && level > PG_LEVEL_4K && + if (cur_level > PG_LEVEL_4K && + cur_level == fault->goal_level && is_shadow_present_pte(spte) && !is_large_pte(spte)) { /* @@ -2972,10 +2970,10 @@ void disallowed_hugepage_adjust(u64 spte, gfn_t gfn, int cur_level, * patching back for them into pfn the next 9 bits of * the address. */ - u64 page_mask = KVM_PAGES_PER_HPAGE(level) - - KVM_PAGES_PER_HPAGE(level - 1); - *pfnp |= gfn & page_mask; - (*goal_levelp)--; + u64 page_mask = KVM_PAGES_PER_HPAGE(cur_level) - + KVM_PAGES_PER_HPAGE(cur_level - 1); + fault->pfn |= fault->gfn & page_mask; + fault->goal_level--; } } @@ -2995,8 +2993,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) * large page, as the leaf could be executable. 
*/ if (fault->nx_huge_page_workaround_enabled) - disallowed_hugepage_adjust(*it.sptep, fault->gfn, it.level, - &fault->pfn, &fault->goal_level); + disallowed_hugepage_adjust(fault, *it.sptep, it.level); base_gfn = fault->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1); if (it.level == fault->goal_level) diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h index ae0c7bc3b19b..f0295ad51f69 100644 --- a/arch/x86/kvm/mmu/mmu_internal.h +++ b/arch/x86/kvm/mmu/mmu_internal.h @@ -159,8 +159,7 @@ int kvm_mmu_max_mapping_level(struct kvm *kvm, const struct kvm_memory_slot *slot, gfn_t gfn, kvm_pfn_t pfn, int max_level); void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault); -void disallowed_hugepage_adjust(u64 spte, gfn_t gfn, int cur_level, - kvm_pfn_t *pfnp, u8 *goal_levelp); +void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level); void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc); diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index 20f616963ff4..4a263f4511a5 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -740,8 +740,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, * large page, as the leaf could be executable. */ if (fault->nx_huge_page_workaround_enabled) - disallowed_hugepage_adjust(*it.sptep, fault->gfn, it.level, - &fault->pfn, &fault->goal_level); + disallowed_hugepage_adjust(fault, *it.sptep, it.level); base_gfn = fault->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1); if (it.level == fault->goal_level) diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index b48256b88930..737af596adaf 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -1001,8 +1001,7 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) tdp_mmu_for_each_pte(iter, mmu, fault->gfn, fault->gfn + 1) { if (fault->nx_huge_page_workaround_enabled) - disallowed_hugepage_adjust(iter.old_spte, fault->gfn, - iter.level, &fault->pfn, &fault->goal_level); + disallowed_hugepage_adjust(fault, iter.old_spte, iter.level); if (iter.level == fault->goal_level) break; -- cgit v1.2.3 From f0066d94c92dc5cf7f1a272a1bd324b0fc575292 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 6 Aug 2021 04:35:50 -0400 Subject: KVM: MMU: change tracepoints arguments to kvm_page_fault Pass struct kvm_page_fault to tracepoints instead of extracting the arguments from the struct. This also lets the kvm_mmu_spte_requested tracepoint pick the gfn directly from fault->gfn, instead of using the address. 
Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 4 ++-- arch/x86/kvm/mmu/mmutrace.h | 18 +++++++++--------- arch/x86/kvm/mmu/paging_tmpl.h | 2 +- arch/x86/kvm/mmu/tdp_mmu.c | 2 +- 4 files changed, 13 insertions(+), 13 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 7491dc685842..5ba0a844f576 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -2986,7 +2986,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) kvm_mmu_hugepage_adjust(vcpu, fault); - trace_kvm_mmu_spte_requested(fault->addr, fault->goal_level, fault->pfn); + trace_kvm_mmu_spte_requested(fault); for_each_shadow_entry(vcpu, fault->addr, it) { /* * We cannot overwrite existing page tables with an NX @@ -3276,7 +3276,7 @@ static int fast_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) } while (true); - trace_fast_page_fault(vcpu, fault->addr, fault->error_code, sptep, spte, ret); + trace_fast_page_fault(vcpu, fault, sptep, spte, ret); walk_shadow_page_lockless_end(vcpu); return ret; diff --git a/arch/x86/kvm/mmu/mmutrace.h b/arch/x86/kvm/mmu/mmutrace.h index 2924a4081a19..b8151bbca36a 100644 --- a/arch/x86/kvm/mmu/mmutrace.h +++ b/arch/x86/kvm/mmu/mmutrace.h @@ -252,9 +252,9 @@ TRACE_EVENT( TRACE_EVENT( fast_page_fault, - TP_PROTO(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u32 error_code, + TP_PROTO(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, u64 *sptep, u64 old_spte, int ret), - TP_ARGS(vcpu, cr2_or_gpa, error_code, sptep, old_spte, ret), + TP_ARGS(vcpu, fault, sptep, old_spte, ret), TP_STRUCT__entry( __field(int, vcpu_id) @@ -268,8 +268,8 @@ TRACE_EVENT( TP_fast_assign( __entry->vcpu_id = vcpu->vcpu_id; - __entry->cr2_or_gpa = cr2_or_gpa; - __entry->error_code = error_code; + __entry->cr2_or_gpa = fault->addr; + __entry->error_code = fault->error_code; __entry->sptep = sptep; __entry->old_spte = old_spte; __entry->new_spte = *sptep; @@ -367,8 +367,8 @@ TRACE_EVENT( TRACE_EVENT( kvm_mmu_spte_requested, - TP_PROTO(gpa_t addr, int level, kvm_pfn_t pfn), - TP_ARGS(addr, level, pfn), + TP_PROTO(struct kvm_page_fault *fault), + TP_ARGS(fault), TP_STRUCT__entry( __field(u64, gfn) @@ -377,9 +377,9 @@ TRACE_EVENT( ), TP_fast_assign( - __entry->gfn = addr >> PAGE_SHIFT; - __entry->pfn = pfn | (__entry->gfn & (KVM_PAGES_PER_HPAGE(level) - 1)); - __entry->level = level; + __entry->gfn = fault->gfn; + __entry->pfn = fault->pfn | (fault->gfn & (KVM_PAGES_PER_HPAGE(fault->goal_level) - 1)); + __entry->level = fault->goal_level; ), TP_printk("gfn %llx pfn %llx level %d", diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index 4a263f4511a5..6bc0dbc0baff 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -730,7 +730,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, kvm_mmu_hugepage_adjust(vcpu, fault); - trace_kvm_mmu_spte_requested(fault->addr, gw->level, fault->pfn); + trace_kvm_mmu_spte_requested(fault); for (; shadow_walk_okay(&it); shadow_walk_next(&it)) { clear_sp_write_flooding_count(it.sptep); diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index 737af596adaf..3bf85a8c7d15 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -995,7 +995,7 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) kvm_mmu_hugepage_adjust(vcpu, fault); - trace_kvm_mmu_spte_requested(fault->addr, fault->goal_level, fault->pfn); + trace_kvm_mmu_spte_requested(fault); rcu_read_lock(); -- cgit v1.2.3 
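The recurring pattern in the patches above is a parameter object: the error code and the values derived from it are decoded once into struct kvm_page_fault, and the struct is then passed down the call chain instead of loose (gpa, error_code, pfn, map_writable, prefault) arguments. Below is a minimal, self-contained C sketch of that pattern. It is an illustration only: the simplified fault struct, the bit predicates, and fault_can_be_fast() are assumptions modelled loosely on the page_fault_can_be_fast() hunk earlier in this series, with the access-tracking case omitted; none of it is kernel code from these patches.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for struct kvm_page_fault: decode once, pass the struct around. */
struct page_fault {
        uint64_t addr;     /* faulting guest physical address */
        uint64_t gfn;      /* derived: addr >> 12, filled in once by the handler */
        bool write;
        bool exec;
        bool present;
};

/*
 * Modelled on page_fault_can_be_fast(): consume predicates from the struct
 * rather than re-decoding an error code at every call site. The
 * access-tracking (shadow_acc_track_mask) case of the real function is
 * deliberately left out of this sketch.
 */
static bool fault_can_be_fast(const struct page_fault *fault)
{
        /* An instruction fetch from a present SPTE is an NX violation; not fast-path material. */
        if (fault->exec && fault->present)
                return false;

        /* Only write faults on present (write-protected) SPTEs can be fixed locklessly. */
        return fault->write && fault->present;
}

int main(void)
{
        struct page_fault fault = {
                .addr    = 0x1234000ULL,
                .write   = true,
                .present = true,
        };

        /* Derived field filled in once, the way direct_page_fault() fills fault->gfn. */
        fault.gfn = fault.addr >> 12;

        printf("gfn 0x%llx, fast-path eligible: %d\n",
               (unsigned long long)fault.gfn, fault_can_be_fast(&fault));
        return 0;
}

Decoding the error code in a single place keeps predicates such as fault->write, fault->exec and fault->rsvd consistent between the legacy MMU and the TDP MMU paths, which is what lets the later patches in this series fold further state (goal_level, req_level, slot) into the same struct without growing every function signature.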
From b1a429fb18011505acad931f409415c8bb5b5c28 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Mon, 6 Sep 2021 20:25:46 +0800 Subject: KVM: x86/mmu: Verify shadow walk doesn't terminate early in page faults WARN and bail if the shadow walk for faulting in a SPTE terminates early, i.e. doesn't reach the expected level because the walk encountered a terminal SPTE. The shadow walks for page faults are subtle in that they install non-leaf SPTEs (zapping leaf SPTEs if necessary!) in the loop body, and consume the newly created non-leaf SPTE in the loop control, e.g. __shadow_walk_next(). In other words, the walks guarantee that the walk will stop if and only if the target level is reached by installing non-leaf SPTEs to guarantee the walk remains valid. Opportunistically use fault->goal-level instead of it.level in FNAME(fetch) to further clarify that KVM always installs the leaf SPTE at the target level. Reviewed-by: Lai Jiangshan Signed-off-by: Sean Christopherson Signed-off-by: Lai Jiangshan Message-Id: <20210906122547.263316-1-jiangshanlai@gmail.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 3 +++ arch/x86/kvm/mmu/paging_tmpl.h | 7 +++++-- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 5ba0a844f576..2ddbabad5bd2 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -3012,6 +3012,9 @@ static int __direct_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) account_huge_nx_page(vcpu->kvm, sp); } + if (WARN_ON_ONCE(it.level != fault->goal_level)) + return -EFAULT; + ret = mmu_set_spte(vcpu, it.sptep, ACC_ALL, fault->write, fault->goal_level, base_gfn, fault->pfn, fault->prefault, fault->map_writable); diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index 6bc0dbc0baff..7a8a2d14a3c7 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -760,9 +760,12 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, } } + if (WARN_ON_ONCE(it.level != fault->goal_level)) + return -EFAULT; + ret = mmu_set_spte(vcpu, it.sptep, gw->pte_access, fault->write, - it.level, base_gfn, fault->pfn, fault->prefault, - fault->map_writable); + fault->goal_level, base_gfn, fault->pfn, + fault->prefault, fault->map_writable); if (ret == RET_PF_SPURIOUS) return ret; -- cgit v1.2.3 From 68be1306caea8948738cab04014ca4506b590d38 Mon Sep 17 00:00:00 2001 From: David Matlack Date: Fri, 13 Aug 2021 20:35:00 +0000 Subject: KVM: x86/mmu: Fold rmap_recycle into rmap_add Consolidate rmap_recycle and rmap_add into a single function since they are only ever called together (and only from one place). This has a nice side effect of eliminating an extra kvm_vcpu_gfn_to_memslot(). In addition it makes mmu_set_spte(), which is a very long function, a little shorter. No functional change intended. 
Signed-off-by: David Matlack Message-Id: <20210813203504.2742757-3-dmatlack@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 40 ++++++++++++++-------------------------- 1 file changed, 14 insertions(+), 26 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 2ddbabad5bd2..8b6dc276935f 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -1071,20 +1071,6 @@ static bool rmap_can_add(struct kvm_vcpu *vcpu) return kvm_mmu_memory_cache_nr_free_objects(mc); } -static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) -{ - struct kvm_memory_slot *slot; - struct kvm_mmu_page *sp; - struct kvm_rmap_head *rmap_head; - - sp = sptep_to_sp(spte); - kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn); - slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); - rmap_head = gfn_to_rmap(gfn, sp->role.level, slot); - return pte_list_add(vcpu, spte, rmap_head); -} - - static void rmap_remove(struct kvm *kvm, u64 *spte) { struct kvm_memslots *slots; @@ -1097,9 +1083,9 @@ static void rmap_remove(struct kvm *kvm, u64 *spte) gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt); /* - * Unlike rmap_add and rmap_recycle, rmap_remove does not run in the - * context of a vCPU so have to determine which memslots to use based - * on context information in sp->role. + * Unlike rmap_add, rmap_remove does not run in the context of a vCPU + * so we have to determine which memslots to use based on context + * information in sp->role. */ slots = kvm_memslots_for_spte_role(kvm, sp->role); @@ -1639,19 +1625,24 @@ static bool kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, #define RMAP_RECYCLE_THRESHOLD 1000 -static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) +static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) { struct kvm_memory_slot *slot; - struct kvm_rmap_head *rmap_head; struct kvm_mmu_page *sp; + struct kvm_rmap_head *rmap_head; + int rmap_count; sp = sptep_to_sp(spte); + kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn); slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); rmap_head = gfn_to_rmap(gfn, sp->role.level, slot); + rmap_count = pte_list_add(vcpu, spte, rmap_head); - kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, __pte(0)); - kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn, - KVM_PAGES_PER_HPAGE(sp->role.level)); + if (rmap_count > RMAP_RECYCLE_THRESHOLD) { + kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, __pte(0)); + kvm_flush_remote_tlbs_with_address( + vcpu->kvm, sp->gfn, KVM_PAGES_PER_HPAGE(sp->role.level)); + } } bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) @@ -2713,7 +2704,6 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, bool host_writable) { int was_rmapped = 0; - int rmap_count; int set_spte_ret; int ret = RET_PF_FIXED; bool flush = false; @@ -2772,9 +2762,7 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, if (!was_rmapped) { kvm_update_page_stats(vcpu->kvm, level, 1); - rmap_count = rmap_add(vcpu, sptep, gfn); - if (rmap_count > RMAP_RECYCLE_THRESHOLD) - rmap_recycle(vcpu, sptep, gfn); + rmap_add(vcpu, sptep, gfn); } return ret; -- cgit v1.2.3 From bcc4f2bc5026633198c0f8b7dd8b0e5e15de5c9d Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 24 Sep 2021 04:52:23 -0400 Subject: KVM: MMU: mark page dirty in make_spte This simplifies set_spte, which we want to remove, and unifies code between the shadow MMU and the TDP MMU. The warning will be added back later to make_spte as well. 
There is a small disadvantage in the TDP MMU; it may unnecessarily mark a page as dirty twice if two vCPUs end up mapping the same page twice. However, this is a very small cost for a case that is already rare. Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 3 --- arch/x86/kvm/mmu/spte.c | 3 +++ arch/x86/kvm/mmu/tdp_mmu.c | 21 +-------------------- 3 files changed, 4 insertions(+), 23 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 8b6dc276935f..5a757953b98b 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -2688,9 +2688,6 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, ret = make_spte(vcpu, pte_access, level, gfn, pfn, *sptep, speculative, can_unsync, host_writable, sp_ad_disabled(sp), &spte); - if (spte & PT_WRITABLE_MASK) - kvm_vcpu_mark_page_dirty(vcpu, gfn); - if (*sptep == spte) ret |= SET_SPTE_SPURIOUS; else if (mmu_spte_update(sptep, spte)) diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c index a33c581aabd6..66be9452ded1 100644 --- a/arch/x86/kvm/mmu/spte.c +++ b/arch/x86/kvm/mmu/spte.c @@ -179,6 +179,9 @@ out: "spte = 0x%llx, level = %d, rsvd bits = 0x%llx", spte, level, get_rsvd_bits(&vcpu->arch.mmu->shadow_zero_check, spte, level)); + if (spte & PT_WRITABLE_MASK) + kvm_vcpu_mark_page_dirty(vcpu, gfn); + *new_spte = spte; return ret; } diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index 3bf85a8c7d15..b41b6f5ea82b 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -542,26 +542,7 @@ static inline bool tdp_mmu_map_set_spte_atomic(struct kvm_vcpu *vcpu, struct tdp_iter *iter, u64 new_spte) { - struct kvm *kvm = vcpu->kvm; - - if (!tdp_mmu_set_spte_atomic_no_dirty_log(kvm, iter, new_spte)) - return false; - - /* - * Use kvm_vcpu_gfn_to_memslot() instead of going through - * handle_changed_spte_dirty_log() to leverage vcpu->last_used_slot. - */ - if (is_writable_pte(new_spte)) { - struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, iter->gfn); - - if (slot && kvm_slot_dirty_track_enabled(slot)) { - /* Enforced by kvm_mmu_hugepage_adjust. */ - WARN_ON_ONCE(iter->level > PG_LEVEL_4K); - mark_page_dirty_in_slot(kvm, slot, iter->gfn); - } - } - - return true; + return tdp_mmu_set_spte_atomic_no_dirty_log(vcpu->kvm, iter, new_spte); } static inline bool tdp_mmu_zap_spte_atomic(struct kvm *kvm, -- cgit v1.2.3 From 6ccf44388206e60bd0ba46d00f8570a0588d812e Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Thu, 23 Sep 2021 11:20:48 -0400 Subject: KVM: MMU: unify tdp_mmu_map_set_spte_atomic and tdp_mmu_set_spte_atomic_no_dirty_log tdp_mmu_map_set_spte_atomic is not taking care of dirty logging anymore, the only difference that remains is that it takes a vCPU instead of the struct kvm. Merge the two functions. Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/tdp_mmu.c | 40 ++++++++++------------------------------ 1 file changed, 10 insertions(+), 30 deletions(-) diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index b41b6f5ea82b..2d92a5b54ded 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -489,8 +489,8 @@ static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn, } /* - * tdp_mmu_set_spte_atomic_no_dirty_log - Set a TDP MMU SPTE atomically - * and handle the associated bookkeeping, but do not mark the page dirty + * tdp_mmu_set_spte_atomic - Set a TDP MMU SPTE atomically + * and handle the associated bookkeeping. Do not mark the page dirty * in KVM's dirty bitmaps. 
* * @kvm: kvm instance @@ -499,9 +499,9 @@ static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn, * Returns: true if the SPTE was set, false if it was not. If false is returned, * this function will have no side-effects. */ -static inline bool tdp_mmu_set_spte_atomic_no_dirty_log(struct kvm *kvm, - struct tdp_iter *iter, - u64 new_spte) +static inline bool tdp_mmu_set_spte_atomic(struct kvm *kvm, + struct tdp_iter *iter, + u64 new_spte) { lockdep_assert_held_read(&kvm->mmu_lock); @@ -527,24 +527,6 @@ static inline bool tdp_mmu_set_spte_atomic_no_dirty_log(struct kvm *kvm, return true; } -/* - * tdp_mmu_map_set_spte_atomic - Set a leaf TDP MMU SPTE atomically to resolve a - * TDP page fault. - * - * @vcpu: The vcpu instance that took the TDP page fault. - * @iter: a tdp_iter instance currently on the SPTE that should be set - * @new_spte: The value the SPTE should be set to - * - * Returns: true if the SPTE was set, false if it was not. If false is returned, - * this function will have no side-effects. - */ -static inline bool tdp_mmu_map_set_spte_atomic(struct kvm_vcpu *vcpu, - struct tdp_iter *iter, - u64 new_spte) -{ - return tdp_mmu_set_spte_atomic_no_dirty_log(vcpu->kvm, iter, new_spte); -} - static inline bool tdp_mmu_zap_spte_atomic(struct kvm *kvm, struct tdp_iter *iter) { @@ -554,7 +536,7 @@ static inline bool tdp_mmu_zap_spte_atomic(struct kvm *kvm, * immediately installing a present entry in its place * before the TLBs are flushed. */ - if (!tdp_mmu_set_spte_atomic_no_dirty_log(kvm, iter, REMOVED_SPTE)) + if (!tdp_mmu_set_spte_atomic(kvm, iter, REMOVED_SPTE)) return false; kvm_flush_remote_tlbs_with_address(kvm, iter->gfn, @@ -928,7 +910,7 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, if (new_spte == iter->old_spte) ret = RET_PF_SPURIOUS; - else if (!tdp_mmu_map_set_spte_atomic(vcpu, iter, new_spte)) + else if (!tdp_mmu_set_spte_atomic(vcpu->kvm, iter, new_spte)) return RET_PF_RETRY; /* @@ -1020,7 +1002,7 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) new_spte = make_nonleaf_spte(child_pt, !shadow_accessed_mask); - if (tdp_mmu_set_spte_atomic_no_dirty_log(vcpu->kvm, &iter, new_spte)) { + if (tdp_mmu_set_spte_atomic(vcpu->kvm, &iter, new_spte)) { tdp_mmu_link_page(vcpu->kvm, sp, fault->huge_page_disallowed && fault->req_level >= iter.level); @@ -1208,8 +1190,7 @@ retry: new_spte = iter.old_spte & ~PT_WRITABLE_MASK; - if (!tdp_mmu_set_spte_atomic_no_dirty_log(kvm, &iter, - new_spte)) { + if (!tdp_mmu_set_spte_atomic(kvm, &iter, new_spte)) { /* * The iter must explicitly re-read the SPTE because * the atomic cmpxchg failed. @@ -1277,8 +1258,7 @@ retry: continue; } - if (!tdp_mmu_set_spte_atomic_no_dirty_log(kvm, &iter, - new_spte)) { + if (!tdp_mmu_set_spte_atomic(kvm, &iter, new_spte)) { /* * The iter must explicitly re-read the SPTE because * the atomic cmpxchg failed. -- cgit v1.2.3 From e710c5f6be0eb36f8f2e98efbc02f1b31021c29d Mon Sep 17 00:00:00 2001 From: David Matlack Date: Fri, 24 Sep 2021 05:05:26 -0400 Subject: KVM: x86/mmu: Pass the memslot around via struct kvm_page_fault The memslot for the faulting gfn is used throughout the page fault handling code, so capture it in kvm_page_fault as soon as we know the gfn and use it in the page fault handling code that has direct access to the kvm_page_fault struct. Replace various tests using is_noslot_pfn with more direct tests on fault->slot being NULL. 
This, in combination with the subsequent patch, improves "Populate memory time" in dirty_log_perf_test by 5% when using the legacy MMU. There is no discerable improvement to the performance of the TDP MMU. No functional change intended. Suggested-by: Ben Gardon Signed-off-by: David Matlack Message-Id: <20210813203504.2742757-4-dmatlack@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu.h | 3 +++ arch/x86/kvm/mmu/mmu.c | 32 ++++++++++++-------------------- arch/x86/kvm/mmu/paging_tmpl.h | 6 ++++-- arch/x86/kvm/mmu/tdp_mmu.c | 2 +- 4 files changed, 20 insertions(+), 23 deletions(-) diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index 01a4d1bc5053..75367af1a6d3 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -158,6 +158,9 @@ struct kvm_page_fault { /* Shifted addr, or result of guest page table walk if addr is a gva. */ gfn_t gfn; + /* The memslot containing gfn. May be NULL. */ + struct kvm_memory_slot *slot; + /* Outputs of kvm_faultin_pfn. */ kvm_pfn_t pfn; hva_t hva; diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 5a757953b98b..754578458cb7 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -2907,7 +2907,7 @@ int kvm_mmu_max_mapping_level(struct kvm *kvm, void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) { - struct kvm_memory_slot *slot; + struct kvm_memory_slot *slot = fault->slot; kvm_pfn_t mask; fault->huge_page_disallowed = fault->exec && fault->nx_huge_page_workaround_enabled; @@ -2918,8 +2918,7 @@ void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault if (is_error_noslot_pfn(fault->pfn) || kvm_is_reserved_pfn(fault->pfn)) return; - slot = gfn_to_memslot_dirty_bitmap(vcpu, fault->gfn, true); - if (!slot) + if (kvm_slot_dirty_track_enabled(slot)) return; /* @@ -3043,7 +3042,7 @@ static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fa return true; } - if (unlikely(is_noslot_pfn(fault->pfn))) { + if (unlikely(!fault->slot)) { gva_t gva = fault->is_tdp ? 0 : fault->addr; vcpu_cache_mmio_info(vcpu, gva, fault->gfn, @@ -3097,13 +3096,9 @@ static bool page_fault_can_be_fast(struct kvm_page_fault *fault) * someone else modified the SPTE from its original value. */ static bool -fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, +fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, u64 *sptep, u64 old_spte, u64 new_spte) { - gfn_t gfn; - - WARN_ON(!sp->role.direct); - /* * Theoretically we could also set dirty bit (and flush TLB) here in * order to eliminate unnecessary PML logging. See comments in @@ -3119,14 +3114,8 @@ fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, if (cmpxchg64(sptep, old_spte, new_spte) != old_spte) return false; - if (is_writable_pte(new_spte) && !is_writable_pte(old_spte)) { - /* - * The gfn of direct spte is stable since it is - * calculated by sp->gfn. - */ - gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt); - kvm_vcpu_mark_page_dirty(vcpu, gfn); - } + if (is_writable_pte(new_spte) && !is_writable_pte(old_spte)) + mark_page_dirty_in_slot(vcpu->kvm, fault->slot, fault->gfn); return true; } @@ -3251,7 +3240,7 @@ static int fast_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) * since the gfn is not stable for indirect shadow page. See * Documentation/virt/kvm/locking.rst to get more detail. 
*/ - if (fast_pf_fix_direct_spte(vcpu, sp, sptep, spte, new_spte)) { + if (fast_pf_fix_direct_spte(vcpu, fault, sptep, spte, new_spte)) { ret = RET_PF_FIXED; break; } @@ -3863,7 +3852,7 @@ static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, static bool kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, int *r) { - struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, fault->gfn); + struct kvm_memory_slot *slot = fault->slot; bool async; /* @@ -3877,6 +3866,7 @@ static bool kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, if (!kvm_is_visible_memslot(slot)) { /* Don't expose private memslots to L2. */ if (is_guest_mode(vcpu)) { + fault->slot = NULL; fault->pfn = KVM_PFN_NOSLOT; fault->map_writable = false; return false; @@ -3928,6 +3918,8 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault int r; fault->gfn = fault->addr >> PAGE_SHIFT; + fault->slot = kvm_vcpu_gfn_to_memslot(vcpu, fault->gfn); + if (page_fault_handle_page_track(vcpu, fault)) return RET_PF_EMULATE; @@ -3955,7 +3947,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault else write_lock(&vcpu->kvm->mmu_lock); - if (!is_noslot_pfn(fault->pfn) && mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, fault->hva)) + if (fault->slot && mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, fault->hva)) goto out_unlock; r = make_mmu_pages_available(vcpu); if (r) diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index 7a8a2d14a3c7..e4c7bf3deac8 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -861,6 +861,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault } fault->gfn = walker.gfn; + fault->slot = kvm_vcpu_gfn_to_memslot(vcpu, fault->gfn); + if (page_fault_handle_page_track(vcpu, fault)) { shadow_page_table_clear_flood(vcpu, fault->addr); return RET_PF_EMULATE; @@ -894,7 +896,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault * we will cache the incorrect access into mmio spte. 
*/ if (fault->write && !(walker.pte_access & ACC_WRITE_MASK) && - !is_cr0_wp(vcpu->arch.mmu) && !fault->user && !is_noslot_pfn(fault->pfn)) { + !is_cr0_wp(vcpu->arch.mmu) && !fault->user && fault->slot) { walker.pte_access |= ACC_WRITE_MASK; walker.pte_access &= ~ACC_USER_MASK; @@ -910,7 +912,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault r = RET_PF_RETRY; write_lock(&vcpu->kvm->mmu_lock); - if (!is_noslot_pfn(fault->pfn) && mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, fault->hva)) + if (fault->slot && mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, fault->hva)) goto out_unlock; kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT); diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index 2d92a5b54ded..3e10658cf0d7 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -900,7 +900,7 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, int ret = RET_PF_FIXED; int make_spte_ret = 0; - if (unlikely(is_noslot_pfn(fault->pfn))) + if (unlikely(!fault->slot)) new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL); else make_spte_ret = make_spte(vcpu, ACC_ALL, iter->level, iter->gfn, -- cgit v1.2.3 From 888104138cb8e88c3825efc07b0000c195346387 Mon Sep 17 00:00:00 2001 From: David Matlack Date: Fri, 13 Aug 2021 20:35:02 +0000 Subject: KVM: x86/mmu: Avoid memslot lookup in page_fault_handle_page_track Now that kvm_page_fault has a pointer to the memslot it can be passed down to the page tracking code to avoid a redundant slot lookup. No functional change intended. Signed-off-by: David Matlack Message-Id: <20210813203504.2742757-5-dmatlack@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_page_track.h | 2 ++ arch/x86/kvm/mmu/mmu.c | 2 +- arch/x86/kvm/mmu/page_track.c | 20 +++++++++++++------- 3 files changed, 16 insertions(+), 8 deletions(-) diff --git a/arch/x86/include/asm/kvm_page_track.h b/arch/x86/include/asm/kvm_page_track.h index 6a5f3acf2b33..9cd9230e5cc8 100644 --- a/arch/x86/include/asm/kvm_page_track.h +++ b/arch/x86/include/asm/kvm_page_track.h @@ -61,6 +61,8 @@ void kvm_slot_page_track_remove_page(struct kvm *kvm, enum kvm_page_track_mode mode); bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn, enum kvm_page_track_mode mode); +bool kvm_slot_page_track_is_active(struct kvm_memory_slot *slot, gfn_t gfn, + enum kvm_page_track_mode mode); void kvm_page_track_register_notifier(struct kvm *kvm, diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 754578458cb7..d63fe7b10bd1 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -3819,7 +3819,7 @@ static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu, * guest is writing the page which is write tracked which can * not be fixed by page fault handler. */ - if (kvm_page_track_is_active(vcpu, fault->gfn, KVM_PAGE_TRACK_WRITE)) + if (kvm_slot_page_track_is_active(fault->slot, fault->gfn, KVM_PAGE_TRACK_WRITE)) return true; return false; diff --git a/arch/x86/kvm/mmu/page_track.c b/arch/x86/kvm/mmu/page_track.c index 21427e84a82e..859800f7bb95 100644 --- a/arch/x86/kvm/mmu/page_track.c +++ b/arch/x86/kvm/mmu/page_track.c @@ -136,19 +136,14 @@ void kvm_slot_page_track_remove_page(struct kvm *kvm, } EXPORT_SYMBOL_GPL(kvm_slot_page_track_remove_page); -/* - * check if the corresponding access on the specified guest page is tracked. 
- */ -bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn, - enum kvm_page_track_mode mode) +bool kvm_slot_page_track_is_active(struct kvm_memory_slot *slot, gfn_t gfn, + enum kvm_page_track_mode mode) { - struct kvm_memory_slot *slot; int index; if (WARN_ON(!page_track_mode_is_valid(mode))) return false; - slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); if (!slot) return false; @@ -156,6 +151,17 @@ bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn, return !!READ_ONCE(slot->arch.gfn_track[mode][index]); } +/* + * check if the corresponding access on the specified guest page is tracked. + */ +bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn, + enum kvm_page_track_mode mode) +{ + struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); + + return kvm_slot_page_track_is_active(slot, gfn, mode); +} + void kvm_page_track_cleanup(struct kvm *kvm) { struct kvm_page_track_notifier_head *head; -- cgit v1.2.3 From d786c7783b01a01346f77e8e785030b5096c191a Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Tue, 17 Aug 2021 07:22:32 -0400 Subject: KVM: MMU: inline set_spte in mmu_set_spte Since the two callers of set_spte do different things with the results, inlining it actually makes the code simpler to reason about. For example, mmu_set_spte looks quite like tdp_mmu_map_handle_target_level, but the similarity is hidden by set_spte. Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 31 ++++++++++++++++--------------- 1 file changed, 16 insertions(+), 15 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index d63fe7b10bd1..6ba7c60bd4f8 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -2700,10 +2700,12 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn, kvm_pfn_t pfn, bool speculative, bool host_writable) { + struct kvm_mmu_page *sp = sptep_to_sp(sptep); int was_rmapped = 0; - int set_spte_ret; int ret = RET_PF_FIXED; bool flush = false; + int make_spte_ret; + u64 spte; pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__, *sptep, write_fault, gfn); @@ -2734,30 +2736,29 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, was_rmapped = 1; } - set_spte_ret = set_spte(vcpu, sptep, pte_access, level, gfn, pfn, - speculative, true, host_writable); - if (set_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) { + make_spte_ret = make_spte(vcpu, pte_access, level, gfn, pfn, *sptep, speculative, + true, host_writable, sp_ad_disabled(sp), &spte); + + if (*sptep == spte) { + ret = RET_PF_SPURIOUS; + } else { + trace_kvm_mmu_set_spte(level, gfn, sptep); + flush |= mmu_spte_update(sptep, spte); + } + + if (make_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) { if (write_fault) ret = RET_PF_EMULATE; } - if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH || flush) + if (flush) kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, KVM_PAGES_PER_HPAGE(level)); - /* - * The fault is fully spurious if and only if the new SPTE and old SPTE - * are identical, and emulation is not required. 
- */ - if ((set_spte_ret & SET_SPTE_SPURIOUS) && ret == RET_PF_FIXED) { - WARN_ON_ONCE(!was_rmapped); - return RET_PF_SPURIOUS; - } - pgprintk("%s: setting spte %llx\n", __func__, *sptep); - trace_kvm_mmu_set_spte(level, gfn, sptep); if (!was_rmapped) { + WARN_ON_ONCE(ret == RET_PF_SPURIOUS); kvm_update_page_stats(vcpu->kvm, level, 1); rmap_add(vcpu, sptep, gfn); } -- cgit v1.2.3 From 4758d47e0d685c5e2ee999c355c52d25210c2fbc Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Tue, 17 Aug 2021 07:22:32 -0400 Subject: KVM: MMU: inline set_spte in FNAME(sync_page) Since the two callers of set_spte do different things with the results, inlining it actually makes the code simpler to reason about. For example, FNAME(sync_page) already has a struct kvm_mmu_page *, but set_spte had to fish it back out of sptep's private page data. Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 21 --------------------- arch/x86/kvm/mmu/paging_tmpl.h | 21 ++++++++++++--------- 2 files changed, 12 insertions(+), 30 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 6ba7c60bd4f8..19c2fd2189a3 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -2674,27 +2674,6 @@ int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync, return 0; } -static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, - unsigned int pte_access, int level, - gfn_t gfn, kvm_pfn_t pfn, bool speculative, - bool can_unsync, bool host_writable) -{ - u64 spte; - struct kvm_mmu_page *sp; - int ret; - - sp = sptep_to_sp(sptep); - - ret = make_spte(vcpu, pte_access, level, gfn, pfn, *sptep, speculative, - can_unsync, host_writable, sp_ad_disabled(sp), &spte); - - if (*sptep == spte) - ret |= SET_SPTE_SPURIOUS; - else if (mmu_spte_update(sptep, spte)) - ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH; - return ret; -} - static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned int pte_access, bool write_fault, int level, gfn_t gfn, kvm_pfn_t pfn, bool speculative, diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index e4c7bf3deac8..500962dceda0 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -1061,7 +1061,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) int i; bool host_writable; gpa_t first_pte_gpa; - int set_spte_ret = 0; + bool flush = false; /* * Ignore various flags when verifying that it's safe to sync a shadow @@ -1091,6 +1091,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) first_pte_gpa = FNAME(get_level1_sp_gpa)(sp); for (i = 0; i < PT64_ENT_PER_PAGE; i++) { + u64 *sptep, spte; unsigned pte_access; pt_element_t gpte; gpa_t pte_gpa; @@ -1106,7 +1107,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) return -1; if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) { - set_spte_ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH; + flush = true; continue; } @@ -1120,19 +1121,21 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) if (gfn != sp->gfns[i]) { drop_spte(vcpu->kvm, &sp->spt[i]); - set_spte_ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH; + flush = true; continue; } - host_writable = sp->spt[i] & shadow_host_writable_mask; + sptep = &sp->spt[i]; + spte = *sptep; + host_writable = spte & shadow_host_writable_mask; + make_spte(vcpu, pte_access, PG_LEVEL_4K, gfn, + spte_to_pfn(spte), spte, true, false, + host_writable, sp_ad_disabled(sp), &spte); - set_spte_ret |= set_spte(vcpu, &sp->spt[i], - pte_access, 
PG_LEVEL_4K, - gfn, spte_to_pfn(sp->spt[i]), - true, false, host_writable); + flush |= mmu_spte_update(sptep, spte); } - return set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH; + return flush; } #undef pt_element_t -- cgit v1.2.3 From ad67e4806e4c2d920e2045b3fafc60ddbc3017f5 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Tue, 17 Aug 2021 07:32:09 -0400 Subject: KVM: MMU: clean up make_spte return value Now that make_spte is called directly by the shadow MMU (rather than wrapped by set_spte), it only has to return one boolean value. Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 8 ++++---- arch/x86/kvm/mmu/mmu_internal.h | 5 ----- arch/x86/kvm/mmu/spte.c | 8 ++++---- arch/x86/kvm/mmu/spte.h | 7 +------ arch/x86/kvm/mmu/tdp_mmu.c | 6 +++--- 5 files changed, 12 insertions(+), 22 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 19c2fd2189a3..dcbe7df2f890 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -2683,7 +2683,7 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, int was_rmapped = 0; int ret = RET_PF_FIXED; bool flush = false; - int make_spte_ret; + bool wrprot; u64 spte; pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__, @@ -2715,8 +2715,8 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, was_rmapped = 1; } - make_spte_ret = make_spte(vcpu, pte_access, level, gfn, pfn, *sptep, speculative, - true, host_writable, sp_ad_disabled(sp), &spte); + wrprot = make_spte(vcpu, pte_access, level, gfn, pfn, *sptep, speculative, + true, host_writable, sp_ad_disabled(sp), &spte); if (*sptep == spte) { ret = RET_PF_SPURIOUS; @@ -2725,7 +2725,7 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, flush |= mmu_spte_update(sptep, spte); } - if (make_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) { + if (wrprot) { if (write_fault) ret = RET_PF_EMULATE; } diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h index f0295ad51f69..94f4e754facb 100644 --- a/arch/x86/kvm/mmu/mmu_internal.h +++ b/arch/x86/kvm/mmu/mmu_internal.h @@ -150,11 +150,6 @@ enum { RET_PF_SPURIOUS, }; -/* Bits which may be returned by set_spte() */ -#define SET_SPTE_WRITE_PROTECTED_PT BIT(0) -#define SET_SPTE_NEED_REMOTE_TLB_FLUSH BIT(1) -#define SET_SPTE_SPURIOUS BIT(2) - int kvm_mmu_max_mapping_level(struct kvm *kvm, const struct kvm_memory_slot *slot, gfn_t gfn, kvm_pfn_t pfn, int max_level); diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c index 66be9452ded1..29ea996201b4 100644 --- a/arch/x86/kvm/mmu/spte.c +++ b/arch/x86/kvm/mmu/spte.c @@ -89,13 +89,13 @@ static bool kvm_is_mmio_pfn(kvm_pfn_t pfn) E820_TYPE_RAM); } -int make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level, +bool make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level, gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool speculative, bool can_unsync, bool host_writable, bool ad_disabled, u64 *new_spte) { u64 spte = SPTE_MMU_PRESENT_MASK; - int ret = 0; + bool wrprot = false; if (ad_disabled) spte |= SPTE_TDP_AD_DISABLED_MASK; @@ -162,7 +162,7 @@ int make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level, if (mmu_try_to_unsync_pages(vcpu, gfn, can_unsync, speculative)) { pgprintk("%s: found shadow page for %llx, marking ro\n", __func__, gfn); - ret |= SET_SPTE_WRITE_PROTECTED_PT; + wrprot = true; pte_access &= ~ACC_WRITE_MASK; spte &= ~(PT_WRITABLE_MASK | shadow_mmu_writable_mask); } @@ -183,7 +183,7 @@ out: kvm_vcpu_mark_page_dirty(vcpu, gfn); *new_spte = spte; - return ret; + return wrprot; } u64 
make_nonleaf_spte(u64 *child_pt, bool ad_disabled) diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h index eb7b227fc6cf..1998ec559196 100644 --- a/arch/x86/kvm/mmu/spte.h +++ b/arch/x86/kvm/mmu/spte.h @@ -334,12 +334,7 @@ static inline u64 get_mmio_spte_generation(u64 spte) return gen; } -/* Bits which may be returned by set_spte() */ -#define SET_SPTE_WRITE_PROTECTED_PT BIT(0) -#define SET_SPTE_NEED_REMOTE_TLB_FLUSH BIT(1) -#define SET_SPTE_SPURIOUS BIT(2) - -int make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level, +bool make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level, gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool speculative, bool can_unsync, bool host_writable, bool ad_disabled, u64 *new_spte); diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index 3e10658cf0d7..6de2c957edd6 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -898,12 +898,12 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, { u64 new_spte; int ret = RET_PF_FIXED; - int make_spte_ret = 0; + bool wrprot = false; if (unlikely(!fault->slot)) new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL); else - make_spte_ret = make_spte(vcpu, ACC_ALL, iter->level, iter->gfn, + wrprot = make_spte(vcpu, ACC_ALL, iter->level, iter->gfn, fault->pfn, iter->old_spte, fault->prefault, true, fault->map_writable, !shadow_accessed_mask, &new_spte); @@ -918,7 +918,7 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, * protected, emulation is needed. If the emulation was skipped, * the vCPU would have the same fault again. */ - if (make_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) { + if (wrprot) { if (fault->write) ret = RET_PF_EMULATE; } -- cgit v1.2.3 From eb5cd7ffe142a989c77e9989e3e9ea986dc418aa Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Tue, 17 Aug 2021 07:42:10 -0400 Subject: KVM: MMU: remove unnecessary argument to mmu_set_spte The level of the new SPTE can be found in the kvm_mmu_page struct; there is no need to pass it down. 
Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 7 ++++--- arch/x86/kvm/mmu/paging_tmpl.h | 6 +++--- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index dcbe7df2f890..91303006faaf 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -2675,11 +2675,12 @@ int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync, } static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, - unsigned int pte_access, bool write_fault, int level, + unsigned int pte_access, bool write_fault, gfn_t gfn, kvm_pfn_t pfn, bool speculative, bool host_writable) { struct kvm_mmu_page *sp = sptep_to_sp(sptep); + int level = sp->role.level; int was_rmapped = 0; int ret = RET_PF_FIXED; bool flush = false; @@ -2777,7 +2778,7 @@ static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu, return -1; for (i = 0; i < ret; i++, gfn++, start++) { - mmu_set_spte(vcpu, start, access, false, sp->role.level, gfn, + mmu_set_spte(vcpu, start, access, false, gfn, page_to_pfn(pages[i]), true, true); put_page(pages[i]); } @@ -2980,7 +2981,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) return -EFAULT; ret = mmu_set_spte(vcpu, it.sptep, ACC_ALL, - fault->write, fault->goal_level, base_gfn, fault->pfn, + fault->write, base_gfn, fault->pfn, fault->prefault, fault->map_writable); if (ret == RET_PF_SPURIOUS) return ret; diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index 500962dceda0..7f2c6eeed04f 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -582,7 +582,7 @@ FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, * we call mmu_set_spte() with host_writable = true because * pte_prefetch_gfn_to_pfn always gets a writable pfn. */ - mmu_set_spte(vcpu, spte, pte_access, false, PG_LEVEL_4K, gfn, pfn, + mmu_set_spte(vcpu, spte, pte_access, false, gfn, pfn, true, true); kvm_release_pfn_clean(pfn); @@ -764,8 +764,8 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, return -EFAULT; ret = mmu_set_spte(vcpu, it.sptep, gw->pte_access, fault->write, - fault->goal_level, base_gfn, fault->pfn, - fault->prefault, fault->map_writable); + base_gfn, fault->pfn, fault->prefault, + fault->map_writable); if (ret == RET_PF_SPURIOUS) return ret; -- cgit v1.2.3 From 87e888eafd5b2c7c48245b2a272f9d54264b1526 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Tue, 17 Aug 2021 07:34:04 -0400 Subject: KVM: MMU: set ad_disabled in TDP MMU role Prepare for removing the ad_disabled argument of make_spte; instead it can be found in the role of a struct kvm_mmu_page. First of all, the TDP MMU must set the role accurately. Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/tdp_mmu.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index 6de2c957edd6..1cdb5618bb76 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -167,6 +167,7 @@ static union kvm_mmu_page_role page_role_for_level(struct kvm_vcpu *vcpu, role.direct = true; role.gpte_is_8_bytes = true; role.access = ACC_ALL; + role.ad_disabled = !shadow_accessed_mask; return role; } -- cgit v1.2.3 From 7158bee4b47519430f3ccad7cffea578533f364e Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Tue, 17 Aug 2021 07:43:19 -0400 Subject: KVM: MMU: pass kvm_mmu_page struct to make_spte The level and A/D bit support of the new SPTE can be found in the role, which is stored in the kvm_mmu_page struct. 
This merges two arguments into one. For the TDP MMU, the kvm_mmu_page was not used (kvm_tdp_mmu_map does not use it if the SPTE is already present) so we fetch it just before calling make_spte. Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 4 ++-- arch/x86/kvm/mmu/paging_tmpl.h | 4 ++-- arch/x86/kvm/mmu/spte.c | 11 ++++++----- arch/x86/kvm/mmu/spte.h | 8 ++++---- arch/x86/kvm/mmu/tdp_mmu.c | 7 ++++--- 5 files changed, 18 insertions(+), 16 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 91303006faaf..c208f001c302 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -2716,8 +2716,8 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, was_rmapped = 1; } - wrprot = make_spte(vcpu, pte_access, level, gfn, pfn, *sptep, speculative, - true, host_writable, sp_ad_disabled(sp), &spte); + wrprot = make_spte(vcpu, sp, pte_access, gfn, pfn, *sptep, speculative, + true, host_writable, &spte); if (*sptep == spte) { ret = RET_PF_SPURIOUS; diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index 7f2c6eeed04f..fbbaa3f5fb4e 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -1128,9 +1128,9 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) sptep = &sp->spt[i]; spte = *sptep; host_writable = spte & shadow_host_writable_mask; - make_spte(vcpu, pte_access, PG_LEVEL_4K, gfn, + make_spte(vcpu, sp, pte_access, gfn, spte_to_pfn(spte), spte, true, false, - host_writable, sp_ad_disabled(sp), &spte); + host_writable, &spte); flush |= mmu_spte_update(sptep, spte); } diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c index 29ea996201b4..2c5c14fbfbe9 100644 --- a/arch/x86/kvm/mmu/spte.c +++ b/arch/x86/kvm/mmu/spte.c @@ -89,15 +89,16 @@ static bool kvm_is_mmio_pfn(kvm_pfn_t pfn) E820_TYPE_RAM); } -bool make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level, - gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool speculative, - bool can_unsync, bool host_writable, bool ad_disabled, - u64 *new_spte) +bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, + unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn, + u64 old_spte, bool speculative, bool can_unsync, + bool host_writable, u64 *new_spte) { + int level = sp->role.level; u64 spte = SPTE_MMU_PRESENT_MASK; bool wrprot = false; - if (ad_disabled) + if (sp->role.ad_disabled) spte |= SPTE_TDP_AD_DISABLED_MASK; else if (kvm_vcpu_ad_need_write_protect(vcpu)) spte |= SPTE_TDP_AD_WRPROT_ONLY_MASK; diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h index 1998ec559196..cbb02a961ac2 100644 --- a/arch/x86/kvm/mmu/spte.h +++ b/arch/x86/kvm/mmu/spte.h @@ -334,10 +334,10 @@ static inline u64 get_mmio_spte_generation(u64 spte) return gen; } -bool make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level, - gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool speculative, - bool can_unsync, bool host_writable, bool ad_disabled, - u64 *new_spte); +bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, + unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn, + u64 old_spte, bool speculative, bool can_unsync, + bool host_writable, u64 *new_spte); u64 make_nonleaf_spte(u64 *child_pt, bool ad_disabled); u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access); u64 mark_spte_for_access_track(u64 spte); diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index 1cdb5618bb76..6dbf28924bc2 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -897,17 
+897,18 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, struct tdp_iter *iter) { + struct kvm_mmu_page *sp = sptep_to_sp(iter->sptep); u64 new_spte; int ret = RET_PF_FIXED; bool wrprot = false; + WARN_ON(sp->role.level != fault->goal_level); if (unlikely(!fault->slot)) new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL); else - wrprot = make_spte(vcpu, ACC_ALL, iter->level, iter->gfn, + wrprot = make_spte(vcpu, sp, ACC_ALL, iter->gfn, fault->pfn, iter->old_spte, fault->prefault, true, - fault->map_writable, !shadow_accessed_mask, - &new_spte); + fault->map_writable, &new_spte); if (new_spte == iter->old_spte) ret = RET_PF_SPURIOUS; -- cgit v1.2.3 From a12f43818b3f8f2d85b961493ff134c19ffcd05b Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Tue, 17 Aug 2021 07:49:47 -0400 Subject: KVM: MMU: pass struct kvm_page_fault to mmu_set_spte mmu_set_spte is called for either PTE prefetching or page faults. The three boolean arguments write_fault, speculative and host_writable are always respectively false/true/true for prefetching and coming from a struct kvm_page_fault for page faults. Let mmu_set_spte distinguish these two situation by accepting a possibly NULL struct kvm_page_fault argument. Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 17 ++++++++++------- arch/x86/kvm/mmu/paging_tmpl.h | 13 +++---------- 2 files changed, 13 insertions(+), 17 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index c208f001c302..4b304f60cf44 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -2675,9 +2675,8 @@ int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync, } static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, - unsigned int pte_access, bool write_fault, - gfn_t gfn, kvm_pfn_t pfn, bool speculative, - bool host_writable) + unsigned int pte_access, gfn_t gfn, + kvm_pfn_t pfn, struct kvm_page_fault *fault) { struct kvm_mmu_page *sp = sptep_to_sp(sptep); int level = sp->role.level; @@ -2687,6 +2686,11 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, bool wrprot; u64 spte; + /* Prefetching always gets a writable pfn. */ + bool host_writable = !fault || fault->map_writable; + bool speculative = !fault || fault->prefault; + bool write_fault = fault && fault->write; + pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__, *sptep, write_fault, gfn); @@ -2778,8 +2782,8 @@ static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu, return -1; for (i = 0; i < ret; i++, gfn++, start++) { - mmu_set_spte(vcpu, start, access, false, gfn, - page_to_pfn(pages[i]), true, true); + mmu_set_spte(vcpu, start, access, gfn, + page_to_pfn(pages[i]), NULL); put_page(pages[i]); } @@ -2981,8 +2985,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) return -EFAULT; ret = mmu_set_spte(vcpu, it.sptep, ACC_ALL, - fault->write, base_gfn, fault->pfn, - fault->prefault, fault->map_writable); + base_gfn, fault->pfn, fault); if (ret == RET_PF_SPURIOUS) return ret; diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index fbbaa3f5fb4e..8c07c42a4d73 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -578,13 +578,7 @@ FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, if (is_error_pfn(pfn)) return false; - /* - * we call mmu_set_spte() with host_writable = true because - * pte_prefetch_gfn_to_pfn always gets a writable pfn. 
- */ - mmu_set_spte(vcpu, spte, pte_access, false, gfn, pfn, - true, true); - + mmu_set_spte(vcpu, spte, pte_access, gfn, pfn, NULL); kvm_release_pfn_clean(pfn); return true; } @@ -763,9 +757,8 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, if (WARN_ON_ONCE(it.level != fault->goal_level)) return -EFAULT; - ret = mmu_set_spte(vcpu, it.sptep, gw->pte_access, fault->write, - base_gfn, fault->pfn, fault->prefault, - fault->map_writable); + ret = mmu_set_spte(vcpu, it.sptep, gw->pte_access, + base_gfn, fault->pfn, fault); if (ret == RET_PF_SPURIOUS) return ret; -- cgit v1.2.3 From 8a9f566ae4a4156343afb5cbfa79401c07647b1d Mon Sep 17 00:00:00 2001 From: David Matlack Date: Fri, 13 Aug 2021 20:35:03 +0000 Subject: KVM: x86/mmu: Avoid memslot lookup in rmap_add Avoid the memslot lookup in rmap_add, by passing it down from the fault handling code to mmu_set_spte and then to rmap_add. No functional change intended. Signed-off-by: David Matlack Message-Id: <20210813203504.2742757-6-dmatlack@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 27 +++++++-------------------- arch/x86/kvm/mmu/paging_tmpl.h | 12 +++++++++--- 2 files changed, 16 insertions(+), 23 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 4b304f60cf44..7ff2c6c896a8 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -1625,16 +1625,15 @@ static bool kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, #define RMAP_RECYCLE_THRESHOLD 1000 -static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) +static void rmap_add(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot, + u64 *spte, gfn_t gfn) { - struct kvm_memory_slot *slot; struct kvm_mmu_page *sp; struct kvm_rmap_head *rmap_head; int rmap_count; sp = sptep_to_sp(spte); kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn); - slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); rmap_head = gfn_to_rmap(gfn, sp->role.level, slot); rmap_count = pte_list_add(vcpu, spte, rmap_head); @@ -2674,8 +2673,8 @@ int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync, return 0; } -static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, - unsigned int pte_access, gfn_t gfn, +static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot, + u64 *sptep, unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn, struct kvm_page_fault *fault) { struct kvm_mmu_page *sp = sptep_to_sp(sptep); @@ -2744,24 +2743,12 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, if (!was_rmapped) { WARN_ON_ONCE(ret == RET_PF_SPURIOUS); kvm_update_page_stats(vcpu->kvm, level, 1); - rmap_add(vcpu, sptep, gfn); + rmap_add(vcpu, slot, sptep, gfn); } return ret; } -static kvm_pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, - bool no_dirty_log) -{ - struct kvm_memory_slot *slot; - - slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, no_dirty_log); - if (!slot) - return KVM_PFN_ERR_FAULT; - - return gfn_to_pfn_memslot_atomic(slot, gfn); -} - static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, u64 *start, u64 *end) @@ -2782,7 +2769,7 @@ static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu, return -1; for (i = 0; i < ret; i++, gfn++, start++) { - mmu_set_spte(vcpu, start, access, gfn, + mmu_set_spte(vcpu, slot, start, access, gfn, page_to_pfn(pages[i]), NULL); put_page(pages[i]); } @@ -2984,7 +2971,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) if (WARN_ON_ONCE(it.level != fault->goal_level)) return 
-EFAULT; - ret = mmu_set_spte(vcpu, it.sptep, ACC_ALL, + ret = mmu_set_spte(vcpu, fault->slot, it.sptep, ACC_ALL, base_gfn, fault->pfn, fault); if (ret == RET_PF_SPURIOUS) return ret; diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index 8c07c42a4d73..44361f7e70c8 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -561,6 +561,7 @@ static bool FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, u64 *spte, pt_element_t gpte, bool no_dirty_log) { + struct kvm_memory_slot *slot; unsigned pte_access; gfn_t gfn; kvm_pfn_t pfn; @@ -573,12 +574,17 @@ FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, gfn = gpte_to_gfn(gpte); pte_access = sp->role.access & FNAME(gpte_access)(gpte); FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte); - pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn, + + slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, no_dirty_log && (pte_access & ACC_WRITE_MASK)); + if (!slot) + return false; + + pfn = gfn_to_pfn_memslot_atomic(slot, gfn); if (is_error_pfn(pfn)) return false; - mmu_set_spte(vcpu, spte, pte_access, gfn, pfn, NULL); + mmu_set_spte(vcpu, slot, spte, pte_access, gfn, pfn, NULL); kvm_release_pfn_clean(pfn); return true; } @@ -757,7 +763,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, if (WARN_ON_ONCE(it.level != fault->goal_level)) return -EFAULT; - ret = mmu_set_spte(vcpu, it.sptep, gw->pte_access, + ret = mmu_set_spte(vcpu, fault->slot, it.sptep, gw->pte_access, base_gfn, fault->pfn, fault); if (ret == RET_PF_SPURIOUS) return ret; -- cgit v1.2.3 From 53597858dbf8daab8db99c7e448558fb0f970dbd Mon Sep 17 00:00:00 2001 From: David Matlack Date: Tue, 17 Aug 2021 08:46:45 -0400 Subject: KVM: x86/mmu: Avoid memslot lookup in make_spte and mmu_try_to_unsync_pages mmu_try_to_unsync_pages checks if page tracking is active for the given gfn, which requires knowing the memslot. We can pass down the memslot via make_spte to avoid this lookup. The memslot is also handy for make_spte's marking of the gfn as dirty: we can test whether dirty page tracking is enabled, and if so ensure that pages are mapped as writable with 4K granularity. Apart from the warning, no functional change is intended. 
Signed-off-by: David Matlack Message-Id: <20210813203504.2742757-7-dmatlack@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_page_track.h | 2 -- arch/x86/kvm/mmu/mmu.c | 8 ++++---- arch/x86/kvm/mmu/mmu_internal.h | 4 ++-- arch/x86/kvm/mmu/page_track.c | 14 +++----------- arch/x86/kvm/mmu/paging_tmpl.h | 4 +++- arch/x86/kvm/mmu/spte.c | 10 +++++++--- arch/x86/kvm/mmu/spte.h | 1 + arch/x86/kvm/mmu/tdp_mmu.c | 2 +- 8 files changed, 21 insertions(+), 24 deletions(-) diff --git a/arch/x86/include/asm/kvm_page_track.h b/arch/x86/include/asm/kvm_page_track.h index 9cd9230e5cc8..5c12f97ce934 100644 --- a/arch/x86/include/asm/kvm_page_track.h +++ b/arch/x86/include/asm/kvm_page_track.h @@ -59,8 +59,6 @@ void kvm_slot_page_track_add_page(struct kvm *kvm, void kvm_slot_page_track_remove_page(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn, enum kvm_page_track_mode mode); -bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn, - enum kvm_page_track_mode mode); bool kvm_slot_page_track_is_active(struct kvm_memory_slot *slot, gfn_t gfn, enum kvm_page_track_mode mode); diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 7ff2c6c896a8..91292009780a 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -2572,8 +2572,8 @@ static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) * were marked unsync (or if there is no shadow page), -EPERM if the SPTE must * be write-protected. */ -int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync, - bool speculative) +int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot, + gfn_t gfn, bool can_unsync, bool speculative) { struct kvm_mmu_page *sp; bool locked = false; @@ -2583,7 +2583,7 @@ int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync, * track machinery is used to write-protect upper-level shadow pages, * i.e. this guards the role.level == 4K assertion below! 
*/ - if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE)) + if (kvm_slot_page_track_is_active(slot, gfn, KVM_PAGE_TRACK_WRITE)) return -EPERM; /* @@ -2719,7 +2719,7 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot, was_rmapped = 1; } - wrprot = make_spte(vcpu, sp, pte_access, gfn, pfn, *sptep, speculative, + wrprot = make_spte(vcpu, sp, slot, pte_access, gfn, pfn, *sptep, speculative, true, host_writable, &spte); if (*sptep == spte) { diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h index 94f4e754facb..585146a712d2 100644 --- a/arch/x86/kvm/mmu/mmu_internal.h +++ b/arch/x86/kvm/mmu/mmu_internal.h @@ -118,8 +118,8 @@ static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu) kvm_x86_ops.cpu_dirty_log_size; } -int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync, - bool speculative); +int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot, + gfn_t gfn, bool can_unsync, bool speculative); void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn); void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn); diff --git a/arch/x86/kvm/mmu/page_track.c b/arch/x86/kvm/mmu/page_track.c index 859800f7bb95..16e7176c97a5 100644 --- a/arch/x86/kvm/mmu/page_track.c +++ b/arch/x86/kvm/mmu/page_track.c @@ -136,6 +136,9 @@ void kvm_slot_page_track_remove_page(struct kvm *kvm, } EXPORT_SYMBOL_GPL(kvm_slot_page_track_remove_page); +/* + * check if the corresponding access on the specified guest page is tracked. + */ bool kvm_slot_page_track_is_active(struct kvm_memory_slot *slot, gfn_t gfn, enum kvm_page_track_mode mode) { @@ -151,17 +154,6 @@ bool kvm_slot_page_track_is_active(struct kvm_memory_slot *slot, gfn_t gfn, return !!READ_ONCE(slot->arch.gfn_track[mode][index]); } -/* - * check if the corresponding access on the specified guest page is tracked. 
- */ -bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn, - enum kvm_page_track_mode mode) -{ - struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); - - return kvm_slot_page_track_is_active(slot, gfn, mode); -} - void kvm_page_track_cleanup(struct kvm *kvm) { struct kvm_page_track_notifier_head *head; diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index 44361f7e70c8..d8889e02c4b7 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -1091,6 +1091,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) for (i = 0; i < PT64_ENT_PER_PAGE; i++) { u64 *sptep, spte; + struct kvm_memory_slot *slot; unsigned pte_access; pt_element_t gpte; gpa_t pte_gpa; @@ -1127,7 +1128,8 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) sptep = &sp->spt[i]; spte = *sptep; host_writable = spte & shadow_host_writable_mask; - make_spte(vcpu, sp, pte_access, gfn, + slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); + make_spte(vcpu, sp, slot, pte_access, gfn, spte_to_pfn(spte), spte, true, false, host_writable, &spte); diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c index 2c5c14fbfbe9..871f6114b0fa 100644 --- a/arch/x86/kvm/mmu/spte.c +++ b/arch/x86/kvm/mmu/spte.c @@ -90,6 +90,7 @@ static bool kvm_is_mmio_pfn(kvm_pfn_t pfn) } bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, + struct kvm_memory_slot *slot, unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool speculative, bool can_unsync, bool host_writable, u64 *new_spte) @@ -160,7 +161,7 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, * e.g. it's write-tracked (upper-level SPs) or has one or more * shadow pages and unsync'ing pages is not allowed. */ - if (mmu_try_to_unsync_pages(vcpu, gfn, can_unsync, speculative)) { + if (mmu_try_to_unsync_pages(vcpu, slot, gfn, can_unsync, speculative)) { pgprintk("%s: found shadow page for %llx, marking ro\n", __func__, gfn); wrprot = true; @@ -180,8 +181,11 @@ out: "spte = 0x%llx, level = %d, rsvd bits = 0x%llx", spte, level, get_rsvd_bits(&vcpu->arch.mmu->shadow_zero_check, spte, level)); - if (spte & PT_WRITABLE_MASK) - kvm_vcpu_mark_page_dirty(vcpu, gfn); + if ((spte & PT_WRITABLE_MASK) && kvm_slot_dirty_track_enabled(slot)) { + /* Enforced by kvm_mmu_hugepage_adjust. 
*/ + WARN_ON(level > PG_LEVEL_4K); + mark_page_dirty_in_slot(vcpu->kvm, slot, gfn); + } *new_spte = spte; return wrprot; diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h index cbb02a961ac2..7c0b09461349 100644 --- a/arch/x86/kvm/mmu/spte.h +++ b/arch/x86/kvm/mmu/spte.h @@ -335,6 +335,7 @@ static inline u64 get_mmio_spte_generation(u64 spte) } bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, + struct kvm_memory_slot *slot, unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool speculative, bool can_unsync, bool host_writable, u64 *new_spte); diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index 6dbf28924bc2..953f24ded6bc 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -906,7 +906,7 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, if (unlikely(!fault->slot)) new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL); else - wrprot = make_spte(vcpu, sp, ACC_ALL, iter->gfn, + wrprot = make_spte(vcpu, sp, fault->slot, ACC_ALL, iter->gfn, fault->pfn, iter->old_spte, fault->prefault, true, fault->map_writable, &new_spte); -- cgit v1.2.3 From b73a54321ad82e7c8401643041dd11e43cd0a5dd Mon Sep 17 00:00:00 2001 From: Babu Moger Date: Thu, 23 Sep 2021 20:15:28 -0500 Subject: KVM: x86: Expose Predictive Store Forwarding Disable MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Predictive Store Forwarding: AMD Zen3 processors feature a new technology called Predictive Store Forwarding (PSF). PSF is a hardware-based micro-architectural optimization designed to improve the performance of code execution by predicting address dependencies between loads and stores. How PSF works: It is very common for a CPU to execute a load instruction to an address that was recently written by a store. Modern CPUs implement a technique known as Store-To-Load-Forwarding (STLF) to improve performance in such cases. With STLF, data from the store is forwarded directly to the load without having to wait for it to be written to memory. In a typical CPU, STLF occurs after the address of both the load and store are calculated and determined to match. PSF expands on this by speculating on the relationship between loads and stores without waiting for the address calculation to complete. With PSF, the CPU learns over time the relationship between loads and stores. If STLF typically occurs between a particular store and load, the CPU will remember this. In typical code, PSF provides a performance benefit by speculating on the load result and allowing later instructions to begin execution sooner than they otherwise would be able to. The details of security analysis of AMD predictive store forwarding is documented here. https://www.amd.com/system/files/documents/security-analysis-predictive-store-forwarding.pdf Predictive Store Forwarding controls: There are two hardware control bits which influence the PSF feature: - MSR 48h bit 2 – Speculative Store Bypass (SSBD) - MSR 48h bit 7 – Predictive Store Forwarding Disable (PSFD) The PSF feature is disabled if either of these bits are set. These bits are controllable on a per-thread basis in an SMT system. By default, both SSBD and PSFD are 0 meaning that the speculation features are enabled. While the SSBD bit disables PSF and speculative store bypass, PSFD only disables PSF. PSFD may be desirable for software which is concerned with the speculative behavior of PSF but desires a smaller performance impact than setting SSBD. 
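As a quick illustration of the two control bits just described (a sketch only; the macro names are illustrative and not taken from the kernel source, but the MSR address and bit positions are the ones listed above):

    /* MSR 48h: bit 2 = SSBD, bit 7 = PSFD, per the description above. */
    #define MSR_SPEC_CTRL_ADDR   0x48
    #define SPEC_CTRL_SSBD_BIT   (1ULL << 2)   /* disables SSB and, with it, PSF */
    #define SPEC_CTRL_PSFD_BIT   (1ULL << 7)   /* disables PSF only */

    /* PSF is disabled whenever either bit is set. */
    static inline int psf_is_disabled(unsigned long long spec_ctrl)
    {
            return (spec_ctrl & (SPEC_CTRL_SSBD_BIT | SPEC_CTRL_PSFD_BIT)) != 0;
    }
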
Support for PSFD is indicated in CPUID Fn8000_0008 EBX[28]. All processors that support PSF will also support PSFD. Linux kernel does not have the interface to enable/disable PSFD yet. Plan here is to expose the PSFD technology to KVM so that the guest kernel can make use of it if they wish to. Signed-off-by: Babu Moger Message-Id: <163244601049.30292.5855870305350227855.stgit@bmoger-ubuntu> [Keep feature private to KVM, as requested by Borislav Petkov. - Paolo] Signed-off-by: Paolo Bonzini --- arch/x86/kvm/cpuid.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 751aa85a3001..2d70edb0f323 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -53,9 +53,16 @@ static u32 xstate_required_size(u64 xstate_bv, bool compacted) return ret; } +/* + * This one is tied to SSB in the user API, and not + * visible in /proc/cpuinfo. + */ +#define KVM_X86_FEATURE_PSFD (13*32+28) /* Predictive Store Forwarding Disable */ + #define F feature_bit #define SF(name) (boot_cpu_has(X86_FEATURE_##name) ? F(name) : 0) + static inline struct kvm_cpuid_entry2 *cpuid_entry2_find( struct kvm_cpuid_entry2 *entries, int nent, u32 function, u32 index) { @@ -500,7 +507,8 @@ void kvm_set_cpu_caps(void) kvm_cpu_cap_mask(CPUID_8000_0008_EBX, F(CLZERO) | F(XSAVEERPTR) | F(WBNOINVD) | F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) | - F(AMD_SSB_NO) | F(AMD_STIBP) | F(AMD_STIBP_ALWAYS_ON) + F(AMD_SSB_NO) | F(AMD_STIBP) | F(AMD_STIBP_ALWAYS_ON) | + __feature_bit(KVM_X86_FEATURE_PSFD) ); /* -- cgit v1.2.3 From 78b497f2e62d8c7514de5f83c80837bbb120e93e Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Fri, 3 Sep 2021 15:08:05 +0200 Subject: kvm: use kvfree() in kvm_arch_free_vm() By switching from kfree() to kvfree() in kvm_arch_free_vm() Arm64 can use the common variant. This can be accomplished by adding another macro __KVM_HAVE_ARCH_VM_FREE, which will be used only by x86 for now. Further simplification can be achieved by adding __kvm_arch_free_vm() doing the common part. 
Suggested-by: Paolo Bonzini Signed-off-by: Juergen Gross Message-Id: <20210903130808.30142-5-jgross@suse.com> Signed-off-by: Paolo Bonzini --- arch/arm64/include/asm/kvm_host.h | 1 - arch/arm64/kvm/arm.c | 8 -------- arch/x86/include/asm/kvm_host.h | 2 ++ arch/x86/kvm/x86.c | 2 +- include/linux/kvm_host.h | 9 ++++++++- 5 files changed, 11 insertions(+), 11 deletions(-) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index f8be56d5342b..369c30e28301 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -771,7 +771,6 @@ int kvm_set_ipa_limit(void); #define __KVM_HAVE_ARCH_VM_ALLOC struct kvm *kvm_arch_alloc_vm(void); -void kvm_arch_free_vm(struct kvm *kvm); int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type); diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index fe102cd2e518..7838e9fb693e 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -297,14 +297,6 @@ struct kvm *kvm_arch_alloc_vm(void) return vzalloc(sizeof(struct kvm)); } -void kvm_arch_free_vm(struct kvm *kvm) -{ - if (!has_vhe()) - kfree(kvm); - else - vfree(kvm); -} - int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id) { if (irqchip_in_kernel(kvm) && vgic_initialized(kvm)) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 8470d4673068..1b280292bdff 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1540,6 +1540,8 @@ static inline struct kvm *kvm_arch_alloc_vm(void) { return __vmalloc(kvm_x86_ops.vm_size, GFP_KERNEL_ACCOUNT | __GFP_ZERO); } + +#define __KVM_HAVE_ARCH_VM_FREE void kvm_arch_free_vm(struct kvm *kvm); #define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 79535fe83a04..03091a2e0822 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -11130,7 +11130,7 @@ void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) void kvm_arch_free_vm(struct kvm *kvm) { kfree(to_kvm_hv(kvm)->hv_pa_pg); - vfree(kvm); + __kvm_arch_free_vm(kvm); } diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 3f87d6ad20bf..60a35d9fe259 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -1081,10 +1081,17 @@ static inline struct kvm *kvm_arch_alloc_vm(void) { return kzalloc(sizeof(struct kvm), GFP_KERNEL); } +#endif + +static inline void __kvm_arch_free_vm(struct kvm *kvm) +{ + kvfree(kvm); +} +#ifndef __KVM_HAVE_ARCH_VM_FREE static inline void kvm_arch_free_vm(struct kvm *kvm) { - kfree(kvm); + __kvm_arch_free_vm(kvm); } #endif -- cgit v1.2.3 From 174a921b6975ef959dd82ee9e8844067a62e3ec1 Mon Sep 17 00:00:00 2001 From: Krish Sadhukhan Date: Mon, 20 Sep 2021 19:51:31 -0400 Subject: nSVM: Check for reserved encodings of TLB_CONTROL in nested VMCB According to section "TLB Flush" in APM vol 2, "Support for TLB_CONTROL commands other than the first two, is optional and is indicated by CPUID Fn8000_000A_EDX[FlushByAsid]. All encodings of TLB_CONTROL not defined in the APM are reserved." 
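For reference, a sketch of the architected TLB_CONTROL encodings as recalled from the APM; only the first two values appear in the patch below, so the remaining two (and their exact values) should be treated as this note's assumption rather than part of the change:

    /* Sketch only: architected TLB_CONTROL encodings; everything else is
     * reserved.  The last two are optional and gated on
     * CPUID Fn8000_000A_EDX[FlushByAsid]. */
    #define TLB_CONTROL_DO_NOTHING          0
    #define TLB_CONTROL_FLUSH_ALL_ASID      1
    #define TLB_CONTROL_FLUSH_ASID          3
    #define TLB_CONTROL_FLUSH_ASID_LOCAL    7
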
Signed-off-by: Krish Sadhukhan Message-Id: <20210920235134.101970-3-krish.sadhukhan@oracle.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/nested.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index 31fd4bd334c2..f8b7bc04b3e7 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -238,6 +238,18 @@ static bool nested_svm_check_bitmap_pa(struct kvm_vcpu *vcpu, u64 pa, u32 size) kvm_vcpu_is_legal_gpa(vcpu, addr + size - 1); } +static bool nested_svm_check_tlb_ctl(struct kvm_vcpu *vcpu, u8 tlb_ctl) +{ + /* Nested FLUSHBYASID is not supported yet. */ + switch(tlb_ctl) { + case TLB_CONTROL_DO_NOTHING: + case TLB_CONTROL_FLUSH_ALL_ASID: + return true; + default: + return false; + } +} + static bool nested_vmcb_check_controls(struct kvm_vcpu *vcpu, struct vmcb_control_area *control) { @@ -257,6 +269,9 @@ static bool nested_vmcb_check_controls(struct kvm_vcpu *vcpu, IOPM_SIZE))) return false; + if (CC(!nested_svm_check_tlb_ctl(vcpu, control->tlb_ctl))) + return false; + return true; } -- cgit v1.2.3 From e9d0c0c4f7ea47d25490cd466cefe6d5c1a4d0ec Mon Sep 17 00:00:00 2001 From: David Stevens Date: Wed, 22 Sep 2021 13:58:58 +0900 Subject: KVM: x86: add config for non-kvm users of page tracking Add a config option that allows kvm to determine whether or not there are any external users of page tracking. Signed-off-by: David Stevens Message-Id: <20210922045859.2011227-2-stevensd@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/Kconfig | 3 +++ drivers/gpu/drm/i915/Kconfig | 1 + 2 files changed, 4 insertions(+) diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig index ac69894eab88..619186138176 100644 --- a/arch/x86/kvm/Kconfig +++ b/arch/x86/kvm/Kconfig @@ -129,4 +129,7 @@ config KVM_MMU_AUDIT This option adds a R/W kVM module parameter 'mmu_audit', which allows auditing of KVM MMU events at runtime. +config KVM_EXTERNAL_WRITE_TRACKING + bool + endif # VIRTUALIZATION diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig index f960f5d7664e..107762427648 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig @@ -126,6 +126,7 @@ config DRM_I915_GVT_KVMGT depends on DRM_I915_GVT depends on KVM depends on VFIO_MDEV + select KVM_EXTERNAL_WRITE_TRACKING default n help Choose this option if you want to enable KVMGT support for -- cgit v1.2.3 From deae4a10f16649d9c8bfb89f38b61930fb938284 Mon Sep 17 00:00:00 2001 From: David Stevens Date: Wed, 22 Sep 2021 13:58:59 +0900 Subject: KVM: x86: only allocate gfn_track when necessary Avoid allocating the gfn_track arrays if nothing needs them. If there are no external to KVM users of the API (i.e. no GVT-g), then page tracking is only needed for shadow page tables. This means that when tdp is enabled and there are no external users, then the gfn_track arrays can be lazily allocated when the shadow MMU is actually used. This avoid allocations equal to .05% of guest memory when nested virtualization is not used, if the kernel is compiled without GVT-g. 
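For a sense of the 0.05% figure quoted above: each write-tracking counter in gfn_track is an unsigned short, i.e. 2 bytes per 4 KiB guest page, so the array costs roughly

    2 / 4096 = 0.0488% ~= 0.05%

of guest memory per tracking mode, which is what the lazy allocation avoids when neither shadow paging nor an external tracker is ever used.
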
Signed-off-by: David Stevens Message-Id: <20210922045859.2011227-3-stevensd@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 8 ++++ arch/x86/include/asm/kvm_page_track.h | 8 +++- arch/x86/kvm/mmu/mmu.c | 11 +++++- arch/x86/kvm/mmu/page_track.c | 72 +++++++++++++++++++++++++++++++++-- arch/x86/kvm/x86.c | 2 +- 5 files changed, 93 insertions(+), 8 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 1b280292bdff..5271fce6cd65 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1211,6 +1211,14 @@ struct kvm_arch { */ bool memslots_have_rmaps; + /* + * Set when the KVM mmu needs guest write access page tracking. If + * set, the necessary gfn_track arrays have been allocated for + * all memslots and should be allocated for any newly created or + * modified memslots. + */ + bool memslots_mmu_write_tracking; + #if IS_ENABLED(CONFIG_HYPERV) hpa_t hv_root_tdp; spinlock_t hv_root_tdp_lock; diff --git a/arch/x86/include/asm/kvm_page_track.h b/arch/x86/include/asm/kvm_page_track.h index 5c12f97ce934..79d84a94f8eb 100644 --- a/arch/x86/include/asm/kvm_page_track.h +++ b/arch/x86/include/asm/kvm_page_track.h @@ -49,8 +49,11 @@ struct kvm_page_track_notifier_node { int kvm_page_track_init(struct kvm *kvm); void kvm_page_track_cleanup(struct kvm *kvm); +int kvm_page_track_enable_mmu_write_tracking(struct kvm *kvm); + void kvm_page_track_free_memslot(struct kvm_memory_slot *slot); -int kvm_page_track_create_memslot(struct kvm_memory_slot *slot, +int kvm_page_track_create_memslot(struct kvm *kvm, + struct kvm_memory_slot *slot, unsigned long npages); void kvm_slot_page_track_add_page(struct kvm *kvm, @@ -59,7 +62,8 @@ void kvm_slot_page_track_add_page(struct kvm *kvm, void kvm_slot_page_track_remove_page(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn, enum kvm_page_track_mode mode); -bool kvm_slot_page_track_is_active(struct kvm_memory_slot *slot, gfn_t gfn, +bool kvm_slot_page_track_is_active(struct kvm_vcpu *vcpu, + struct kvm_memory_slot *slot, gfn_t gfn, enum kvm_page_track_mode mode); void diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 91292009780a..24a9f4c3f5e7 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -2583,7 +2583,7 @@ int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot, * track machinery is used to write-protect upper-level shadow pages, * i.e. this guards the role.level == 4K assertion below! */ - if (kvm_slot_page_track_is_active(slot, gfn, KVM_PAGE_TRACK_WRITE)) + if (kvm_slot_page_track_is_active(vcpu, slot, gfn, KVM_PAGE_TRACK_WRITE)) return -EPERM; /* @@ -3431,6 +3431,10 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) if (r) return r; + r = kvm_page_track_enable_mmu_write_tracking(vcpu->kvm); + if (r) + return r; + write_lock(&vcpu->kvm->mmu_lock); r = make_mmu_pages_available(vcpu); if (r < 0) @@ -3790,7 +3794,7 @@ static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu, * guest is writing the page which is write tracked which can * not be fixed by page fault handler. 
*/ - if (kvm_slot_page_track_is_active(fault->slot, fault->gfn, KVM_PAGE_TRACK_WRITE)) + if (kvm_slot_page_track_is_active(vcpu, fault->slot, fault->gfn, KVM_PAGE_TRACK_WRITE)) return true; return false; @@ -5607,6 +5611,9 @@ void kvm_mmu_init_vm(struct kvm *kvm) */ kvm->arch.memslots_have_rmaps = true; + if (!tdp_enabled) + kvm->arch.memslots_mmu_write_tracking = true; + node->track_write = kvm_mmu_pte_write; node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot; kvm_page_track_register_notifier(kvm, node); diff --git a/arch/x86/kvm/mmu/page_track.c b/arch/x86/kvm/mmu/page_track.c index 16e7176c97a5..bb5d60bd4dbf 100644 --- a/arch/x86/kvm/mmu/page_track.c +++ b/arch/x86/kvm/mmu/page_track.c @@ -19,6 +19,16 @@ #include "mmu.h" #include "mmu_internal.h" +static bool write_tracking_enabled(struct kvm *kvm) +{ + /* + * Read memslots_mmu_write_tracking before gfn_track pointers. Pairs + * with smp_store_release in kvm_page_track_enable_mmu_write_tracking. + */ + return IS_ENABLED(CONFIG_KVM_EXTERNAL_WRITE_TRACKING) || + smp_load_acquire(&kvm->arch.memslots_mmu_write_tracking); +} + void kvm_page_track_free_memslot(struct kvm_memory_slot *slot) { int i; @@ -29,12 +39,16 @@ void kvm_page_track_free_memslot(struct kvm_memory_slot *slot) } } -int kvm_page_track_create_memslot(struct kvm_memory_slot *slot, +int kvm_page_track_create_memslot(struct kvm *kvm, + struct kvm_memory_slot *slot, unsigned long npages) { - int i; + int i; for (i = 0; i < KVM_PAGE_TRACK_MAX; i++) { + if (i == KVM_PAGE_TRACK_WRITE && !write_tracking_enabled(kvm)) + continue; + slot->arch.gfn_track[i] = kvcalloc(npages, sizeof(*slot->arch.gfn_track[i]), GFP_KERNEL_ACCOUNT); @@ -57,6 +71,46 @@ static inline bool page_track_mode_is_valid(enum kvm_page_track_mode mode) return true; } +int kvm_page_track_enable_mmu_write_tracking(struct kvm *kvm) +{ + struct kvm_memslots *slots; + struct kvm_memory_slot *slot; + unsigned short **gfn_track; + int i; + + if (write_tracking_enabled(kvm)) + return 0; + + mutex_lock(&kvm->slots_arch_lock); + + if (write_tracking_enabled(kvm)) { + mutex_unlock(&kvm->slots_arch_lock); + return 0; + } + + for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { + slots = __kvm_memslots(kvm, i); + kvm_for_each_memslot(slot, slots) { + gfn_track = slot->arch.gfn_track + KVM_PAGE_TRACK_WRITE; + *gfn_track = kvcalloc(slot->npages, sizeof(*gfn_track), + GFP_KERNEL_ACCOUNT); + if (*gfn_track == NULL) { + mutex_unlock(&kvm->slots_arch_lock); + return -ENOMEM; + } + } + } + + /* + * Ensure that memslots_mmu_write_tracking becomes true strictly + * after all the pointers are set. + */ + smp_store_release(&kvm->arch.memslots_mmu_write_tracking, true); + mutex_unlock(&kvm->slots_arch_lock); + + return 0; +} + static void update_gfn_track(struct kvm_memory_slot *slot, gfn_t gfn, enum kvm_page_track_mode mode, short count) { @@ -92,6 +146,10 @@ void kvm_slot_page_track_add_page(struct kvm *kvm, if (WARN_ON(!page_track_mode_is_valid(mode))) return; + if (WARN_ON(mode == KVM_PAGE_TRACK_WRITE && + !write_tracking_enabled(kvm))) + return; + update_gfn_track(slot, gfn, mode, 1); /* @@ -126,6 +184,10 @@ void kvm_slot_page_track_remove_page(struct kvm *kvm, if (WARN_ON(!page_track_mode_is_valid(mode))) return; + if (WARN_ON(mode == KVM_PAGE_TRACK_WRITE && + !write_tracking_enabled(kvm))) + return; + update_gfn_track(slot, gfn, mode, -1); /* @@ -139,7 +201,8 @@ EXPORT_SYMBOL_GPL(kvm_slot_page_track_remove_page); /* * check if the corresponding access on the specified guest page is tracked. 
*/ -bool kvm_slot_page_track_is_active(struct kvm_memory_slot *slot, gfn_t gfn, +bool kvm_slot_page_track_is_active(struct kvm_vcpu *vcpu, + struct kvm_memory_slot *slot, gfn_t gfn, enum kvm_page_track_mode mode) { int index; @@ -150,6 +213,9 @@ bool kvm_slot_page_track_is_active(struct kvm_memory_slot *slot, gfn_t gfn, if (!slot) return false; + if (mode == KVM_PAGE_TRACK_WRITE && !write_tracking_enabled(vcpu->kvm)) + return false; + index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K); return !!READ_ONCE(slot->arch.gfn_track[mode][index]); } diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 03091a2e0822..db7fa1398f0d 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -11476,7 +11476,7 @@ static int kvm_alloc_memslot_metadata(struct kvm *kvm, } } - if (kvm_page_track_create_memslot(slot, npages)) + if (kvm_page_track_create_memslot(kvm, slot, npages)) goto out_free; return 0; -- cgit v1.2.3 From 3f2401f47d29d669e2cb137709d10dd4c156a02f Mon Sep 17 00:00:00 2001 From: Anup Patel Date: Mon, 27 Sep 2021 17:10:00 +0530 Subject: RISC-V: Add hypervisor extension related CSR defines This patch adds asm/kvm_csr.h for RISC-V hypervisor extension related defines. Signed-off-by: Anup Patel Reviewed-by: Paolo Bonzini Reviewed-by: Alexander Graf Message-Id: <20210927114016.1089328-2-anup.patel@wdc.com> Acked-by: Palmer Dabbelt Signed-off-by: Paolo Bonzini --- arch/riscv/include/asm/csr.h | 87 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 87 insertions(+) diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h index 87ac65696871..5046f431645c 100644 --- a/arch/riscv/include/asm/csr.h +++ b/arch/riscv/include/asm/csr.h @@ -58,22 +58,32 @@ /* Interrupt causes (minus the high bit) */ #define IRQ_S_SOFT 1 +#define IRQ_VS_SOFT 2 #define IRQ_M_SOFT 3 #define IRQ_S_TIMER 5 +#define IRQ_VS_TIMER 6 #define IRQ_M_TIMER 7 #define IRQ_S_EXT 9 +#define IRQ_VS_EXT 10 #define IRQ_M_EXT 11 /* Exception causes */ #define EXC_INST_MISALIGNED 0 #define EXC_INST_ACCESS 1 +#define EXC_INST_ILLEGAL 2 #define EXC_BREAKPOINT 3 #define EXC_LOAD_ACCESS 5 #define EXC_STORE_ACCESS 7 #define EXC_SYSCALL 8 +#define EXC_HYPERVISOR_SYSCALL 9 +#define EXC_SUPERVISOR_SYSCALL 10 #define EXC_INST_PAGE_FAULT 12 #define EXC_LOAD_PAGE_FAULT 13 #define EXC_STORE_PAGE_FAULT 15 +#define EXC_INST_GUEST_PAGE_FAULT 20 +#define EXC_LOAD_GUEST_PAGE_FAULT 21 +#define EXC_VIRTUAL_INST_FAULT 22 +#define EXC_STORE_GUEST_PAGE_FAULT 23 /* PMP configuration */ #define PMP_R 0x01 @@ -85,6 +95,58 @@ #define PMP_A_NAPOT 0x18 #define PMP_L 0x80 +/* HSTATUS flags */ +#ifdef CONFIG_64BIT +#define HSTATUS_VSXL _AC(0x300000000, UL) +#define HSTATUS_VSXL_SHIFT 32 +#endif +#define HSTATUS_VTSR _AC(0x00400000, UL) +#define HSTATUS_VTW _AC(0x00200000, UL) +#define HSTATUS_VTVM _AC(0x00100000, UL) +#define HSTATUS_VGEIN _AC(0x0003f000, UL) +#define HSTATUS_VGEIN_SHIFT 12 +#define HSTATUS_HU _AC(0x00000200, UL) +#define HSTATUS_SPVP _AC(0x00000100, UL) +#define HSTATUS_SPV _AC(0x00000080, UL) +#define HSTATUS_GVA _AC(0x00000040, UL) +#define HSTATUS_VSBE _AC(0x00000020, UL) + +/* HGATP flags */ +#define HGATP_MODE_OFF _AC(0, UL) +#define HGATP_MODE_SV32X4 _AC(1, UL) +#define HGATP_MODE_SV39X4 _AC(8, UL) +#define HGATP_MODE_SV48X4 _AC(9, UL) + +#define HGATP32_MODE_SHIFT 31 +#define HGATP32_VMID_SHIFT 22 +#define HGATP32_VMID_MASK _AC(0x1FC00000, UL) +#define HGATP32_PPN _AC(0x003FFFFF, UL) + +#define HGATP64_MODE_SHIFT 60 +#define HGATP64_VMID_SHIFT 44 +#define HGATP64_VMID_MASK _AC(0x03FFF00000000000, UL) +#define 
HGATP64_PPN _AC(0x00000FFFFFFFFFFF, UL) + +#define HGATP_PAGE_SHIFT 12 + +#ifdef CONFIG_64BIT +#define HGATP_PPN HGATP64_PPN +#define HGATP_VMID_SHIFT HGATP64_VMID_SHIFT +#define HGATP_VMID_MASK HGATP64_VMID_MASK +#define HGATP_MODE_SHIFT HGATP64_MODE_SHIFT +#else +#define HGATP_PPN HGATP32_PPN +#define HGATP_VMID_SHIFT HGATP32_VMID_SHIFT +#define HGATP_VMID_MASK HGATP32_VMID_MASK +#define HGATP_MODE_SHIFT HGATP32_MODE_SHIFT +#endif + +/* VSIP & HVIP relation */ +#define VSIP_TO_HVIP_SHIFT (IRQ_VS_SOFT - IRQ_S_SOFT) +#define VSIP_VALID_MASK ((_AC(1, UL) << IRQ_S_SOFT) | \ + (_AC(1, UL) << IRQ_S_TIMER) | \ + (_AC(1, UL) << IRQ_S_EXT)) + /* symbolic CSR names: */ #define CSR_CYCLE 0xc00 #define CSR_TIME 0xc01 @@ -104,6 +166,31 @@ #define CSR_SIP 0x144 #define CSR_SATP 0x180 +#define CSR_VSSTATUS 0x200 +#define CSR_VSIE 0x204 +#define CSR_VSTVEC 0x205 +#define CSR_VSSCRATCH 0x240 +#define CSR_VSEPC 0x241 +#define CSR_VSCAUSE 0x242 +#define CSR_VSTVAL 0x243 +#define CSR_VSIP 0x244 +#define CSR_VSATP 0x280 + +#define CSR_HSTATUS 0x600 +#define CSR_HEDELEG 0x602 +#define CSR_HIDELEG 0x603 +#define CSR_HIE 0x604 +#define CSR_HTIMEDELTA 0x605 +#define CSR_HCOUNTEREN 0x606 +#define CSR_HGEIE 0x607 +#define CSR_HTIMEDELTAH 0x615 +#define CSR_HTVAL 0x643 +#define CSR_HIP 0x644 +#define CSR_HVIP 0x645 +#define CSR_HTINST 0x64a +#define CSR_HGATP 0x680 +#define CSR_HGEIP 0xe12 + #define CSR_MSTATUS 0x300 #define CSR_MISA 0x301 #define CSR_MIE 0x304 -- cgit v1.2.3 From 99cdc6c18c2d815e940e81b9b477d469bdd41788 Mon Sep 17 00:00:00 2001 From: Anup Patel Date: Mon, 27 Sep 2021 17:10:01 +0530 Subject: RISC-V: Add initial skeletal KVM support This patch adds initial skeletal KVM RISC-V support which has: 1. A simple implementation of arch specific VM functions except kvm_vm_ioctl_get_dirty_log() which will implemeted in-future as part of stage2 page loging. 2. Stubs of required arch specific VCPU functions except kvm_arch_vcpu_ioctl_run() which is semi-complete and extended by subsequent patches. 3. Stubs for required arch specific stage2 MMU functions. 
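Not part of either patch: kvm_riscv_stage2_update_hgatp() is left as a stub below, and a rough sketch of how a later change might compose the hgatp value from the fields defined in the previous patch looks like this (Sv39x4 mode and the vmid/pgd_phys parameters are assumptions of the sketch, not code from this series):

    /* Illustrative sketch, RV64 (Sv39x4) only. */
    static unsigned long make_hgatp(unsigned long vmid, phys_addr_t pgd_phys)
    {
            unsigned long hgatp = HGATP_MODE_SV39X4 << HGATP_MODE_SHIFT;

            hgatp |= (vmid << HGATP_VMID_SHIFT) & HGATP_VMID_MASK;
            hgatp |= (pgd_phys >> HGATP_PAGE_SHIFT) & HGATP_PPN;
            return hgatp;
            /* a real implementation would then csr_write(CSR_HGATP, hgatp) */
    }
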
Signed-off-by: Anup Patel Acked-by: Paolo Bonzini Reviewed-by: Paolo Bonzini Reviewed-by: Alexander Graf Acked-by: Palmer Dabbelt --- arch/riscv/Kconfig | 1 + arch/riscv/Makefile | 1 + arch/riscv/include/asm/kvm_host.h | 84 ++++++++++ arch/riscv/include/asm/kvm_types.h | 7 + arch/riscv/include/uapi/asm/kvm.h | 47 ++++++ arch/riscv/kvm/Kconfig | 33 ++++ arch/riscv/kvm/Makefile | 13 ++ arch/riscv/kvm/main.c | 95 +++++++++++ arch/riscv/kvm/mmu.c | 80 ++++++++++ arch/riscv/kvm/vcpu.c | 314 +++++++++++++++++++++++++++++++++++++ arch/riscv/kvm/vcpu_exit.c | 35 +++++ arch/riscv/kvm/vm.c | 95 +++++++++++ 12 files changed, 805 insertions(+) create mode 100644 arch/riscv/include/asm/kvm_host.h create mode 100644 arch/riscv/include/asm/kvm_types.h create mode 100644 arch/riscv/include/uapi/asm/kvm.h create mode 100644 arch/riscv/kvm/Kconfig create mode 100644 arch/riscv/kvm/Makefile create mode 100644 arch/riscv/kvm/main.c create mode 100644 arch/riscv/kvm/mmu.c create mode 100644 arch/riscv/kvm/vcpu.c create mode 100644 arch/riscv/kvm/vcpu_exit.c create mode 100644 arch/riscv/kvm/vm.c diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 301a54233c7e..f5fe8a7f0e24 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -562,4 +562,5 @@ source "kernel/power/Kconfig" endmenu +source "arch/riscv/kvm/Kconfig" source "drivers/firmware/Kconfig" diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile index 0eb4568fbd29..58c1a28e20bb 100644 --- a/arch/riscv/Makefile +++ b/arch/riscv/Makefile @@ -100,6 +100,7 @@ endif head-y := arch/riscv/kernel/head.o core-$(CONFIG_RISCV_ERRATA_ALTERNATIVE) += arch/riscv/errata/ +core-$(CONFIG_KVM) += arch/riscv/kvm/ libs-y += arch/riscv/lib/ libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h new file mode 100644 index 000000000000..08a8f53bf814 --- /dev/null +++ b/arch/riscv/include/asm/kvm_host.h @@ -0,0 +1,84 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2019 Western Digital Corporation or its affiliates. 
+ * + * Authors: + * Anup Patel + */ + +#ifndef __RISCV_KVM_HOST_H__ +#define __RISCV_KVM_HOST_H__ + +#include +#include +#include + +#ifdef CONFIG_64BIT +#define KVM_MAX_VCPUS (1U << 16) +#else +#define KVM_MAX_VCPUS (1U << 9) +#endif + +#define KVM_HALT_POLL_NS_DEFAULT 500000 + +#define KVM_VCPU_MAX_FEATURES 0 + +#define KVM_REQ_SLEEP \ + KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) +#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(1) + +struct kvm_vm_stat { + struct kvm_vm_stat_generic generic; +}; + +struct kvm_vcpu_stat { + struct kvm_vcpu_stat_generic generic; + u64 ecall_exit_stat; + u64 wfi_exit_stat; + u64 mmio_exit_user; + u64 mmio_exit_kernel; + u64 exits; +}; + +struct kvm_arch_memory_slot { +}; + +struct kvm_arch { + /* stage2 page table */ + pgd_t *pgd; + phys_addr_t pgd_phys; +}; + +struct kvm_cpu_trap { + unsigned long sepc; + unsigned long scause; + unsigned long stval; + unsigned long htval; + unsigned long htinst; +}; + +struct kvm_vcpu_arch { + /* Don't run the VCPU (blocked) */ + bool pause; + + /* SRCU lock index for in-kernel run loop */ + int srcu_idx; +}; + +static inline void kvm_arch_hardware_unsetup(void) {} +static inline void kvm_arch_sync_events(struct kvm *kvm) {} +static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} +static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {} + +void kvm_riscv_stage2_flush_cache(struct kvm_vcpu *vcpu); +int kvm_riscv_stage2_alloc_pgd(struct kvm *kvm); +void kvm_riscv_stage2_free_pgd(struct kvm *kvm); +void kvm_riscv_stage2_update_hgatp(struct kvm_vcpu *vcpu); + +int kvm_riscv_vcpu_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run); +int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, + struct kvm_cpu_trap *trap); + +static inline void __kvm_riscv_switch_to(struct kvm_vcpu_arch *vcpu_arch) {} + +#endif /* __RISCV_KVM_HOST_H__ */ diff --git a/arch/riscv/include/asm/kvm_types.h b/arch/riscv/include/asm/kvm_types.h new file mode 100644 index 000000000000..e476b404eb67 --- /dev/null +++ b/arch/riscv/include/asm/kvm_types.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_RISCV_KVM_TYPES_H +#define _ASM_RISCV_KVM_TYPES_H + +#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE 40 + +#endif /* _ASM_RISCV_KVM_TYPES_H */ diff --git a/arch/riscv/include/uapi/asm/kvm.h b/arch/riscv/include/uapi/asm/kvm.h new file mode 100644 index 000000000000..984d041a3e3b --- /dev/null +++ b/arch/riscv/include/uapi/asm/kvm.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright (C) 2019 Western Digital Corporation or its affiliates. 
+ * + * Authors: + * Anup Patel + */ + +#ifndef __LINUX_KVM_RISCV_H +#define __LINUX_KVM_RISCV_H + +#ifndef __ASSEMBLY__ + +#include +#include + +#define __KVM_HAVE_READONLY_MEM + +#define KVM_COALESCED_MMIO_PAGE_OFFSET 1 + +/* for KVM_GET_REGS and KVM_SET_REGS */ +struct kvm_regs { +}; + +/* for KVM_GET_FPU and KVM_SET_FPU */ +struct kvm_fpu { +}; + +/* KVM Debug exit structure */ +struct kvm_debug_exit_arch { +}; + +/* for KVM_SET_GUEST_DEBUG */ +struct kvm_guest_debug_arch { +}; + +/* definition of registers in kvm_run */ +struct kvm_sync_regs { +}; + +/* dummy definition */ +struct kvm_sregs { +}; + +#endif + +#endif /* __LINUX_KVM_RISCV_H */ diff --git a/arch/riscv/kvm/Kconfig b/arch/riscv/kvm/Kconfig new file mode 100644 index 000000000000..88edd477b3a8 --- /dev/null +++ b/arch/riscv/kvm/Kconfig @@ -0,0 +1,33 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# KVM configuration +# + +source "virt/kvm/Kconfig" + +menuconfig VIRTUALIZATION + bool "Virtualization" + help + Say Y here to get to see options for using your Linux host to run + other operating systems inside virtual machines (guests). + This option alone does not add any kernel code. + + If you say N, all options in this submenu will be skipped and + disabled. + +if VIRTUALIZATION + +config KVM + tristate "Kernel-based Virtual Machine (KVM) support (EXPERIMENTAL)" + depends on RISCV_SBI && MMU + select PREEMPT_NOTIFIERS + select ANON_INODES + select KVM_MMIO + select HAVE_KVM_VCPU_ASYNC_IOCTL + select SRCU + help + Support hosting virtualized guest machines. + + If unsure, say N. + +endif # VIRTUALIZATION diff --git a/arch/riscv/kvm/Makefile b/arch/riscv/kvm/Makefile new file mode 100644 index 000000000000..4732094391bf --- /dev/null +++ b/arch/riscv/kvm/Makefile @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for RISC-V KVM support +# + +ccflags-y += -I $(srctree)/$(src) + +KVM := ../../../virt/kvm + +obj-$(CONFIG_KVM) += kvm.o + +kvm-y += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/binary_stats.o \ + main.o vm.o mmu.o vcpu.o vcpu_exit.o diff --git a/arch/riscv/kvm/main.c b/arch/riscv/kvm/main.c new file mode 100644 index 000000000000..47926f0c175d --- /dev/null +++ b/arch/riscv/kvm/main.c @@ -0,0 +1,95 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 Western Digital Corporation or its affiliates. 
+ * + * Authors: + * Anup Patel + */ + +#include +#include +#include +#include +#include +#include +#include + +long kvm_arch_dev_ioctl(struct file *filp, + unsigned int ioctl, unsigned long arg) +{ + return -EINVAL; +} + +int kvm_arch_check_processor_compat(void *opaque) +{ + return 0; +} + +int kvm_arch_hardware_setup(void *opaque) +{ + return 0; +} + +int kvm_arch_hardware_enable(void) +{ + unsigned long hideleg, hedeleg; + + hedeleg = 0; + hedeleg |= (1UL << EXC_INST_MISALIGNED); + hedeleg |= (1UL << EXC_BREAKPOINT); + hedeleg |= (1UL << EXC_SYSCALL); + hedeleg |= (1UL << EXC_INST_PAGE_FAULT); + hedeleg |= (1UL << EXC_LOAD_PAGE_FAULT); + hedeleg |= (1UL << EXC_STORE_PAGE_FAULT); + csr_write(CSR_HEDELEG, hedeleg); + + hideleg = 0; + hideleg |= (1UL << IRQ_VS_SOFT); + hideleg |= (1UL << IRQ_VS_TIMER); + hideleg |= (1UL << IRQ_VS_EXT); + csr_write(CSR_HIDELEG, hideleg); + + csr_write(CSR_HCOUNTEREN, -1UL); + + csr_write(CSR_HVIP, 0); + + return 0; +} + +void kvm_arch_hardware_disable(void) +{ + csr_write(CSR_HEDELEG, 0); + csr_write(CSR_HIDELEG, 0); +} + +int kvm_arch_init(void *opaque) +{ + if (!riscv_isa_extension_available(NULL, h)) { + kvm_info("hypervisor extension not available\n"); + return -ENODEV; + } + + if (sbi_spec_is_0_1()) { + kvm_info("require SBI v0.2 or higher\n"); + return -ENODEV; + } + + if (sbi_probe_extension(SBI_EXT_RFENCE) <= 0) { + kvm_info("require SBI RFENCE extension\n"); + return -ENODEV; + } + + kvm_info("hypervisor extension available\n"); + + return 0; +} + +void kvm_arch_exit(void) +{ +} + +static int riscv_kvm_init(void) +{ + return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE); +} +module_init(riscv_kvm_init); diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c new file mode 100644 index 000000000000..abfd2b22fa8e --- /dev/null +++ b/arch/riscv/kvm/mmu.c @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 Western Digital Corporation or its affiliates. + * + * Authors: + * Anup Patel + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) +{ +} + +void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free) +{ +} + +void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) +{ +} + +void kvm_arch_flush_shadow_all(struct kvm *kvm) +{ + /* TODO: */ +} + +void kvm_arch_flush_shadow_memslot(struct kvm *kvm, + struct kvm_memory_slot *slot) +{ +} + +void kvm_arch_commit_memory_region(struct kvm *kvm, + const struct kvm_userspace_memory_region *mem, + struct kvm_memory_slot *old, + const struct kvm_memory_slot *new, + enum kvm_mr_change change) +{ + /* TODO: */ +} + +int kvm_arch_prepare_memory_region(struct kvm *kvm, + struct kvm_memory_slot *memslot, + const struct kvm_userspace_memory_region *mem, + enum kvm_mr_change change) +{ + /* TODO: */ + return 0; +} + +void kvm_riscv_stage2_flush_cache(struct kvm_vcpu *vcpu) +{ + /* TODO: */ +} + +int kvm_riscv_stage2_alloc_pgd(struct kvm *kvm) +{ + /* TODO: */ + return 0; +} + +void kvm_riscv_stage2_free_pgd(struct kvm *kvm) +{ + /* TODO: */ +} + +void kvm_riscv_stage2_update_hgatp(struct kvm_vcpu *vcpu) +{ + /* TODO: */ +} diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c new file mode 100644 index 000000000000..810b7ef30c0b --- /dev/null +++ b/arch/riscv/kvm/vcpu.c @@ -0,0 +1,314 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 Western Digital Corporation or its affiliates. 
+ * + * Authors: + * Anup Patel + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = { + KVM_GENERIC_VCPU_STATS(), + STATS_DESC_COUNTER(VCPU, ecall_exit_stat), + STATS_DESC_COUNTER(VCPU, wfi_exit_stat), + STATS_DESC_COUNTER(VCPU, mmio_exit_user), + STATS_DESC_COUNTER(VCPU, mmio_exit_kernel), + STATS_DESC_COUNTER(VCPU, exits) +}; + +const struct kvm_stats_header kvm_vcpu_stats_header = { + .name_size = KVM_STATS_NAME_SIZE, + .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc), + .id_offset = sizeof(struct kvm_stats_header), + .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE, + .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE + + sizeof(kvm_vcpu_stats_desc), +}; + +int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id) +{ + return 0; +} + +int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) +{ + /* TODO: */ + return 0; +} + +void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) +{ +} + +int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) +{ + /* TODO: */ + return 0; +} + +void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) +{ + /* TODO: */ +} + +int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) +{ + /* TODO: */ + return 0; +} + +void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) +{ +} + +void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) +{ +} + +int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) +{ + /* TODO: */ + return 0; +} + +int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) +{ + /* TODO: */ + return 0; +} + +bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) +{ + /* TODO: */ + return false; +} + +vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) +{ + return VM_FAULT_SIGBUS; +} + +long kvm_arch_vcpu_async_ioctl(struct file *filp, + unsigned int ioctl, unsigned long arg) +{ + /* TODO; */ + return -ENOIOCTLCMD; +} + +long kvm_arch_vcpu_ioctl(struct file *filp, + unsigned int ioctl, unsigned long arg) +{ + /* TODO: */ + return -EINVAL; +} + +int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, + struct kvm_sregs *sregs) +{ + return -EINVAL; +} + +int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, + struct kvm_sregs *sregs) +{ + return -EINVAL; +} + +int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) +{ + return -EINVAL; +} + +int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) +{ + return -EINVAL; +} + +int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, + struct kvm_translation *tr) +{ + return -EINVAL; +} + +int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) +{ + return -EINVAL; +} + +int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) +{ + return -EINVAL; +} + +int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, + struct kvm_mp_state *mp_state) +{ + /* TODO: */ + return 0; +} + +int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, + struct kvm_mp_state *mp_state) +{ + /* TODO: */ + return 0; +} + +int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, + struct kvm_guest_debug *dbg) +{ + /* TODO; To be implemented later. 
*/ + return -EINVAL; +} + +void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) +{ + /* TODO: */ + + kvm_riscv_stage2_update_hgatp(vcpu); +} + +void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) +{ + /* TODO: */ +} + +static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu) +{ + /* TODO: */ +} + +int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) +{ + int ret; + struct kvm_cpu_trap trap; + struct kvm_run *run = vcpu->run; + + vcpu->arch.srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); + + /* Process MMIO value returned from user-space */ + if (run->exit_reason == KVM_EXIT_MMIO) { + ret = kvm_riscv_vcpu_mmio_return(vcpu, vcpu->run); + if (ret) { + srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx); + return ret; + } + } + + if (run->immediate_exit) { + srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx); + return -EINTR; + } + + vcpu_load(vcpu); + + kvm_sigset_activate(vcpu); + + ret = 1; + run->exit_reason = KVM_EXIT_UNKNOWN; + while (ret > 0) { + /* Check conditions before entering the guest */ + cond_resched(); + + kvm_riscv_check_vcpu_requests(vcpu); + + preempt_disable(); + + local_irq_disable(); + + /* + * Exit if we have a signal pending so that we can deliver + * the signal to user space. + */ + if (signal_pending(current)) { + ret = -EINTR; + run->exit_reason = KVM_EXIT_INTR; + } + + /* + * Ensure we set mode to IN_GUEST_MODE after we disable + * interrupts and before the final VCPU requests check. + * See the comment in kvm_vcpu_exiting_guest_mode() and + * Documentation/virtual/kvm/vcpu-requests.rst + */ + vcpu->mode = IN_GUEST_MODE; + + srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx); + smp_mb__after_srcu_read_unlock(); + + if (ret <= 0 || + kvm_request_pending(vcpu)) { + vcpu->mode = OUTSIDE_GUEST_MODE; + local_irq_enable(); + preempt_enable(); + vcpu->arch.srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); + continue; + } + + guest_enter_irqoff(); + + __kvm_riscv_switch_to(&vcpu->arch); + + vcpu->mode = OUTSIDE_GUEST_MODE; + vcpu->stat.exits++; + + /* + * Save SCAUSE, STVAL, HTVAL, and HTINST because we might + * get an interrupt between __kvm_riscv_switch_to() and + * local_irq_enable() which can potentially change CSRs. + */ + trap.sepc = 0; + trap.scause = csr_read(CSR_SCAUSE); + trap.stval = csr_read(CSR_STVAL); + trap.htval = csr_read(CSR_HTVAL); + trap.htinst = csr_read(CSR_HTINST); + + /* + * We may have taken a host interrupt in VS/VU-mode (i.e. + * while executing the guest). This interrupt is still + * pending, as we haven't serviced it yet! + * + * We're now back in HS-mode with interrupts disabled + * so enabling the interrupts now will have the effect + * of taking the interrupt again, in HS-mode this time. + */ + local_irq_enable(); + + /* + * We do local_irq_enable() before calling guest_exit() so + * that if a timer interrupt hits while running the guest + * we account that tick as being spent in the guest. We + * enable preemption after calling guest_exit() so that if + * we get preempted we make sure ticks after that is not + * counted as guest time. 
+ */ + guest_exit(); + + preempt_enable(); + + vcpu->arch.srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); + + ret = kvm_riscv_vcpu_exit(vcpu, run, &trap); + } + + kvm_sigset_deactivate(vcpu); + + vcpu_put(vcpu); + + srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx); + + return ret; +} diff --git a/arch/riscv/kvm/vcpu_exit.c b/arch/riscv/kvm/vcpu_exit.c new file mode 100644 index 000000000000..4484e9200fe4 --- /dev/null +++ b/arch/riscv/kvm/vcpu_exit.c @@ -0,0 +1,35 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 Western Digital Corporation or its affiliates. + * + * Authors: + * Anup Patel + */ + +#include +#include +#include + +/** + * kvm_riscv_vcpu_mmio_return -- Handle MMIO loads after user space emulation + * or in-kernel IO emulation + * + * @vcpu: The VCPU pointer + * @run: The VCPU run struct containing the mmio data + */ +int kvm_riscv_vcpu_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run) +{ + /* TODO: */ + return 0; +} + +/* + * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on + * proper exit to userspace. + */ +int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, + struct kvm_cpu_trap *trap) +{ + /* TODO: */ + return 0; +} diff --git a/arch/riscv/kvm/vm.c b/arch/riscv/kvm/vm.c new file mode 100644 index 000000000000..22490803d904 --- /dev/null +++ b/arch/riscv/kvm/vm.c @@ -0,0 +1,95 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 Western Digital Corporation or its affiliates. + * + * Authors: + * Anup Patel + */ + +#include +#include +#include +#include +#include + +const struct _kvm_stats_desc kvm_vm_stats_desc[] = { + KVM_GENERIC_VM_STATS() +}; +static_assert(ARRAY_SIZE(kvm_vm_stats_desc) == + sizeof(struct kvm_vm_stat) / sizeof(u64)); + +const struct kvm_stats_header kvm_vm_stats_header = { + .name_size = KVM_STATS_NAME_SIZE, + .num_desc = ARRAY_SIZE(kvm_vm_stats_desc), + .id_offset = sizeof(struct kvm_stats_header), + .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE, + .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE + + sizeof(kvm_vm_stats_desc), +}; + +int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) +{ + /* TODO: To be added later. 
*/ + return -EOPNOTSUPP; +} + +int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) +{ + int r; + + r = kvm_riscv_stage2_alloc_pgd(kvm); + if (r) + return r; + + return 0; +} + +void kvm_arch_destroy_vm(struct kvm *kvm) +{ + int i; + + for (i = 0; i < KVM_MAX_VCPUS; ++i) { + if (kvm->vcpus[i]) { + kvm_vcpu_destroy(kvm->vcpus[i]); + kvm->vcpus[i] = NULL; + } + } + atomic_set(&kvm->online_vcpus, 0); +} + +int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) +{ + int r; + + switch (ext) { + case KVM_CAP_DEVICE_CTRL: + case KVM_CAP_USER_MEMORY: + case KVM_CAP_DESTROY_MEMORY_REGION_WORKS: + case KVM_CAP_ONE_REG: + case KVM_CAP_READONLY_MEM: + case KVM_CAP_MP_STATE: + case KVM_CAP_IMMEDIATE_EXIT: + r = 1; + break; + case KVM_CAP_NR_VCPUS: + r = num_online_cpus(); + break; + case KVM_CAP_MAX_VCPUS: + r = KVM_MAX_VCPUS; + break; + case KVM_CAP_NR_MEMSLOTS: + r = KVM_USER_MEM_SLOTS; + break; + default: + r = 0; + break; + } + + return r; +} + +long kvm_arch_vm_ioctl(struct file *filp, + unsigned int ioctl, unsigned long arg) +{ + return -EINVAL; +} -- cgit v1.2.3 From a33c72faf2d73a35d85c8da4b65402a50aa7647c Mon Sep 17 00:00:00 2001 From: Anup Patel Date: Mon, 27 Sep 2021 17:10:02 +0530 Subject: RISC-V: KVM: Implement VCPU create, init and destroy functions This patch implements VCPU create, init and destroy functions required by generic KVM module. We don't have much dynamic resources in struct kvm_vcpu_arch so these functions are quite simple for KVM RISC-V. Signed-off-by: Anup Patel Acked-by: Paolo Bonzini Reviewed-by: Paolo Bonzini Reviewed-by: Alexander Graf Acked-by: Palmer Dabbelt --- arch/riscv/include/asm/kvm_host.h | 69 +++++++++++++++++++++++++++++++++++++++ arch/riscv/kvm/vcpu.c | 55 ++++++++++++++++++++++++++----- 2 files changed, 115 insertions(+), 9 deletions(-) diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h index 08a8f53bf814..0db663cf74e4 100644 --- a/arch/riscv/include/asm/kvm_host.h +++ b/arch/riscv/include/asm/kvm_host.h @@ -57,7 +57,76 @@ struct kvm_cpu_trap { unsigned long htinst; }; +struct kvm_cpu_context { + unsigned long zero; + unsigned long ra; + unsigned long sp; + unsigned long gp; + unsigned long tp; + unsigned long t0; + unsigned long t1; + unsigned long t2; + unsigned long s0; + unsigned long s1; + unsigned long a0; + unsigned long a1; + unsigned long a2; + unsigned long a3; + unsigned long a4; + unsigned long a5; + unsigned long a6; + unsigned long a7; + unsigned long s2; + unsigned long s3; + unsigned long s4; + unsigned long s5; + unsigned long s6; + unsigned long s7; + unsigned long s8; + unsigned long s9; + unsigned long s10; + unsigned long s11; + unsigned long t3; + unsigned long t4; + unsigned long t5; + unsigned long t6; + unsigned long sepc; + unsigned long sstatus; + unsigned long hstatus; +}; + +struct kvm_vcpu_csr { + unsigned long vsstatus; + unsigned long vsie; + unsigned long vstvec; + unsigned long vsscratch; + unsigned long vsepc; + unsigned long vscause; + unsigned long vstval; + unsigned long hvip; + unsigned long vsatp; + unsigned long scounteren; +}; + struct kvm_vcpu_arch { + /* VCPU ran at least once */ + bool ran_atleast_once; + + /* ISA feature bits (similar to MISA) */ + unsigned long isa; + + /* CPU context of Guest VCPU */ + struct kvm_cpu_context guest_context; + + /* CPU CSR context of Guest VCPU */ + struct kvm_vcpu_csr guest_csr; + + /* CPU context upon Guest VCPU reset */ + struct kvm_cpu_context guest_reset_context; + + /* CPU CSR context upon Guest VCPU reset */ + struct kvm_vcpu_csr 
guest_reset_csr; + /* Don't run the VCPU (blocked) */ bool pause; diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c index 810b7ef30c0b..7b45aa23fba3 100644 --- a/arch/riscv/kvm/vcpu.c +++ b/arch/riscv/kvm/vcpu.c @@ -38,6 +38,27 @@ const struct kvm_stats_header kvm_vcpu_stats_header = { sizeof(kvm_vcpu_stats_desc), }; +#define KVM_RISCV_ISA_ALLOWED (riscv_isa_extension_mask(a) | \ + riscv_isa_extension_mask(c) | \ + riscv_isa_extension_mask(d) | \ + riscv_isa_extension_mask(f) | \ + riscv_isa_extension_mask(i) | \ + riscv_isa_extension_mask(m) | \ + riscv_isa_extension_mask(s) | \ + riscv_isa_extension_mask(u)) + +static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu) +{ + struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; + struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr; + struct kvm_cpu_context *cntx = &vcpu->arch.guest_context; + struct kvm_cpu_context *reset_cntx = &vcpu->arch.guest_reset_context; + + memcpy(csr, reset_csr, sizeof(*csr)); + + memcpy(cntx, reset_cntx, sizeof(*cntx)); +} + int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id) { return 0; @@ -45,7 +66,25 @@ int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id) int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) { - /* TODO: */ + struct kvm_cpu_context *cntx; + + /* Mark this VCPU never ran */ + vcpu->arch.ran_atleast_once = false; + + /* Setup ISA features available to VCPU */ + vcpu->arch.isa = riscv_isa_extension_base(NULL) & KVM_RISCV_ISA_ALLOWED; + + /* Setup reset state of shadow SSTATUS and HSTATUS CSRs */ + cntx = &vcpu->arch.guest_reset_context; + cntx->sstatus = SR_SPP | SR_SPIE; + cntx->hstatus = 0; + cntx->hstatus |= HSTATUS_VTW; + cntx->hstatus |= HSTATUS_SPVP; + cntx->hstatus |= HSTATUS_SPV; + + /* Reset VCPU */ + kvm_riscv_reset_vcpu(vcpu); + return 0; } @@ -53,15 +92,10 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) { } -int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) -{ - /* TODO: */ - return 0; -} - void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) { - /* TODO: */ + /* Flush the pages pre-allocated for Stage2 page table mappings */ + kvm_riscv_stage2_flush_cache(vcpu); } int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) @@ -197,6 +231,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) struct kvm_cpu_trap trap; struct kvm_run *run = vcpu->run; + /* Mark this VCPU ran at least once */ + vcpu->arch.ran_atleast_once = true; + vcpu->arch.srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); /* Process MMIO value returned from user-space */ @@ -270,7 +307,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) * get an interrupt between __kvm_riscv_switch_to() and * local_irq_enable() which can potentially change CSRs. */ - trap.sepc = 0; + trap.sepc = vcpu->arch.guest_context.sepc; trap.scause = csr_read(CSR_SCAUSE); trap.stval = csr_read(CSR_STVAL); trap.htval = csr_read(CSR_HTVAL); -- cgit v1.2.3 From cce69aff689ee53c64b19d052d88f5bbbfd322f6 Mon Sep 17 00:00:00 2001 From: Anup Patel Date: Mon, 27 Sep 2021 17:10:03 +0530 Subject: RISC-V: KVM: Implement VCPU interrupts and requests handling This patch implements VCPU interrupts and requests which are both asynchronous events. The VCPU interrupts can be set/unset using KVM_INTERRUPT ioctl from user-space. In future, the in-kernel IRQCHIP emulation will use kvm_riscv_vcpu_set_interrupt() and kvm_riscv_vcpu_unset_interrupt() functions to set/unset VCPU interrupts. 
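For reference, a minimal user-space sketch of driving this ioctl is shown
below. It is only an illustration of the interface added here, not part of
the patch itself; the vcpu_fd is assumed to come from a prior
KVM_CREATE_VCPU call and error handling is omitted.

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Assert or clear the VS-level external interrupt for one VCPU. */
	static int toggle_vs_ext_irq(int vcpu_fd, int assert)
	{
		struct kvm_interrupt irq = {
			.irq = assert ? KVM_INTERRUPT_SET : KVM_INTERRUPT_UNSET,
		};

		return ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
	}

Because KVM_INTERRUPT is dispatched through kvm_arch_vcpu_async_ioctl(), a
VMM can issue it from an interrupt-injection thread without waiting for the
VCPU thread to leave KVM_RUN.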
Important VCPU requests implemented by this patch are: KVM_REQ_SLEEP - set whenever VCPU itself goes to sleep state KVM_REQ_VCPU_RESET - set whenever VCPU reset is requested The WFI trap-n-emulate (added later) will use KVM_REQ_SLEEP request and kvm_riscv_vcpu_has_interrupt() function. The KVM_REQ_VCPU_RESET request will be used by SBI emulation (added later) to power-up a VCPU in power-off state. The user-space can use the GET_MPSTATE/SET_MPSTATE ioctls to get/set power state of a VCPU. Signed-off-by: Anup Patel Acked-by: Paolo Bonzini Reviewed-by: Paolo Bonzini Reviewed-by: Alexander Graf Acked-by: Palmer Dabbelt --- arch/riscv/include/asm/kvm_host.h | 23 +++++ arch/riscv/include/uapi/asm/kvm.h | 3 + arch/riscv/kvm/vcpu.c | 184 +++++++++++++++++++++++++++++++++++--- 3 files changed, 197 insertions(+), 13 deletions(-) diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h index 0db663cf74e4..e307528bbdbf 100644 --- a/arch/riscv/include/asm/kvm_host.h +++ b/arch/riscv/include/asm/kvm_host.h @@ -127,6 +127,21 @@ struct kvm_vcpu_arch { /* CPU CSR context upon Guest VCPU reset */ struct kvm_vcpu_csr guest_reset_csr; + /* + * VCPU interrupts + * + * We have a lockless approach for tracking pending VCPU interrupts + * implemented using atomic bitops. The irqs_pending bitmap represent + * pending interrupts whereas irqs_pending_mask represent bits changed + * in irqs_pending. Our approach is modeled around multiple producer + * and single consumer problem where the consumer is the VCPU itself. + */ + unsigned long irqs_pending; + unsigned long irqs_pending_mask; + + /* VCPU power-off state */ + bool power_off; + /* Don't run the VCPU (blocked) */ bool pause; @@ -150,4 +165,12 @@ int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, static inline void __kvm_riscv_switch_to(struct kvm_vcpu_arch *vcpu_arch) {} +int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq); +int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq); +void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu); +void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu); +bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, unsigned long mask); +void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu); +void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu); + #endif /* __RISCV_KVM_HOST_H__ */ diff --git a/arch/riscv/include/uapi/asm/kvm.h b/arch/riscv/include/uapi/asm/kvm.h index 984d041a3e3b..3d3d703713c6 100644 --- a/arch/riscv/include/uapi/asm/kvm.h +++ b/arch/riscv/include/uapi/asm/kvm.h @@ -18,6 +18,9 @@ #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 +#define KVM_INTERRUPT_SET -1U +#define KVM_INTERRUPT_UNSET -2U + /* for KVM_GET_REGS and KVM_SET_REGS */ struct kvm_regs { }; diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c index 7b45aa23fba3..3342c7305265 100644 --- a/arch/riscv/kvm/vcpu.c +++ b/arch/riscv/kvm/vcpu.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -57,6 +58,9 @@ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu) memcpy(csr, reset_csr, sizeof(*csr)); memcpy(cntx, reset_cntx, sizeof(*cntx)); + + WRITE_ONCE(vcpu->arch.irqs_pending, 0); + WRITE_ONCE(vcpu->arch.irqs_pending_mask, 0); } int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id) @@ -100,8 +104,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) { - /* TODO: */ - return 0; + return kvm_riscv_vcpu_has_interrupts(vcpu, 1UL << IRQ_VS_TIMER); } void 
kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) @@ -114,20 +117,18 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) { - /* TODO: */ - return 0; + return (kvm_riscv_vcpu_has_interrupts(vcpu, -1UL) && + !vcpu->arch.power_off && !vcpu->arch.pause); } int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) { - /* TODO: */ - return 0; + return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE; } bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) { - /* TODO: */ - return false; + return (vcpu->arch.guest_context.sstatus & SR_SPP) ? true : false; } vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) @@ -138,7 +139,21 @@ vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) long kvm_arch_vcpu_async_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { - /* TODO; */ + struct kvm_vcpu *vcpu = filp->private_data; + void __user *argp = (void __user *)arg; + + if (ioctl == KVM_INTERRUPT) { + struct kvm_interrupt irq; + + if (copy_from_user(&irq, argp, sizeof(irq))) + return -EFAULT; + + if (irq.irq == KVM_INTERRUPT_SET) + return kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_EXT); + else + return kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_EXT); + } + return -ENOIOCTLCMD; } @@ -187,18 +202,123 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) return -EINVAL; } +void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu) +{ + struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; + unsigned long mask, val; + + if (READ_ONCE(vcpu->arch.irqs_pending_mask)) { + mask = xchg_acquire(&vcpu->arch.irqs_pending_mask, 0); + val = READ_ONCE(vcpu->arch.irqs_pending) & mask; + + csr->hvip &= ~mask; + csr->hvip |= val; + } +} + +void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu) +{ + unsigned long hvip; + struct kvm_vcpu_arch *v = &vcpu->arch; + struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; + + /* Read current HVIP and VSIE CSRs */ + csr->vsie = csr_read(CSR_VSIE); + + /* Sync-up HVIP.VSSIP bit changes does by Guest */ + hvip = csr_read(CSR_HVIP); + if ((csr->hvip ^ hvip) & (1UL << IRQ_VS_SOFT)) { + if (hvip & (1UL << IRQ_VS_SOFT)) { + if (!test_and_set_bit(IRQ_VS_SOFT, + &v->irqs_pending_mask)) + set_bit(IRQ_VS_SOFT, &v->irqs_pending); + } else { + if (!test_and_set_bit(IRQ_VS_SOFT, + &v->irqs_pending_mask)) + clear_bit(IRQ_VS_SOFT, &v->irqs_pending); + } + } +} + +int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq) +{ + if (irq != IRQ_VS_SOFT && + irq != IRQ_VS_TIMER && + irq != IRQ_VS_EXT) + return -EINVAL; + + set_bit(irq, &vcpu->arch.irqs_pending); + smp_mb__before_atomic(); + set_bit(irq, &vcpu->arch.irqs_pending_mask); + + kvm_vcpu_kick(vcpu); + + return 0; +} + +int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq) +{ + if (irq != IRQ_VS_SOFT && + irq != IRQ_VS_TIMER && + irq != IRQ_VS_EXT) + return -EINVAL; + + clear_bit(irq, &vcpu->arch.irqs_pending); + smp_mb__before_atomic(); + set_bit(irq, &vcpu->arch.irqs_pending_mask); + + return 0; +} + +bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, unsigned long mask) +{ + unsigned long ie = ((vcpu->arch.guest_csr.vsie & VSIP_VALID_MASK) + << VSIP_TO_HVIP_SHIFT) & mask; + + return (READ_ONCE(vcpu->arch.irqs_pending) & ie) ? 
true : false; +} + +void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu) +{ + vcpu->arch.power_off = true; + kvm_make_request(KVM_REQ_SLEEP, vcpu); + kvm_vcpu_kick(vcpu); +} + +void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu) +{ + vcpu->arch.power_off = false; + kvm_vcpu_wake_up(vcpu); +} + int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state) { - /* TODO: */ + if (vcpu->arch.power_off) + mp_state->mp_state = KVM_MP_STATE_STOPPED; + else + mp_state->mp_state = KVM_MP_STATE_RUNNABLE; + return 0; } int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state) { - /* TODO: */ - return 0; + int ret = 0; + + switch (mp_state->mp_state) { + case KVM_MP_STATE_RUNNABLE: + vcpu->arch.power_off = false; + break; + case KVM_MP_STATE_STOPPED: + kvm_riscv_vcpu_power_off(vcpu); + break; + default: + ret = -EINVAL; + } + + return ret; } int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, @@ -222,7 +342,33 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu) { - /* TODO: */ + struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu); + + if (kvm_request_pending(vcpu)) { + if (kvm_check_request(KVM_REQ_SLEEP, vcpu)) { + rcuwait_wait_event(wait, + (!vcpu->arch.power_off) && (!vcpu->arch.pause), + TASK_INTERRUPTIBLE); + + if (vcpu->arch.power_off || vcpu->arch.pause) { + /* + * Awaken to handle a signal, request to + * sleep again later. + */ + kvm_make_request(KVM_REQ_SLEEP, vcpu); + } + } + + if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu)) + kvm_riscv_reset_vcpu(vcpu); + } +} + +static void kvm_riscv_update_hvip(struct kvm_vcpu *vcpu) +{ + struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; + + csr_write(CSR_HVIP, csr->hvip); } int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) @@ -286,6 +432,15 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx); smp_mb__after_srcu_read_unlock(); + /* + * We might have got VCPU interrupts updated asynchronously + * so update it in HW. + */ + kvm_riscv_vcpu_flush_interrupts(vcpu); + + /* Update HVIP CSR for current CPU */ + kvm_riscv_update_hvip(vcpu); + if (ret <= 0 || kvm_request_pending(vcpu)) { vcpu->mode = OUTSIDE_GUEST_MODE; @@ -313,6 +468,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) trap.htval = csr_read(CSR_HTVAL); trap.htinst = csr_read(CSR_HTINST); + /* Syncup interrupts state with HW */ + kvm_riscv_vcpu_sync_interrupts(vcpu); + /* * We may have taken a host interrupt in VS/VU-mode (i.e. * while executing the guest). This interrupt is still -- cgit v1.2.3 From 92ad82002c39ede73b397d17ba7ab9842517dbe5 Mon Sep 17 00:00:00 2001 From: Anup Patel Date: Mon, 27 Sep 2021 17:10:04 +0530 Subject: RISC-V: KVM: Implement KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls For KVM RISC-V, we use KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls to access VCPU config and registers from user-space. We have three types of VCPU registers: 1. CONFIG - these are VCPU config and capabilities 2. CORE - these are VCPU general purpose registers 3. CSR - these are VCPU control and status registers The CONFIG register available to user-space is ISA. The ISA register is a read and write register where user-space can only write the desired VCPU ISA capabilities before running the VCPU. The CORE registers available to user-space are PC, RA, SP, GP, TP, A0-A7, T0-T6, S0-S11 and MODE. Most of these are RISC-V general registers except PC and MODE. 
The PC register represents program counter whereas the MODE register represent VCPU privilege mode (i.e. S/U-mode). The CSRs available to user-space are SSTATUS, SIE, STVEC, SSCRATCH, SEPC, SCAUSE, STVAL, SIP, and SATP. All of these are read/write registers. In future, more VCPU register types will be added (such as FP) for the KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls. Signed-off-by: Anup Patel Acked-by: Paolo Bonzini Reviewed-by: Paolo Bonzini Acked-by: Palmer Dabbelt --- arch/riscv/include/uapi/asm/kvm.h | 53 ++++++++- arch/riscv/kvm/vcpu.c | 241 +++++++++++++++++++++++++++++++++++++- 2 files changed, 290 insertions(+), 4 deletions(-) diff --git a/arch/riscv/include/uapi/asm/kvm.h b/arch/riscv/include/uapi/asm/kvm.h index 3d3d703713c6..f7e9dc388d54 100644 --- a/arch/riscv/include/uapi/asm/kvm.h +++ b/arch/riscv/include/uapi/asm/kvm.h @@ -41,10 +41,61 @@ struct kvm_guest_debug_arch { struct kvm_sync_regs { }; -/* dummy definition */ +/* for KVM_GET_SREGS and KVM_SET_SREGS */ struct kvm_sregs { }; +/* CONFIG registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */ +struct kvm_riscv_config { + unsigned long isa; +}; + +/* CORE registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */ +struct kvm_riscv_core { + struct user_regs_struct regs; + unsigned long mode; +}; + +/* Possible privilege modes for kvm_riscv_core */ +#define KVM_RISCV_MODE_S 1 +#define KVM_RISCV_MODE_U 0 + +/* CSR registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */ +struct kvm_riscv_csr { + unsigned long sstatus; + unsigned long sie; + unsigned long stvec; + unsigned long sscratch; + unsigned long sepc; + unsigned long scause; + unsigned long stval; + unsigned long sip; + unsigned long satp; + unsigned long scounteren; +}; + +#define KVM_REG_SIZE(id) \ + (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT)) + +/* If you need to interpret the index values, here is the key: */ +#define KVM_REG_RISCV_TYPE_MASK 0x00000000FF000000 +#define KVM_REG_RISCV_TYPE_SHIFT 24 + +/* Config registers are mapped as type 1 */ +#define KVM_REG_RISCV_CONFIG (0x01 << KVM_REG_RISCV_TYPE_SHIFT) +#define KVM_REG_RISCV_CONFIG_REG(name) \ + (offsetof(struct kvm_riscv_config, name) / sizeof(unsigned long)) + +/* Core registers are mapped as type 2 */ +#define KVM_REG_RISCV_CORE (0x02 << KVM_REG_RISCV_TYPE_SHIFT) +#define KVM_REG_RISCV_CORE_REG(name) \ + (offsetof(struct kvm_riscv_core, name) / sizeof(unsigned long)) + +/* Control and status registers are mapped as type 3 */ +#define KVM_REG_RISCV_CSR (0x03 << KVM_REG_RISCV_TYPE_SHIFT) +#define KVM_REG_RISCV_CSR_REG(name) \ + (offsetof(struct kvm_riscv_csr, name) / sizeof(unsigned long)) + #endif #endif /* __LINUX_KVM_RISCV_H */ diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c index 3342c7305265..a15d97f3e5f6 100644 --- a/arch/riscv/kvm/vcpu.c +++ b/arch/riscv/kvm/vcpu.c @@ -18,7 +18,6 @@ #include #include #include -#include #include const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = { @@ -136,6 +135,220 @@ vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) return VM_FAULT_SIGBUS; } +static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg) +{ + unsigned long __user *uaddr = + (unsigned long __user *)(unsigned long)reg->addr; + unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK | + KVM_REG_SIZE_MASK | + KVM_REG_RISCV_CONFIG); + unsigned long reg_val; + + if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long)) + return -EINVAL; + + switch (reg_num) { + case KVM_REG_RISCV_CONFIG_REG(isa): + reg_val = vcpu->arch.isa; + break; + 
default: + return -EINVAL; + }; + + if (copy_to_user(uaddr, ®_val, KVM_REG_SIZE(reg->id))) + return -EFAULT; + + return 0; +} + +static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg) +{ + unsigned long __user *uaddr = + (unsigned long __user *)(unsigned long)reg->addr; + unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK | + KVM_REG_SIZE_MASK | + KVM_REG_RISCV_CONFIG); + unsigned long reg_val; + + if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long)) + return -EINVAL; + + if (copy_from_user(®_val, uaddr, KVM_REG_SIZE(reg->id))) + return -EFAULT; + + switch (reg_num) { + case KVM_REG_RISCV_CONFIG_REG(isa): + if (!vcpu->arch.ran_atleast_once) { + vcpu->arch.isa = reg_val; + vcpu->arch.isa &= riscv_isa_extension_base(NULL); + vcpu->arch.isa &= KVM_RISCV_ISA_ALLOWED; + } else { + return -EOPNOTSUPP; + } + break; + default: + return -EINVAL; + }; + + return 0; +} + +static int kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg) +{ + struct kvm_cpu_context *cntx = &vcpu->arch.guest_context; + unsigned long __user *uaddr = + (unsigned long __user *)(unsigned long)reg->addr; + unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK | + KVM_REG_SIZE_MASK | + KVM_REG_RISCV_CORE); + unsigned long reg_val; + + if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long)) + return -EINVAL; + if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long)) + return -EINVAL; + + if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc)) + reg_val = cntx->sepc; + else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num && + reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6)) + reg_val = ((unsigned long *)cntx)[reg_num]; + else if (reg_num == KVM_REG_RISCV_CORE_REG(mode)) + reg_val = (cntx->sstatus & SR_SPP) ? + KVM_RISCV_MODE_S : KVM_RISCV_MODE_U; + else + return -EINVAL; + + if (copy_to_user(uaddr, ®_val, KVM_REG_SIZE(reg->id))) + return -EFAULT; + + return 0; +} + +static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg) +{ + struct kvm_cpu_context *cntx = &vcpu->arch.guest_context; + unsigned long __user *uaddr = + (unsigned long __user *)(unsigned long)reg->addr; + unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK | + KVM_REG_SIZE_MASK | + KVM_REG_RISCV_CORE); + unsigned long reg_val; + + if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long)) + return -EINVAL; + if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long)) + return -EINVAL; + + if (copy_from_user(®_val, uaddr, KVM_REG_SIZE(reg->id))) + return -EFAULT; + + if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc)) + cntx->sepc = reg_val; + else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num && + reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6)) + ((unsigned long *)cntx)[reg_num] = reg_val; + else if (reg_num == KVM_REG_RISCV_CORE_REG(mode)) { + if (reg_val == KVM_RISCV_MODE_S) + cntx->sstatus |= SR_SPP; + else + cntx->sstatus &= ~SR_SPP; + } else + return -EINVAL; + + return 0; +} + +static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg) +{ + struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; + unsigned long __user *uaddr = + (unsigned long __user *)(unsigned long)reg->addr; + unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK | + KVM_REG_SIZE_MASK | + KVM_REG_RISCV_CSR); + unsigned long reg_val; + + if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long)) + return -EINVAL; + if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long)) + return -EINVAL; + + if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) { + 
kvm_riscv_vcpu_flush_interrupts(vcpu); + reg_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK; + } else + reg_val = ((unsigned long *)csr)[reg_num]; + + if (copy_to_user(uaddr, ®_val, KVM_REG_SIZE(reg->id))) + return -EFAULT; + + return 0; +} + +static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg) +{ + struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; + unsigned long __user *uaddr = + (unsigned long __user *)(unsigned long)reg->addr; + unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK | + KVM_REG_SIZE_MASK | + KVM_REG_RISCV_CSR); + unsigned long reg_val; + + if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long)) + return -EINVAL; + if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long)) + return -EINVAL; + + if (copy_from_user(®_val, uaddr, KVM_REG_SIZE(reg->id))) + return -EFAULT; + + if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) { + reg_val &= VSIP_VALID_MASK; + reg_val <<= VSIP_TO_HVIP_SHIFT; + } + + ((unsigned long *)csr)[reg_num] = reg_val; + + if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) + WRITE_ONCE(vcpu->arch.irqs_pending_mask, 0); + + return 0; +} + +static int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg) +{ + if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CONFIG) + return kvm_riscv_vcpu_set_reg_config(vcpu, reg); + else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CORE) + return kvm_riscv_vcpu_set_reg_core(vcpu, reg); + else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CSR) + return kvm_riscv_vcpu_set_reg_csr(vcpu, reg); + + return -EINVAL; +} + +static int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg) +{ + if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CONFIG) + return kvm_riscv_vcpu_get_reg_config(vcpu, reg); + else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CORE) + return kvm_riscv_vcpu_get_reg_core(vcpu, reg); + else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CSR) + return kvm_riscv_vcpu_get_reg_csr(vcpu, reg); + + return -EINVAL; +} + long kvm_arch_vcpu_async_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { @@ -160,8 +373,30 @@ long kvm_arch_vcpu_async_ioctl(struct file *filp, long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { - /* TODO: */ - return -EINVAL; + struct kvm_vcpu *vcpu = filp->private_data; + void __user *argp = (void __user *)arg; + long r = -EINVAL; + + switch (ioctl) { + case KVM_SET_ONE_REG: + case KVM_GET_ONE_REG: { + struct kvm_one_reg reg; + + r = -EFAULT; + if (copy_from_user(®, argp, sizeof(reg))) + break; + + if (ioctl == KVM_SET_ONE_REG) + r = kvm_riscv_vcpu_set_reg(vcpu, ®); + else + r = kvm_riscv_vcpu_get_reg(vcpu, ®); + break; + } + default: + break; + } + + return r; } int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, -- cgit v1.2.3 From 34bde9d8b9e6e5249db3c07cf1ebfe75c23c671c Mon Sep 17 00:00:00 2001 From: Anup Patel Date: Mon, 27 Sep 2021 17:10:05 +0530 Subject: RISC-V: KVM: Implement VCPU world-switch This patch implements the VCPU world-switch for KVM RISC-V. The KVM RISC-V world-switch (i.e. __kvm_riscv_switch_to()) mostly switches general purpose registers, SSTATUS, STVEC, SSCRATCH and HSTATUS CSRs. Other CSRs are switched via vcpu_load() and vcpu_put() interface in kvm_arch_vcpu_load() and kvm_arch_vcpu_put() functions respectively. 
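For clarity, a simplified sketch of the resulting per-iteration flow in
kvm_arch_vcpu_ioctl_run() is shown below (request handling, signal checks
and SRCU locking are omitted; all names come from this series):

	local_irq_disable();
	guest_enter_irqoff();

	/*
	 * __kvm_riscv_switch_to() saves the host GPRs, swaps SSTATUS,
	 * HSTATUS and SCOUNTEREN, points SSCRATCH at &vcpu->arch and
	 * STVEC at __kvm_switch_return, restores the guest GPRs and
	 * SEPC, and enters the guest with sret.  Any guest trap lands
	 * in __kvm_switch_return, which undoes the above and returns
	 * back here.
	 */
	__kvm_riscv_switch_to(&vcpu->arch);

	/* Latch trap details before re-enabling host interrupts ... */
	trap.sepc = vcpu->arch.guest_context.sepc;
	trap.scause = csr_read(CSR_SCAUSE);
	trap.stval = csr_read(CSR_STVAL);
	trap.htval = csr_read(CSR_HTVAL);
	trap.htinst = csr_read(CSR_HTINST);

	/* ... then take any pending host interrupt and account guest time. */
	local_irq_enable();
	guest_exit();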
Signed-off-by: Anup Patel Acked-by: Paolo Bonzini Reviewed-by: Paolo Bonzini Reviewed-by: Alexander Graf Acked-by: Palmer Dabbelt --- arch/riscv/include/asm/kvm_host.h | 10 +- arch/riscv/kernel/asm-offsets.c | 78 +++++++++++++++ arch/riscv/kvm/Makefile | 2 +- arch/riscv/kvm/vcpu.c | 30 +++++- arch/riscv/kvm/vcpu_switch.S | 203 ++++++++++++++++++++++++++++++++++++++ 5 files changed, 319 insertions(+), 4 deletions(-) create mode 100644 arch/riscv/kvm/vcpu_switch.S diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h index e307528bbdbf..8ef0ca100e75 100644 --- a/arch/riscv/include/asm/kvm_host.h +++ b/arch/riscv/include/asm/kvm_host.h @@ -115,6 +115,14 @@ struct kvm_vcpu_arch { /* ISA feature bits (similar to MISA) */ unsigned long isa; + /* SSCRATCH, STVEC, and SCOUNTEREN of Host */ + unsigned long host_sscratch; + unsigned long host_stvec; + unsigned long host_scounteren; + + /* CPU context of Host */ + struct kvm_cpu_context host_context; + /* CPU context of Guest VCPU */ struct kvm_cpu_context guest_context; @@ -163,7 +171,7 @@ int kvm_riscv_vcpu_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run); int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, struct kvm_cpu_trap *trap); -static inline void __kvm_riscv_switch_to(struct kvm_vcpu_arch *vcpu_arch) {} +void __kvm_riscv_switch_to(struct kvm_vcpu_arch *vcpu_arch); int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq); int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq); diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c index 90f8ce64fa6f..2fac70303341 100644 --- a/arch/riscv/kernel/asm-offsets.c +++ b/arch/riscv/kernel/asm-offsets.c @@ -7,7 +7,9 @@ #define GENERATING_ASM_OFFSETS #include +#include #include +#include #include #include @@ -111,6 +113,82 @@ void asm_offsets(void) OFFSET(PT_BADADDR, pt_regs, badaddr); OFFSET(PT_CAUSE, pt_regs, cause); + OFFSET(KVM_ARCH_GUEST_ZERO, kvm_vcpu_arch, guest_context.zero); + OFFSET(KVM_ARCH_GUEST_RA, kvm_vcpu_arch, guest_context.ra); + OFFSET(KVM_ARCH_GUEST_SP, kvm_vcpu_arch, guest_context.sp); + OFFSET(KVM_ARCH_GUEST_GP, kvm_vcpu_arch, guest_context.gp); + OFFSET(KVM_ARCH_GUEST_TP, kvm_vcpu_arch, guest_context.tp); + OFFSET(KVM_ARCH_GUEST_T0, kvm_vcpu_arch, guest_context.t0); + OFFSET(KVM_ARCH_GUEST_T1, kvm_vcpu_arch, guest_context.t1); + OFFSET(KVM_ARCH_GUEST_T2, kvm_vcpu_arch, guest_context.t2); + OFFSET(KVM_ARCH_GUEST_S0, kvm_vcpu_arch, guest_context.s0); + OFFSET(KVM_ARCH_GUEST_S1, kvm_vcpu_arch, guest_context.s1); + OFFSET(KVM_ARCH_GUEST_A0, kvm_vcpu_arch, guest_context.a0); + OFFSET(KVM_ARCH_GUEST_A1, kvm_vcpu_arch, guest_context.a1); + OFFSET(KVM_ARCH_GUEST_A2, kvm_vcpu_arch, guest_context.a2); + OFFSET(KVM_ARCH_GUEST_A3, kvm_vcpu_arch, guest_context.a3); + OFFSET(KVM_ARCH_GUEST_A4, kvm_vcpu_arch, guest_context.a4); + OFFSET(KVM_ARCH_GUEST_A5, kvm_vcpu_arch, guest_context.a5); + OFFSET(KVM_ARCH_GUEST_A6, kvm_vcpu_arch, guest_context.a6); + OFFSET(KVM_ARCH_GUEST_A7, kvm_vcpu_arch, guest_context.a7); + OFFSET(KVM_ARCH_GUEST_S2, kvm_vcpu_arch, guest_context.s2); + OFFSET(KVM_ARCH_GUEST_S3, kvm_vcpu_arch, guest_context.s3); + OFFSET(KVM_ARCH_GUEST_S4, kvm_vcpu_arch, guest_context.s4); + OFFSET(KVM_ARCH_GUEST_S5, kvm_vcpu_arch, guest_context.s5); + OFFSET(KVM_ARCH_GUEST_S6, kvm_vcpu_arch, guest_context.s6); + OFFSET(KVM_ARCH_GUEST_S7, kvm_vcpu_arch, guest_context.s7); + OFFSET(KVM_ARCH_GUEST_S8, kvm_vcpu_arch, guest_context.s8); + OFFSET(KVM_ARCH_GUEST_S9, kvm_vcpu_arch, 
guest_context.s9); + OFFSET(KVM_ARCH_GUEST_S10, kvm_vcpu_arch, guest_context.s10); + OFFSET(KVM_ARCH_GUEST_S11, kvm_vcpu_arch, guest_context.s11); + OFFSET(KVM_ARCH_GUEST_T3, kvm_vcpu_arch, guest_context.t3); + OFFSET(KVM_ARCH_GUEST_T4, kvm_vcpu_arch, guest_context.t4); + OFFSET(KVM_ARCH_GUEST_T5, kvm_vcpu_arch, guest_context.t5); + OFFSET(KVM_ARCH_GUEST_T6, kvm_vcpu_arch, guest_context.t6); + OFFSET(KVM_ARCH_GUEST_SEPC, kvm_vcpu_arch, guest_context.sepc); + OFFSET(KVM_ARCH_GUEST_SSTATUS, kvm_vcpu_arch, guest_context.sstatus); + OFFSET(KVM_ARCH_GUEST_HSTATUS, kvm_vcpu_arch, guest_context.hstatus); + OFFSET(KVM_ARCH_GUEST_SCOUNTEREN, kvm_vcpu_arch, guest_csr.scounteren); + + OFFSET(KVM_ARCH_HOST_ZERO, kvm_vcpu_arch, host_context.zero); + OFFSET(KVM_ARCH_HOST_RA, kvm_vcpu_arch, host_context.ra); + OFFSET(KVM_ARCH_HOST_SP, kvm_vcpu_arch, host_context.sp); + OFFSET(KVM_ARCH_HOST_GP, kvm_vcpu_arch, host_context.gp); + OFFSET(KVM_ARCH_HOST_TP, kvm_vcpu_arch, host_context.tp); + OFFSET(KVM_ARCH_HOST_T0, kvm_vcpu_arch, host_context.t0); + OFFSET(KVM_ARCH_HOST_T1, kvm_vcpu_arch, host_context.t1); + OFFSET(KVM_ARCH_HOST_T2, kvm_vcpu_arch, host_context.t2); + OFFSET(KVM_ARCH_HOST_S0, kvm_vcpu_arch, host_context.s0); + OFFSET(KVM_ARCH_HOST_S1, kvm_vcpu_arch, host_context.s1); + OFFSET(KVM_ARCH_HOST_A0, kvm_vcpu_arch, host_context.a0); + OFFSET(KVM_ARCH_HOST_A1, kvm_vcpu_arch, host_context.a1); + OFFSET(KVM_ARCH_HOST_A2, kvm_vcpu_arch, host_context.a2); + OFFSET(KVM_ARCH_HOST_A3, kvm_vcpu_arch, host_context.a3); + OFFSET(KVM_ARCH_HOST_A4, kvm_vcpu_arch, host_context.a4); + OFFSET(KVM_ARCH_HOST_A5, kvm_vcpu_arch, host_context.a5); + OFFSET(KVM_ARCH_HOST_A6, kvm_vcpu_arch, host_context.a6); + OFFSET(KVM_ARCH_HOST_A7, kvm_vcpu_arch, host_context.a7); + OFFSET(KVM_ARCH_HOST_S2, kvm_vcpu_arch, host_context.s2); + OFFSET(KVM_ARCH_HOST_S3, kvm_vcpu_arch, host_context.s3); + OFFSET(KVM_ARCH_HOST_S4, kvm_vcpu_arch, host_context.s4); + OFFSET(KVM_ARCH_HOST_S5, kvm_vcpu_arch, host_context.s5); + OFFSET(KVM_ARCH_HOST_S6, kvm_vcpu_arch, host_context.s6); + OFFSET(KVM_ARCH_HOST_S7, kvm_vcpu_arch, host_context.s7); + OFFSET(KVM_ARCH_HOST_S8, kvm_vcpu_arch, host_context.s8); + OFFSET(KVM_ARCH_HOST_S9, kvm_vcpu_arch, host_context.s9); + OFFSET(KVM_ARCH_HOST_S10, kvm_vcpu_arch, host_context.s10); + OFFSET(KVM_ARCH_HOST_S11, kvm_vcpu_arch, host_context.s11); + OFFSET(KVM_ARCH_HOST_T3, kvm_vcpu_arch, host_context.t3); + OFFSET(KVM_ARCH_HOST_T4, kvm_vcpu_arch, host_context.t4); + OFFSET(KVM_ARCH_HOST_T5, kvm_vcpu_arch, host_context.t5); + OFFSET(KVM_ARCH_HOST_T6, kvm_vcpu_arch, host_context.t6); + OFFSET(KVM_ARCH_HOST_SEPC, kvm_vcpu_arch, host_context.sepc); + OFFSET(KVM_ARCH_HOST_SSTATUS, kvm_vcpu_arch, host_context.sstatus); + OFFSET(KVM_ARCH_HOST_HSTATUS, kvm_vcpu_arch, host_context.hstatus); + OFFSET(KVM_ARCH_HOST_SSCRATCH, kvm_vcpu_arch, host_sscratch); + OFFSET(KVM_ARCH_HOST_STVEC, kvm_vcpu_arch, host_stvec); + OFFSET(KVM_ARCH_HOST_SCOUNTEREN, kvm_vcpu_arch, host_scounteren); + /* * THREAD_{F,X}* might be larger than a S-type offset can handle, but * these are used in performance-sensitive assembly so we can't resort diff --git a/arch/riscv/kvm/Makefile b/arch/riscv/kvm/Makefile index 4732094391bf..9e8133c898dc 100644 --- a/arch/riscv/kvm/Makefile +++ b/arch/riscv/kvm/Makefile @@ -10,4 +10,4 @@ KVM := ../../../virt/kvm obj-$(CONFIG_KVM) += kvm.o kvm-y += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/binary_stats.o \ - main.o vm.o mmu.o vcpu.o vcpu_exit.o + main.o vm.o mmu.o vcpu.o vcpu_exit.o vcpu_switch.o diff 
--git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c index a15d97f3e5f6..64f74290a90f 100644 --- a/arch/riscv/kvm/vcpu.c +++ b/arch/riscv/kvm/vcpu.c @@ -565,14 +565,40 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { - /* TODO: */ + struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; + + csr_write(CSR_VSSTATUS, csr->vsstatus); + csr_write(CSR_VSIE, csr->vsie); + csr_write(CSR_VSTVEC, csr->vstvec); + csr_write(CSR_VSSCRATCH, csr->vsscratch); + csr_write(CSR_VSEPC, csr->vsepc); + csr_write(CSR_VSCAUSE, csr->vscause); + csr_write(CSR_VSTVAL, csr->vstval); + csr_write(CSR_HVIP, csr->hvip); + csr_write(CSR_VSATP, csr->vsatp); kvm_riscv_stage2_update_hgatp(vcpu); + + vcpu->cpu = cpu; } void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) { - /* TODO: */ + struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; + + vcpu->cpu = -1; + + csr_write(CSR_HGATP, 0); + + csr->vsstatus = csr_read(CSR_VSSTATUS); + csr->vsie = csr_read(CSR_VSIE); + csr->vstvec = csr_read(CSR_VSTVEC); + csr->vsscratch = csr_read(CSR_VSSCRATCH); + csr->vsepc = csr_read(CSR_VSEPC); + csr->vscause = csr_read(CSR_VSCAUSE); + csr->vstval = csr_read(CSR_VSTVAL); + csr->hvip = csr_read(CSR_HVIP); + csr->vsatp = csr_read(CSR_VSATP); } static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu) diff --git a/arch/riscv/kvm/vcpu_switch.S b/arch/riscv/kvm/vcpu_switch.S new file mode 100644 index 000000000000..5174b025ff4e --- /dev/null +++ b/arch/riscv/kvm/vcpu_switch.S @@ -0,0 +1,203 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2019 Western Digital Corporation or its affiliates. + * + * Authors: + * Anup Patel + */ + +#include +#include +#include +#include + + .text + .altmacro + .option norelax + +ENTRY(__kvm_riscv_switch_to) + /* Save Host GPRs (except A0 and T0-T6) */ + REG_S ra, (KVM_ARCH_HOST_RA)(a0) + REG_S sp, (KVM_ARCH_HOST_SP)(a0) + REG_S gp, (KVM_ARCH_HOST_GP)(a0) + REG_S tp, (KVM_ARCH_HOST_TP)(a0) + REG_S s0, (KVM_ARCH_HOST_S0)(a0) + REG_S s1, (KVM_ARCH_HOST_S1)(a0) + REG_S a1, (KVM_ARCH_HOST_A1)(a0) + REG_S a2, (KVM_ARCH_HOST_A2)(a0) + REG_S a3, (KVM_ARCH_HOST_A3)(a0) + REG_S a4, (KVM_ARCH_HOST_A4)(a0) + REG_S a5, (KVM_ARCH_HOST_A5)(a0) + REG_S a6, (KVM_ARCH_HOST_A6)(a0) + REG_S a7, (KVM_ARCH_HOST_A7)(a0) + REG_S s2, (KVM_ARCH_HOST_S2)(a0) + REG_S s3, (KVM_ARCH_HOST_S3)(a0) + REG_S s4, (KVM_ARCH_HOST_S4)(a0) + REG_S s5, (KVM_ARCH_HOST_S5)(a0) + REG_S s6, (KVM_ARCH_HOST_S6)(a0) + REG_S s7, (KVM_ARCH_HOST_S7)(a0) + REG_S s8, (KVM_ARCH_HOST_S8)(a0) + REG_S s9, (KVM_ARCH_HOST_S9)(a0) + REG_S s10, (KVM_ARCH_HOST_S10)(a0) + REG_S s11, (KVM_ARCH_HOST_S11)(a0) + + /* Save Host and Restore Guest SSTATUS */ + REG_L t0, (KVM_ARCH_GUEST_SSTATUS)(a0) + csrrw t0, CSR_SSTATUS, t0 + REG_S t0, (KVM_ARCH_HOST_SSTATUS)(a0) + + /* Save Host and Restore Guest HSTATUS */ + REG_L t1, (KVM_ARCH_GUEST_HSTATUS)(a0) + csrrw t1, CSR_HSTATUS, t1 + REG_S t1, (KVM_ARCH_HOST_HSTATUS)(a0) + + /* Save Host and Restore Guest SCOUNTEREN */ + REG_L t2, (KVM_ARCH_GUEST_SCOUNTEREN)(a0) + csrrw t2, CSR_SCOUNTEREN, t2 + REG_S t2, (KVM_ARCH_HOST_SCOUNTEREN)(a0) + + /* Save Host SSCRATCH and change it to struct kvm_vcpu_arch pointer */ + csrrw t3, CSR_SSCRATCH, a0 + REG_S t3, (KVM_ARCH_HOST_SSCRATCH)(a0) + + /* Save Host STVEC and change it to return path */ + la t4, __kvm_switch_return + csrrw t4, CSR_STVEC, t4 + REG_S t4, (KVM_ARCH_HOST_STVEC)(a0) + + /* Restore Guest SEPC */ + REG_L t0, (KVM_ARCH_GUEST_SEPC)(a0) + csrw CSR_SEPC, t0 + + /* Restore Guest GPRs (except A0) */ + 
REG_L ra, (KVM_ARCH_GUEST_RA)(a0) + REG_L sp, (KVM_ARCH_GUEST_SP)(a0) + REG_L gp, (KVM_ARCH_GUEST_GP)(a0) + REG_L tp, (KVM_ARCH_GUEST_TP)(a0) + REG_L t0, (KVM_ARCH_GUEST_T0)(a0) + REG_L t1, (KVM_ARCH_GUEST_T1)(a0) + REG_L t2, (KVM_ARCH_GUEST_T2)(a0) + REG_L s0, (KVM_ARCH_GUEST_S0)(a0) + REG_L s1, (KVM_ARCH_GUEST_S1)(a0) + REG_L a1, (KVM_ARCH_GUEST_A1)(a0) + REG_L a2, (KVM_ARCH_GUEST_A2)(a0) + REG_L a3, (KVM_ARCH_GUEST_A3)(a0) + REG_L a4, (KVM_ARCH_GUEST_A4)(a0) + REG_L a5, (KVM_ARCH_GUEST_A5)(a0) + REG_L a6, (KVM_ARCH_GUEST_A6)(a0) + REG_L a7, (KVM_ARCH_GUEST_A7)(a0) + REG_L s2, (KVM_ARCH_GUEST_S2)(a0) + REG_L s3, (KVM_ARCH_GUEST_S3)(a0) + REG_L s4, (KVM_ARCH_GUEST_S4)(a0) + REG_L s5, (KVM_ARCH_GUEST_S5)(a0) + REG_L s6, (KVM_ARCH_GUEST_S6)(a0) + REG_L s7, (KVM_ARCH_GUEST_S7)(a0) + REG_L s8, (KVM_ARCH_GUEST_S8)(a0) + REG_L s9, (KVM_ARCH_GUEST_S9)(a0) + REG_L s10, (KVM_ARCH_GUEST_S10)(a0) + REG_L s11, (KVM_ARCH_GUEST_S11)(a0) + REG_L t3, (KVM_ARCH_GUEST_T3)(a0) + REG_L t4, (KVM_ARCH_GUEST_T4)(a0) + REG_L t5, (KVM_ARCH_GUEST_T5)(a0) + REG_L t6, (KVM_ARCH_GUEST_T6)(a0) + + /* Restore Guest A0 */ + REG_L a0, (KVM_ARCH_GUEST_A0)(a0) + + /* Resume Guest */ + sret + + /* Back to Host */ + .align 2 +__kvm_switch_return: + /* Swap Guest A0 with SSCRATCH */ + csrrw a0, CSR_SSCRATCH, a0 + + /* Save Guest GPRs (except A0) */ + REG_S ra, (KVM_ARCH_GUEST_RA)(a0) + REG_S sp, (KVM_ARCH_GUEST_SP)(a0) + REG_S gp, (KVM_ARCH_GUEST_GP)(a0) + REG_S tp, (KVM_ARCH_GUEST_TP)(a0) + REG_S t0, (KVM_ARCH_GUEST_T0)(a0) + REG_S t1, (KVM_ARCH_GUEST_T1)(a0) + REG_S t2, (KVM_ARCH_GUEST_T2)(a0) + REG_S s0, (KVM_ARCH_GUEST_S0)(a0) + REG_S s1, (KVM_ARCH_GUEST_S1)(a0) + REG_S a1, (KVM_ARCH_GUEST_A1)(a0) + REG_S a2, (KVM_ARCH_GUEST_A2)(a0) + REG_S a3, (KVM_ARCH_GUEST_A3)(a0) + REG_S a4, (KVM_ARCH_GUEST_A4)(a0) + REG_S a5, (KVM_ARCH_GUEST_A5)(a0) + REG_S a6, (KVM_ARCH_GUEST_A6)(a0) + REG_S a7, (KVM_ARCH_GUEST_A7)(a0) + REG_S s2, (KVM_ARCH_GUEST_S2)(a0) + REG_S s3, (KVM_ARCH_GUEST_S3)(a0) + REG_S s4, (KVM_ARCH_GUEST_S4)(a0) + REG_S s5, (KVM_ARCH_GUEST_S5)(a0) + REG_S s6, (KVM_ARCH_GUEST_S6)(a0) + REG_S s7, (KVM_ARCH_GUEST_S7)(a0) + REG_S s8, (KVM_ARCH_GUEST_S8)(a0) + REG_S s9, (KVM_ARCH_GUEST_S9)(a0) + REG_S s10, (KVM_ARCH_GUEST_S10)(a0) + REG_S s11, (KVM_ARCH_GUEST_S11)(a0) + REG_S t3, (KVM_ARCH_GUEST_T3)(a0) + REG_S t4, (KVM_ARCH_GUEST_T4)(a0) + REG_S t5, (KVM_ARCH_GUEST_T5)(a0) + REG_S t6, (KVM_ARCH_GUEST_T6)(a0) + + /* Save Guest SEPC */ + csrr t0, CSR_SEPC + REG_S t0, (KVM_ARCH_GUEST_SEPC)(a0) + + /* Restore Host STVEC */ + REG_L t1, (KVM_ARCH_HOST_STVEC)(a0) + csrw CSR_STVEC, t1 + + /* Save Guest A0 and Restore Host SSCRATCH */ + REG_L t2, (KVM_ARCH_HOST_SSCRATCH)(a0) + csrrw t2, CSR_SSCRATCH, t2 + REG_S t2, (KVM_ARCH_GUEST_A0)(a0) + + /* Save Guest and Restore Host SCOUNTEREN */ + REG_L t3, (KVM_ARCH_HOST_SCOUNTEREN)(a0) + csrrw t3, CSR_SCOUNTEREN, t3 + REG_S t3, (KVM_ARCH_GUEST_SCOUNTEREN)(a0) + + /* Save Guest and Restore Host HSTATUS */ + REG_L t4, (KVM_ARCH_HOST_HSTATUS)(a0) + csrrw t4, CSR_HSTATUS, t4 + REG_S t4, (KVM_ARCH_GUEST_HSTATUS)(a0) + + /* Save Guest and Restore Host SSTATUS */ + REG_L t5, (KVM_ARCH_HOST_SSTATUS)(a0) + csrrw t5, CSR_SSTATUS, t5 + REG_S t5, (KVM_ARCH_GUEST_SSTATUS)(a0) + + /* Restore Host GPRs (except A0 and T0-T6) */ + REG_L ra, (KVM_ARCH_HOST_RA)(a0) + REG_L sp, (KVM_ARCH_HOST_SP)(a0) + REG_L gp, (KVM_ARCH_HOST_GP)(a0) + REG_L tp, (KVM_ARCH_HOST_TP)(a0) + REG_L s0, (KVM_ARCH_HOST_S0)(a0) + REG_L s1, (KVM_ARCH_HOST_S1)(a0) + REG_L a1, (KVM_ARCH_HOST_A1)(a0) + REG_L a2, (KVM_ARCH_HOST_A2)(a0) + 
REG_L a3, (KVM_ARCH_HOST_A3)(a0) + REG_L a4, (KVM_ARCH_HOST_A4)(a0) + REG_L a5, (KVM_ARCH_HOST_A5)(a0) + REG_L a6, (KVM_ARCH_HOST_A6)(a0) + REG_L a7, (KVM_ARCH_HOST_A7)(a0) + REG_L s2, (KVM_ARCH_HOST_S2)(a0) + REG_L s3, (KVM_ARCH_HOST_S3)(a0) + REG_L s4, (KVM_ARCH_HOST_S4)(a0) + REG_L s5, (KVM_ARCH_HOST_S5)(a0) + REG_L s6, (KVM_ARCH_HOST_S6)(a0) + REG_L s7, (KVM_ARCH_HOST_S7)(a0) + REG_L s8, (KVM_ARCH_HOST_S8)(a0) + REG_L s9, (KVM_ARCH_HOST_S9)(a0) + REG_L s10, (KVM_ARCH_HOST_S10)(a0) + REG_L s11, (KVM_ARCH_HOST_S11)(a0) + + /* Return to C code */ + ret +ENDPROC(__kvm_riscv_switch_to) -- cgit v1.2.3 From 9f7013265112a92340cef5debec8d02ec8d1de06 Mon Sep 17 00:00:00 2001 From: Anup Patel Date: Mon, 27 Sep 2021 17:10:06 +0530 Subject: RISC-V: KVM: Handle MMIO exits for VCPU We will get stage2 page faults whenever Guest/VM access SW emulated MMIO device or unmapped Guest RAM. This patch implements MMIO read/write emulation by extracting MMIO details from the trapped load/store instruction and forwarding the MMIO read/write to user-space. The actual MMIO emulation will happen in user-space and KVM kernel module will only take care of register updates before resuming the trapped VCPU. The handling for stage2 page faults for unmapped Guest RAM will be implemeted by a separate patch later. [jiangyifei: ioeventfd and in-kernel mmio device support] Signed-off-by: Yifei Jiang Signed-off-by: Anup Patel Acked-by: Paolo Bonzini Reviewed-by: Paolo Bonzini Reviewed-by: Alexander Graf Acked-by: Palmer Dabbelt --- arch/riscv/include/asm/kvm_host.h | 22 ++ arch/riscv/kernel/asm-offsets.c | 6 + arch/riscv/kvm/Kconfig | 1 + arch/riscv/kvm/Makefile | 2 +- arch/riscv/kvm/mmu.c | 8 + arch/riscv/kvm/vcpu_exit.c | 592 +++++++++++++++++++++++++++++++++++++- arch/riscv/kvm/vcpu_switch.S | 23 ++ arch/riscv/kvm/vm.c | 1 + 8 files changed, 651 insertions(+), 4 deletions(-) diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h index 8ef0ca100e75..88b2f21efed8 100644 --- a/arch/riscv/include/asm/kvm_host.h +++ b/arch/riscv/include/asm/kvm_host.h @@ -49,6 +49,14 @@ struct kvm_arch { phys_addr_t pgd_phys; }; +struct kvm_mmio_decode { + unsigned long insn; + int insn_len; + int len; + int shift; + int return_handled; +}; + struct kvm_cpu_trap { unsigned long sepc; unsigned long scause; @@ -147,6 +155,9 @@ struct kvm_vcpu_arch { unsigned long irqs_pending; unsigned long irqs_pending_mask; + /* MMIO instruction details */ + struct kvm_mmio_decode mmio_decode; + /* VCPU power-off state */ bool power_off; @@ -162,11 +173,22 @@ static inline void kvm_arch_sync_events(struct kvm *kvm) {} static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {} +int kvm_riscv_stage2_map(struct kvm_vcpu *vcpu, + struct kvm_memory_slot *memslot, + gpa_t gpa, unsigned long hva, bool is_write); void kvm_riscv_stage2_flush_cache(struct kvm_vcpu *vcpu); int kvm_riscv_stage2_alloc_pgd(struct kvm *kvm); void kvm_riscv_stage2_free_pgd(struct kvm *kvm); void kvm_riscv_stage2_update_hgatp(struct kvm_vcpu *vcpu); +void __kvm_riscv_unpriv_trap(void); + +unsigned long kvm_riscv_vcpu_unpriv_read(struct kvm_vcpu *vcpu, + bool read_insn, + unsigned long guest_addr, + struct kvm_cpu_trap *trap); +void kvm_riscv_vcpu_trap_redirect(struct kvm_vcpu *vcpu, + struct kvm_cpu_trap *trap); int kvm_riscv_vcpu_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run); int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, struct kvm_cpu_trap *trap); 
diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c index 2fac70303341..91c77555d914 100644 --- a/arch/riscv/kernel/asm-offsets.c +++ b/arch/riscv/kernel/asm-offsets.c @@ -189,6 +189,12 @@ void asm_offsets(void) OFFSET(KVM_ARCH_HOST_STVEC, kvm_vcpu_arch, host_stvec); OFFSET(KVM_ARCH_HOST_SCOUNTEREN, kvm_vcpu_arch, host_scounteren); + OFFSET(KVM_ARCH_TRAP_SEPC, kvm_cpu_trap, sepc); + OFFSET(KVM_ARCH_TRAP_SCAUSE, kvm_cpu_trap, scause); + OFFSET(KVM_ARCH_TRAP_STVAL, kvm_cpu_trap, stval); + OFFSET(KVM_ARCH_TRAP_HTVAL, kvm_cpu_trap, htval); + OFFSET(KVM_ARCH_TRAP_HTINST, kvm_cpu_trap, htinst); + /* * THREAD_{F,X}* might be larger than a S-type offset can handle, but * these are used in performance-sensitive assembly so we can't resort diff --git a/arch/riscv/kvm/Kconfig b/arch/riscv/kvm/Kconfig index 88edd477b3a8..b42979f84042 100644 --- a/arch/riscv/kvm/Kconfig +++ b/arch/riscv/kvm/Kconfig @@ -24,6 +24,7 @@ config KVM select ANON_INODES select KVM_MMIO select HAVE_KVM_VCPU_ASYNC_IOCTL + select HAVE_KVM_EVENTFD select SRCU help Support hosting virtualized guest machines. diff --git a/arch/riscv/kvm/Makefile b/arch/riscv/kvm/Makefile index 9e8133c898dc..1e1c3e1e4e1b 100644 --- a/arch/riscv/kvm/Makefile +++ b/arch/riscv/kvm/Makefile @@ -10,4 +10,4 @@ KVM := ../../../virt/kvm obj-$(CONFIG_KVM) += kvm.o kvm-y += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/binary_stats.o \ - main.o vm.o mmu.o vcpu.o vcpu_exit.o vcpu_switch.o + $(KVM)/eventfd.o main.o vm.o mmu.o vcpu.o vcpu_exit.o vcpu_switch.o diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c index abfd2b22fa8e..8ec10ef861e7 100644 --- a/arch/riscv/kvm/mmu.c +++ b/arch/riscv/kvm/mmu.c @@ -58,6 +58,14 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, return 0; } +int kvm_riscv_stage2_map(struct kvm_vcpu *vcpu, + struct kvm_memory_slot *memslot, + gpa_t gpa, unsigned long hva, bool is_write) +{ + /* TODO: */ + return 0; +} + void kvm_riscv_stage2_flush_cache(struct kvm_vcpu *vcpu) { /* TODO: */ diff --git a/arch/riscv/kvm/vcpu_exit.c b/arch/riscv/kvm/vcpu_exit.c index 4484e9200fe4..dc66be032ad7 100644 --- a/arch/riscv/kvm/vcpu_exit.c +++ b/arch/riscv/kvm/vcpu_exit.c @@ -6,9 +6,518 @@ * Anup Patel */ +#include #include #include #include +#include + +#define INSN_MATCH_LB 0x3 +#define INSN_MASK_LB 0x707f +#define INSN_MATCH_LH 0x1003 +#define INSN_MASK_LH 0x707f +#define INSN_MATCH_LW 0x2003 +#define INSN_MASK_LW 0x707f +#define INSN_MATCH_LD 0x3003 +#define INSN_MASK_LD 0x707f +#define INSN_MATCH_LBU 0x4003 +#define INSN_MASK_LBU 0x707f +#define INSN_MATCH_LHU 0x5003 +#define INSN_MASK_LHU 0x707f +#define INSN_MATCH_LWU 0x6003 +#define INSN_MASK_LWU 0x707f +#define INSN_MATCH_SB 0x23 +#define INSN_MASK_SB 0x707f +#define INSN_MATCH_SH 0x1023 +#define INSN_MASK_SH 0x707f +#define INSN_MATCH_SW 0x2023 +#define INSN_MASK_SW 0x707f +#define INSN_MATCH_SD 0x3023 +#define INSN_MASK_SD 0x707f + +#define INSN_MATCH_C_LD 0x6000 +#define INSN_MASK_C_LD 0xe003 +#define INSN_MATCH_C_SD 0xe000 +#define INSN_MASK_C_SD 0xe003 +#define INSN_MATCH_C_LW 0x4000 +#define INSN_MASK_C_LW 0xe003 +#define INSN_MATCH_C_SW 0xc000 +#define INSN_MASK_C_SW 0xe003 +#define INSN_MATCH_C_LDSP 0x6002 +#define INSN_MASK_C_LDSP 0xe003 +#define INSN_MATCH_C_SDSP 0xe002 +#define INSN_MASK_C_SDSP 0xe003 +#define INSN_MATCH_C_LWSP 0x4002 +#define INSN_MASK_C_LWSP 0xe003 +#define INSN_MATCH_C_SWSP 0xc002 +#define INSN_MASK_C_SWSP 0xe003 + +#define INSN_16BIT_MASK 0x3 + +#define INSN_IS_16BIT(insn) (((insn) & INSN_16BIT_MASK) != INSN_16BIT_MASK) + 
+#define INSN_LEN(insn) (INSN_IS_16BIT(insn) ? 2 : 4) + +#ifdef CONFIG_64BIT +#define LOG_REGBYTES 3 +#else +#define LOG_REGBYTES 2 +#endif +#define REGBYTES (1 << LOG_REGBYTES) + +#define SH_RD 7 +#define SH_RS1 15 +#define SH_RS2 20 +#define SH_RS2C 2 + +#define RV_X(x, s, n) (((x) >> (s)) & ((1 << (n)) - 1)) +#define RVC_LW_IMM(x) ((RV_X(x, 6, 1) << 2) | \ + (RV_X(x, 10, 3) << 3) | \ + (RV_X(x, 5, 1) << 6)) +#define RVC_LD_IMM(x) ((RV_X(x, 10, 3) << 3) | \ + (RV_X(x, 5, 2) << 6)) +#define RVC_LWSP_IMM(x) ((RV_X(x, 4, 3) << 2) | \ + (RV_X(x, 12, 1) << 5) | \ + (RV_X(x, 2, 2) << 6)) +#define RVC_LDSP_IMM(x) ((RV_X(x, 5, 2) << 3) | \ + (RV_X(x, 12, 1) << 5) | \ + (RV_X(x, 2, 3) << 6)) +#define RVC_SWSP_IMM(x) ((RV_X(x, 9, 4) << 2) | \ + (RV_X(x, 7, 2) << 6)) +#define RVC_SDSP_IMM(x) ((RV_X(x, 10, 3) << 3) | \ + (RV_X(x, 7, 3) << 6)) +#define RVC_RS1S(insn) (8 + RV_X(insn, SH_RD, 3)) +#define RVC_RS2S(insn) (8 + RV_X(insn, SH_RS2C, 3)) +#define RVC_RS2(insn) RV_X(insn, SH_RS2C, 5) + +#define SHIFT_RIGHT(x, y) \ + ((y) < 0 ? ((x) << -(y)) : ((x) >> (y))) + +#define REG_MASK \ + ((1 << (5 + LOG_REGBYTES)) - (1 << LOG_REGBYTES)) + +#define REG_OFFSET(insn, pos) \ + (SHIFT_RIGHT((insn), (pos) - LOG_REGBYTES) & REG_MASK) + +#define REG_PTR(insn, pos, regs) \ + ((ulong *)((ulong)(regs) + REG_OFFSET(insn, pos))) + +#define GET_RM(insn) (((insn) >> 12) & 7) + +#define GET_RS1(insn, regs) (*REG_PTR(insn, SH_RS1, regs)) +#define GET_RS2(insn, regs) (*REG_PTR(insn, SH_RS2, regs)) +#define GET_RS1S(insn, regs) (*REG_PTR(RVC_RS1S(insn), 0, regs)) +#define GET_RS2S(insn, regs) (*REG_PTR(RVC_RS2S(insn), 0, regs)) +#define GET_RS2C(insn, regs) (*REG_PTR(insn, SH_RS2C, regs)) +#define GET_SP(regs) (*REG_PTR(2, 0, regs)) +#define SET_RD(insn, regs, val) (*REG_PTR(insn, SH_RD, regs) = (val)) +#define IMM_I(insn) ((s32)(insn) >> 20) +#define IMM_S(insn) (((s32)(insn) >> 25 << 5) | \ + (s32)(((insn) >> 7) & 0x1f)) +#define MASK_FUNCT3 0x7000 + +static int emulate_load(struct kvm_vcpu *vcpu, struct kvm_run *run, + unsigned long fault_addr, unsigned long htinst) +{ + u8 data_buf[8]; + unsigned long insn; + int shift = 0, len = 0, insn_len = 0; + struct kvm_cpu_trap utrap = { 0 }; + struct kvm_cpu_context *ct = &vcpu->arch.guest_context; + + /* Determine trapped instruction */ + if (htinst & 0x1) { + /* + * Bit[0] == 1 implies trapped instruction value is + * transformed instruction or custom instruction. + */ + insn = htinst | INSN_16BIT_MASK; + insn_len = (htinst & BIT(1)) ? INSN_LEN(insn) : 2; + } else { + /* + * Bit[0] == 0 implies trapped instruction value is + * zero or special value. 
+ */ + insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc, + &utrap); + if (utrap.scause) { + /* Redirect trap if we failed to read instruction */ + utrap.sepc = ct->sepc; + kvm_riscv_vcpu_trap_redirect(vcpu, &utrap); + return 1; + } + insn_len = INSN_LEN(insn); + } + + /* Decode length of MMIO and shift */ + if ((insn & INSN_MASK_LW) == INSN_MATCH_LW) { + len = 4; + shift = 8 * (sizeof(ulong) - len); + } else if ((insn & INSN_MASK_LB) == INSN_MATCH_LB) { + len = 1; + shift = 8 * (sizeof(ulong) - len); + } else if ((insn & INSN_MASK_LBU) == INSN_MATCH_LBU) { + len = 1; + shift = 8 * (sizeof(ulong) - len); +#ifdef CONFIG_64BIT + } else if ((insn & INSN_MASK_LD) == INSN_MATCH_LD) { + len = 8; + shift = 8 * (sizeof(ulong) - len); + } else if ((insn & INSN_MASK_LWU) == INSN_MATCH_LWU) { + len = 4; +#endif + } else if ((insn & INSN_MASK_LH) == INSN_MATCH_LH) { + len = 2; + shift = 8 * (sizeof(ulong) - len); + } else if ((insn & INSN_MASK_LHU) == INSN_MATCH_LHU) { + len = 2; +#ifdef CONFIG_64BIT + } else if ((insn & INSN_MASK_C_LD) == INSN_MATCH_C_LD) { + len = 8; + shift = 8 * (sizeof(ulong) - len); + insn = RVC_RS2S(insn) << SH_RD; + } else if ((insn & INSN_MASK_C_LDSP) == INSN_MATCH_C_LDSP && + ((insn >> SH_RD) & 0x1f)) { + len = 8; + shift = 8 * (sizeof(ulong) - len); +#endif + } else if ((insn & INSN_MASK_C_LW) == INSN_MATCH_C_LW) { + len = 4; + shift = 8 * (sizeof(ulong) - len); + insn = RVC_RS2S(insn) << SH_RD; + } else if ((insn & INSN_MASK_C_LWSP) == INSN_MATCH_C_LWSP && + ((insn >> SH_RD) & 0x1f)) { + len = 4; + shift = 8 * (sizeof(ulong) - len); + } else { + return -EOPNOTSUPP; + } + + /* Fault address should be aligned to length of MMIO */ + if (fault_addr & (len - 1)) + return -EIO; + + /* Save instruction decode info */ + vcpu->arch.mmio_decode.insn = insn; + vcpu->arch.mmio_decode.insn_len = insn_len; + vcpu->arch.mmio_decode.shift = shift; + vcpu->arch.mmio_decode.len = len; + vcpu->arch.mmio_decode.return_handled = 0; + + /* Update MMIO details in kvm_run struct */ + run->mmio.is_write = false; + run->mmio.phys_addr = fault_addr; + run->mmio.len = len; + + /* Try to handle MMIO access in the kernel */ + if (!kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_addr, len, data_buf)) { + /* Successfully handled MMIO access in the kernel so resume */ + memcpy(run->mmio.data, data_buf, len); + vcpu->stat.mmio_exit_kernel++; + kvm_riscv_vcpu_mmio_return(vcpu, run); + return 1; + } + + /* Exit to userspace for MMIO emulation */ + vcpu->stat.mmio_exit_user++; + run->exit_reason = KVM_EXIT_MMIO; + + return 0; +} + +static int emulate_store(struct kvm_vcpu *vcpu, struct kvm_run *run, + unsigned long fault_addr, unsigned long htinst) +{ + u8 data8; + u16 data16; + u32 data32; + u64 data64; + ulong data; + unsigned long insn; + int len = 0, insn_len = 0; + struct kvm_cpu_trap utrap = { 0 }; + struct kvm_cpu_context *ct = &vcpu->arch.guest_context; + + /* Determine trapped instruction */ + if (htinst & 0x1) { + /* + * Bit[0] == 1 implies trapped instruction value is + * transformed instruction or custom instruction. + */ + insn = htinst | INSN_16BIT_MASK; + insn_len = (htinst & BIT(1)) ? INSN_LEN(insn) : 2; + } else { + /* + * Bit[0] == 0 implies trapped instruction value is + * zero or special value. 
+ */ + insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc, + &utrap); + if (utrap.scause) { + /* Redirect trap if we failed to read instruction */ + utrap.sepc = ct->sepc; + kvm_riscv_vcpu_trap_redirect(vcpu, &utrap); + return 1; + } + insn_len = INSN_LEN(insn); + } + + data = GET_RS2(insn, &vcpu->arch.guest_context); + data8 = data16 = data32 = data64 = data; + + if ((insn & INSN_MASK_SW) == INSN_MATCH_SW) { + len = 4; + } else if ((insn & INSN_MASK_SB) == INSN_MATCH_SB) { + len = 1; +#ifdef CONFIG_64BIT + } else if ((insn & INSN_MASK_SD) == INSN_MATCH_SD) { + len = 8; +#endif + } else if ((insn & INSN_MASK_SH) == INSN_MATCH_SH) { + len = 2; +#ifdef CONFIG_64BIT + } else if ((insn & INSN_MASK_C_SD) == INSN_MATCH_C_SD) { + len = 8; + data64 = GET_RS2S(insn, &vcpu->arch.guest_context); + } else if ((insn & INSN_MASK_C_SDSP) == INSN_MATCH_C_SDSP && + ((insn >> SH_RD) & 0x1f)) { + len = 8; + data64 = GET_RS2C(insn, &vcpu->arch.guest_context); +#endif + } else if ((insn & INSN_MASK_C_SW) == INSN_MATCH_C_SW) { + len = 4; + data32 = GET_RS2S(insn, &vcpu->arch.guest_context); + } else if ((insn & INSN_MASK_C_SWSP) == INSN_MATCH_C_SWSP && + ((insn >> SH_RD) & 0x1f)) { + len = 4; + data32 = GET_RS2C(insn, &vcpu->arch.guest_context); + } else { + return -EOPNOTSUPP; + } + + /* Fault address should be aligned to length of MMIO */ + if (fault_addr & (len - 1)) + return -EIO; + + /* Save instruction decode info */ + vcpu->arch.mmio_decode.insn = insn; + vcpu->arch.mmio_decode.insn_len = insn_len; + vcpu->arch.mmio_decode.shift = 0; + vcpu->arch.mmio_decode.len = len; + vcpu->arch.mmio_decode.return_handled = 0; + + /* Copy data to kvm_run instance */ + switch (len) { + case 1: + *((u8 *)run->mmio.data) = data8; + break; + case 2: + *((u16 *)run->mmio.data) = data16; + break; + case 4: + *((u32 *)run->mmio.data) = data32; + break; + case 8: + *((u64 *)run->mmio.data) = data64; + break; + default: + return -EOPNOTSUPP; + }; + + /* Update MMIO details in kvm_run struct */ + run->mmio.is_write = true; + run->mmio.phys_addr = fault_addr; + run->mmio.len = len; + + /* Try to handle MMIO access in the kernel */ + if (!kvm_io_bus_write(vcpu, KVM_MMIO_BUS, + fault_addr, len, run->mmio.data)) { + /* Successfully handled MMIO access in the kernel so resume */ + vcpu->stat.mmio_exit_kernel++; + kvm_riscv_vcpu_mmio_return(vcpu, run); + return 1; + } + + /* Exit to userspace for MMIO emulation */ + vcpu->stat.mmio_exit_user++; + run->exit_reason = KVM_EXIT_MMIO; + + return 0; +} + +static int stage2_page_fault(struct kvm_vcpu *vcpu, struct kvm_run *run, + struct kvm_cpu_trap *trap) +{ + struct kvm_memory_slot *memslot; + unsigned long hva, fault_addr; + bool writeable; + gfn_t gfn; + int ret; + + fault_addr = (trap->htval << 2) | (trap->stval & 0x3); + gfn = fault_addr >> PAGE_SHIFT; + memslot = gfn_to_memslot(vcpu->kvm, gfn); + hva = gfn_to_hva_memslot_prot(memslot, gfn, &writeable); + + if (kvm_is_error_hva(hva) || + (trap->scause == EXC_STORE_GUEST_PAGE_FAULT && !writeable)) { + switch (trap->scause) { + case EXC_LOAD_GUEST_PAGE_FAULT: + return emulate_load(vcpu, run, fault_addr, + trap->htinst); + case EXC_STORE_GUEST_PAGE_FAULT: + return emulate_store(vcpu, run, fault_addr, + trap->htinst); + default: + return -EOPNOTSUPP; + }; + } + + ret = kvm_riscv_stage2_map(vcpu, memslot, fault_addr, hva, + (trap->scause == EXC_STORE_GUEST_PAGE_FAULT) ? 
true : false); + if (ret < 0) + return ret; + + return 1; +} + +/** + * kvm_riscv_vcpu_unpriv_read -- Read machine word from Guest memory + * + * @vcpu: The VCPU pointer + * @read_insn: Flag representing whether we are reading instruction + * @guest_addr: Guest address to read + * @trap: Output pointer to trap details + */ +unsigned long kvm_riscv_vcpu_unpriv_read(struct kvm_vcpu *vcpu, + bool read_insn, + unsigned long guest_addr, + struct kvm_cpu_trap *trap) +{ + register unsigned long taddr asm("a0") = (unsigned long)trap; + register unsigned long ttmp asm("a1"); + register unsigned long val asm("t0"); + register unsigned long tmp asm("t1"); + register unsigned long addr asm("t2") = guest_addr; + unsigned long flags; + unsigned long old_stvec, old_hstatus; + + local_irq_save(flags); + + old_hstatus = csr_swap(CSR_HSTATUS, vcpu->arch.guest_context.hstatus); + old_stvec = csr_swap(CSR_STVEC, (ulong)&__kvm_riscv_unpriv_trap); + + if (read_insn) { + /* + * HLVX.HU instruction + * 0110010 00011 rs1 100 rd 1110011 + */ + asm volatile ("\n" + ".option push\n" + ".option norvc\n" + "add %[ttmp], %[taddr], 0\n" + /* + * HLVX.HU %[val], (%[addr]) + * HLVX.HU t0, (t2) + * 0110010 00011 00111 100 00101 1110011 + */ + ".word 0x6433c2f3\n" + "andi %[tmp], %[val], 3\n" + "addi %[tmp], %[tmp], -3\n" + "bne %[tmp], zero, 2f\n" + "addi %[addr], %[addr], 2\n" + /* + * HLVX.HU %[tmp], (%[addr]) + * HLVX.HU t1, (t2) + * 0110010 00011 00111 100 00110 1110011 + */ + ".word 0x6433c373\n" + "sll %[tmp], %[tmp], 16\n" + "add %[val], %[val], %[tmp]\n" + "2:\n" + ".option pop" + : [val] "=&r" (val), [tmp] "=&r" (tmp), + [taddr] "+&r" (taddr), [ttmp] "+&r" (ttmp), + [addr] "+&r" (addr) : : "memory"); + + if (trap->scause == EXC_LOAD_PAGE_FAULT) + trap->scause = EXC_INST_PAGE_FAULT; + } else { + /* + * HLV.D instruction + * 0110110 00000 rs1 100 rd 1110011 + * + * HLV.W instruction + * 0110100 00000 rs1 100 rd 1110011 + */ + asm volatile ("\n" + ".option push\n" + ".option norvc\n" + "add %[ttmp], %[taddr], 0\n" +#ifdef CONFIG_64BIT + /* + * HLV.D %[val], (%[addr]) + * HLV.D t0, (t2) + * 0110110 00000 00111 100 00101 1110011 + */ + ".word 0x6c03c2f3\n" +#else + /* + * HLV.W %[val], (%[addr]) + * HLV.W t0, (t2) + * 0110100 00000 00111 100 00101 1110011 + */ + ".word 0x6803c2f3\n" +#endif + ".option pop" + : [val] "=&r" (val), + [taddr] "+&r" (taddr), [ttmp] "+&r" (ttmp) + : [addr] "r" (addr) : "memory"); + } + + csr_write(CSR_STVEC, old_stvec); + csr_write(CSR_HSTATUS, old_hstatus); + + local_irq_restore(flags); + + return val; +} + +/** + * kvm_riscv_vcpu_trap_redirect -- Redirect trap to Guest + * + * @vcpu: The VCPU pointer + * @trap: Trap details + */ +void kvm_riscv_vcpu_trap_redirect(struct kvm_vcpu *vcpu, + struct kvm_cpu_trap *trap) +{ + unsigned long vsstatus = csr_read(CSR_VSSTATUS); + + /* Change Guest SSTATUS.SPP bit */ + vsstatus &= ~SR_SPP; + if (vcpu->arch.guest_context.sstatus & SR_SPP) + vsstatus |= SR_SPP; + + /* Change Guest SSTATUS.SPIE bit */ + vsstatus &= ~SR_SPIE; + if (vsstatus & SR_SIE) + vsstatus |= SR_SPIE; + + /* Clear Guest SSTATUS.SIE bit */ + vsstatus &= ~SR_SIE; + + /* Update Guest SSTATUS */ + csr_write(CSR_VSSTATUS, vsstatus); + + /* Update Guest SCAUSE, STVAL, and SEPC */ + csr_write(CSR_VSCAUSE, trap->scause); + csr_write(CSR_VSTVAL, trap->stval); + csr_write(CSR_VSEPC, trap->sepc); + + /* Set Guest PC to Guest exception vector */ + vcpu->arch.guest_context.sepc = csr_read(CSR_VSTVEC); +} /** * kvm_riscv_vcpu_mmio_return -- Handle MMIO loads after user space emulation @@ -19,7 
+528,54 @@ */ int kvm_riscv_vcpu_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run) { - /* TODO: */ + u8 data8; + u16 data16; + u32 data32; + u64 data64; + ulong insn; + int len, shift; + + if (vcpu->arch.mmio_decode.return_handled) + return 0; + + vcpu->arch.mmio_decode.return_handled = 1; + insn = vcpu->arch.mmio_decode.insn; + + if (run->mmio.is_write) + goto done; + + len = vcpu->arch.mmio_decode.len; + shift = vcpu->arch.mmio_decode.shift; + + switch (len) { + case 1: + data8 = *((u8 *)run->mmio.data); + SET_RD(insn, &vcpu->arch.guest_context, + (ulong)data8 << shift >> shift); + break; + case 2: + data16 = *((u16 *)run->mmio.data); + SET_RD(insn, &vcpu->arch.guest_context, + (ulong)data16 << shift >> shift); + break; + case 4: + data32 = *((u32 *)run->mmio.data); + SET_RD(insn, &vcpu->arch.guest_context, + (ulong)data32 << shift >> shift); + break; + case 8: + data64 = *((u64 *)run->mmio.data); + SET_RD(insn, &vcpu->arch.guest_context, + (ulong)data64 << shift >> shift); + break; + default: + return -EOPNOTSUPP; + }; + +done: + /* Move to next instruction */ + vcpu->arch.guest_context.sepc += vcpu->arch.mmio_decode.insn_len; + return 0; } @@ -30,6 +586,36 @@ int kvm_riscv_vcpu_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run) int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, struct kvm_cpu_trap *trap) { - /* TODO: */ - return 0; + int ret; + + /* If we got host interrupt then do nothing */ + if (trap->scause & CAUSE_IRQ_FLAG) + return 1; + + /* Handle guest traps */ + ret = -EFAULT; + run->exit_reason = KVM_EXIT_UNKNOWN; + switch (trap->scause) { + case EXC_INST_GUEST_PAGE_FAULT: + case EXC_LOAD_GUEST_PAGE_FAULT: + case EXC_STORE_GUEST_PAGE_FAULT: + if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV) + ret = stage2_page_fault(vcpu, run, trap); + break; + default: + break; + }; + + /* Print details in-case of error */ + if (ret < 0) { + kvm_err("VCPU exit error %d\n", ret); + kvm_err("SEPC=0x%lx SSTATUS=0x%lx HSTATUS=0x%lx\n", + vcpu->arch.guest_context.sepc, + vcpu->arch.guest_context.sstatus, + vcpu->arch.guest_context.hstatus); + kvm_err("SCAUSE=0x%lx STVAL=0x%lx HTVAL=0x%lx HTINST=0x%lx\n", + trap->scause, trap->stval, trap->htval, trap->htinst); + } + + return ret; } diff --git a/arch/riscv/kvm/vcpu_switch.S b/arch/riscv/kvm/vcpu_switch.S index 5174b025ff4e..e22721e1b892 100644 --- a/arch/riscv/kvm/vcpu_switch.S +++ b/arch/riscv/kvm/vcpu_switch.S @@ -201,3 +201,26 @@ __kvm_switch_return: /* Return to C code */ ret ENDPROC(__kvm_riscv_switch_to) + +ENTRY(__kvm_riscv_unpriv_trap) + /* + * We assume that faulting unpriv load/store instruction is + * 4-byte long and blindly increment SEPC by 4. + * + * The trap details will be saved at address pointed by 'A0' + * register and we use 'A1' register as temporary. 
+ */ + csrr a1, CSR_SEPC + REG_S a1, (KVM_ARCH_TRAP_SEPC)(a0) + addi a1, a1, 4 + csrw CSR_SEPC, a1 + csrr a1, CSR_SCAUSE + REG_S a1, (KVM_ARCH_TRAP_SCAUSE)(a0) + csrr a1, CSR_STVAL + REG_S a1, (KVM_ARCH_TRAP_STVAL)(a0) + csrr a1, CSR_HTVAL + REG_S a1, (KVM_ARCH_TRAP_HTVAL)(a0) + csrr a1, CSR_HTINST + REG_S a1, (KVM_ARCH_TRAP_HTINST)(a0) + sret +ENDPROC(__kvm_riscv_unpriv_trap) diff --git a/arch/riscv/kvm/vm.c b/arch/riscv/kvm/vm.c index 22490803d904..ad38c575c0bd 100644 --- a/arch/riscv/kvm/vm.c +++ b/arch/riscv/kvm/vm.c @@ -62,6 +62,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) int r; switch (ext) { + case KVM_CAP_IOEVENTFD: case KVM_CAP_DEVICE_CTRL: case KVM_CAP_USER_MEMORY: case KVM_CAP_DESTROY_MEMORY_REGION_WORKS: -- cgit v1.2.3 From 5a5d79acd7daebeb813a7c0654ca91c5ea7c228e Mon Sep 17 00:00:00 2001 From: Anup Patel Date: Mon, 27 Sep 2021 17:10:07 +0530 Subject: RISC-V: KVM: Handle WFI exits for VCPU We get illegal instruction trap whenever Guest/VM executes WFI instruction. This patch handles WFI trap by blocking the trapped VCPU using kvm_vcpu_block() API. The blocked VCPU will be automatically resumed whenever a VCPU interrupt is injected from user-space or from in-kernel IRQCHIP emulation. Signed-off-by: Anup Patel Acked-by: Paolo Bonzini Reviewed-by: Paolo Bonzini Acked-by: Palmer Dabbelt --- arch/riscv/kvm/vcpu_exit.c | 76 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 76 insertions(+) diff --git a/arch/riscv/kvm/vcpu_exit.c b/arch/riscv/kvm/vcpu_exit.c index dc66be032ad7..f659be94231d 100644 --- a/arch/riscv/kvm/vcpu_exit.c +++ b/arch/riscv/kvm/vcpu_exit.c @@ -12,6 +12,13 @@ #include #include +#define INSN_OPCODE_MASK 0x007c +#define INSN_OPCODE_SHIFT 2 +#define INSN_OPCODE_SYSTEM 28 + +#define INSN_MASK_WFI 0xffffffff +#define INSN_MATCH_WFI 0x10500073 + #define INSN_MATCH_LB 0x3 #define INSN_MASK_LB 0x707f #define INSN_MATCH_LH 0x1003 @@ -116,6 +123,71 @@ (s32)(((insn) >> 7) & 0x1f)) #define MASK_FUNCT3 0x7000 +static int truly_illegal_insn(struct kvm_vcpu *vcpu, + struct kvm_run *run, + ulong insn) +{ + struct kvm_cpu_trap utrap = { 0 }; + + /* Redirect trap to Guest VCPU */ + utrap.sepc = vcpu->arch.guest_context.sepc; + utrap.scause = EXC_INST_ILLEGAL; + utrap.stval = insn; + kvm_riscv_vcpu_trap_redirect(vcpu, &utrap); + + return 1; +} + +static int system_opcode_insn(struct kvm_vcpu *vcpu, + struct kvm_run *run, + ulong insn) +{ + if ((insn & INSN_MASK_WFI) == INSN_MATCH_WFI) { + vcpu->stat.wfi_exit_stat++; + if (!kvm_arch_vcpu_runnable(vcpu)) { + srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx); + kvm_vcpu_block(vcpu); + vcpu->arch.srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); + kvm_clear_request(KVM_REQ_UNHALT, vcpu); + } + vcpu->arch.guest_context.sepc += INSN_LEN(insn); + return 1; + } + + return truly_illegal_insn(vcpu, run, insn); +} + +static int virtual_inst_fault(struct kvm_vcpu *vcpu, struct kvm_run *run, + struct kvm_cpu_trap *trap) +{ + unsigned long insn = trap->stval; + struct kvm_cpu_trap utrap = { 0 }; + struct kvm_cpu_context *ct; + + if (unlikely(INSN_IS_16BIT(insn))) { + if (insn == 0) { + ct = &vcpu->arch.guest_context; + insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, + ct->sepc, + &utrap); + if (utrap.scause) { + utrap.sepc = ct->sepc; + kvm_riscv_vcpu_trap_redirect(vcpu, &utrap); + return 1; + } + } + if (INSN_IS_16BIT(insn)) + return truly_illegal_insn(vcpu, run, insn); + } + + switch ((insn & INSN_OPCODE_MASK) >> INSN_OPCODE_SHIFT) { + case INSN_OPCODE_SYSTEM: + return system_opcode_insn(vcpu, run, insn); + 
default: + return truly_illegal_insn(vcpu, run, insn); + } +} + static int emulate_load(struct kvm_vcpu *vcpu, struct kvm_run *run, unsigned long fault_addr, unsigned long htinst) { @@ -596,6 +668,10 @@ int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, ret = -EFAULT; run->exit_reason = KVM_EXIT_UNKNOWN; switch (trap->scause) { + case EXC_VIRTUAL_INST_FAULT: + if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV) + ret = virtual_inst_fault(vcpu, run, trap); + break; case EXC_INST_GUEST_PAGE_FAULT: case EXC_LOAD_GUEST_PAGE_FAULT: case EXC_STORE_GUEST_PAGE_FAULT: -- cgit v1.2.3 From fd7bb4a251dfc1da3496bf59a4793937c13e8c1f Mon Sep 17 00:00:00 2001 From: Anup Patel Date: Mon, 27 Sep 2021 17:10:08 +0530 Subject: RISC-V: KVM: Implement VMID allocator We implement a simple VMID allocator for Guests/VMs which: 1. Detects number of VMID bits at boot-time 2. Uses atomic number to track VMID version and increments VMID version whenever we run-out of VMIDs 3. Flushes Guest TLBs on all host CPUs whenever we run-out of VMIDs 4. Force updates HW Stage2 VMID for each Guest VCPU whenever VMID changes using VCPU request KVM_REQ_UPDATE_HGATP Signed-off-by: Anup Patel Acked-by: Paolo Bonzini Reviewed-by: Paolo Bonzini Reviewed-by: Alexander Graf Acked-by: Palmer Dabbelt --- arch/riscv/include/asm/kvm_host.h | 24 ++++++++ arch/riscv/kvm/Makefile | 14 ++++- arch/riscv/kvm/main.c | 4 ++ arch/riscv/kvm/tlb.S | 74 +++++++++++++++++++++++ arch/riscv/kvm/vcpu.c | 9 +++ arch/riscv/kvm/vm.c | 6 ++ arch/riscv/kvm/vmid.c | 120 ++++++++++++++++++++++++++++++++++++++ 7 files changed, 249 insertions(+), 2 deletions(-) create mode 100644 arch/riscv/kvm/tlb.S create mode 100644 arch/riscv/kvm/vmid.c diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h index 88b2f21efed8..69c342430242 100644 --- a/arch/riscv/include/asm/kvm_host.h +++ b/arch/riscv/include/asm/kvm_host.h @@ -26,6 +26,7 @@ #define KVM_REQ_SLEEP \ KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) #define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(1) +#define KVM_REQ_UPDATE_HGATP KVM_ARCH_REQ(2) struct kvm_vm_stat { struct kvm_vm_stat_generic generic; @@ -43,7 +44,19 @@ struct kvm_vcpu_stat { struct kvm_arch_memory_slot { }; +struct kvm_vmid { + /* + * Writes to vmid_version and vmid happen with vmid_lock held + * whereas reads happen without any lock held. 
+ */ + unsigned long vmid_version; + unsigned long vmid; +}; + struct kvm_arch { + /* stage2 vmid */ + struct kvm_vmid vmid; + /* stage2 page table */ pgd_t *pgd; phys_addr_t pgd_phys; @@ -173,6 +186,11 @@ static inline void kvm_arch_sync_events(struct kvm *kvm) {} static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {} +void __kvm_riscv_hfence_gvma_vmid_gpa(unsigned long gpa, unsigned long vmid); +void __kvm_riscv_hfence_gvma_vmid(unsigned long vmid); +void __kvm_riscv_hfence_gvma_gpa(unsigned long gpa); +void __kvm_riscv_hfence_gvma_all(void); + int kvm_riscv_stage2_map(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot, gpa_t gpa, unsigned long hva, bool is_write); @@ -181,6 +199,12 @@ int kvm_riscv_stage2_alloc_pgd(struct kvm *kvm); void kvm_riscv_stage2_free_pgd(struct kvm *kvm); void kvm_riscv_stage2_update_hgatp(struct kvm_vcpu *vcpu); +void kvm_riscv_stage2_vmid_detect(void); +unsigned long kvm_riscv_stage2_vmid_bits(void); +int kvm_riscv_stage2_vmid_init(struct kvm *kvm); +bool kvm_riscv_stage2_vmid_ver_changed(struct kvm_vmid *vmid); +void kvm_riscv_stage2_vmid_update(struct kvm_vcpu *vcpu); + void __kvm_riscv_unpriv_trap(void); unsigned long kvm_riscv_vcpu_unpriv_read(struct kvm_vcpu *vcpu, diff --git a/arch/riscv/kvm/Makefile b/arch/riscv/kvm/Makefile index 1e1c3e1e4e1b..a0274763e096 100644 --- a/arch/riscv/kvm/Makefile +++ b/arch/riscv/kvm/Makefile @@ -9,5 +9,15 @@ KVM := ../../../virt/kvm obj-$(CONFIG_KVM) += kvm.o -kvm-y += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/binary_stats.o \ - $(KVM)/eventfd.o main.o vm.o mmu.o vcpu.o vcpu_exit.o vcpu_switch.o +kvm-y += $(KVM)/kvm_main.o +kvm-y += $(KVM)/coalesced_mmio.o +kvm-y += $(KVM)/binary_stats.o +kvm-y += $(KVM)/eventfd.o +kvm-y += main.o +kvm-y += vm.o +kvm-y += vmid.o +kvm-y += tlb.o +kvm-y += mmu.o +kvm-y += vcpu.o +kvm-y += vcpu_exit.o +kvm-y += vcpu_switch.o diff --git a/arch/riscv/kvm/main.c b/arch/riscv/kvm/main.c index 47926f0c175d..49a4941e3838 100644 --- a/arch/riscv/kvm/main.c +++ b/arch/riscv/kvm/main.c @@ -79,8 +79,12 @@ int kvm_arch_init(void *opaque) return -ENODEV; } + kvm_riscv_stage2_vmid_detect(); + kvm_info("hypervisor extension available\n"); + kvm_info("VMID %ld bits available\n", kvm_riscv_stage2_vmid_bits()); + return 0; } diff --git a/arch/riscv/kvm/tlb.S b/arch/riscv/kvm/tlb.S new file mode 100644 index 000000000000..c858570f0856 --- /dev/null +++ b/arch/riscv/kvm/tlb.S @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2019 Western Digital Corporation or its affiliates. 
+ * + * Authors: + * Anup Patel + */ + +#include +#include + + .text + .altmacro + .option norelax + + /* + * Instruction encoding of hfence.gvma is: + * HFENCE.GVMA rs1, rs2 + * HFENCE.GVMA zero, rs2 + * HFENCE.GVMA rs1 + * HFENCE.GVMA + * + * rs1!=zero and rs2!=zero ==> HFENCE.GVMA rs1, rs2 + * rs1==zero and rs2!=zero ==> HFENCE.GVMA zero, rs2 + * rs1!=zero and rs2==zero ==> HFENCE.GVMA rs1 + * rs1==zero and rs2==zero ==> HFENCE.GVMA + * + * Instruction encoding of HFENCE.GVMA is: + * 0110001 rs2(5) rs1(5) 000 00000 1110011 + */ + +ENTRY(__kvm_riscv_hfence_gvma_vmid_gpa) + /* + * rs1 = a0 (GPA) + * rs2 = a1 (VMID) + * HFENCE.GVMA a0, a1 + * 0110001 01011 01010 000 00000 1110011 + */ + .word 0x62b50073 + ret +ENDPROC(__kvm_riscv_hfence_gvma_vmid_gpa) + +ENTRY(__kvm_riscv_hfence_gvma_vmid) + /* + * rs1 = zero + * rs2 = a0 (VMID) + * HFENCE.GVMA zero, a0 + * 0110001 01010 00000 000 00000 1110011 + */ + .word 0x62a00073 + ret +ENDPROC(__kvm_riscv_hfence_gvma_vmid) + +ENTRY(__kvm_riscv_hfence_gvma_gpa) + /* + * rs1 = a0 (GPA) + * rs2 = zero + * HFENCE.GVMA a0 + * 0110001 00000 01010 000 00000 1110011 + */ + .word 0x62050073 + ret +ENDPROC(__kvm_riscv_hfence_gvma_gpa) + +ENTRY(__kvm_riscv_hfence_gvma_all) + /* + * rs1 = zero + * rs2 = zero + * HFENCE.GVMA + * 0110001 00000 00000 000 00000 1110011 + */ + .word 0x62000073 + ret +ENDPROC(__kvm_riscv_hfence_gvma_all) diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c index 64f74290a90f..dfe479d9f564 100644 --- a/arch/riscv/kvm/vcpu.c +++ b/arch/riscv/kvm/vcpu.c @@ -622,6 +622,12 @@ static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu) if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu)) kvm_riscv_reset_vcpu(vcpu); + + if (kvm_check_request(KVM_REQ_UPDATE_HGATP, vcpu)) + kvm_riscv_stage2_update_hgatp(vcpu); + + if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) + __kvm_riscv_hfence_gvma_all(); } } @@ -667,6 +673,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) /* Check conditions before entering the guest */ cond_resched(); + kvm_riscv_stage2_vmid_update(vcpu); + kvm_riscv_check_vcpu_requests(vcpu); preempt_disable(); @@ -703,6 +711,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) kvm_riscv_update_hvip(vcpu); if (ret <= 0 || + kvm_riscv_stage2_vmid_ver_changed(&vcpu->kvm->arch.vmid) || kvm_request_pending(vcpu)) { vcpu->mode = OUTSIDE_GUEST_MODE; local_irq_enable(); diff --git a/arch/riscv/kvm/vm.c b/arch/riscv/kvm/vm.c index ad38c575c0bd..42e75dc8ab06 100644 --- a/arch/riscv/kvm/vm.c +++ b/arch/riscv/kvm/vm.c @@ -41,6 +41,12 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) if (r) return r; + r = kvm_riscv_stage2_vmid_init(kvm); + if (r) { + kvm_riscv_stage2_free_pgd(kvm); + return r; + } + return 0; } diff --git a/arch/riscv/kvm/vmid.c b/arch/riscv/kvm/vmid.c new file mode 100644 index 000000000000..2c6253b293bc --- /dev/null +++ b/arch/riscv/kvm/vmid.c @@ -0,0 +1,120 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 Western Digital Corporation or its affiliates. 
+ * + * Authors: + * Anup Patel + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +static unsigned long vmid_version = 1; +static unsigned long vmid_next; +static unsigned long vmid_bits; +static DEFINE_SPINLOCK(vmid_lock); + +void kvm_riscv_stage2_vmid_detect(void) +{ + unsigned long old; + + /* Figure-out number of VMID bits in HW */ + old = csr_read(CSR_HGATP); + csr_write(CSR_HGATP, old | HGATP_VMID_MASK); + vmid_bits = csr_read(CSR_HGATP); + vmid_bits = (vmid_bits & HGATP_VMID_MASK) >> HGATP_VMID_SHIFT; + vmid_bits = fls_long(vmid_bits); + csr_write(CSR_HGATP, old); + + /* We polluted local TLB so flush all guest TLB */ + __kvm_riscv_hfence_gvma_all(); + + /* We don't use VMID bits if they are not sufficient */ + if ((1UL << vmid_bits) < num_possible_cpus()) + vmid_bits = 0; +} + +unsigned long kvm_riscv_stage2_vmid_bits(void) +{ + return vmid_bits; +} + +int kvm_riscv_stage2_vmid_init(struct kvm *kvm) +{ + /* Mark the initial VMID and VMID version invalid */ + kvm->arch.vmid.vmid_version = 0; + kvm->arch.vmid.vmid = 0; + + return 0; +} + +bool kvm_riscv_stage2_vmid_ver_changed(struct kvm_vmid *vmid) +{ + if (!vmid_bits) + return false; + + return unlikely(READ_ONCE(vmid->vmid_version) != + READ_ONCE(vmid_version)); +} + +void kvm_riscv_stage2_vmid_update(struct kvm_vcpu *vcpu) +{ + int i; + struct kvm_vcpu *v; + struct cpumask hmask; + struct kvm_vmid *vmid = &vcpu->kvm->arch.vmid; + + if (!kvm_riscv_stage2_vmid_ver_changed(vmid)) + return; + + spin_lock(&vmid_lock); + + /* + * We need to re-check the vmid_version here to ensure that if + * another vcpu already allocated a valid vmid for this vm. + */ + if (!kvm_riscv_stage2_vmid_ver_changed(vmid)) { + spin_unlock(&vmid_lock); + return; + } + + /* First user of a new VMID version? */ + if (unlikely(vmid_next == 0)) { + WRITE_ONCE(vmid_version, READ_ONCE(vmid_version) + 1); + vmid_next = 1; + + /* + * We ran out of VMIDs so we increment vmid_version and + * start assigning VMIDs from 1. + * + * This also means existing VMIDs assignement to all Guest + * instances is invalid and we have force VMID re-assignement + * for all Guest instances. The Guest instances that were not + * running will automatically pick-up new VMIDs because will + * call kvm_riscv_stage2_vmid_update() whenever they enter + * in-kernel run loop. For Guest instances that are already + * running, we force VM exits on all host CPUs using IPI and + * flush all Guest TLBs. + */ + riscv_cpuid_to_hartid_mask(cpu_online_mask, &hmask); + sbi_remote_hfence_gvma(cpumask_bits(&hmask), 0, 0); + } + + vmid->vmid = vmid_next; + vmid_next++; + vmid_next &= (1 << vmid_bits) - 1; + + WRITE_ONCE(vmid->vmid_version, READ_ONCE(vmid_version)); + + spin_unlock(&vmid_lock); + + /* Request stage2 page table update for all VCPUs */ + kvm_for_each_vcpu(i, v, vcpu->kvm) + kvm_make_request(KVM_REQ_UPDATE_HGATP, v); +} -- cgit v1.2.3 From 9d05c1fee837572d91f2b5463d67d4e098987e95 Mon Sep 17 00:00:00 2001 From: Anup Patel Date: Mon, 27 Sep 2021 17:10:09 +0530 Subject: RISC-V: KVM: Implement stage2 page table programming This patch implements all required functions for programming the stage2 page table for each Guest/VM. At high-level, the flow of stage2 related functions is similar from KVM ARM/ARM64 implementation but the stage2 page table format is quite different for KVM RISC-V. 
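
As a rough illustration of how different that format is, the Sv39x4
G-stage layout used below splits a guest physical address into a 12-bit
page offset, 9 index bits per level (leaf level 0 up to root level 2)
and 2 extra index bits in the 16KB root table, i.e. a 41-bit guest
physical address space. The stand-alone sketch below only restates that
arithmetic; the sample GPA is an arbitrary value chosen for the example
and gstage_index() is an invented helper, not a function from the patch.

  #include <stdio.h>
  #include <stdint.h>

  /*
   * Mirror of the index split described above: shift = 12 + 9 * level,
   * with 4 concatenated pages (2048 entries) at the root level.
   */
  static uint64_t gstage_index(uint64_t gpa, int level, int root_level)
  {
          unsigned int shift = 12 + 9 * level;
          uint64_t mask = (level == root_level) ? (512 * 4) - 1 : 512 - 1;

          return (gpa >> shift) & mask;
  }

  int main(void)
  {
          uint64_t gpa = 0x1234567000ULL;         /* arbitrary sample GPA */
          int level;

          for (level = 2; level >= 0; level--)
                  printf("level %d index = %llu\n", level,
                         (unsigned long long)gstage_index(gpa, level, 2));
          return 0;
  }

With this numbering, the page sizes that can be mapped at each level come
out as 4KB, 2MB and 1GB on 64-bit hosts, which is what the
stage2_level_to_page_size() helper below computes.
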
[jiangyifei: stage2 dirty log support] Signed-off-by: Yifei Jiang Signed-off-by: Anup Patel Acked-by: Paolo Bonzini Reviewed-by: Paolo Bonzini Acked-by: Palmer Dabbelt --- arch/riscv/include/asm/kvm_host.h | 12 + arch/riscv/kvm/Kconfig | 1 + arch/riscv/kvm/main.c | 19 ++ arch/riscv/kvm/mmu.c | 654 +++++++++++++++++++++++++++++++++++++- arch/riscv/kvm/vm.c | 6 - 5 files changed, 676 insertions(+), 16 deletions(-) diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h index 69c342430242..2e71a353395e 100644 --- a/arch/riscv/include/asm/kvm_host.h +++ b/arch/riscv/include/asm/kvm_host.h @@ -70,6 +70,13 @@ struct kvm_mmio_decode { int return_handled; }; +#define KVM_MMU_PAGE_CACHE_NR_OBJS 32 + +struct kvm_mmu_page_cache { + int nobjs; + void *objects[KVM_MMU_PAGE_CACHE_NR_OBJS]; +}; + struct kvm_cpu_trap { unsigned long sepc; unsigned long scause; @@ -171,6 +178,9 @@ struct kvm_vcpu_arch { /* MMIO instruction details */ struct kvm_mmio_decode mmio_decode; + /* Cache pages needed to program page tables with spinlock held */ + struct kvm_mmu_page_cache mmu_page_cache; + /* VCPU power-off state */ bool power_off; @@ -198,6 +208,8 @@ void kvm_riscv_stage2_flush_cache(struct kvm_vcpu *vcpu); int kvm_riscv_stage2_alloc_pgd(struct kvm *kvm); void kvm_riscv_stage2_free_pgd(struct kvm *kvm); void kvm_riscv_stage2_update_hgatp(struct kvm_vcpu *vcpu); +void kvm_riscv_stage2_mode_detect(void); +unsigned long kvm_riscv_stage2_mode(void); void kvm_riscv_stage2_vmid_detect(void); unsigned long kvm_riscv_stage2_vmid_bits(void); diff --git a/arch/riscv/kvm/Kconfig b/arch/riscv/kvm/Kconfig index b42979f84042..633063edaee8 100644 --- a/arch/riscv/kvm/Kconfig +++ b/arch/riscv/kvm/Kconfig @@ -23,6 +23,7 @@ config KVM select PREEMPT_NOTIFIERS select ANON_INODES select KVM_MMIO + select KVM_GENERIC_DIRTYLOG_READ_PROTECT select HAVE_KVM_VCPU_ASYNC_IOCTL select HAVE_KVM_EVENTFD select SRCU diff --git a/arch/riscv/kvm/main.c b/arch/riscv/kvm/main.c index 49a4941e3838..421ecf4e6360 100644 --- a/arch/riscv/kvm/main.c +++ b/arch/riscv/kvm/main.c @@ -64,6 +64,8 @@ void kvm_arch_hardware_disable(void) int kvm_arch_init(void *opaque) { + const char *str; + if (!riscv_isa_extension_available(NULL, h)) { kvm_info("hypervisor extension not available\n"); return -ENODEV; @@ -79,10 +81,27 @@ int kvm_arch_init(void *opaque) return -ENODEV; } + kvm_riscv_stage2_mode_detect(); + kvm_riscv_stage2_vmid_detect(); kvm_info("hypervisor extension available\n"); + switch (kvm_riscv_stage2_mode()) { + case HGATP_MODE_SV32X4: + str = "Sv32x4"; + break; + case HGATP_MODE_SV39X4: + str = "Sv39x4"; + break; + case HGATP_MODE_SV48X4: + str = "Sv48x4"; + break; + default: + return -ENODEV; + } + kvm_info("using %s G-stage page table format\n", str); + kvm_info("VMID %ld bits available\n", kvm_riscv_stage2_vmid_bits()); return 0; diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c index 8ec10ef861e7..fa9a4f9b9542 100644 --- a/arch/riscv/kvm/mmu.c +++ b/arch/riscv/kvm/mmu.c @@ -15,13 +15,421 @@ #include #include #include +#include #include #include +#include + +#ifdef CONFIG_64BIT +static unsigned long stage2_mode = (HGATP_MODE_SV39X4 << HGATP_MODE_SHIFT); +static unsigned long stage2_pgd_levels = 3; +#define stage2_index_bits 9 +#else +static unsigned long stage2_mode = (HGATP_MODE_SV32X4 << HGATP_MODE_SHIFT); +static unsigned long stage2_pgd_levels = 2; +#define stage2_index_bits 10 +#endif + +#define stage2_pgd_xbits 2 +#define stage2_pgd_size (1UL << (HGATP_PAGE_SHIFT + stage2_pgd_xbits)) +#define 
stage2_gpa_bits (HGATP_PAGE_SHIFT + \ + (stage2_pgd_levels * stage2_index_bits) + \ + stage2_pgd_xbits) +#define stage2_gpa_size ((gpa_t)(1ULL << stage2_gpa_bits)) + +#define stage2_pte_leaf(__ptep) \ + (pte_val(*(__ptep)) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)) + +static inline unsigned long stage2_pte_index(gpa_t addr, u32 level) +{ + unsigned long mask; + unsigned long shift = HGATP_PAGE_SHIFT + (stage2_index_bits * level); + + if (level == (stage2_pgd_levels - 1)) + mask = (PTRS_PER_PTE * (1UL << stage2_pgd_xbits)) - 1; + else + mask = PTRS_PER_PTE - 1; + + return (addr >> shift) & mask; +} + +static inline unsigned long stage2_pte_page_vaddr(pte_t pte) +{ + return (unsigned long)pfn_to_virt(pte_val(pte) >> _PAGE_PFN_SHIFT); +} + +static int stage2_page_size_to_level(unsigned long page_size, u32 *out_level) +{ + u32 i; + unsigned long psz = 1UL << 12; + + for (i = 0; i < stage2_pgd_levels; i++) { + if (page_size == (psz << (i * stage2_index_bits))) { + *out_level = i; + return 0; + } + } + + return -EINVAL; +} + +static int stage2_level_to_page_size(u32 level, unsigned long *out_pgsize) +{ + if (stage2_pgd_levels < level) + return -EINVAL; + + *out_pgsize = 1UL << (12 + (level * stage2_index_bits)); + + return 0; +} + +static int stage2_cache_topup(struct kvm_mmu_page_cache *pcache, + int min, int max) +{ + void *page; + + BUG_ON(max > KVM_MMU_PAGE_CACHE_NR_OBJS); + if (pcache->nobjs >= min) + return 0; + while (pcache->nobjs < max) { + page = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO); + if (!page) + return -ENOMEM; + pcache->objects[pcache->nobjs++] = page; + } + + return 0; +} + +static void stage2_cache_flush(struct kvm_mmu_page_cache *pcache) +{ + while (pcache && pcache->nobjs) + free_page((unsigned long)pcache->objects[--pcache->nobjs]); +} + +static void *stage2_cache_alloc(struct kvm_mmu_page_cache *pcache) +{ + void *p; + + if (!pcache) + return NULL; + + BUG_ON(!pcache->nobjs); + p = pcache->objects[--pcache->nobjs]; + + return p; +} + +static bool stage2_get_leaf_entry(struct kvm *kvm, gpa_t addr, + pte_t **ptepp, u32 *ptep_level) +{ + pte_t *ptep; + u32 current_level = stage2_pgd_levels - 1; + + *ptep_level = current_level; + ptep = (pte_t *)kvm->arch.pgd; + ptep = &ptep[stage2_pte_index(addr, current_level)]; + while (ptep && pte_val(*ptep)) { + if (stage2_pte_leaf(ptep)) { + *ptep_level = current_level; + *ptepp = ptep; + return true; + } + + if (current_level) { + current_level--; + *ptep_level = current_level; + ptep = (pte_t *)stage2_pte_page_vaddr(*ptep); + ptep = &ptep[stage2_pte_index(addr, current_level)]; + } else { + ptep = NULL; + } + } + + return false; +} + +static void stage2_remote_tlb_flush(struct kvm *kvm, u32 level, gpa_t addr) +{ + struct cpumask hmask; + unsigned long size = PAGE_SIZE; + struct kvm_vmid *vmid = &kvm->arch.vmid; + + if (stage2_level_to_page_size(level, &size)) + return; + addr &= ~(size - 1); + + /* + * TODO: Instead of cpu_online_mask, we should only target CPUs + * where the Guest/VM is running. 
+ */ + preempt_disable(); + riscv_cpuid_to_hartid_mask(cpu_online_mask, &hmask); + sbi_remote_hfence_gvma_vmid(cpumask_bits(&hmask), addr, size, + READ_ONCE(vmid->vmid)); + preempt_enable(); +} + +static int stage2_set_pte(struct kvm *kvm, u32 level, + struct kvm_mmu_page_cache *pcache, + gpa_t addr, const pte_t *new_pte) +{ + u32 current_level = stage2_pgd_levels - 1; + pte_t *next_ptep = (pte_t *)kvm->arch.pgd; + pte_t *ptep = &next_ptep[stage2_pte_index(addr, current_level)]; + + if (current_level < level) + return -EINVAL; + + while (current_level != level) { + if (stage2_pte_leaf(ptep)) + return -EEXIST; + + if (!pte_val(*ptep)) { + next_ptep = stage2_cache_alloc(pcache); + if (!next_ptep) + return -ENOMEM; + *ptep = pfn_pte(PFN_DOWN(__pa(next_ptep)), + __pgprot(_PAGE_TABLE)); + } else { + if (stage2_pte_leaf(ptep)) + return -EEXIST; + next_ptep = (pte_t *)stage2_pte_page_vaddr(*ptep); + } + + current_level--; + ptep = &next_ptep[stage2_pte_index(addr, current_level)]; + } + + *ptep = *new_pte; + if (stage2_pte_leaf(ptep)) + stage2_remote_tlb_flush(kvm, current_level, addr); + + return 0; +} + +static int stage2_map_page(struct kvm *kvm, + struct kvm_mmu_page_cache *pcache, + gpa_t gpa, phys_addr_t hpa, + unsigned long page_size, + bool page_rdonly, bool page_exec) +{ + int ret; + u32 level = 0; + pte_t new_pte; + pgprot_t prot; + + ret = stage2_page_size_to_level(page_size, &level); + if (ret) + return ret; + + /* + * A RISC-V implementation can choose to either: + * 1) Update 'A' and 'D' PTE bits in hardware + * 2) Generate page fault when 'A' and/or 'D' bits are not set + * PTE so that software can update these bits. + * + * We support both options mentioned above. To achieve this, we + * always set 'A' and 'D' PTE bits at time of creating stage2 + * mapping. To support KVM dirty page logging with both options + * mentioned above, we will write-protect stage2 PTEs to track + * dirty pages. 
+ */ + + if (page_exec) { + if (page_rdonly) + prot = PAGE_READ_EXEC; + else + prot = PAGE_WRITE_EXEC; + } else { + if (page_rdonly) + prot = PAGE_READ; + else + prot = PAGE_WRITE; + } + new_pte = pfn_pte(PFN_DOWN(hpa), prot); + new_pte = pte_mkdirty(new_pte); + + return stage2_set_pte(kvm, level, pcache, gpa, &new_pte); +} + +enum stage2_op { + STAGE2_OP_NOP = 0, /* Nothing */ + STAGE2_OP_CLEAR, /* Clear/Unmap */ + STAGE2_OP_WP, /* Write-protect */ +}; + +static void stage2_op_pte(struct kvm *kvm, gpa_t addr, + pte_t *ptep, u32 ptep_level, enum stage2_op op) +{ + int i, ret; + pte_t *next_ptep; + u32 next_ptep_level; + unsigned long next_page_size, page_size; + + ret = stage2_level_to_page_size(ptep_level, &page_size); + if (ret) + return; + + BUG_ON(addr & (page_size - 1)); + + if (!pte_val(*ptep)) + return; + + if (ptep_level && !stage2_pte_leaf(ptep)) { + next_ptep = (pte_t *)stage2_pte_page_vaddr(*ptep); + next_ptep_level = ptep_level - 1; + ret = stage2_level_to_page_size(next_ptep_level, + &next_page_size); + if (ret) + return; + + if (op == STAGE2_OP_CLEAR) + set_pte(ptep, __pte(0)); + for (i = 0; i < PTRS_PER_PTE; i++) + stage2_op_pte(kvm, addr + i * next_page_size, + &next_ptep[i], next_ptep_level, op); + if (op == STAGE2_OP_CLEAR) + put_page(virt_to_page(next_ptep)); + } else { + if (op == STAGE2_OP_CLEAR) + set_pte(ptep, __pte(0)); + else if (op == STAGE2_OP_WP) + set_pte(ptep, __pte(pte_val(*ptep) & ~_PAGE_WRITE)); + stage2_remote_tlb_flush(kvm, ptep_level, addr); + } +} + +static void stage2_unmap_range(struct kvm *kvm, gpa_t start, gpa_t size) +{ + int ret; + pte_t *ptep; + u32 ptep_level; + bool found_leaf; + unsigned long page_size; + gpa_t addr = start, end = start + size; + + while (addr < end) { + found_leaf = stage2_get_leaf_entry(kvm, addr, + &ptep, &ptep_level); + ret = stage2_level_to_page_size(ptep_level, &page_size); + if (ret) + break; + + if (!found_leaf) + goto next; + + if (!(addr & (page_size - 1)) && ((end - addr) >= page_size)) + stage2_op_pte(kvm, addr, ptep, + ptep_level, STAGE2_OP_CLEAR); + +next: + addr += page_size; + } +} + +static void stage2_wp_range(struct kvm *kvm, gpa_t start, gpa_t end) +{ + int ret; + pte_t *ptep; + u32 ptep_level; + bool found_leaf; + gpa_t addr = start; + unsigned long page_size; + + while (addr < end) { + found_leaf = stage2_get_leaf_entry(kvm, addr, + &ptep, &ptep_level); + ret = stage2_level_to_page_size(ptep_level, &page_size); + if (ret) + break; + + if (!found_leaf) + goto next; + + if (!(addr & (page_size - 1)) && ((end - addr) >= page_size)) + stage2_op_pte(kvm, addr, ptep, + ptep_level, STAGE2_OP_WP); + +next: + addr += page_size; + } +} + +static void stage2_wp_memory_region(struct kvm *kvm, int slot) +{ + struct kvm_memslots *slots = kvm_memslots(kvm); + struct kvm_memory_slot *memslot = id_to_memslot(slots, slot); + phys_addr_t start = memslot->base_gfn << PAGE_SHIFT; + phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT; + + spin_lock(&kvm->mmu_lock); + stage2_wp_range(kvm, start, end); + spin_unlock(&kvm->mmu_lock); + kvm_flush_remote_tlbs(kvm); +} + +static int stage2_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa, + unsigned long size, bool writable) +{ + pte_t pte; + int ret = 0; + unsigned long pfn; + phys_addr_t addr, end; + struct kvm_mmu_page_cache pcache = { 0, }; + + end = (gpa + size + PAGE_SIZE - 1) & PAGE_MASK; + pfn = __phys_to_pfn(hpa); + + for (addr = gpa; addr < end; addr += PAGE_SIZE) { + pte = pfn_pte(pfn, PAGE_KERNEL); + + if (!writable) + pte = pte_wrprotect(pte); + + 
ret = stage2_cache_topup(&pcache, + stage2_pgd_levels, + KVM_MMU_PAGE_CACHE_NR_OBJS); + if (ret) + goto out; + + spin_lock(&kvm->mmu_lock); + ret = stage2_set_pte(kvm, 0, &pcache, addr, &pte); + spin_unlock(&kvm->mmu_lock); + if (ret) + goto out; + + pfn++; + } + +out: + stage2_cache_flush(&pcache); + return ret; + +} + +void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, + struct kvm_memory_slot *slot, + gfn_t gfn_offset, + unsigned long mask) +{ + phys_addr_t base_gfn = slot->base_gfn + gfn_offset; + phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT; + phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT; + + stage2_wp_range(kvm, start, end); +} void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) { } +void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, + const struct kvm_memory_slot *memslot) +{ + kvm_flush_remote_tlbs(kvm); +} + void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free) { } @@ -32,7 +440,7 @@ void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) void kvm_arch_flush_shadow_all(struct kvm *kvm) { - /* TODO: */ + kvm_riscv_stage2_free_pgd(kvm); } void kvm_arch_flush_shadow_memslot(struct kvm *kvm, @@ -46,7 +454,13 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, const struct kvm_memory_slot *new, enum kvm_mr_change change) { - /* TODO: */ + /* + * At this point memslot has been committed and there is an + * allocated dirty_bitmap[], dirty pages will be tracked while + * the memory slot is write protected. + */ + if (change != KVM_MR_DELETE && mem->flags & KVM_MEM_LOG_DIRTY_PAGES) + stage2_wp_memory_region(kvm, mem->slot); } int kvm_arch_prepare_memory_region(struct kvm *kvm, @@ -54,35 +468,255 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, const struct kvm_userspace_memory_region *mem, enum kvm_mr_change change) { - /* TODO: */ - return 0; + hva_t hva = mem->userspace_addr; + hva_t reg_end = hva + mem->memory_size; + bool writable = !(mem->flags & KVM_MEM_READONLY); + int ret = 0; + + if (change != KVM_MR_CREATE && change != KVM_MR_MOVE && + change != KVM_MR_FLAGS_ONLY) + return 0; + + /* + * Prevent userspace from creating a memory region outside of the GPA + * space addressable by the KVM guest GPA space. + */ + if ((memslot->base_gfn + memslot->npages) >= + (stage2_gpa_size >> PAGE_SHIFT)) + return -EFAULT; + + mmap_read_lock(current->mm); + + /* + * A memory region could potentially cover multiple VMAs, and + * any holes between them, so iterate over all of them to find + * out if we can map any of them right now. + * + * +--------------------------------------------+ + * +---------------+----------------+ +----------------+ + * | : VMA 1 | VMA 2 | | VMA 3 : | + * +---------------+----------------+ +----------------+ + * | memory region | + * +--------------------------------------------+ + */ + do { + struct vm_area_struct *vma = find_vma(current->mm, hva); + hva_t vm_start, vm_end; + + if (!vma || vma->vm_start >= reg_end) + break; + + /* + * Mapping a read-only VMA is only allowed if the + * memory region is configured as read-only. 
+ */ + if (writable && !(vma->vm_flags & VM_WRITE)) { + ret = -EPERM; + break; + } + + /* Take the intersection of this VMA with the memory region */ + vm_start = max(hva, vma->vm_start); + vm_end = min(reg_end, vma->vm_end); + + if (vma->vm_flags & VM_PFNMAP) { + gpa_t gpa = mem->guest_phys_addr + + (vm_start - mem->userspace_addr); + phys_addr_t pa; + + pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT; + pa += vm_start - vma->vm_start; + + /* IO region dirty page logging not allowed */ + if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) { + ret = -EINVAL; + goto out; + } + + ret = stage2_ioremap(kvm, gpa, pa, + vm_end - vm_start, writable); + if (ret) + break; + } + hva = vm_end; + } while (hva < reg_end); + + if (change == KVM_MR_FLAGS_ONLY) + goto out; + + spin_lock(&kvm->mmu_lock); + if (ret) + stage2_unmap_range(kvm, mem->guest_phys_addr, + mem->memory_size); + spin_unlock(&kvm->mmu_lock); + +out: + mmap_read_unlock(current->mm); + return ret; } int kvm_riscv_stage2_map(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot, gpa_t gpa, unsigned long hva, bool is_write) { - /* TODO: */ - return 0; + int ret; + kvm_pfn_t hfn; + bool writeable; + short vma_pageshift; + gfn_t gfn = gpa >> PAGE_SHIFT; + struct vm_area_struct *vma; + struct kvm *kvm = vcpu->kvm; + struct kvm_mmu_page_cache *pcache = &vcpu->arch.mmu_page_cache; + bool logging = (memslot->dirty_bitmap && + !(memslot->flags & KVM_MEM_READONLY)) ? true : false; + unsigned long vma_pagesize; + + mmap_read_lock(current->mm); + + vma = find_vma_intersection(current->mm, hva, hva + 1); + if (unlikely(!vma)) { + kvm_err("Failed to find VMA for hva 0x%lx\n", hva); + mmap_read_unlock(current->mm); + return -EFAULT; + } + + if (is_vm_hugetlb_page(vma)) + vma_pageshift = huge_page_shift(hstate_vma(vma)); + else + vma_pageshift = PAGE_SHIFT; + vma_pagesize = 1ULL << vma_pageshift; + if (logging || (vma->vm_flags & VM_PFNMAP)) + vma_pagesize = PAGE_SIZE; + + if (vma_pagesize == PMD_SIZE || vma_pagesize == PGDIR_SIZE) + gfn = (gpa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT; + + mmap_read_unlock(current->mm); + + if (vma_pagesize != PGDIR_SIZE && + vma_pagesize != PMD_SIZE && + vma_pagesize != PAGE_SIZE) { + kvm_err("Invalid VMA page size 0x%lx\n", vma_pagesize); + return -EFAULT; + } + + /* We need minimum second+third level pages */ + ret = stage2_cache_topup(pcache, stage2_pgd_levels, + KVM_MMU_PAGE_CACHE_NR_OBJS); + if (ret) { + kvm_err("Failed to topup stage2 cache\n"); + return ret; + } + + hfn = gfn_to_pfn_prot(kvm, gfn, is_write, &writeable); + if (hfn == KVM_PFN_ERR_HWPOISON) { + send_sig_mceerr(BUS_MCEERR_AR, (void __user *)hva, + vma_pageshift, current); + return 0; + } + if (is_error_noslot_pfn(hfn)) + return -EFAULT; + + /* + * If logging is active then we allow writable pages only + * for write faults. 
+ */ + if (logging && !is_write) + writeable = false; + + spin_lock(&kvm->mmu_lock); + + if (writeable) { + kvm_set_pfn_dirty(hfn); + mark_page_dirty(kvm, gfn); + ret = stage2_map_page(kvm, pcache, gpa, hfn << PAGE_SHIFT, + vma_pagesize, false, true); + } else { + ret = stage2_map_page(kvm, pcache, gpa, hfn << PAGE_SHIFT, + vma_pagesize, true, true); + } + + if (ret) + kvm_err("Failed to map in stage2\n"); + + spin_unlock(&kvm->mmu_lock); + kvm_set_pfn_accessed(hfn); + kvm_release_pfn_clean(hfn); + return ret; } void kvm_riscv_stage2_flush_cache(struct kvm_vcpu *vcpu) { - /* TODO: */ + stage2_cache_flush(&vcpu->arch.mmu_page_cache); } int kvm_riscv_stage2_alloc_pgd(struct kvm *kvm) { - /* TODO: */ + struct page *pgd_page; + + if (kvm->arch.pgd != NULL) { + kvm_err("kvm_arch already initialized?\n"); + return -EINVAL; + } + + pgd_page = alloc_pages(GFP_KERNEL | __GFP_ZERO, + get_order(stage2_pgd_size)); + if (!pgd_page) + return -ENOMEM; + kvm->arch.pgd = page_to_virt(pgd_page); + kvm->arch.pgd_phys = page_to_phys(pgd_page); + return 0; } void kvm_riscv_stage2_free_pgd(struct kvm *kvm) { - /* TODO: */ + void *pgd = NULL; + + spin_lock(&kvm->mmu_lock); + if (kvm->arch.pgd) { + stage2_unmap_range(kvm, 0UL, stage2_gpa_size); + pgd = READ_ONCE(kvm->arch.pgd); + kvm->arch.pgd = NULL; + kvm->arch.pgd_phys = 0; + } + spin_unlock(&kvm->mmu_lock); + + if (pgd) + free_pages((unsigned long)pgd, get_order(stage2_pgd_size)); } void kvm_riscv_stage2_update_hgatp(struct kvm_vcpu *vcpu) { - /* TODO: */ + unsigned long hgatp = stage2_mode; + struct kvm_arch *k = &vcpu->kvm->arch; + + hgatp |= (READ_ONCE(k->vmid.vmid) << HGATP_VMID_SHIFT) & + HGATP_VMID_MASK; + hgatp |= (k->pgd_phys >> PAGE_SHIFT) & HGATP_PPN; + + csr_write(CSR_HGATP, hgatp); + + if (!kvm_riscv_stage2_vmid_bits()) + __kvm_riscv_hfence_gvma_all(); +} + +void kvm_riscv_stage2_mode_detect(void) +{ +#ifdef CONFIG_64BIT + /* Try Sv48x4 stage2 mode */ + csr_write(CSR_HGATP, HGATP_MODE_SV48X4 << HGATP_MODE_SHIFT); + if ((csr_read(CSR_HGATP) >> HGATP_MODE_SHIFT) == HGATP_MODE_SV48X4) { + stage2_mode = (HGATP_MODE_SV48X4 << HGATP_MODE_SHIFT); + stage2_pgd_levels = 4; + } + csr_write(CSR_HGATP, 0); + + __kvm_riscv_hfence_gvma_all(); +#endif +} + +unsigned long kvm_riscv_stage2_mode(void) +{ + return stage2_mode >> HGATP_MODE_SHIFT; } diff --git a/arch/riscv/kvm/vm.c b/arch/riscv/kvm/vm.c index 42e75dc8ab06..e2834ab9044c 100644 --- a/arch/riscv/kvm/vm.c +++ b/arch/riscv/kvm/vm.c @@ -27,12 +27,6 @@ const struct kvm_stats_header kvm_vm_stats_header = { sizeof(kvm_vm_stats_desc), }; -int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) -{ - /* TODO: To be added later. */ - return -EOPNOTSUPP; -} - int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) { int r; -- cgit v1.2.3 From 9955371cc014e02a1ef2d13c4aaf743d18bd66aa Mon Sep 17 00:00:00 2001 From: Anup Patel Date: Mon, 27 Sep 2021 17:10:10 +0530 Subject: RISC-V: KVM: Implement MMU notifiers This patch implements MMU notifiers for KVM RISC-V so that Guest physical address space is in-sync with Host physical address space. This will allow swapping, page migration, etc to work transparently with KVM RISC-V. 
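
The part that makes this safe is the sequence-count retry protocol in
the stage2 fault handler: sample kvm->mmu_notifier_seq before
translating the HVA, then re-check it under kvm->mmu_lock and retry the
fault if an invalidation ran in between (that is what the
mmu_notifier_retry() check added below does). The toy user-space model
that follows only demonstrates the idea; notifier_seq, fault_in_page()
and invalidate_range() are invented names for the illustration.

  #include <stdio.h>

  static unsigned long notifier_seq;

  /* Stand-in for what an MMU notifier invalidation callback would do. */
  static void invalidate_range(void)
  {
          notifier_seq++;
  }

  static int fault_in_page(int simulate_race)
  {
          unsigned long seq = notifier_seq;   /* snapshot before the slow path */

          /* ...resolve HVA to PFN, possibly sleeping, without the lock... */
          if (simulate_race)
                  invalidate_range();

          /* back under the lock: only commit if nothing changed meanwhile */
          if (seq != notifier_seq)
                  return -1;                  /* caller must retry the fault */
          return 0;                           /* safe to install the stage2 PTE */
  }

  int main(void)
  {
          printf("no race:   %s\n", fault_in_page(0) ? "retry" : "mapped");
          printf("with race: %s\n", fault_in_page(1) ? "retry" : "mapped");
          return 0;
  }
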
Signed-off-by: Anup Patel Acked-by: Paolo Bonzini Reviewed-by: Paolo Bonzini Reviewed-by: Alexander Graf Acked-by: Palmer Dabbelt --- arch/riscv/include/asm/kvm_host.h | 2 + arch/riscv/kvm/Kconfig | 1 + arch/riscv/kvm/mmu.c | 90 ++++++++++++++++++++++++++++++++++++--- arch/riscv/kvm/vm.c | 1 + 4 files changed, 89 insertions(+), 5 deletions(-) diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h index 2e71a353395e..17ed90a4798e 100644 --- a/arch/riscv/include/asm/kvm_host.h +++ b/arch/riscv/include/asm/kvm_host.h @@ -196,6 +196,8 @@ static inline void kvm_arch_sync_events(struct kvm *kvm) {} static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {} +#define KVM_ARCH_WANT_MMU_NOTIFIER + void __kvm_riscv_hfence_gvma_vmid_gpa(unsigned long gpa, unsigned long vmid); void __kvm_riscv_hfence_gvma_vmid(unsigned long vmid); void __kvm_riscv_hfence_gvma_gpa(unsigned long gpa); diff --git a/arch/riscv/kvm/Kconfig b/arch/riscv/kvm/Kconfig index 633063edaee8..a712bb910cda 100644 --- a/arch/riscv/kvm/Kconfig +++ b/arch/riscv/kvm/Kconfig @@ -20,6 +20,7 @@ if VIRTUALIZATION config KVM tristate "Kernel-based Virtual Machine (KVM) support (EXPERIMENTAL)" depends on RISCV_SBI && MMU + select MMU_NOTIFIER select PREEMPT_NOTIFIERS select ANON_INODES select KVM_MMIO diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c index fa9a4f9b9542..3a00c2df7640 100644 --- a/arch/riscv/kvm/mmu.c +++ b/arch/riscv/kvm/mmu.c @@ -300,7 +300,8 @@ static void stage2_op_pte(struct kvm *kvm, gpa_t addr, } } -static void stage2_unmap_range(struct kvm *kvm, gpa_t start, gpa_t size) +static void stage2_unmap_range(struct kvm *kvm, gpa_t start, + gpa_t size, bool may_block) { int ret; pte_t *ptep; @@ -325,6 +326,13 @@ static void stage2_unmap_range(struct kvm *kvm, gpa_t start, gpa_t size) next: addr += page_size; + + /* + * If the range is too large, release the kvm->mmu_lock + * to prevent starvation and lockup detector warnings. 
+ */ + if (may_block && addr < end) + cond_resched_lock(&kvm->mmu_lock); } } @@ -405,7 +413,6 @@ static int stage2_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa, out: stage2_cache_flush(&pcache); return ret; - } void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, @@ -547,7 +554,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, spin_lock(&kvm->mmu_lock); if (ret) stage2_unmap_range(kvm, mem->guest_phys_addr, - mem->memory_size); + mem->memory_size, false); spin_unlock(&kvm->mmu_lock); out: @@ -555,6 +562,73 @@ out: return ret; } +bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) +{ + if (!kvm->arch.pgd) + return 0; + + stage2_unmap_range(kvm, range->start << PAGE_SHIFT, + (range->end - range->start) << PAGE_SHIFT, + range->may_block); + return 0; +} + +bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) +{ + int ret; + kvm_pfn_t pfn = pte_pfn(range->pte); + + if (!kvm->arch.pgd) + return 0; + + WARN_ON(range->end - range->start != 1); + + ret = stage2_map_page(kvm, NULL, range->start << PAGE_SHIFT, + __pfn_to_phys(pfn), PAGE_SIZE, true, true); + if (ret) { + kvm_debug("Failed to map stage2 page (error %d)\n", ret); + return 1; + } + + return 0; +} + +bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) +{ + pte_t *ptep; + u32 ptep_level = 0; + u64 size = (range->end - range->start) << PAGE_SHIFT; + + if (!kvm->arch.pgd) + return 0; + + WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PGDIR_SIZE); + + if (!stage2_get_leaf_entry(kvm, range->start << PAGE_SHIFT, + &ptep, &ptep_level)) + return 0; + + return ptep_test_and_clear_young(NULL, 0, ptep); +} + +bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) +{ + pte_t *ptep; + u32 ptep_level = 0; + u64 size = (range->end - range->start) << PAGE_SHIFT; + + if (!kvm->arch.pgd) + return 0; + + WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PGDIR_SIZE); + + if (!stage2_get_leaf_entry(kvm, range->start << PAGE_SHIFT, + &ptep, &ptep_level)) + return 0; + + return pte_young(*ptep); +} + int kvm_riscv_stage2_map(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot, gpa_t gpa, unsigned long hva, bool is_write) @@ -569,7 +643,7 @@ int kvm_riscv_stage2_map(struct kvm_vcpu *vcpu, struct kvm_mmu_page_cache *pcache = &vcpu->arch.mmu_page_cache; bool logging = (memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY)) ? 
true : false; - unsigned long vma_pagesize; + unsigned long vma_pagesize, mmu_seq; mmap_read_lock(current->mm); @@ -608,6 +682,8 @@ int kvm_riscv_stage2_map(struct kvm_vcpu *vcpu, return ret; } + mmu_seq = kvm->mmu_notifier_seq; + hfn = gfn_to_pfn_prot(kvm, gfn, is_write, &writeable); if (hfn == KVM_PFN_ERR_HWPOISON) { send_sig_mceerr(BUS_MCEERR_AR, (void __user *)hva, @@ -626,6 +702,9 @@ int kvm_riscv_stage2_map(struct kvm_vcpu *vcpu, spin_lock(&kvm->mmu_lock); + if (mmu_notifier_retry(kvm, mmu_seq)) + goto out_unlock; + if (writeable) { kvm_set_pfn_dirty(hfn); mark_page_dirty(kvm, gfn); @@ -639,6 +718,7 @@ int kvm_riscv_stage2_map(struct kvm_vcpu *vcpu, if (ret) kvm_err("Failed to map in stage2\n"); +out_unlock: spin_unlock(&kvm->mmu_lock); kvm_set_pfn_accessed(hfn); kvm_release_pfn_clean(hfn); @@ -675,7 +755,7 @@ void kvm_riscv_stage2_free_pgd(struct kvm *kvm) spin_lock(&kvm->mmu_lock); if (kvm->arch.pgd) { - stage2_unmap_range(kvm, 0UL, stage2_gpa_size); + stage2_unmap_range(kvm, 0UL, stage2_gpa_size, false); pgd = READ_ONCE(kvm->arch.pgd); kvm->arch.pgd = NULL; kvm->arch.pgd_phys = 0; diff --git a/arch/riscv/kvm/vm.c b/arch/riscv/kvm/vm.c index e2834ab9044c..892d020674c0 100644 --- a/arch/riscv/kvm/vm.c +++ b/arch/riscv/kvm/vm.c @@ -65,6 +65,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_IOEVENTFD: case KVM_CAP_DEVICE_CTRL: case KVM_CAP_USER_MEMORY: + case KVM_CAP_SYNC_MMU: case KVM_CAP_DESTROY_MEMORY_REGION_WORKS: case KVM_CAP_ONE_REG: case KVM_CAP_READONLY_MEM: -- cgit v1.2.3 From 3a9f66cb25e18a3eeca36c08d9f823a35b3ddc22 Mon Sep 17 00:00:00 2001 From: Atish Patra Date: Mon, 27 Sep 2021 17:10:11 +0530 Subject: RISC-V: KVM: Add timer functionality The RISC-V hypervisor specification doesn't have any virtual timer feature. Due to this, the guest VCPU timer will be programmed via SBI calls. The host will use a separate hrtimer event for each guest VCPU to provide timer functionality. We inject a virtual timer interrupt to the guest VCPU whenever the guest VCPU hrtimer event expires. This patch adds guest VCPU timer implementation along with ONE_REG interface to access VCPU timer state from user space. 
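
Since the guest timer is just an hrtimer on the host, the core of the
mechanism is converting the cycle delta between the guest's SBI
set_timer target and the current time counter into nanoseconds using
the nsec_mult/nsec_shift pair this patch adds to kvm_guest_timer. The
sketch below only restates that arithmetic; the 10 MHz timebase, the
chosen mult/shift values and the sample cycle counts are assumptions for
the example (the kernel would derive mult/shift from the real timebase
frequency).

  #include <stdio.h>
  #include <stdint.h>

  int main(void)
  {
          /* Assume a 10 MHz timebase: one cycle is 100 ns. */
          uint32_t nsec_mult = 100, nsec_shift = 0;

          uint64_t current_cycles = 1000000;   /* "now", e.g. the time CSR */
          uint64_t next_cycles    = 1005000;   /* guest's SBI set_timer target */

          uint64_t delta_ns = ((next_cycles - current_cycles) *
                               (uint64_t)nsec_mult) >> nsec_shift;

          printf("arm the VCPU hrtimer %llu ns from now\n",
                 (unsigned long long)delta_ns);
          return 0;
  }

When that hrtimer expires, the host injects the virtual timer interrupt
into the VCPU, which is exactly the behaviour described in the commit
message above.
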
Signed-off-by: Atish Patra Signed-off-by: Anup Patel Acked-by: Paolo Bonzini Reviewed-by: Paolo Bonzini Acked-by: Daniel Lezcano Acked-by: Palmer Dabbelt --- arch/riscv/include/asm/kvm_host.h | 7 + arch/riscv/include/asm/kvm_vcpu_timer.h | 44 +++++++ arch/riscv/include/uapi/asm/kvm.h | 17 +++ arch/riscv/kvm/Makefile | 1 + arch/riscv/kvm/vcpu.c | 14 ++ arch/riscv/kvm/vcpu_timer.c | 225 ++++++++++++++++++++++++++++++++ arch/riscv/kvm/vm.c | 2 +- drivers/clocksource/timer-riscv.c | 9 ++ include/clocksource/timer-riscv.h | 16 +++ 9 files changed, 334 insertions(+), 1 deletion(-) create mode 100644 arch/riscv/include/asm/kvm_vcpu_timer.h create mode 100644 arch/riscv/kvm/vcpu_timer.c create mode 100644 include/clocksource/timer-riscv.h diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h index 17ed90a4798e..c22d3f6519fa 100644 --- a/arch/riscv/include/asm/kvm_host.h +++ b/arch/riscv/include/asm/kvm_host.h @@ -12,6 +12,7 @@ #include #include #include +#include #ifdef CONFIG_64BIT #define KVM_MAX_VCPUS (1U << 16) @@ -60,6 +61,9 @@ struct kvm_arch { /* stage2 page table */ pgd_t *pgd; phys_addr_t pgd_phys; + + /* Guest Timer */ + struct kvm_guest_timer timer; }; struct kvm_mmio_decode { @@ -175,6 +179,9 @@ struct kvm_vcpu_arch { unsigned long irqs_pending; unsigned long irqs_pending_mask; + /* VCPU Timer */ + struct kvm_vcpu_timer timer; + /* MMIO instruction details */ struct kvm_mmio_decode mmio_decode; diff --git a/arch/riscv/include/asm/kvm_vcpu_timer.h b/arch/riscv/include/asm/kvm_vcpu_timer.h new file mode 100644 index 000000000000..375281eb49e0 --- /dev/null +++ b/arch/riscv/include/asm/kvm_vcpu_timer.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2019 Western Digital Corporation or its affiliates. 
+ * + * Authors: + * Atish Patra + */ + +#ifndef __KVM_VCPU_RISCV_TIMER_H +#define __KVM_VCPU_RISCV_TIMER_H + +#include + +struct kvm_guest_timer { + /* Mult & Shift values to get nanoseconds from cycles */ + u32 nsec_mult; + u32 nsec_shift; + /* Time delta value */ + u64 time_delta; +}; + +struct kvm_vcpu_timer { + /* Flag for whether init is done */ + bool init_done; + /* Flag for whether timer event is configured */ + bool next_set; + /* Next timer event cycles */ + u64 next_cycles; + /* Underlying hrtimer instance */ + struct hrtimer hrt; +}; + +int kvm_riscv_vcpu_timer_next_event(struct kvm_vcpu *vcpu, u64 ncycles); +int kvm_riscv_vcpu_get_reg_timer(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg); +int kvm_riscv_vcpu_set_reg_timer(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg); +int kvm_riscv_vcpu_timer_init(struct kvm_vcpu *vcpu); +int kvm_riscv_vcpu_timer_deinit(struct kvm_vcpu *vcpu); +int kvm_riscv_vcpu_timer_reset(struct kvm_vcpu *vcpu); +void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu); +int kvm_riscv_guest_timer_init(struct kvm *kvm); + +#endif diff --git a/arch/riscv/include/uapi/asm/kvm.h b/arch/riscv/include/uapi/asm/kvm.h index f7e9dc388d54..08691dd27bcf 100644 --- a/arch/riscv/include/uapi/asm/kvm.h +++ b/arch/riscv/include/uapi/asm/kvm.h @@ -74,6 +74,18 @@ struct kvm_riscv_csr { unsigned long scounteren; }; +/* TIMER registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */ +struct kvm_riscv_timer { + __u64 frequency; + __u64 time; + __u64 compare; + __u64 state; +}; + +/* Possible states for kvm_riscv_timer */ +#define KVM_RISCV_TIMER_STATE_OFF 0 +#define KVM_RISCV_TIMER_STATE_ON 1 + #define KVM_REG_SIZE(id) \ (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT)) @@ -96,6 +108,11 @@ struct kvm_riscv_csr { #define KVM_REG_RISCV_CSR_REG(name) \ (offsetof(struct kvm_riscv_csr, name) / sizeof(unsigned long)) +/* Timer registers are mapped as type 4 */ +#define KVM_REG_RISCV_TIMER (0x04 << KVM_REG_RISCV_TYPE_SHIFT) +#define KVM_REG_RISCV_TIMER_REG(name) \ + (offsetof(struct kvm_riscv_timer, name) / sizeof(__u64)) + #endif #endif /* __LINUX_KVM_RISCV_H */ diff --git a/arch/riscv/kvm/Makefile b/arch/riscv/kvm/Makefile index a0274763e096..4beb4e277e96 100644 --- a/arch/riscv/kvm/Makefile +++ b/arch/riscv/kvm/Makefile @@ -21,3 +21,4 @@ kvm-y += mmu.o kvm-y += vcpu.o kvm-y += vcpu_exit.o kvm-y += vcpu_switch.o +kvm-y += vcpu_timer.o diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c index dfe479d9f564..840f4586796f 100644 --- a/arch/riscv/kvm/vcpu.c +++ b/arch/riscv/kvm/vcpu.c @@ -58,6 +58,8 @@ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu) memcpy(cntx, reset_cntx, sizeof(*cntx)); + kvm_riscv_vcpu_timer_reset(vcpu); + WRITE_ONCE(vcpu->arch.irqs_pending, 0); WRITE_ONCE(vcpu->arch.irqs_pending_mask, 0); } @@ -85,6 +87,9 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) cntx->hstatus |= HSTATUS_SPVP; cntx->hstatus |= HSTATUS_SPV; + /* Setup VCPU timer */ + kvm_riscv_vcpu_timer_init(vcpu); + /* Reset VCPU */ kvm_riscv_reset_vcpu(vcpu); @@ -97,6 +102,9 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) { + /* Cleanup VCPU timer */ + kvm_riscv_vcpu_timer_deinit(vcpu); + /* Flush the pages pre-allocated for Stage2 page table mappings */ kvm_riscv_stage2_flush_cache(vcpu); } @@ -332,6 +340,8 @@ static int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu, return kvm_riscv_vcpu_set_reg_core(vcpu, reg); else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CSR) return 
kvm_riscv_vcpu_set_reg_csr(vcpu, reg); + else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_TIMER) + return kvm_riscv_vcpu_set_reg_timer(vcpu, reg); return -EINVAL; } @@ -345,6 +355,8 @@ static int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu, return kvm_riscv_vcpu_get_reg_core(vcpu, reg); else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CSR) return kvm_riscv_vcpu_get_reg_csr(vcpu, reg); + else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_TIMER) + return kvm_riscv_vcpu_get_reg_timer(vcpu, reg); return -EINVAL; } @@ -579,6 +591,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) kvm_riscv_stage2_update_hgatp(vcpu); + kvm_riscv_vcpu_timer_restore(vcpu); + vcpu->cpu = cpu; } diff --git a/arch/riscv/kvm/vcpu_timer.c b/arch/riscv/kvm/vcpu_timer.c new file mode 100644 index 000000000000..ddd0ce727b83 --- /dev/null +++ b/arch/riscv/kvm/vcpu_timer.c @@ -0,0 +1,225 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 Western Digital Corporation or its affiliates. + * + * Authors: + * Atish Patra + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +static u64 kvm_riscv_current_cycles(struct kvm_guest_timer *gt) +{ + return get_cycles64() + gt->time_delta; +} + +static u64 kvm_riscv_delta_cycles2ns(u64 cycles, + struct kvm_guest_timer *gt, + struct kvm_vcpu_timer *t) +{ + unsigned long flags; + u64 cycles_now, cycles_delta, delta_ns; + + local_irq_save(flags); + cycles_now = kvm_riscv_current_cycles(gt); + if (cycles_now < cycles) + cycles_delta = cycles - cycles_now; + else + cycles_delta = 0; + delta_ns = (cycles_delta * gt->nsec_mult) >> gt->nsec_shift; + local_irq_restore(flags); + + return delta_ns; +} + +static enum hrtimer_restart kvm_riscv_vcpu_hrtimer_expired(struct hrtimer *h) +{ + u64 delta_ns; + struct kvm_vcpu_timer *t = container_of(h, struct kvm_vcpu_timer, hrt); + struct kvm_vcpu *vcpu = container_of(t, struct kvm_vcpu, arch.timer); + struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer; + + if (kvm_riscv_current_cycles(gt) < t->next_cycles) { + delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t); + hrtimer_forward_now(&t->hrt, ktime_set(0, delta_ns)); + return HRTIMER_RESTART; + } + + t->next_set = false; + kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_TIMER); + + return HRTIMER_NORESTART; +} + +static int kvm_riscv_vcpu_timer_cancel(struct kvm_vcpu_timer *t) +{ + if (!t->init_done || !t->next_set) + return -EINVAL; + + hrtimer_cancel(&t->hrt); + t->next_set = false; + + return 0; +} + +int kvm_riscv_vcpu_timer_next_event(struct kvm_vcpu *vcpu, u64 ncycles) +{ + struct kvm_vcpu_timer *t = &vcpu->arch.timer; + struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer; + u64 delta_ns; + + if (!t->init_done) + return -EINVAL; + + kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_TIMER); + + delta_ns = kvm_riscv_delta_cycles2ns(ncycles, gt, t); + t->next_cycles = ncycles; + hrtimer_start(&t->hrt, ktime_set(0, delta_ns), HRTIMER_MODE_REL); + t->next_set = true; + + return 0; +} + +int kvm_riscv_vcpu_get_reg_timer(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg) +{ + struct kvm_vcpu_timer *t = &vcpu->arch.timer; + struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer; + u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr; + unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK | + KVM_REG_SIZE_MASK | + KVM_REG_RISCV_TIMER); + u64 reg_val; + + if (KVM_REG_SIZE(reg->id) != sizeof(u64)) + return -EINVAL; + if (reg_num >= sizeof(struct kvm_riscv_timer) / sizeof(u64)) + return -EINVAL; + + switch 
(reg_num) { + case KVM_REG_RISCV_TIMER_REG(frequency): + reg_val = riscv_timebase; + break; + case KVM_REG_RISCV_TIMER_REG(time): + reg_val = kvm_riscv_current_cycles(gt); + break; + case KVM_REG_RISCV_TIMER_REG(compare): + reg_val = t->next_cycles; + break; + case KVM_REG_RISCV_TIMER_REG(state): + reg_val = (t->next_set) ? KVM_RISCV_TIMER_STATE_ON : + KVM_RISCV_TIMER_STATE_OFF; + break; + default: + return -EINVAL; + }; + + if (copy_to_user(uaddr, ®_val, KVM_REG_SIZE(reg->id))) + return -EFAULT; + + return 0; +} + +int kvm_riscv_vcpu_set_reg_timer(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg) +{ + struct kvm_vcpu_timer *t = &vcpu->arch.timer; + struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer; + u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr; + unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK | + KVM_REG_SIZE_MASK | + KVM_REG_RISCV_TIMER); + u64 reg_val; + int ret = 0; + + if (KVM_REG_SIZE(reg->id) != sizeof(u64)) + return -EINVAL; + if (reg_num >= sizeof(struct kvm_riscv_timer) / sizeof(u64)) + return -EINVAL; + + if (copy_from_user(®_val, uaddr, KVM_REG_SIZE(reg->id))) + return -EFAULT; + + switch (reg_num) { + case KVM_REG_RISCV_TIMER_REG(frequency): + ret = -EOPNOTSUPP; + break; + case KVM_REG_RISCV_TIMER_REG(time): + gt->time_delta = reg_val - get_cycles64(); + break; + case KVM_REG_RISCV_TIMER_REG(compare): + t->next_cycles = reg_val; + break; + case KVM_REG_RISCV_TIMER_REG(state): + if (reg_val == KVM_RISCV_TIMER_STATE_ON) + ret = kvm_riscv_vcpu_timer_next_event(vcpu, reg_val); + else + ret = kvm_riscv_vcpu_timer_cancel(t); + break; + default: + ret = -EINVAL; + break; + }; + + return ret; +} + +int kvm_riscv_vcpu_timer_init(struct kvm_vcpu *vcpu) +{ + struct kvm_vcpu_timer *t = &vcpu->arch.timer; + + if (t->init_done) + return -EINVAL; + + hrtimer_init(&t->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + t->hrt.function = kvm_riscv_vcpu_hrtimer_expired; + t->init_done = true; + t->next_set = false; + + return 0; +} + +int kvm_riscv_vcpu_timer_deinit(struct kvm_vcpu *vcpu) +{ + int ret; + + ret = kvm_riscv_vcpu_timer_cancel(&vcpu->arch.timer); + vcpu->arch.timer.init_done = false; + + return ret; +} + +int kvm_riscv_vcpu_timer_reset(struct kvm_vcpu *vcpu) +{ + return kvm_riscv_vcpu_timer_cancel(&vcpu->arch.timer); +} + +void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu) +{ + struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer; + +#ifdef CONFIG_64BIT + csr_write(CSR_HTIMEDELTA, gt->time_delta); +#else + csr_write(CSR_HTIMEDELTA, (u32)(gt->time_delta)); + csr_write(CSR_HTIMEDELTAH, (u32)(gt->time_delta >> 32)); +#endif +} + +int kvm_riscv_guest_timer_init(struct kvm *kvm) +{ + struct kvm_guest_timer *gt = &kvm->arch.timer; + + riscv_cs_get_mult_shift(>->nsec_mult, >->nsec_shift); + gt->time_delta = -get_cycles64(); + + return 0; +} diff --git a/arch/riscv/kvm/vm.c b/arch/riscv/kvm/vm.c index 892d020674c0..26399df15b63 100644 --- a/arch/riscv/kvm/vm.c +++ b/arch/riscv/kvm/vm.c @@ -41,7 +41,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) return r; } - return 0; + return kvm_riscv_guest_timer_init(kvm); } void kvm_arch_destroy_vm(struct kvm *kvm) diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c index c51c5ed15aa7..1767f8bf2013 100644 --- a/drivers/clocksource/timer-riscv.c +++ b/drivers/clocksource/timer-riscv.c @@ -13,10 +13,12 @@ #include #include #include +#include #include #include #include #include +#include #include #include #include @@ -79,6 +81,13 @@ static int riscv_timer_dying_cpu(unsigned int cpu) 
return 0; } +void riscv_cs_get_mult_shift(u32 *mult, u32 *shift) +{ + *mult = riscv_clocksource.mult; + *shift = riscv_clocksource.shift; +} +EXPORT_SYMBOL_GPL(riscv_cs_get_mult_shift); + /* called directly from the low-level interrupt handler */ static irqreturn_t riscv_timer_interrupt(int irq, void *dev_id) { diff --git a/include/clocksource/timer-riscv.h b/include/clocksource/timer-riscv.h new file mode 100644 index 000000000000..d7f455754e60 --- /dev/null +++ b/include/clocksource/timer-riscv.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2019 Western Digital Corporation or its affiliates. + * + * Authors: + * Atish Patra + */ + +#ifndef __TIMER_RISCV_H +#define __TIMER_RISCV_H + +#include + +extern void riscv_cs_get_mult_shift(u32 *mult, u32 *shift); + +#endif -- cgit v1.2.3 From 5de52d4a23ad3346c3cb24aae728ef6642d1a92e Mon Sep 17 00:00:00 2001 From: Atish Patra Date: Mon, 27 Sep 2021 17:10:12 +0530 Subject: RISC-V: KVM: FP lazy save/restore This patch adds floating point (F and D extension) context save/restore for guest VCPUs. The FP context is saved and restored lazily only when kernel enter/exits the in-kernel run loop and not during the KVM world switch. This way FP save/restore has minimal impact on KVM performance. Signed-off-by: Atish Patra Signed-off-by: Anup Patel Acked-by: Paolo Bonzini Reviewed-by: Paolo Bonzini Reviewed-by: Alexander Graf Acked-by: Palmer Dabbelt --- arch/riscv/include/asm/kvm_host.h | 5 ++ arch/riscv/kernel/asm-offsets.c | 72 ++++++++++++++++ arch/riscv/kvm/vcpu.c | 91 ++++++++++++++++++++ arch/riscv/kvm/vcpu_switch.S | 174 ++++++++++++++++++++++++++++++++++++++ 4 files changed, 342 insertions(+) diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h index c22d3f6519fa..be159686da46 100644 --- a/arch/riscv/include/asm/kvm_host.h +++ b/arch/riscv/include/asm/kvm_host.h @@ -125,6 +125,7 @@ struct kvm_cpu_context { unsigned long sepc; unsigned long sstatus; unsigned long hstatus; + union __riscv_fp_state fp; }; struct kvm_vcpu_csr { @@ -239,6 +240,10 @@ int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, struct kvm_cpu_trap *trap); void __kvm_riscv_switch_to(struct kvm_vcpu_arch *vcpu_arch); +void __kvm_riscv_fp_f_save(struct kvm_cpu_context *context); +void __kvm_riscv_fp_f_restore(struct kvm_cpu_context *context); +void __kvm_riscv_fp_d_save(struct kvm_cpu_context *context); +void __kvm_riscv_fp_d_restore(struct kvm_cpu_context *context); int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq); int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq); diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c index 91c77555d914..24d3827e4837 100644 --- a/arch/riscv/kernel/asm-offsets.c +++ b/arch/riscv/kernel/asm-offsets.c @@ -195,6 +195,78 @@ void asm_offsets(void) OFFSET(KVM_ARCH_TRAP_HTVAL, kvm_cpu_trap, htval); OFFSET(KVM_ARCH_TRAP_HTINST, kvm_cpu_trap, htinst); + /* F extension */ + + OFFSET(KVM_ARCH_FP_F_F0, kvm_cpu_context, fp.f.f[0]); + OFFSET(KVM_ARCH_FP_F_F1, kvm_cpu_context, fp.f.f[1]); + OFFSET(KVM_ARCH_FP_F_F2, kvm_cpu_context, fp.f.f[2]); + OFFSET(KVM_ARCH_FP_F_F3, kvm_cpu_context, fp.f.f[3]); + OFFSET(KVM_ARCH_FP_F_F4, kvm_cpu_context, fp.f.f[4]); + OFFSET(KVM_ARCH_FP_F_F5, kvm_cpu_context, fp.f.f[5]); + OFFSET(KVM_ARCH_FP_F_F6, kvm_cpu_context, fp.f.f[6]); + OFFSET(KVM_ARCH_FP_F_F7, kvm_cpu_context, fp.f.f[7]); + OFFSET(KVM_ARCH_FP_F_F8, kvm_cpu_context, fp.f.f[8]); + OFFSET(KVM_ARCH_FP_F_F9, 
kvm_cpu_context, fp.f.f[9]); + OFFSET(KVM_ARCH_FP_F_F10, kvm_cpu_context, fp.f.f[10]); + OFFSET(KVM_ARCH_FP_F_F11, kvm_cpu_context, fp.f.f[11]); + OFFSET(KVM_ARCH_FP_F_F12, kvm_cpu_context, fp.f.f[12]); + OFFSET(KVM_ARCH_FP_F_F13, kvm_cpu_context, fp.f.f[13]); + OFFSET(KVM_ARCH_FP_F_F14, kvm_cpu_context, fp.f.f[14]); + OFFSET(KVM_ARCH_FP_F_F15, kvm_cpu_context, fp.f.f[15]); + OFFSET(KVM_ARCH_FP_F_F16, kvm_cpu_context, fp.f.f[16]); + OFFSET(KVM_ARCH_FP_F_F17, kvm_cpu_context, fp.f.f[17]); + OFFSET(KVM_ARCH_FP_F_F18, kvm_cpu_context, fp.f.f[18]); + OFFSET(KVM_ARCH_FP_F_F19, kvm_cpu_context, fp.f.f[19]); + OFFSET(KVM_ARCH_FP_F_F20, kvm_cpu_context, fp.f.f[20]); + OFFSET(KVM_ARCH_FP_F_F21, kvm_cpu_context, fp.f.f[21]); + OFFSET(KVM_ARCH_FP_F_F22, kvm_cpu_context, fp.f.f[22]); + OFFSET(KVM_ARCH_FP_F_F23, kvm_cpu_context, fp.f.f[23]); + OFFSET(KVM_ARCH_FP_F_F24, kvm_cpu_context, fp.f.f[24]); + OFFSET(KVM_ARCH_FP_F_F25, kvm_cpu_context, fp.f.f[25]); + OFFSET(KVM_ARCH_FP_F_F26, kvm_cpu_context, fp.f.f[26]); + OFFSET(KVM_ARCH_FP_F_F27, kvm_cpu_context, fp.f.f[27]); + OFFSET(KVM_ARCH_FP_F_F28, kvm_cpu_context, fp.f.f[28]); + OFFSET(KVM_ARCH_FP_F_F29, kvm_cpu_context, fp.f.f[29]); + OFFSET(KVM_ARCH_FP_F_F30, kvm_cpu_context, fp.f.f[30]); + OFFSET(KVM_ARCH_FP_F_F31, kvm_cpu_context, fp.f.f[31]); + OFFSET(KVM_ARCH_FP_F_FCSR, kvm_cpu_context, fp.f.fcsr); + + /* D extension */ + + OFFSET(KVM_ARCH_FP_D_F0, kvm_cpu_context, fp.d.f[0]); + OFFSET(KVM_ARCH_FP_D_F1, kvm_cpu_context, fp.d.f[1]); + OFFSET(KVM_ARCH_FP_D_F2, kvm_cpu_context, fp.d.f[2]); + OFFSET(KVM_ARCH_FP_D_F3, kvm_cpu_context, fp.d.f[3]); + OFFSET(KVM_ARCH_FP_D_F4, kvm_cpu_context, fp.d.f[4]); + OFFSET(KVM_ARCH_FP_D_F5, kvm_cpu_context, fp.d.f[5]); + OFFSET(KVM_ARCH_FP_D_F6, kvm_cpu_context, fp.d.f[6]); + OFFSET(KVM_ARCH_FP_D_F7, kvm_cpu_context, fp.d.f[7]); + OFFSET(KVM_ARCH_FP_D_F8, kvm_cpu_context, fp.d.f[8]); + OFFSET(KVM_ARCH_FP_D_F9, kvm_cpu_context, fp.d.f[9]); + OFFSET(KVM_ARCH_FP_D_F10, kvm_cpu_context, fp.d.f[10]); + OFFSET(KVM_ARCH_FP_D_F11, kvm_cpu_context, fp.d.f[11]); + OFFSET(KVM_ARCH_FP_D_F12, kvm_cpu_context, fp.d.f[12]); + OFFSET(KVM_ARCH_FP_D_F13, kvm_cpu_context, fp.d.f[13]); + OFFSET(KVM_ARCH_FP_D_F14, kvm_cpu_context, fp.d.f[14]); + OFFSET(KVM_ARCH_FP_D_F15, kvm_cpu_context, fp.d.f[15]); + OFFSET(KVM_ARCH_FP_D_F16, kvm_cpu_context, fp.d.f[16]); + OFFSET(KVM_ARCH_FP_D_F17, kvm_cpu_context, fp.d.f[17]); + OFFSET(KVM_ARCH_FP_D_F18, kvm_cpu_context, fp.d.f[18]); + OFFSET(KVM_ARCH_FP_D_F19, kvm_cpu_context, fp.d.f[19]); + OFFSET(KVM_ARCH_FP_D_F20, kvm_cpu_context, fp.d.f[20]); + OFFSET(KVM_ARCH_FP_D_F21, kvm_cpu_context, fp.d.f[21]); + OFFSET(KVM_ARCH_FP_D_F22, kvm_cpu_context, fp.d.f[22]); + OFFSET(KVM_ARCH_FP_D_F23, kvm_cpu_context, fp.d.f[23]); + OFFSET(KVM_ARCH_FP_D_F24, kvm_cpu_context, fp.d.f[24]); + OFFSET(KVM_ARCH_FP_D_F25, kvm_cpu_context, fp.d.f[25]); + OFFSET(KVM_ARCH_FP_D_F26, kvm_cpu_context, fp.d.f[26]); + OFFSET(KVM_ARCH_FP_D_F27, kvm_cpu_context, fp.d.f[27]); + OFFSET(KVM_ARCH_FP_D_F28, kvm_cpu_context, fp.d.f[28]); + OFFSET(KVM_ARCH_FP_D_F29, kvm_cpu_context, fp.d.f[29]); + OFFSET(KVM_ARCH_FP_D_F30, kvm_cpu_context, fp.d.f[30]); + OFFSET(KVM_ARCH_FP_D_F31, kvm_cpu_context, fp.d.f[31]); + OFFSET(KVM_ARCH_FP_D_FCSR, kvm_cpu_context, fp.d.fcsr); + /* * THREAD_{F,X}* might be larger than a S-type offset can handle, but * these are used in performance-sensitive assembly so we can't resort diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c index 840f4586796f..cf6832365345 100644 --- a/arch/riscv/kvm/vcpu.c 
+++ b/arch/riscv/kvm/vcpu.c @@ -38,6 +38,86 @@ const struct kvm_stats_header kvm_vcpu_stats_header = { sizeof(kvm_vcpu_stats_desc), }; +#ifdef CONFIG_FPU +static void kvm_riscv_vcpu_fp_reset(struct kvm_vcpu *vcpu) +{ + unsigned long isa = vcpu->arch.isa; + struct kvm_cpu_context *cntx = &vcpu->arch.guest_context; + + cntx->sstatus &= ~SR_FS; + if (riscv_isa_extension_available(&isa, f) || + riscv_isa_extension_available(&isa, d)) + cntx->sstatus |= SR_FS_INITIAL; + else + cntx->sstatus |= SR_FS_OFF; +} + +static void kvm_riscv_vcpu_fp_clean(struct kvm_cpu_context *cntx) +{ + cntx->sstatus &= ~SR_FS; + cntx->sstatus |= SR_FS_CLEAN; +} + +static void kvm_riscv_vcpu_guest_fp_save(struct kvm_cpu_context *cntx, + unsigned long isa) +{ + if ((cntx->sstatus & SR_FS) == SR_FS_DIRTY) { + if (riscv_isa_extension_available(&isa, d)) + __kvm_riscv_fp_d_save(cntx); + else if (riscv_isa_extension_available(&isa, f)) + __kvm_riscv_fp_f_save(cntx); + kvm_riscv_vcpu_fp_clean(cntx); + } +} + +static void kvm_riscv_vcpu_guest_fp_restore(struct kvm_cpu_context *cntx, + unsigned long isa) +{ + if ((cntx->sstatus & SR_FS) != SR_FS_OFF) { + if (riscv_isa_extension_available(&isa, d)) + __kvm_riscv_fp_d_restore(cntx); + else if (riscv_isa_extension_available(&isa, f)) + __kvm_riscv_fp_f_restore(cntx); + kvm_riscv_vcpu_fp_clean(cntx); + } +} + +static void kvm_riscv_vcpu_host_fp_save(struct kvm_cpu_context *cntx) +{ + /* No need to check host sstatus as it can be modified outside */ + if (riscv_isa_extension_available(NULL, d)) + __kvm_riscv_fp_d_save(cntx); + else if (riscv_isa_extension_available(NULL, f)) + __kvm_riscv_fp_f_save(cntx); +} + +static void kvm_riscv_vcpu_host_fp_restore(struct kvm_cpu_context *cntx) +{ + if (riscv_isa_extension_available(NULL, d)) + __kvm_riscv_fp_d_restore(cntx); + else if (riscv_isa_extension_available(NULL, f)) + __kvm_riscv_fp_f_restore(cntx); +} +#else +static void kvm_riscv_vcpu_fp_reset(struct kvm_vcpu *vcpu) +{ +} +static void kvm_riscv_vcpu_guest_fp_save(struct kvm_cpu_context *cntx, + unsigned long isa) +{ +} +static void kvm_riscv_vcpu_guest_fp_restore(struct kvm_cpu_context *cntx, + unsigned long isa) +{ +} +static void kvm_riscv_vcpu_host_fp_save(struct kvm_cpu_context *cntx) +{ +} +static void kvm_riscv_vcpu_host_fp_restore(struct kvm_cpu_context *cntx) +{ +} +#endif + #define KVM_RISCV_ISA_ALLOWED (riscv_isa_extension_mask(a) | \ riscv_isa_extension_mask(c) | \ riscv_isa_extension_mask(d) | \ @@ -58,6 +138,8 @@ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu) memcpy(cntx, reset_cntx, sizeof(*cntx)); + kvm_riscv_vcpu_fp_reset(vcpu); + kvm_riscv_vcpu_timer_reset(vcpu); WRITE_ONCE(vcpu->arch.irqs_pending, 0); @@ -192,6 +274,7 @@ static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu, vcpu->arch.isa = reg_val; vcpu->arch.isa &= riscv_isa_extension_base(NULL); vcpu->arch.isa &= KVM_RISCV_ISA_ALLOWED; + kvm_riscv_vcpu_fp_reset(vcpu); } else { return -EOPNOTSUPP; } @@ -593,6 +676,10 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) kvm_riscv_vcpu_timer_restore(vcpu); + kvm_riscv_vcpu_host_fp_save(&vcpu->arch.host_context); + kvm_riscv_vcpu_guest_fp_restore(&vcpu->arch.guest_context, + vcpu->arch.isa); + vcpu->cpu = cpu; } @@ -602,6 +689,10 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) vcpu->cpu = -1; + kvm_riscv_vcpu_guest_fp_save(&vcpu->arch.guest_context, + vcpu->arch.isa); + kvm_riscv_vcpu_host_fp_restore(&vcpu->arch.host_context); + csr_write(CSR_HGATP, 0); csr->vsstatus = csr_read(CSR_VSSTATUS); diff --git a/arch/riscv/kvm/vcpu_switch.S 
b/arch/riscv/kvm/vcpu_switch.S index e22721e1b892..029a28a195c6 100644 --- a/arch/riscv/kvm/vcpu_switch.S +++ b/arch/riscv/kvm/vcpu_switch.S @@ -224,3 +224,177 @@ ENTRY(__kvm_riscv_unpriv_trap) REG_S a1, (KVM_ARCH_TRAP_HTINST)(a0) sret ENDPROC(__kvm_riscv_unpriv_trap) + +#ifdef CONFIG_FPU + .align 3 + .global __kvm_riscv_fp_f_save +__kvm_riscv_fp_f_save: + csrr t2, CSR_SSTATUS + li t1, SR_FS + csrs CSR_SSTATUS, t1 + frcsr t0 + fsw f0, KVM_ARCH_FP_F_F0(a0) + fsw f1, KVM_ARCH_FP_F_F1(a0) + fsw f2, KVM_ARCH_FP_F_F2(a0) + fsw f3, KVM_ARCH_FP_F_F3(a0) + fsw f4, KVM_ARCH_FP_F_F4(a0) + fsw f5, KVM_ARCH_FP_F_F5(a0) + fsw f6, KVM_ARCH_FP_F_F6(a0) + fsw f7, KVM_ARCH_FP_F_F7(a0) + fsw f8, KVM_ARCH_FP_F_F8(a0) + fsw f9, KVM_ARCH_FP_F_F9(a0) + fsw f10, KVM_ARCH_FP_F_F10(a0) + fsw f11, KVM_ARCH_FP_F_F11(a0) + fsw f12, KVM_ARCH_FP_F_F12(a0) + fsw f13, KVM_ARCH_FP_F_F13(a0) + fsw f14, KVM_ARCH_FP_F_F14(a0) + fsw f15, KVM_ARCH_FP_F_F15(a0) + fsw f16, KVM_ARCH_FP_F_F16(a0) + fsw f17, KVM_ARCH_FP_F_F17(a0) + fsw f18, KVM_ARCH_FP_F_F18(a0) + fsw f19, KVM_ARCH_FP_F_F19(a0) + fsw f20, KVM_ARCH_FP_F_F20(a0) + fsw f21, KVM_ARCH_FP_F_F21(a0) + fsw f22, KVM_ARCH_FP_F_F22(a0) + fsw f23, KVM_ARCH_FP_F_F23(a0) + fsw f24, KVM_ARCH_FP_F_F24(a0) + fsw f25, KVM_ARCH_FP_F_F25(a0) + fsw f26, KVM_ARCH_FP_F_F26(a0) + fsw f27, KVM_ARCH_FP_F_F27(a0) + fsw f28, KVM_ARCH_FP_F_F28(a0) + fsw f29, KVM_ARCH_FP_F_F29(a0) + fsw f30, KVM_ARCH_FP_F_F30(a0) + fsw f31, KVM_ARCH_FP_F_F31(a0) + sw t0, KVM_ARCH_FP_F_FCSR(a0) + csrw CSR_SSTATUS, t2 + ret + + .align 3 + .global __kvm_riscv_fp_d_save +__kvm_riscv_fp_d_save: + csrr t2, CSR_SSTATUS + li t1, SR_FS + csrs CSR_SSTATUS, t1 + frcsr t0 + fsd f0, KVM_ARCH_FP_D_F0(a0) + fsd f1, KVM_ARCH_FP_D_F1(a0) + fsd f2, KVM_ARCH_FP_D_F2(a0) + fsd f3, KVM_ARCH_FP_D_F3(a0) + fsd f4, KVM_ARCH_FP_D_F4(a0) + fsd f5, KVM_ARCH_FP_D_F5(a0) + fsd f6, KVM_ARCH_FP_D_F6(a0) + fsd f7, KVM_ARCH_FP_D_F7(a0) + fsd f8, KVM_ARCH_FP_D_F8(a0) + fsd f9, KVM_ARCH_FP_D_F9(a0) + fsd f10, KVM_ARCH_FP_D_F10(a0) + fsd f11, KVM_ARCH_FP_D_F11(a0) + fsd f12, KVM_ARCH_FP_D_F12(a0) + fsd f13, KVM_ARCH_FP_D_F13(a0) + fsd f14, KVM_ARCH_FP_D_F14(a0) + fsd f15, KVM_ARCH_FP_D_F15(a0) + fsd f16, KVM_ARCH_FP_D_F16(a0) + fsd f17, KVM_ARCH_FP_D_F17(a0) + fsd f18, KVM_ARCH_FP_D_F18(a0) + fsd f19, KVM_ARCH_FP_D_F19(a0) + fsd f20, KVM_ARCH_FP_D_F20(a0) + fsd f21, KVM_ARCH_FP_D_F21(a0) + fsd f22, KVM_ARCH_FP_D_F22(a0) + fsd f23, KVM_ARCH_FP_D_F23(a0) + fsd f24, KVM_ARCH_FP_D_F24(a0) + fsd f25, KVM_ARCH_FP_D_F25(a0) + fsd f26, KVM_ARCH_FP_D_F26(a0) + fsd f27, KVM_ARCH_FP_D_F27(a0) + fsd f28, KVM_ARCH_FP_D_F28(a0) + fsd f29, KVM_ARCH_FP_D_F29(a0) + fsd f30, KVM_ARCH_FP_D_F30(a0) + fsd f31, KVM_ARCH_FP_D_F31(a0) + sw t0, KVM_ARCH_FP_D_FCSR(a0) + csrw CSR_SSTATUS, t2 + ret + + .align 3 + .global __kvm_riscv_fp_f_restore +__kvm_riscv_fp_f_restore: + csrr t2, CSR_SSTATUS + li t1, SR_FS + lw t0, KVM_ARCH_FP_F_FCSR(a0) + csrs CSR_SSTATUS, t1 + flw f0, KVM_ARCH_FP_F_F0(a0) + flw f1, KVM_ARCH_FP_F_F1(a0) + flw f2, KVM_ARCH_FP_F_F2(a0) + flw f3, KVM_ARCH_FP_F_F3(a0) + flw f4, KVM_ARCH_FP_F_F4(a0) + flw f5, KVM_ARCH_FP_F_F5(a0) + flw f6, KVM_ARCH_FP_F_F6(a0) + flw f7, KVM_ARCH_FP_F_F7(a0) + flw f8, KVM_ARCH_FP_F_F8(a0) + flw f9, KVM_ARCH_FP_F_F9(a0) + flw f10, KVM_ARCH_FP_F_F10(a0) + flw f11, KVM_ARCH_FP_F_F11(a0) + flw f12, KVM_ARCH_FP_F_F12(a0) + flw f13, KVM_ARCH_FP_F_F13(a0) + flw f14, KVM_ARCH_FP_F_F14(a0) + flw f15, KVM_ARCH_FP_F_F15(a0) + flw f16, KVM_ARCH_FP_F_F16(a0) + flw f17, KVM_ARCH_FP_F_F17(a0) + flw f18, KVM_ARCH_FP_F_F18(a0) + flw f19, 
KVM_ARCH_FP_F_F19(a0) + flw f20, KVM_ARCH_FP_F_F20(a0) + flw f21, KVM_ARCH_FP_F_F21(a0) + flw f22, KVM_ARCH_FP_F_F22(a0) + flw f23, KVM_ARCH_FP_F_F23(a0) + flw f24, KVM_ARCH_FP_F_F24(a0) + flw f25, KVM_ARCH_FP_F_F25(a0) + flw f26, KVM_ARCH_FP_F_F26(a0) + flw f27, KVM_ARCH_FP_F_F27(a0) + flw f28, KVM_ARCH_FP_F_F28(a0) + flw f29, KVM_ARCH_FP_F_F29(a0) + flw f30, KVM_ARCH_FP_F_F30(a0) + flw f31, KVM_ARCH_FP_F_F31(a0) + fscsr t0 + csrw CSR_SSTATUS, t2 + ret + + .align 3 + .global __kvm_riscv_fp_d_restore +__kvm_riscv_fp_d_restore: + csrr t2, CSR_SSTATUS + li t1, SR_FS + lw t0, KVM_ARCH_FP_D_FCSR(a0) + csrs CSR_SSTATUS, t1 + fld f0, KVM_ARCH_FP_D_F0(a0) + fld f1, KVM_ARCH_FP_D_F1(a0) + fld f2, KVM_ARCH_FP_D_F2(a0) + fld f3, KVM_ARCH_FP_D_F3(a0) + fld f4, KVM_ARCH_FP_D_F4(a0) + fld f5, KVM_ARCH_FP_D_F5(a0) + fld f6, KVM_ARCH_FP_D_F6(a0) + fld f7, KVM_ARCH_FP_D_F7(a0) + fld f8, KVM_ARCH_FP_D_F8(a0) + fld f9, KVM_ARCH_FP_D_F9(a0) + fld f10, KVM_ARCH_FP_D_F10(a0) + fld f11, KVM_ARCH_FP_D_F11(a0) + fld f12, KVM_ARCH_FP_D_F12(a0) + fld f13, KVM_ARCH_FP_D_F13(a0) + fld f14, KVM_ARCH_FP_D_F14(a0) + fld f15, KVM_ARCH_FP_D_F15(a0) + fld f16, KVM_ARCH_FP_D_F16(a0) + fld f17, KVM_ARCH_FP_D_F17(a0) + fld f18, KVM_ARCH_FP_D_F18(a0) + fld f19, KVM_ARCH_FP_D_F19(a0) + fld f20, KVM_ARCH_FP_D_F20(a0) + fld f21, KVM_ARCH_FP_D_F21(a0) + fld f22, KVM_ARCH_FP_D_F22(a0) + fld f23, KVM_ARCH_FP_D_F23(a0) + fld f24, KVM_ARCH_FP_D_F24(a0) + fld f25, KVM_ARCH_FP_D_F25(a0) + fld f26, KVM_ARCH_FP_D_F26(a0) + fld f27, KVM_ARCH_FP_D_F27(a0) + fld f28, KVM_ARCH_FP_D_F28(a0) + fld f29, KVM_ARCH_FP_D_F29(a0) + fld f30, KVM_ARCH_FP_D_F30(a0) + fld f31, KVM_ARCH_FP_D_F31(a0) + fscsr t0 + csrw CSR_SSTATUS, t2 + ret +#endif -- cgit v1.2.3 From 4d9c5c072f03770516cb343bb3ddd42b57cd21b8 Mon Sep 17 00:00:00 2001 From: Atish Patra Date: Mon, 27 Sep 2021 17:10:13 +0530 Subject: RISC-V: KVM: Implement ONE REG interface for FP registers Add a KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctl interface for floating point registers such as F0-F31 and FCSR. This support is added for both 'F' and 'D' extensions. 
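As an illustration of the encoding (not part of this patch), user space could read the D-extension state of a VCPU along these lines; "vcpu_fd" and the uapi headers are assumed and error handling is simplified. Note that the f[0]-f[31] registers of the D extension are accessed as 64-bit values while fcsr stays a 32-bit register, so the KVM_REG_SIZE bits differ:

    #include <stdint.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>
    #include <asm/kvm.h>

    /* Sketch: dump f0 and fcsr of a guest that uses the D extension. */
    static void dump_fp_d(int vcpu_fd)
    {
            uint64_t f0;
            uint32_t fcsr;
            struct kvm_one_reg r_f0 = {
                    .id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
                          KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[0]),
                    .addr = (unsigned long)&f0,
            };
            struct kvm_one_reg r_fcsr = {
                    .id = KVM_REG_RISCV | KVM_REG_SIZE_U32 |
                          KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(fcsr),
                    .addr = (unsigned long)&fcsr,
            };

            if (!ioctl(vcpu_fd, KVM_GET_ONE_REG, &r_f0) &&
                !ioctl(vcpu_fd, KVM_GET_ONE_REG, &r_fcsr))
                    printf("f0=%llx fcsr=%x\n", (unsigned long long)f0, fcsr);
    }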
Signed-off-by: Atish Patra Signed-off-by: Anup Patel Acked-by: Paolo Bonzini Reviewed-by: Paolo Bonzini Reviewed-by: Alexander Graf Acked-by: Palmer Dabbelt --- arch/riscv/include/uapi/asm/kvm.h | 10 ++++ arch/riscv/kvm/vcpu.c | 104 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 114 insertions(+) diff --git a/arch/riscv/include/uapi/asm/kvm.h b/arch/riscv/include/uapi/asm/kvm.h index 08691dd27bcf..f808ad1ce500 100644 --- a/arch/riscv/include/uapi/asm/kvm.h +++ b/arch/riscv/include/uapi/asm/kvm.h @@ -113,6 +113,16 @@ struct kvm_riscv_timer { #define KVM_REG_RISCV_TIMER_REG(name) \ (offsetof(struct kvm_riscv_timer, name) / sizeof(__u64)) +/* F extension registers are mapped as type 5 */ +#define KVM_REG_RISCV_FP_F (0x05 << KVM_REG_RISCV_TYPE_SHIFT) +#define KVM_REG_RISCV_FP_F_REG(name) \ + (offsetof(struct __riscv_f_ext_state, name) / sizeof(__u32)) + +/* D extension registers are mapped as type 6 */ +#define KVM_REG_RISCV_FP_D (0x06 << KVM_REG_RISCV_TYPE_SHIFT) +#define KVM_REG_RISCV_FP_D_REG(name) \ + (offsetof(struct __riscv_d_ext_state, name) / sizeof(__u64)) + #endif #endif /* __LINUX_KVM_RISCV_H */ diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c index cf6832365345..5acec47236c9 100644 --- a/arch/riscv/kvm/vcpu.c +++ b/arch/riscv/kvm/vcpu.c @@ -414,6 +414,98 @@ static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu, return 0; } +static int kvm_riscv_vcpu_get_reg_fp(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg, + unsigned long rtype) +{ + struct kvm_cpu_context *cntx = &vcpu->arch.guest_context; + unsigned long isa = vcpu->arch.isa; + unsigned long __user *uaddr = + (unsigned long __user *)(unsigned long)reg->addr; + unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK | + KVM_REG_SIZE_MASK | + rtype); + void *reg_val; + + if ((rtype == KVM_REG_RISCV_FP_F) && + riscv_isa_extension_available(&isa, f)) { + if (KVM_REG_SIZE(reg->id) != sizeof(u32)) + return -EINVAL; + if (reg_num == KVM_REG_RISCV_FP_F_REG(fcsr)) + reg_val = &cntx->fp.f.fcsr; + else if ((KVM_REG_RISCV_FP_F_REG(f[0]) <= reg_num) && + reg_num <= KVM_REG_RISCV_FP_F_REG(f[31])) + reg_val = &cntx->fp.f.f[reg_num]; + else + return -EINVAL; + } else if ((rtype == KVM_REG_RISCV_FP_D) && + riscv_isa_extension_available(&isa, d)) { + if (reg_num == KVM_REG_RISCV_FP_D_REG(fcsr)) { + if (KVM_REG_SIZE(reg->id) != sizeof(u32)) + return -EINVAL; + reg_val = &cntx->fp.d.fcsr; + } else if ((KVM_REG_RISCV_FP_D_REG(f[0]) <= reg_num) && + reg_num <= KVM_REG_RISCV_FP_D_REG(f[31])) { + if (KVM_REG_SIZE(reg->id) != sizeof(u64)) + return -EINVAL; + reg_val = &cntx->fp.d.f[reg_num]; + } else + return -EINVAL; + } else + return -EINVAL; + + if (copy_to_user(uaddr, reg_val, KVM_REG_SIZE(reg->id))) + return -EFAULT; + + return 0; +} + +static int kvm_riscv_vcpu_set_reg_fp(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg, + unsigned long rtype) +{ + struct kvm_cpu_context *cntx = &vcpu->arch.guest_context; + unsigned long isa = vcpu->arch.isa; + unsigned long __user *uaddr = + (unsigned long __user *)(unsigned long)reg->addr; + unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK | + KVM_REG_SIZE_MASK | + rtype); + void *reg_val; + + if ((rtype == KVM_REG_RISCV_FP_F) && + riscv_isa_extension_available(&isa, f)) { + if (KVM_REG_SIZE(reg->id) != sizeof(u32)) + return -EINVAL; + if (reg_num == KVM_REG_RISCV_FP_F_REG(fcsr)) + reg_val = &cntx->fp.f.fcsr; + else if ((KVM_REG_RISCV_FP_F_REG(f[0]) <= reg_num) && + reg_num <= KVM_REG_RISCV_FP_F_REG(f[31])) + reg_val = &cntx->fp.f.f[reg_num]; + else + return -EINVAL; + } 
else if ((rtype == KVM_REG_RISCV_FP_D) && + riscv_isa_extension_available(&isa, d)) { + if (reg_num == KVM_REG_RISCV_FP_D_REG(fcsr)) { + if (KVM_REG_SIZE(reg->id) != sizeof(u32)) + return -EINVAL; + reg_val = &cntx->fp.d.fcsr; + } else if ((KVM_REG_RISCV_FP_D_REG(f[0]) <= reg_num) && + reg_num <= KVM_REG_RISCV_FP_D_REG(f[31])) { + if (KVM_REG_SIZE(reg->id) != sizeof(u64)) + return -EINVAL; + reg_val = &cntx->fp.d.f[reg_num]; + } else + return -EINVAL; + } else + return -EINVAL; + + if (copy_from_user(reg_val, uaddr, KVM_REG_SIZE(reg->id))) + return -EFAULT; + + return 0; +} + static int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) { @@ -425,6 +517,12 @@ static int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu, return kvm_riscv_vcpu_set_reg_csr(vcpu, reg); else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_TIMER) return kvm_riscv_vcpu_set_reg_timer(vcpu, reg); + else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_F) + return kvm_riscv_vcpu_set_reg_fp(vcpu, reg, + KVM_REG_RISCV_FP_F); + else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_D) + return kvm_riscv_vcpu_set_reg_fp(vcpu, reg, + KVM_REG_RISCV_FP_D); return -EINVAL; } @@ -440,6 +538,12 @@ static int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu, return kvm_riscv_vcpu_get_reg_csr(vcpu, reg); else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_TIMER) return kvm_riscv_vcpu_get_reg_timer(vcpu, reg); + else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_F) + return kvm_riscv_vcpu_get_reg_fp(vcpu, reg, + KVM_REG_RISCV_FP_F); + else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_D) + return kvm_riscv_vcpu_get_reg_fp(vcpu, reg, + KVM_REG_RISCV_FP_D); return -EINVAL; } -- cgit v1.2.3 From dea8ee31a039277576c215fffa13957970246366 Mon Sep 17 00:00:00 2001 From: Atish Patra Date: Mon, 27 Sep 2021 17:10:14 +0530 Subject: RISC-V: KVM: Add SBI v0.1 support The KVM host kernel runs in HS-mode, so it needs to handle the SBI calls coming from the guest kernel running in VS-mode. This patch adds SBI v0.1 support in KVM RISC-V. Almost all SBI v0.1 calls are implemented in the KVM kernel module except the GETCHAR and PUTCHAR calls, which are forwarded to user space because these calls cannot be implemented in kernel space. In the future, when we implement SBI v0.2 for the Guest, we will forward SBI v0.2 experimental and vendor extension calls to user space.
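For illustration only (not from this patch), a VMM's KVM_RUN loop could service the forwarded console calls with something like the hypothetical helper below; per the SBI v0.1 (legacy) spec, CONSOLE_PUTCHAR is extension 0x01 and CONSOLE_GETCHAR is 0x02, and the guest sees whatever is written to ret[0] once the VCPU is resumed:

    #include <stdio.h>
    #include <linux/kvm.h>

    /* Hypothetical helper, called when run->exit_reason == KVM_EXIT_RISCV_SBI. */
    static void handle_riscv_sbi_exit(struct kvm_run *run)
    {
            switch (run->riscv_sbi.extension_id) {
            case 0x01: /* SBI v0.1 CONSOLE_PUTCHAR: character is in args[0] */
                    putchar((int)run->riscv_sbi.args[0]);
                    run->riscv_sbi.ret[0] = 0;
                    break;
            case 0x02: /* SBI v0.1 CONSOLE_GETCHAR: return the character in ret[0] */
                    run->riscv_sbi.ret[0] = (unsigned long)getchar();
                    break;
            default: /* Report "not supported" back to the guest */
                    run->riscv_sbi.ret[0] = (unsigned long)-2;
                    break;
            }
    }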
Signed-off-by: Atish Patra Signed-off-by: Anup Patel Acked-by: Paolo Bonzini Reviewed-by: Paolo Bonzini Acked-by: Palmer Dabbelt --- arch/riscv/include/asm/kvm_host.h | 10 +++ arch/riscv/kvm/Makefile | 1 + arch/riscv/kvm/vcpu.c | 9 ++ arch/riscv/kvm/vcpu_exit.c | 4 + arch/riscv/kvm/vcpu_sbi.c | 185 ++++++++++++++++++++++++++++++++++++++ include/uapi/linux/kvm.h | 8 ++ 6 files changed, 217 insertions(+) create mode 100644 arch/riscv/kvm/vcpu_sbi.c diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h index be159686da46..d7e1696cd2ec 100644 --- a/arch/riscv/include/asm/kvm_host.h +++ b/arch/riscv/include/asm/kvm_host.h @@ -74,6 +74,10 @@ struct kvm_mmio_decode { int return_handled; }; +struct kvm_sbi_context { + int return_handled; +}; + #define KVM_MMU_PAGE_CACHE_NR_OBJS 32 struct kvm_mmu_page_cache { @@ -186,6 +190,9 @@ struct kvm_vcpu_arch { /* MMIO instruction details */ struct kvm_mmio_decode mmio_decode; + /* SBI context */ + struct kvm_sbi_context sbi_context; + /* Cache pages needed to program page tables with spinlock held */ struct kvm_mmu_page_cache mmu_page_cache; @@ -253,4 +260,7 @@ bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, unsigned long mask); void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu); void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu); +int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run); +int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run); + #endif /* __RISCV_KVM_HOST_H__ */ diff --git a/arch/riscv/kvm/Makefile b/arch/riscv/kvm/Makefile index 4beb4e277e96..3226696b8340 100644 --- a/arch/riscv/kvm/Makefile +++ b/arch/riscv/kvm/Makefile @@ -21,4 +21,5 @@ kvm-y += mmu.o kvm-y += vcpu.o kvm-y += vcpu_exit.o kvm-y += vcpu_switch.o +kvm-y += vcpu_sbi.o kvm-y += vcpu_timer.o diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c index 5acec47236c9..c44cabce7dd8 100644 --- a/arch/riscv/kvm/vcpu.c +++ b/arch/riscv/kvm/vcpu.c @@ -867,6 +867,15 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) } } + /* Process SBI value returned from user-space */ + if (run->exit_reason == KVM_EXIT_RISCV_SBI) { + ret = kvm_riscv_vcpu_sbi_return(vcpu, vcpu->run); + if (ret) { + srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx); + return ret; + } + } + if (run->immediate_exit) { srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx); return -EINTR; diff --git a/arch/riscv/kvm/vcpu_exit.c b/arch/riscv/kvm/vcpu_exit.c index f659be94231d..13bbc3f73713 100644 --- a/arch/riscv/kvm/vcpu_exit.c +++ b/arch/riscv/kvm/vcpu_exit.c @@ -678,6 +678,10 @@ int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV) ret = stage2_page_fault(vcpu, run, trap); break; + case EXC_SUPERVISOR_SYSCALL: + if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV) + ret = kvm_riscv_vcpu_sbi_ecall(vcpu, run); + break; default: break; }; diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c new file mode 100644 index 000000000000..ebdcdbade9c6 --- /dev/null +++ b/arch/riscv/kvm/vcpu_sbi.c @@ -0,0 +1,185 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (c) 2019 Western Digital Corporation or its affiliates. 
+ * + * Authors: + * Atish Patra + */ + +#include +#include +#include +#include +#include +#include + +#define SBI_VERSION_MAJOR 0 +#define SBI_VERSION_MINOR 1 + +static void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, + struct kvm_run *run) +{ + struct kvm_cpu_context *cp = &vcpu->arch.guest_context; + + vcpu->arch.sbi_context.return_handled = 0; + vcpu->stat.ecall_exit_stat++; + run->exit_reason = KVM_EXIT_RISCV_SBI; + run->riscv_sbi.extension_id = cp->a7; + run->riscv_sbi.function_id = cp->a6; + run->riscv_sbi.args[0] = cp->a0; + run->riscv_sbi.args[1] = cp->a1; + run->riscv_sbi.args[2] = cp->a2; + run->riscv_sbi.args[3] = cp->a3; + run->riscv_sbi.args[4] = cp->a4; + run->riscv_sbi.args[5] = cp->a5; + run->riscv_sbi.ret[0] = cp->a0; + run->riscv_sbi.ret[1] = cp->a1; +} + +int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run) +{ + struct kvm_cpu_context *cp = &vcpu->arch.guest_context; + + /* Handle SBI return only once */ + if (vcpu->arch.sbi_context.return_handled) + return 0; + vcpu->arch.sbi_context.return_handled = 1; + + /* Update return values */ + cp->a0 = run->riscv_sbi.ret[0]; + cp->a1 = run->riscv_sbi.ret[1]; + + /* Move to next instruction */ + vcpu->arch.guest_context.sepc += 4; + + return 0; +} + +#ifdef CONFIG_RISCV_SBI_V01 + +static void kvm_sbi_system_shutdown(struct kvm_vcpu *vcpu, + struct kvm_run *run, u32 type) +{ + int i; + struct kvm_vcpu *tmp; + + kvm_for_each_vcpu(i, tmp, vcpu->kvm) + tmp->arch.power_off = true; + kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP); + + memset(&run->system_event, 0, sizeof(run->system_event)); + run->system_event.type = type; + run->exit_reason = KVM_EXIT_SYSTEM_EVENT; +} + +int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run) +{ + ulong hmask; + int i, ret = 1; + u64 next_cycle; + struct kvm_vcpu *rvcpu; + bool next_sepc = true; + struct cpumask cm, hm; + struct kvm *kvm = vcpu->kvm; + struct kvm_cpu_trap utrap = { 0 }; + struct kvm_cpu_context *cp = &vcpu->arch.guest_context; + + if (!cp) + return -EINVAL; + + switch (cp->a7) { + case SBI_EXT_0_1_CONSOLE_GETCHAR: + case SBI_EXT_0_1_CONSOLE_PUTCHAR: + /* + * The CONSOLE_GETCHAR/CONSOLE_PUTCHAR SBI calls cannot be + * handled in kernel so we forward these to user-space + */ + kvm_riscv_vcpu_sbi_forward(vcpu, run); + next_sepc = false; + ret = 0; + break; + case SBI_EXT_0_1_SET_TIMER: +#if __riscv_xlen == 32 + next_cycle = ((u64)cp->a1 << 32) | (u64)cp->a0; +#else + next_cycle = (u64)cp->a0; +#endif + kvm_riscv_vcpu_timer_next_event(vcpu, next_cycle); + break; + case SBI_EXT_0_1_CLEAR_IPI: + kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_SOFT); + break; + case SBI_EXT_0_1_SEND_IPI: + if (cp->a0) + hmask = kvm_riscv_vcpu_unpriv_read(vcpu, false, cp->a0, + &utrap); + else + hmask = (1UL << atomic_read(&kvm->online_vcpus)) - 1; + if (utrap.scause) { + utrap.sepc = cp->sepc; + kvm_riscv_vcpu_trap_redirect(vcpu, &utrap); + next_sepc = false; + break; + } + for_each_set_bit(i, &hmask, BITS_PER_LONG) { + rvcpu = kvm_get_vcpu_by_id(vcpu->kvm, i); + kvm_riscv_vcpu_set_interrupt(rvcpu, IRQ_VS_SOFT); + } + break; + case SBI_EXT_0_1_SHUTDOWN: + kvm_sbi_system_shutdown(vcpu, run, KVM_SYSTEM_EVENT_SHUTDOWN); + next_sepc = false; + ret = 0; + break; + case SBI_EXT_0_1_REMOTE_FENCE_I: + case SBI_EXT_0_1_REMOTE_SFENCE_VMA: + case SBI_EXT_0_1_REMOTE_SFENCE_VMA_ASID: + if (cp->a0) + hmask = kvm_riscv_vcpu_unpriv_read(vcpu, false, cp->a0, + &utrap); + else + hmask = (1UL << atomic_read(&kvm->online_vcpus)) - 1; + if (utrap.scause) { + utrap.sepc = cp->sepc; 
+ kvm_riscv_vcpu_trap_redirect(vcpu, &utrap); + next_sepc = false; + break; + } + cpumask_clear(&cm); + for_each_set_bit(i, &hmask, BITS_PER_LONG) { + rvcpu = kvm_get_vcpu_by_id(vcpu->kvm, i); + if (rvcpu->cpu < 0) + continue; + cpumask_set_cpu(rvcpu->cpu, &cm); + } + riscv_cpuid_to_hartid_mask(&cm, &hm); + if (cp->a7 == SBI_EXT_0_1_REMOTE_FENCE_I) + sbi_remote_fence_i(cpumask_bits(&hm)); + else if (cp->a7 == SBI_EXT_0_1_REMOTE_SFENCE_VMA) + sbi_remote_hfence_vvma(cpumask_bits(&hm), + cp->a1, cp->a2); + else + sbi_remote_hfence_vvma_asid(cpumask_bits(&hm), + cp->a1, cp->a2, cp->a3); + break; + default: + /* Return error for unsupported SBI calls */ + cp->a0 = SBI_ERR_NOT_SUPPORTED; + break; + }; + + if (next_sepc) + cp->sepc += 4; + + return ret; +} + +#else + +int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run) +{ + kvm_riscv_vcpu_sbi_forward(vcpu, run); + return 0; +} + +#endif diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index a067410ebea5..322b4b588d75 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -269,6 +269,7 @@ struct kvm_xen_exit { #define KVM_EXIT_AP_RESET_HOLD 32 #define KVM_EXIT_X86_BUS_LOCK 33 #define KVM_EXIT_XEN 34 +#define KVM_EXIT_RISCV_SBI 35 /* For KVM_EXIT_INTERNAL_ERROR */ /* Emulate instruction failed. */ @@ -469,6 +470,13 @@ struct kvm_run { } msr; /* KVM_EXIT_XEN */ struct kvm_xen_exit xen; + /* KVM_EXIT_RISCV_SBI */ + struct { + unsigned long extension_id; + unsigned long function_id; + unsigned long args[6]; + unsigned long ret[2]; + } riscv_sbi; /* Fix the size of the union. */ char padding[256]; }; -- cgit v1.2.3 From da40d85805937d6721b430dacfa2aaa44e3dcfb5 Mon Sep 17 00:00:00 2001 From: Anup Patel Date: Mon, 27 Sep 2021 17:10:15 +0530 Subject: RISC-V: KVM: Document RISC-V specific parts of KVM API Document RISC-V specific parts of the KVM API, such as: - The interrupt numbers passed to the KVM_INTERRUPT ioctl. - The states supported by the KVM_{GET,SET}_MP_STATE ioctls. - The registers supported by the KVM_{GET,SET}_ONE_REG interface and the encoding of those register ids. - The exit reason KVM_EXIT_RISCV_SBI for SBI calls forwarded to userspace tool. CC: Jonathan Corbet CC: linux-doc@vger.kernel.org Signed-off-by: Anup Patel Acked-by: Palmer Dabbelt --- Documentation/virt/kvm/api.rst | 193 +++++++++++++++++++++++++++++++++++++++-- 1 file changed, 184 insertions(+), 9 deletions(-) diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index a6729c8cf063..0c0bf26426b3 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -532,7 +532,7 @@ translation mode. ------------------ :Capability: basic -:Architectures: x86, ppc, mips +:Architectures: x86, ppc, mips, riscv :Type: vcpu ioctl :Parameters: struct kvm_interrupt (in) :Returns: 0 on success, negative on failure. @@ -601,6 +601,23 @@ interrupt number dequeues the interrupt. This is an asynchronous vcpu ioctl and can be invoked from any thread. +RISC-V: +^^^^^^^ + +Queues an external interrupt to be injected into the virutal CPU. This ioctl +is overloaded with 2 different irq values: + +a) KVM_INTERRUPT_SET + + This sets external interrupt for a virtual CPU and it will receive + once it is ready. + +b) KVM_INTERRUPT_UNSET + + This clears pending external interrupt for a virtual CPU. + +This is an asynchronous vcpu ioctl and can be invoked from any thread. + 4.17 KVM_DEBUG_GUEST -------------------- @@ -1399,7 +1416,7 @@ for vm-wide capabilities. 
--------------------- :Capability: KVM_CAP_MP_STATE -:Architectures: x86, s390, arm, arm64 +:Architectures: x86, s390, arm, arm64, riscv :Type: vcpu ioctl :Parameters: struct kvm_mp_state (out) :Returns: 0 on success; -1 on error @@ -1416,7 +1433,8 @@ uniprocessor guests). Possible values are: ========================== =============================================== - KVM_MP_STATE_RUNNABLE the vcpu is currently running [x86,arm/arm64] + KVM_MP_STATE_RUNNABLE the vcpu is currently running + [x86,arm/arm64,riscv] KVM_MP_STATE_UNINITIALIZED the vcpu is an application processor (AP) which has not yet received an INIT signal [x86] KVM_MP_STATE_INIT_RECEIVED the vcpu has received an INIT signal, and is @@ -1425,7 +1443,7 @@ Possible values are: is waiting for an interrupt [x86] KVM_MP_STATE_SIPI_RECEIVED the vcpu has just received a SIPI (vector accessible via KVM_GET_VCPU_EVENTS) [x86] - KVM_MP_STATE_STOPPED the vcpu is stopped [s390,arm/arm64] + KVM_MP_STATE_STOPPED the vcpu is stopped [s390,arm/arm64,riscv] KVM_MP_STATE_CHECK_STOP the vcpu is in a special error state [s390] KVM_MP_STATE_OPERATING the vcpu is operating (running or halted) [s390] @@ -1437,8 +1455,8 @@ On x86, this ioctl is only useful after KVM_CREATE_IRQCHIP. Without an in-kernel irqchip, the multiprocessing state must be maintained by userspace on these architectures. -For arm/arm64: -^^^^^^^^^^^^^^ +For arm/arm64/riscv: +^^^^^^^^^^^^^^^^^^^^ The only states that are valid are KVM_MP_STATE_STOPPED and KVM_MP_STATE_RUNNABLE which reflect if the vcpu is paused or not. @@ -1447,7 +1465,7 @@ KVM_MP_STATE_RUNNABLE which reflect if the vcpu is paused or not. --------------------- :Capability: KVM_CAP_MP_STATE -:Architectures: x86, s390, arm, arm64 +:Architectures: x86, s390, arm, arm64, riscv :Type: vcpu ioctl :Parameters: struct kvm_mp_state (in) :Returns: 0 on success; -1 on error @@ -1459,8 +1477,8 @@ On x86, this ioctl is only useful after KVM_CREATE_IRQCHIP. Without an in-kernel irqchip, the multiprocessing state must be maintained by userspace on these architectures. -For arm/arm64: -^^^^^^^^^^^^^^ +For arm/arm64/riscv: +^^^^^^^^^^^^^^^^^^^^ The only states that are valid are KVM_MP_STATE_STOPPED and KVM_MP_STATE_RUNNABLE which reflect if the vcpu should be paused or not. @@ -2577,6 +2595,144 @@ following id bit patterns:: 0x7020 0000 0003 02 <0:3> +RISC-V registers are mapped using the lower 32 bits. The upper 8 bits of +that is the register group type. + +RISC-V config registers are meant for configuring a Guest VCPU and it has +the following id bit patterns:: + + 0x8020 0000 01 (32bit Host) + 0x8030 0000 01 (64bit Host) + +Following are the RISC-V config registers: + +======================= ========= ============================================= + Encoding Register Description +======================= ========= ============================================= + 0x80x0 0000 0100 0000 isa ISA feature bitmap of Guest VCPU +======================= ========= ============================================= + +The isa config register can be read anytime but can only be written before +a Guest VCPU runs. It will have ISA feature bits matching underlying host +set by default. 
+ +RISC-V core registers represent the general excution state of a Guest VCPU +and it has the following id bit patterns:: + + 0x8020 0000 02 (32bit Host) + 0x8030 0000 02 (64bit Host) + +Following are the RISC-V core registers: + +======================= ========= ============================================= + Encoding Register Description +======================= ========= ============================================= + 0x80x0 0000 0200 0000 regs.pc Program counter + 0x80x0 0000 0200 0001 regs.ra Return address + 0x80x0 0000 0200 0002 regs.sp Stack pointer + 0x80x0 0000 0200 0003 regs.gp Global pointer + 0x80x0 0000 0200 0004 regs.tp Task pointer + 0x80x0 0000 0200 0005 regs.t0 Caller saved register 0 + 0x80x0 0000 0200 0006 regs.t1 Caller saved register 1 + 0x80x0 0000 0200 0007 regs.t2 Caller saved register 2 + 0x80x0 0000 0200 0008 regs.s0 Callee saved register 0 + 0x80x0 0000 0200 0009 regs.s1 Callee saved register 1 + 0x80x0 0000 0200 000a regs.a0 Function argument (or return value) 0 + 0x80x0 0000 0200 000b regs.a1 Function argument (or return value) 1 + 0x80x0 0000 0200 000c regs.a2 Function argument 2 + 0x80x0 0000 0200 000d regs.a3 Function argument 3 + 0x80x0 0000 0200 000e regs.a4 Function argument 4 + 0x80x0 0000 0200 000f regs.a5 Function argument 5 + 0x80x0 0000 0200 0010 regs.a6 Function argument 6 + 0x80x0 0000 0200 0011 regs.a7 Function argument 7 + 0x80x0 0000 0200 0012 regs.s2 Callee saved register 2 + 0x80x0 0000 0200 0013 regs.s3 Callee saved register 3 + 0x80x0 0000 0200 0014 regs.s4 Callee saved register 4 + 0x80x0 0000 0200 0015 regs.s5 Callee saved register 5 + 0x80x0 0000 0200 0016 regs.s6 Callee saved register 6 + 0x80x0 0000 0200 0017 regs.s7 Callee saved register 7 + 0x80x0 0000 0200 0018 regs.s8 Callee saved register 8 + 0x80x0 0000 0200 0019 regs.s9 Callee saved register 9 + 0x80x0 0000 0200 001a regs.s10 Callee saved register 10 + 0x80x0 0000 0200 001b regs.s11 Callee saved register 11 + 0x80x0 0000 0200 001c regs.t3 Caller saved register 3 + 0x80x0 0000 0200 001d regs.t4 Caller saved register 4 + 0x80x0 0000 0200 001e regs.t5 Caller saved register 5 + 0x80x0 0000 0200 001f regs.t6 Caller saved register 6 + 0x80x0 0000 0200 0020 mode Privilege mode (1 = S-mode or 0 = U-mode) +======================= ========= ============================================= + +RISC-V csr registers represent the supervisor mode control/status registers +of a Guest VCPU and it has the following id bit patterns:: + + 0x8020 0000 03 (32bit Host) + 0x8030 0000 03 (64bit Host) + +Following are the RISC-V csr registers: + +======================= ========= ============================================= + Encoding Register Description +======================= ========= ============================================= + 0x80x0 0000 0300 0000 sstatus Supervisor status + 0x80x0 0000 0300 0001 sie Supervisor interrupt enable + 0x80x0 0000 0300 0002 stvec Supervisor trap vector base + 0x80x0 0000 0300 0003 sscratch Supervisor scratch register + 0x80x0 0000 0300 0004 sepc Supervisor exception program counter + 0x80x0 0000 0300 0005 scause Supervisor trap cause + 0x80x0 0000 0300 0006 stval Supervisor bad address or instruction + 0x80x0 0000 0300 0007 sip Supervisor interrupt pending + 0x80x0 0000 0300 0008 satp Supervisor address translation and protection +======================= ========= ============================================= + +RISC-V timer registers represent the timer state of a Guest VCPU and it has +the following id bit patterns:: + + 0x8030 0000 04 + +Following are the RISC-V 
timer registers: + +======================= ========= ============================================= + Encoding Register Description +======================= ========= ============================================= + 0x8030 0000 0400 0000 frequency Time base frequency (read-only) + 0x8030 0000 0400 0001 time Time value visible to Guest + 0x8030 0000 0400 0002 compare Time compare programmed by Guest + 0x8030 0000 0400 0003 state Time compare state (1 = ON or 0 = OFF) +======================= ========= ============================================= + +RISC-V F-extension registers represent the single precision floating point +state of a Guest VCPU and it has the following id bit patterns:: + + 0x8020 0000 05 + +Following are the RISC-V F-extension registers: + +======================= ========= ============================================= + Encoding Register Description +======================= ========= ============================================= + 0x8020 0000 0500 0000 f[0] Floating point register 0 + ... + 0x8020 0000 0500 001f f[31] Floating point register 31 + 0x8020 0000 0500 0020 fcsr Floating point control and status register +======================= ========= ============================================= + +RISC-V D-extension registers represent the double precision floating point +state of a Guest VCPU and it has the following id bit patterns:: + + 0x8020 0000 06 (fcsr) + 0x8030 0000 06 (non-fcsr) + +Following are the RISC-V D-extension registers: + +======================= ========= ============================================= + Encoding Register Description +======================= ========= ============================================= + 0x8030 0000 0600 0000 f[0] Floating point register 0 + ... + 0x8030 0000 0600 001f f[31] Floating point register 31 + 0x8020 0000 0600 0020 fcsr Floating point control and status register +======================= ========= ============================================= + 4.69 KVM_GET_ONE_REG -------------------- @@ -5848,6 +6004,25 @@ Valid values for 'type' are: Userspace is expected to place the hypercall result into the appropriate field before invoking KVM_RUN again. +:: + + /* KVM_EXIT_RISCV_SBI */ + struct { + unsigned long extension_id; + unsigned long function_id; + unsigned long args[6]; + unsigned long ret[2]; + } riscv_sbi; +If exit reason is KVM_EXIT_RISCV_SBI then it indicates that the VCPU has +done a SBI call which is not handled by KVM RISC-V kernel module. The details +of the SBI call are available in 'riscv_sbi' member of kvm_run structure. The +'extension_id' field of 'riscv_sbi' represents SBI extension ID whereas the +'function_id' field represents function ID of given SBI extension. The 'args' +array field of 'riscv_sbi' represents parameters for the SBI call and 'ret' +array field represents return values. The userspace should update the return +values of SBI call before resuming the VCPU. For more details on RISC-V SBI +spec refer, https://github.com/riscv/riscv-sbi-doc. + :: /* Fix the size of the union. */ -- cgit v1.2.3 From 24b699d12c34cfc907de9fe3989a122b7b13391c Mon Sep 17 00:00:00 2001 From: Anup Patel Date: Mon, 27 Sep 2021 17:10:16 +0530 Subject: RISC-V: KVM: Add MAINTAINERS entry Add myself as maintainer for KVM RISC-V and Atish as designated reviewer. 
Signed-off-by: Atish Patra Signed-off-by: Anup Patel Acked-by: Paolo Bonzini Reviewed-by: Paolo Bonzini Reviewed-by: Alexander Graf Acked-by: Palmer Dabbelt --- MAINTAINERS | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/MAINTAINERS b/MAINTAINERS index ca6d6fde85cf..faaf9780af6d 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -10270,6 +10270,18 @@ F: arch/powerpc/include/uapi/asm/kvm* F: arch/powerpc/kernel/kvm* F: arch/powerpc/kvm/ +KERNEL VIRTUAL MACHINE FOR RISC-V (KVM/riscv) +M: Anup Patel +R: Atish Patra +L: kvm@vger.kernel.org +L: kvm-riscv@lists.infradead.org +L: linux-riscv@lists.infradead.org +S: Maintained +T: git git://github.com/kvm-riscv/linux.git +F: arch/riscv/include/asm/kvm* +F: arch/riscv/include/uapi/asm/kvm* +F: arch/riscv/kvm/ + KERNEL VIRTUAL MACHINE for s390 (KVM/s390) M: Christian Borntraeger M: Janosch Frank -- cgit v1.2.3 From a78738ed1d9bf40d09109599b884508c69d188b8 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Fri, 8 Oct 2021 14:58:34 +0100 Subject: KVM: arm64: Turn __KVM_HOST_SMCCC_FUNC_* into an enum (mostly) __KVM_HOST_SMCCC_FUNC_* is a royal pain, as there is a fair amount of churn around these #defines, and we avoid making it an enum only for the sake of the early init, low level code that requires __KVM_HOST_SMCCC_FUNC___kvm_hyp_init to be usable from assembly. Let's be brave and turn everything but this symbol into an enum, using a bit of arithmetic to avoid any overlap. Acked-by: Will Deacon Link: https://lore.kernel.org/r/877depq9gw.wl-maz@kernel.org Signed-off-by: Marc Zyngier Signed-off-by: Will Deacon Link: https://lore.kernel.org/r/20211008135839.1193-2-will@kernel.org --- arch/arm64/include/asm/kvm_asm.h | 44 ++++++++++++++++++++++------------------ 1 file changed, 24 insertions(+), 20 deletions(-) diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index e86045ac43ba..43b5e213ae43 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -44,31 +44,35 @@ #define KVM_HOST_SMCCC_FUNC(name) KVM_HOST_SMCCC_ID(__KVM_HOST_SMCCC_FUNC_##name) #define __KVM_HOST_SMCCC_FUNC___kvm_hyp_init 0 -#define __KVM_HOST_SMCCC_FUNC___kvm_vcpu_run 1 -#define __KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context 2 -#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa 3 -#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid 4 -#define __KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context 5 -#define __KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff 6 -#define __KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs 7 -#define __KVM_HOST_SMCCC_FUNC___vgic_v3_get_gic_config 8 -#define __KVM_HOST_SMCCC_FUNC___vgic_v3_read_vmcr 9 -#define __KVM_HOST_SMCCC_FUNC___vgic_v3_write_vmcr 10 -#define __KVM_HOST_SMCCC_FUNC___vgic_v3_init_lrs 11 -#define __KVM_HOST_SMCCC_FUNC___kvm_get_mdcr_el2 12 -#define __KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs 13 -#define __KVM_HOST_SMCCC_FUNC___vgic_v3_restore_aprs 14 -#define __KVM_HOST_SMCCC_FUNC___pkvm_init 15 -#define __KVM_HOST_SMCCC_FUNC___pkvm_host_share_hyp 16 -#define __KVM_HOST_SMCCC_FUNC___pkvm_create_private_mapping 17 -#define __KVM_HOST_SMCCC_FUNC___pkvm_cpu_set_vector 18 -#define __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize 19 -#define __KVM_HOST_SMCCC_FUNC___kvm_adjust_pc 20 #ifndef __ASSEMBLY__ #include +enum __kvm_host_smccc_func { + /* __KVM_HOST_SMCCC_FUNC___kvm_hyp_init */ + __KVM_HOST_SMCCC_FUNC___kvm_vcpu_run = __KVM_HOST_SMCCC_FUNC___kvm_hyp_init + 1, + __KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context, + __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa, + 
__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid, + __KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context, + __KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff, + __KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs, + __KVM_HOST_SMCCC_FUNC___vgic_v3_get_gic_config, + __KVM_HOST_SMCCC_FUNC___vgic_v3_read_vmcr, + __KVM_HOST_SMCCC_FUNC___vgic_v3_write_vmcr, + __KVM_HOST_SMCCC_FUNC___vgic_v3_init_lrs, + __KVM_HOST_SMCCC_FUNC___kvm_get_mdcr_el2, + __KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs, + __KVM_HOST_SMCCC_FUNC___vgic_v3_restore_aprs, + __KVM_HOST_SMCCC_FUNC___pkvm_init, + __KVM_HOST_SMCCC_FUNC___pkvm_host_share_hyp, + __KVM_HOST_SMCCC_FUNC___pkvm_create_private_mapping, + __KVM_HOST_SMCCC_FUNC___pkvm_cpu_set_vector, + __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize, + __KVM_HOST_SMCCC_FUNC___kvm_adjust_pc, +}; + #define DECLARE_KVM_VHE_SYM(sym) extern char sym[] #define DECLARE_KVM_NVHE_SYM(sym) extern char kvm_nvhe_sym(sym)[] -- cgit v1.2.3 From 8f4566f18db5d1257fc2d5442e16274424a529c1 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 8 Oct 2021 14:58:35 +0100 Subject: arm64: Prevent kexec and hibernation if is_protected_kvm_enabled() When pKVM is enabled, the hypervisor code at EL2 and its data structures are inaccessible to the host kernel and cannot be torn down or replaced as this would defeat the integrity properies which pKVM aims to provide. Furthermore, the ABI between the host and EL2 is flexible and private to whatever the current implementation of KVM requires and so booting a new kernel with an old EL2 component is very likely to end in disaster. In preparation for uninstalling the hyp stub calls which are relied upon to reset EL2, disable kexec and hibernation in the host when protected KVM is enabled. Cc: Marc Zyngier Cc: Quentin Perret Signed-off-by: Will Deacon Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211008135839.1193-3-will@kernel.org --- arch/arm64/kernel/smp.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 6f6ff072acbd..44369b99a57e 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -1128,5 +1128,6 @@ bool cpus_are_stuck_in_kernel(void) { bool smp_spin_tables = (num_possible_cpus() > 1 && !have_cpu_die()); - return !!cpus_stuck_in_kernel || smp_spin_tables; + return !!cpus_stuck_in_kernel || smp_spin_tables || + is_protected_kvm_enabled(); } -- cgit v1.2.3 From 8579a185bacaa64c65e43e251ceede2f7600f7e2 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 8 Oct 2021 14:58:36 +0100 Subject: KVM: arm64: Reject stub hypercalls after pKVM has been initialised The stub hypercalls provide mechanisms to reset and replace the EL2 code, so uninstall them once pKVM has been initialised in order to ensure the integrity of the hypervisor code. To ensure pKVM initialisation remains functional, split cpu_hyp_reinit() into two helper functions to separate usage of the stub from usage of pkvm hypercalls either side of __pkvm_init on the boot CPU. 
Cc: Marc Zyngier Cc: Quentin Perret Signed-off-by: Will Deacon Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211008135839.1193-4-will@kernel.org --- arch/arm64/kvm/arm.c | 31 +++++++++++++++++++++++-------- arch/arm64/kvm/hyp/nvhe/host.S | 26 +++++++++++++++++--------- 2 files changed, 40 insertions(+), 17 deletions(-) diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index fe102cd2e518..9506cf88fa0e 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -1579,25 +1579,33 @@ static void cpu_set_hyp_vector(void) kvm_call_hyp_nvhe(__pkvm_cpu_set_vector, data->slot); } -static void cpu_hyp_reinit(void) +static void cpu_hyp_init_context(void) { kvm_init_host_cpu_context(&this_cpu_ptr_hyp_sym(kvm_host_data)->host_ctxt); - cpu_hyp_reset(); - - if (is_kernel_in_hyp_mode()) - kvm_timer_init_vhe(); - else + if (!is_kernel_in_hyp_mode()) cpu_init_hyp_mode(); +} +static void cpu_hyp_init_features(void) +{ cpu_set_hyp_vector(); - kvm_arm_init_debug(); + if (is_kernel_in_hyp_mode()) + kvm_timer_init_vhe(); + if (vgic_present) kvm_vgic_init_cpu_hardware(); } +static void cpu_hyp_reinit(void) +{ + cpu_hyp_reset(); + cpu_hyp_init_context(); + cpu_hyp_init_features(); +} + static void _kvm_arch_hardware_enable(void *discard) { if (!__this_cpu_read(kvm_arm_hardware_enabled)) { @@ -1788,10 +1796,17 @@ static int do_pkvm_init(u32 hyp_va_bits) int ret; preempt_disable(); - hyp_install_host_vector(); + cpu_hyp_init_context(); ret = kvm_call_hyp_nvhe(__pkvm_init, hyp_mem_base, hyp_mem_size, num_possible_cpus(), kern_hyp_va(per_cpu_base), hyp_va_bits); + cpu_hyp_init_features(); + + /* + * The stub hypercalls are now disabled, so set our local flag to + * prevent a later re-init attempt in kvm_arch_hardware_enable(). + */ + __this_cpu_write(kvm_arm_hardware_enabled, 1); preempt_enable(); return ret; diff --git a/arch/arm64/kvm/hyp/nvhe/host.S b/arch/arm64/kvm/hyp/nvhe/host.S index 4b652ffb591d..0c6116d34e18 100644 --- a/arch/arm64/kvm/hyp/nvhe/host.S +++ b/arch/arm64/kvm/hyp/nvhe/host.S @@ -110,17 +110,14 @@ SYM_FUNC_START(__hyp_do_panic) b __host_enter_for_panic SYM_FUNC_END(__hyp_do_panic) -.macro host_el1_sync_vect - .align 7 -.L__vect_start\@: - stp x0, x1, [sp, #-16]! - mrs x0, esr_el2 - lsr x0, x0, #ESR_ELx_EC_SHIFT - cmp x0, #ESR_ELx_EC_HVC64 - b.ne __host_exit - +SYM_FUNC_START(__host_hvc) ldp x0, x1, [sp] // Don't fixup the stack yet + /* No stub for you, sonny Jim */ +alternative_if ARM64_KVM_PROTECTED_MODE + b __host_exit +alternative_else_nop_endif + /* Check for a stub HVC call */ cmp x0, #HVC_STUB_HCALL_NR b.hs __host_exit @@ -137,6 +134,17 @@ SYM_FUNC_END(__hyp_do_panic) ldr x5, =__kvm_handle_stub_hvc hyp_pa x5, x6 br x5 +SYM_FUNC_END(__host_hvc) + +.macro host_el1_sync_vect + .align 7 +.L__vect_start\@: + stp x0, x1, [sp, #-16]! + mrs x0, esr_el2 + lsr x0, x0, #ESR_ELx_EC_SHIFT + cmp x0, #ESR_ELx_EC_HVC64 + b.eq __host_hvc + b __host_exit .L__vect_end\@: .if ((.L__vect_end\@ - .L__vect_start\@) > 0x80) .error "host_el1_sync_vect larger than vector entry" -- cgit v1.2.3 From 2f2e1a5069679491d18cf9021da19b40c56a17f3 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 8 Oct 2021 14:58:37 +0100 Subject: KVM: arm64: Propagate errors from __pkvm_prot_finalize hypercall If the __pkvm_prot_finalize hypercall returns an error, we WARN but fail to propagate the failure code back to kvm_arch_init(). Pass a pointer to a zero-initialised return variable so that failure to finalise the pKVM protections on a host CPU can be reported back to KVM. 
Cc: Marc Zyngier Cc: Quentin Perret Signed-off-by: Will Deacon Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211008135839.1193-5-will@kernel.org --- arch/arm64/kvm/arm.c | 30 +++++++++++++++++++----------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 9506cf88fa0e..13bbf35896cd 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -1986,9 +1986,25 @@ out_err: return err; } -static void _kvm_host_prot_finalize(void *discard) +static void _kvm_host_prot_finalize(void *arg) { - WARN_ON(kvm_call_hyp_nvhe(__pkvm_prot_finalize)); + int *err = arg; + + if (WARN_ON(kvm_call_hyp_nvhe(__pkvm_prot_finalize))) + WRITE_ONCE(*err, -EINVAL); +} + +static int pkvm_drop_host_privileges(void) +{ + int ret = 0; + + /* + * Flip the static key upfront as that may no longer be possible + * once the host stage 2 is installed. + */ + static_branch_enable(&kvm_protected_mode_initialized); + on_each_cpu(_kvm_host_prot_finalize, &ret, 1); + return ret; } static int finalize_hyp_mode(void) @@ -2002,15 +2018,7 @@ static int finalize_hyp_mode(void) * None of other sections should ever be introspected. */ kmemleak_free_part(__hyp_bss_start, __hyp_bss_end - __hyp_bss_start); - - /* - * Flip the static key upfront as that may no longer be possible - * once the host stage 2 is installed. - */ - static_branch_enable(&kvm_protected_mode_initialized); - on_each_cpu(_kvm_host_prot_finalize, NULL, 1); - - return 0; + return pkvm_drop_host_privileges(); } struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr) -- cgit v1.2.3 From 07036cffe17ec07e8fb630d86f8ea21832d9e57d Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 8 Oct 2021 14:58:38 +0100 Subject: KVM: arm64: Prevent re-finalisation of pKVM for a given CPU __pkvm_prot_finalize() completes the deprivilege of the host when pKVM is in use by installing a stage-2 translation table for the calling CPU. Issuing the hypercall multiple times for a given CPU makes little sense, but in such a case just return early with -EPERM rather than go through the whole page-table dance again. Cc: Marc Zyngier Cc: Quentin Perret Signed-off-by: Will Deacon Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211008135839.1193-6-will@kernel.org --- arch/arm64/kvm/hyp/nvhe/mem_protect.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c index bacd493a4eac..cafe17e5fa8f 100644 --- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c +++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c @@ -123,6 +123,9 @@ int __pkvm_prot_finalize(void) struct kvm_s2_mmu *mmu = &host_kvm.arch.mmu; struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params); + if (params->hcr_el2 & HCR_VM) + return -EPERM; + params->vttbr = kvm_get_vttbr(mmu); params->vtcr = host_kvm.arch.vtcr; params->hcr_el2 |= HCR_VM; -- cgit v1.2.3 From 057bed206f70d624c2eacb43ec56551950a26832 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 8 Oct 2021 14:58:39 +0100 Subject: KVM: arm64: Disable privileged hypercalls after pKVM finalisation After pKVM has been 'finalised' using the __pkvm_prot_finalize hypercall, the calling CPU will have a Stage-2 translation enabled to prevent access to memory pages owned by EL2. Although this forms a significant part of the process to deprivilege the host kernel, we also need to ensure that the hypercall interface is reduced so that the EL2 code cannot, for example, be re-initialised using a new set of vectors. 
Re-order the hypercalls so that only a suffix remains available after finalisation of pKVM. Cc: Marc Zyngier Cc: Quentin Perret Signed-off-by: Will Deacon Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211008135839.1193-7-will@kernel.org --- arch/arm64/include/asm/kvm_asm.h | 25 ++++++++++++++----------- arch/arm64/kvm/hyp/nvhe/hyp-main.c | 37 ++++++++++++++++++++++++++----------- 2 files changed, 40 insertions(+), 22 deletions(-) diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index 43b5e213ae43..4654d27fd221 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -50,27 +50,30 @@ #include enum __kvm_host_smccc_func { + /* Hypercalls available only prior to pKVM finalisation */ /* __KVM_HOST_SMCCC_FUNC___kvm_hyp_init */ - __KVM_HOST_SMCCC_FUNC___kvm_vcpu_run = __KVM_HOST_SMCCC_FUNC___kvm_hyp_init + 1, + __KVM_HOST_SMCCC_FUNC___kvm_get_mdcr_el2 = __KVM_HOST_SMCCC_FUNC___kvm_hyp_init + 1, + __KVM_HOST_SMCCC_FUNC___pkvm_init, + __KVM_HOST_SMCCC_FUNC___pkvm_create_private_mapping, + __KVM_HOST_SMCCC_FUNC___pkvm_cpu_set_vector, + __KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs, + __KVM_HOST_SMCCC_FUNC___vgic_v3_init_lrs, + __KVM_HOST_SMCCC_FUNC___vgic_v3_get_gic_config, + __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize, + + /* Hypercalls available after pKVM finalisation */ + __KVM_HOST_SMCCC_FUNC___pkvm_host_share_hyp, + __KVM_HOST_SMCCC_FUNC___kvm_adjust_pc, + __KVM_HOST_SMCCC_FUNC___kvm_vcpu_run, __KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context, __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa, __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid, __KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context, __KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff, - __KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs, - __KVM_HOST_SMCCC_FUNC___vgic_v3_get_gic_config, __KVM_HOST_SMCCC_FUNC___vgic_v3_read_vmcr, __KVM_HOST_SMCCC_FUNC___vgic_v3_write_vmcr, - __KVM_HOST_SMCCC_FUNC___vgic_v3_init_lrs, - __KVM_HOST_SMCCC_FUNC___kvm_get_mdcr_el2, __KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs, __KVM_HOST_SMCCC_FUNC___vgic_v3_restore_aprs, - __KVM_HOST_SMCCC_FUNC___pkvm_init, - __KVM_HOST_SMCCC_FUNC___pkvm_host_share_hyp, - __KVM_HOST_SMCCC_FUNC___pkvm_create_private_mapping, - __KVM_HOST_SMCCC_FUNC___pkvm_cpu_set_vector, - __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize, - __KVM_HOST_SMCCC_FUNC___kvm_adjust_pc, }; #define DECLARE_KVM_VHE_SYM(sym) extern char sym[] diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c index 2da6aa8da868..8566805ef62c 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c +++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c @@ -165,36 +165,51 @@ typedef void (*hcall_t)(struct kvm_cpu_context *); #define HANDLE_FUNC(x) [__KVM_HOST_SMCCC_FUNC_##x] = (hcall_t)handle_##x static const hcall_t host_hcall[] = { - HANDLE_FUNC(__kvm_vcpu_run), + /* ___kvm_hyp_init */ + HANDLE_FUNC(__kvm_get_mdcr_el2), + HANDLE_FUNC(__pkvm_init), + HANDLE_FUNC(__pkvm_create_private_mapping), + HANDLE_FUNC(__pkvm_cpu_set_vector), + HANDLE_FUNC(__kvm_enable_ssbs), + HANDLE_FUNC(__vgic_v3_init_lrs), + HANDLE_FUNC(__vgic_v3_get_gic_config), + HANDLE_FUNC(__pkvm_prot_finalize), + + HANDLE_FUNC(__pkvm_host_share_hyp), HANDLE_FUNC(__kvm_adjust_pc), + HANDLE_FUNC(__kvm_vcpu_run), HANDLE_FUNC(__kvm_flush_vm_context), HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa), HANDLE_FUNC(__kvm_tlb_flush_vmid), HANDLE_FUNC(__kvm_flush_cpu_context), HANDLE_FUNC(__kvm_timer_set_cntvoff), - HANDLE_FUNC(__kvm_enable_ssbs), - HANDLE_FUNC(__vgic_v3_get_gic_config), HANDLE_FUNC(__vgic_v3_read_vmcr), 
HANDLE_FUNC(__vgic_v3_write_vmcr), - HANDLE_FUNC(__vgic_v3_init_lrs), - HANDLE_FUNC(__kvm_get_mdcr_el2), HANDLE_FUNC(__vgic_v3_save_aprs), HANDLE_FUNC(__vgic_v3_restore_aprs), - HANDLE_FUNC(__pkvm_init), - HANDLE_FUNC(__pkvm_cpu_set_vector), - HANDLE_FUNC(__pkvm_host_share_hyp), - HANDLE_FUNC(__pkvm_create_private_mapping), - HANDLE_FUNC(__pkvm_prot_finalize), }; static void handle_host_hcall(struct kvm_cpu_context *host_ctxt) { DECLARE_REG(unsigned long, id, host_ctxt, 0); + unsigned long hcall_min = 0; hcall_t hfn; + /* + * If pKVM has been initialised then reject any calls to the + * early "privileged" hypercalls. Note that we cannot reject + * calls to __pkvm_prot_finalize for two reasons: (1) The static + * key used to determine initialisation must be toggled prior to + * finalisation and (2) finalisation is performed on a per-CPU + * basis. This is all fine, however, since __pkvm_prot_finalize + * returns -EPERM after the first call for a given CPU. + */ + if (static_branch_unlikely(&kvm_protected_mode_initialized)) + hcall_min = __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize; + id -= KVM_HOST_SMCCC_ID(0); - if (unlikely(id >= ARRAY_SIZE(host_hcall))) + if (unlikely(id < hcall_min || id >= ARRAY_SIZE(host_hcall))) goto inval; hfn = host_hcall[id]; -- cgit v1.2.3 From f25c5e4dafd859b941a4654cbab9eb83ff994bcd Mon Sep 17 00:00:00 2001 From: Ricardo Koller Date: Mon, 4 Oct 2021 18:19:11 -0700 Subject: kvm: arm64: vgic: Introduce vgic_check_iorange Add the new vgic_check_iorange helper that checks that an iorange is sane: the start address and size have valid alignments, the range is within the addressable PA range, start+size doesn't overflow, and the start wasn't already defined. No functional change. Reviewed-by: Eric Auger Signed-off-by: Ricardo Koller Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211005011921.437353-2-ricarkol@google.com --- arch/arm64/kvm/vgic/vgic-kvm-device.c | 22 ++++++++++++++++++++++ arch/arm64/kvm/vgic/vgic.h | 4 ++++ 2 files changed, 26 insertions(+) diff --git a/arch/arm64/kvm/vgic/vgic-kvm-device.c b/arch/arm64/kvm/vgic/vgic-kvm-device.c index 7740995de982..cc0ad227b380 100644 --- a/arch/arm64/kvm/vgic/vgic-kvm-device.c +++ b/arch/arm64/kvm/vgic/vgic-kvm-device.c @@ -29,6 +29,28 @@ int vgic_check_ioaddr(struct kvm *kvm, phys_addr_t *ioaddr, return 0; } +int vgic_check_iorange(struct kvm *kvm, phys_addr_t ioaddr, + phys_addr_t addr, phys_addr_t alignment, + phys_addr_t size) +{ + int ret; + + ret = vgic_check_ioaddr(kvm, &ioaddr, addr, alignment); + if (ret) + return ret; + + if (!IS_ALIGNED(size, alignment)) + return -EINVAL; + + if (addr + size < addr) + return -EINVAL; + + if (addr + size > kvm_phys_size(kvm)) + return -E2BIG; + + return 0; +} + static int vgic_check_type(struct kvm *kvm, int type_needed) { if (kvm->arch.vgic.vgic_model != type_needed) diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h index 14a9218641f5..4be01c38e8f1 100644 --- a/arch/arm64/kvm/vgic/vgic.h +++ b/arch/arm64/kvm/vgic/vgic.h @@ -175,6 +175,10 @@ void vgic_irq_handle_resampling(struct vgic_irq *irq, int vgic_check_ioaddr(struct kvm *kvm, phys_addr_t *ioaddr, phys_addr_t addr, phys_addr_t alignment); +int vgic_check_iorange(struct kvm *kvm, phys_addr_t ioaddr, + phys_addr_t addr, phys_addr_t alignment, + phys_addr_t size); + void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu); void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr); void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr); -- cgit v1.2.3 From 
4612d98f58c73ad63928200fd332f75c8e524dae Mon Sep 17 00:00:00 2001 From: Ricardo Koller Date: Mon, 4 Oct 2021 18:19:12 -0700 Subject: KVM: arm64: vgic-v3: Check redist region is not above the VM IPA size Verify that the redistributor regions do not extend beyond the VM-specified IPA range (phys_size). This can happen when using KVM_VGIC_V3_ADDR_TYPE_REDIST or KVM_VGIC_V3_ADDR_TYPE_REDIST_REGIONS with: base + size > phys_size AND base < phys_size Add the missing check into vgic_v3_alloc_redist_region() which is called when setting the regions, and into vgic_v3_check_base() which is called when attempting the first vcpu-run. The vcpu-run check does not apply to KVM_VGIC_V3_ADDR_TYPE_REDIST_REGIONS because the regions size is known before the first vcpu-run. Note that using the REDIST_REGIONS API results in a different check, which already exists, at first vcpu run: that the number of redist regions is enough for all vcpus. Finally, this patch also enables some extra tests in vgic_v3_alloc_redist_region() by calculating "size" early for the legacy redist api: like checking that the REDIST region can fit all the already created vcpus. Reviewed-by: Eric Auger Signed-off-by: Ricardo Koller Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211005011921.437353-3-ricarkol@google.com --- arch/arm64/kvm/vgic/vgic-mmio-v3.c | 6 ++++-- arch/arm64/kvm/vgic/vgic-v3.c | 6 ++++-- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c index a09cdc0b953c..a9642fc71fdf 100644 --- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c +++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c @@ -796,7 +796,9 @@ static int vgic_v3_alloc_redist_region(struct kvm *kvm, uint32_t index, struct vgic_dist *d = &kvm->arch.vgic; struct vgic_redist_region *rdreg; struct list_head *rd_regions = &d->rd_regions; - size_t size = count * KVM_VGIC_V3_REDIST_SIZE; + int nr_vcpus = atomic_read(&kvm->online_vcpus); + size_t size = count ? count * KVM_VGIC_V3_REDIST_SIZE + : nr_vcpus * KVM_VGIC_V3_REDIST_SIZE; int ret; /* cross the end of memory ? */ @@ -840,7 +842,7 @@ static int vgic_v3_alloc_redist_region(struct kvm *kvm, uint32_t index, rdreg->base = VGIC_ADDR_UNDEF; - ret = vgic_check_ioaddr(kvm, &rdreg->base, base, SZ_64K); + ret = vgic_check_iorange(kvm, rdreg->base, base, SZ_64K, size); if (ret) goto free; diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c index 21a6207fb2ee..960f51a8691f 100644 --- a/arch/arm64/kvm/vgic/vgic-v3.c +++ b/arch/arm64/kvm/vgic/vgic-v3.c @@ -483,8 +483,10 @@ bool vgic_v3_check_base(struct kvm *kvm) return false; list_for_each_entry(rdreg, &d->rd_regions, list) { - if (rdreg->base + vgic_v3_rd_region_size(kvm, rdreg) < - rdreg->base) + size_t sz = vgic_v3_rd_region_size(kvm, rdreg); + + if (vgic_check_iorange(kvm, VGIC_ADDR_UNDEF, + rdreg->base, SZ_64K, sz)) return false; } -- cgit v1.2.3 From c56a87da0a7fa14180082249ac954c7ebc9e74e1 Mon Sep 17 00:00:00 2001 From: Ricardo Koller Date: Mon, 4 Oct 2021 18:19:13 -0700 Subject: KVM: arm64: vgic-v2: Check cpu interface region is not above the VM IPA size Verify that the GICv2 CPU interface does not extend beyond the VM-specified IPA range (phys_size). base + size > phys_size AND base < phys_size Add the missing check into kvm_vgic_addr() which is called when setting the region. This patch also enables some superfluous checks for the distributor (vgic_check_ioaddr was enough as alignment == size for the distributors). 
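To illustrate the point of the vgic_check_iorange() checks introduced in the patches above, the following standalone model shows a region that passes a base-only check but is rejected once the size is taken into account. It is not the kernel helper itself; the 40-bit IPA limit and the two 64 KiB redistributor frames per vCPU are illustrative numbers only::

  /*
   * Standalone model (not the kernel code): alignment, wrap-around and
   * "fits below the IPA limit" are all checked against base *and* size.
   */
  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  #define SZ_64K    0x10000ULL
  #define IPA_LIMIT (1ULL << 40)   /* assumed 40-bit guest physical space */

  static bool iorange_ok(uint64_t addr, uint64_t alignment, uint64_t size)
  {
      if (addr % alignment || size % alignment)
          return false;                      /* misaligned base or size */
      if (addr + size < addr)
          return false;                      /* 64-bit wrap-around */
      if (addr >= IPA_LIMIT || addr + size > IPA_LIMIT)
          return false;                      /* fully or partly above the limit */
      return true;
  }

  int main(void)
  {
      uint64_t rdist_size = 2 * SZ_64K;      /* one vCPU: two 64 KiB frames */
      uint64_t base = IPA_LIMIT - SZ_64K;    /* base addressable, 2nd frame not */

      printf("base-only check: %s\n", base < IPA_LIMIT ? "passes" : "fails");
      printf("base+size check: %s\n",
             iorange_ok(base, SZ_64K, rdist_size) ? "passes" : "fails");
      return 0;
  }

With only the old base check, this region would have been accepted even though its second frame lies above the guest's addressable range, which is exactly the "partially above the IPA range" case these patches reject.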
Reviewed-by: Eric Auger Signed-off-by: Ricardo Koller Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211005011921.437353-4-ricarkol@google.com --- arch/arm64/kvm/vgic/vgic-kvm-device.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/arch/arm64/kvm/vgic/vgic-kvm-device.c b/arch/arm64/kvm/vgic/vgic-kvm-device.c index cc0ad227b380..08ae34b1a986 100644 --- a/arch/arm64/kvm/vgic/vgic-kvm-device.c +++ b/arch/arm64/kvm/vgic/vgic-kvm-device.c @@ -79,7 +79,7 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write) { int r = 0; struct vgic_dist *vgic = &kvm->arch.vgic; - phys_addr_t *addr_ptr, alignment; + phys_addr_t *addr_ptr, alignment, size; u64 undef_value = VGIC_ADDR_UNDEF; mutex_lock(&kvm->lock); @@ -88,16 +88,19 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write) r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2); addr_ptr = &vgic->vgic_dist_base; alignment = SZ_4K; + size = KVM_VGIC_V2_DIST_SIZE; break; case KVM_VGIC_V2_ADDR_TYPE_CPU: r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2); addr_ptr = &vgic->vgic_cpu_base; alignment = SZ_4K; + size = KVM_VGIC_V2_CPU_SIZE; break; case KVM_VGIC_V3_ADDR_TYPE_DIST: r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V3); addr_ptr = &vgic->vgic_dist_base; alignment = SZ_64K; + size = KVM_VGIC_V3_DIST_SIZE; break; case KVM_VGIC_V3_ADDR_TYPE_REDIST: { struct vgic_redist_region *rdreg; @@ -162,7 +165,7 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write) goto out; if (write) { - r = vgic_check_ioaddr(kvm, addr_ptr, *addr, alignment); + r = vgic_check_iorange(kvm, *addr_ptr, *addr, alignment, size); if (!r) *addr_ptr = *addr; } else { -- cgit v1.2.3 From 2ec02f6c64f043a249850c835ca7975c3a155d8b Mon Sep 17 00:00:00 2001 From: Ricardo Koller Date: Mon, 4 Oct 2021 18:19:14 -0700 Subject: KVM: arm64: vgic-v3: Check ITS region is not above the VM IPA size Verify that the ITS region does not extend beyond the VM-specified IPA range (phys_size). base + size > phys_size AND base < phys_size Add the missing check into vgic_its_set_attr() which is called when setting the region. Reviewed-by: Eric Auger Signed-off-by: Ricardo Koller Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211005011921.437353-5-ricarkol@google.com --- arch/arm64/kvm/vgic/vgic-its.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c index 61728c543eb9..ad55bb8cd30f 100644 --- a/arch/arm64/kvm/vgic/vgic-its.c +++ b/arch/arm64/kvm/vgic/vgic-its.c @@ -2710,8 +2710,8 @@ static int vgic_its_set_attr(struct kvm_device *dev, if (copy_from_user(&addr, uaddr, sizeof(addr))) return -EFAULT; - ret = vgic_check_ioaddr(dev->kvm, &its->vgic_its_base, - addr, SZ_64K); + ret = vgic_check_iorange(dev->kvm, its->vgic_its_base, + addr, SZ_64K, KVM_VGIC_V3_ITS_SIZE); if (ret) return ret; -- cgit v1.2.3 From 96e903896969679104c7fef2c776ed1b5b09584f Mon Sep 17 00:00:00 2001 From: Ricardo Koller Date: Mon, 4 Oct 2021 18:19:15 -0700 Subject: KVM: arm64: vgic: Drop vgic_check_ioaddr() There are no more users of vgic_check_ioaddr(). Move its checks to vgic_check_iorange() and then remove it. 
Signed-off-by: Ricardo Koller Reviewed-by: Eric Auger Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211005011921.437353-6-ricarkol@google.com --- arch/arm64/kvm/vgic/vgic-kvm-device.c | 26 ++++---------------------- arch/arm64/kvm/vgic/vgic.h | 3 --- 2 files changed, 4 insertions(+), 25 deletions(-) diff --git a/arch/arm64/kvm/vgic/vgic-kvm-device.c b/arch/arm64/kvm/vgic/vgic-kvm-device.c index 08ae34b1a986..0d000d2fe8d2 100644 --- a/arch/arm64/kvm/vgic/vgic-kvm-device.c +++ b/arch/arm64/kvm/vgic/vgic-kvm-device.c @@ -14,38 +14,20 @@ /* common helpers */ -int vgic_check_ioaddr(struct kvm *kvm, phys_addr_t *ioaddr, - phys_addr_t addr, phys_addr_t alignment) -{ - if (addr & ~kvm_phys_mask(kvm)) - return -E2BIG; - - if (!IS_ALIGNED(addr, alignment)) - return -EINVAL; - - if (!IS_VGIC_ADDR_UNDEF(*ioaddr)) - return -EEXIST; - - return 0; -} - int vgic_check_iorange(struct kvm *kvm, phys_addr_t ioaddr, phys_addr_t addr, phys_addr_t alignment, phys_addr_t size) { - int ret; - - ret = vgic_check_ioaddr(kvm, &ioaddr, addr, alignment); - if (ret) - return ret; + if (!IS_VGIC_ADDR_UNDEF(ioaddr)) + return -EEXIST; - if (!IS_ALIGNED(size, alignment)) + if (!IS_ALIGNED(addr, alignment) || !IS_ALIGNED(size, alignment)) return -EINVAL; if (addr + size < addr) return -EINVAL; - if (addr + size > kvm_phys_size(kvm)) + if (addr & ~kvm_phys_mask(kvm) || addr + size > kvm_phys_size(kvm)) return -E2BIG; return 0; diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h index 4be01c38e8f1..3fd6c86a7ef3 100644 --- a/arch/arm64/kvm/vgic/vgic.h +++ b/arch/arm64/kvm/vgic/vgic.h @@ -172,9 +172,6 @@ void vgic_kick_vcpus(struct kvm *kvm); void vgic_irq_handle_resampling(struct vgic_irq *irq, bool lr_deactivated, bool lr_pending); -int vgic_check_ioaddr(struct kvm *kvm, phys_addr_t *ioaddr, - phys_addr_t addr, phys_addr_t alignment); - int vgic_check_iorange(struct kvm *kvm, phys_addr_t ioaddr, phys_addr_t addr, phys_addr_t alignment, phys_addr_t size); -- cgit v1.2.3 From 3f4db37e203b0562d9ebae575af13ea159fbd077 Mon Sep 17 00:00:00 2001 From: Ricardo Koller Date: Mon, 4 Oct 2021 18:19:16 -0700 Subject: KVM: arm64: selftests: Make vgic_init gic version agnostic As a preparation for the next commits which will add some tests for GICv2, make aarch64/vgic_init GIC version agnostic. Add a new generic run_tests function(gic_dev_type) that starts all applicable tests using GICv3 or GICv2. GICv2 tests are attempted if GICv3 is not available in the system. There are currently no GICv2 tests, but the test passes now in GICv2 systems. 
Reviewed-by: Eric Auger Signed-off-by: Ricardo Koller Reviewed-by: Andrew Jones Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211005011921.437353-7-ricarkol@google.com --- tools/testing/selftests/kvm/aarch64/vgic_init.c | 156 +++++++++++++++--------- 1 file changed, 95 insertions(+), 61 deletions(-) diff --git a/tools/testing/selftests/kvm/aarch64/vgic_init.c b/tools/testing/selftests/kvm/aarch64/vgic_init.c index 623f31a14326..896a29f2503d 100644 --- a/tools/testing/selftests/kvm/aarch64/vgic_init.c +++ b/tools/testing/selftests/kvm/aarch64/vgic_init.c @@ -22,6 +22,9 @@ #define GICR_TYPER 0x8 +#define VGIC_DEV_IS_V2(_d) ((_d) == KVM_DEV_TYPE_ARM_VGIC_V2) +#define VGIC_DEV_IS_V3(_d) ((_d) == KVM_DEV_TYPE_ARM_VGIC_V3) + struct vm_gic { struct kvm_vm *vm; int gic_fd; @@ -30,8 +33,8 @@ struct vm_gic { static int max_ipa_bits; /* helper to access a redistributor register */ -static int access_redist_reg(int gicv3_fd, int vcpu, int offset, - uint32_t *val, bool write) +static int access_v3_redist_reg(int gicv3_fd, int vcpu, int offset, + uint32_t *val, bool write) { uint64_t attr = REG_OFFSET(vcpu, offset); @@ -58,7 +61,7 @@ static int run_vcpu(struct kvm_vm *vm, uint32_t vcpuid) return 0; } -static struct vm_gic vm_gic_create(void) +static struct vm_gic vm_gic_v3_create(void) { struct vm_gic v; @@ -80,7 +83,7 @@ static void vm_gic_destroy(struct vm_gic *v) * device gets created, a legacy RDIST region is set at @0x0 * and a DIST region is set @0x60000 */ -static void subtest_dist_rdist(struct vm_gic *v) +static void subtest_v3_dist_rdist(struct vm_gic *v) { int ret; uint64_t addr; @@ -145,7 +148,7 @@ static void subtest_dist_rdist(struct vm_gic *v) } /* Test the new REDIST region API */ -static void subtest_redist_regions(struct vm_gic *v) +static void subtest_v3_redist_regions(struct vm_gic *v) { uint64_t addr, expected_addr; int ret; @@ -249,7 +252,7 @@ static void subtest_redist_regions(struct vm_gic *v) * VGIC KVM device is created and initialized before the secondary CPUs * get created */ -static void test_vgic_then_vcpus(void) +static void test_v3_vgic_then_vcpus(uint32_t gic_dev_type) { struct vm_gic v; int ret, i; @@ -257,7 +260,7 @@ static void test_vgic_then_vcpus(void) v.vm = vm_create_default(0, 0, guest_code); v.gic_fd = kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_V3, false); - subtest_dist_rdist(&v); + subtest_v3_dist_rdist(&v); /* Add the rest of the VCPUs */ for (i = 1; i < NR_VCPUS; ++i) @@ -270,14 +273,14 @@ static void test_vgic_then_vcpus(void) } /* All the VCPUs are created before the VGIC KVM device gets initialized */ -static void test_vcpus_then_vgic(void) +static void test_v3_vcpus_then_vgic(uint32_t gic_dev_type) { struct vm_gic v; int ret; - v = vm_gic_create(); + v = vm_gic_v3_create(); - subtest_dist_rdist(&v); + subtest_v3_dist_rdist(&v); ret = run_vcpu(v.vm, 3); TEST_ASSERT(ret == -EINVAL, "dist/rdist overlap detected on 1st vcpu run"); @@ -285,15 +288,15 @@ static void test_vcpus_then_vgic(void) vm_gic_destroy(&v); } -static void test_new_redist_regions(void) +static void test_v3_new_redist_regions(void) { void *dummy = NULL; struct vm_gic v; uint64_t addr; int ret; - v = vm_gic_create(); - subtest_redist_regions(&v); + v = vm_gic_v3_create(); + subtest_v3_redist_regions(&v); kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL, KVM_DEV_ARM_VGIC_CTRL_INIT, NULL, true); @@ -303,8 +306,8 @@ static void test_new_redist_regions(void) /* step2 */ - v = vm_gic_create(); - subtest_redist_regions(&v); + v = vm_gic_v3_create(); + 
subtest_v3_redist_regions(&v); addr = REDIST_REGION_ATTR_ADDR(1, 0x280000, 0, 2); kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, @@ -317,8 +320,8 @@ static void test_new_redist_regions(void) /* step 3 */ - v = vm_gic_create(); - subtest_redist_regions(&v); + v = vm_gic_v3_create(); + subtest_v3_redist_regions(&v); _kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, dummy, true); @@ -338,7 +341,7 @@ static void test_new_redist_regions(void) vm_gic_destroy(&v); } -static void test_typer_accesses(void) +static void test_v3_typer_accesses(void) { struct vm_gic v; uint64_t addr; @@ -351,12 +354,12 @@ static void test_typer_accesses(void) vm_vcpu_add_default(v.vm, 3, guest_code); - ret = access_redist_reg(v.gic_fd, 1, GICR_TYPER, &val, false); + ret = access_v3_redist_reg(v.gic_fd, 1, GICR_TYPER, &val, false); TEST_ASSERT(ret && errno == EINVAL, "attempting to read GICR_TYPER of non created vcpu"); vm_vcpu_add_default(v.vm, 1, guest_code); - ret = access_redist_reg(v.gic_fd, 1, GICR_TYPER, &val, false); + ret = access_v3_redist_reg(v.gic_fd, 1, GICR_TYPER, &val, false); TEST_ASSERT(ret && errno == EBUSY, "read GICR_TYPER before GIC initialized"); vm_vcpu_add_default(v.vm, 2, guest_code); @@ -365,7 +368,7 @@ static void test_typer_accesses(void) KVM_DEV_ARM_VGIC_CTRL_INIT, NULL, true); for (i = 0; i < NR_VCPUS ; i++) { - ret = access_redist_reg(v.gic_fd, 0, GICR_TYPER, &val, false); + ret = access_v3_redist_reg(v.gic_fd, 0, GICR_TYPER, &val, false); TEST_ASSERT(!ret && !val, "read GICR_TYPER before rdist region setting"); } @@ -374,10 +377,10 @@ static void test_typer_accesses(void) KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true); /* The 2 first rdists should be put there (vcpu 0 and 3) */ - ret = access_redist_reg(v.gic_fd, 0, GICR_TYPER, &val, false); + ret = access_v3_redist_reg(v.gic_fd, 0, GICR_TYPER, &val, false); TEST_ASSERT(!ret && !val, "read typer of rdist #0"); - ret = access_redist_reg(v.gic_fd, 3, GICR_TYPER, &val, false); + ret = access_v3_redist_reg(v.gic_fd, 3, GICR_TYPER, &val, false); TEST_ASSERT(!ret && val == 0x310, "read typer of rdist #1"); addr = REDIST_REGION_ATTR_ADDR(10, 0x100000, 0, 1); @@ -385,11 +388,11 @@ static void test_typer_accesses(void) KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true); TEST_ASSERT(ret && errno == EINVAL, "collision with previous rdist region"); - ret = access_redist_reg(v.gic_fd, 1, GICR_TYPER, &val, false); + ret = access_v3_redist_reg(v.gic_fd, 1, GICR_TYPER, &val, false); TEST_ASSERT(!ret && val == 0x100, "no redist region attached to vcpu #1 yet, last cannot be returned"); - ret = access_redist_reg(v.gic_fd, 2, GICR_TYPER, &val, false); + ret = access_v3_redist_reg(v.gic_fd, 2, GICR_TYPER, &val, false); TEST_ASSERT(!ret && val == 0x200, "no redist region attached to vcpu #2, last cannot be returned"); @@ -397,10 +400,10 @@ static void test_typer_accesses(void) kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true); - ret = access_redist_reg(v.gic_fd, 1, GICR_TYPER, &val, false); + ret = access_v3_redist_reg(v.gic_fd, 1, GICR_TYPER, &val, false); TEST_ASSERT(!ret && val == 0x100, "read typer of rdist #1"); - ret = access_redist_reg(v.gic_fd, 2, GICR_TYPER, &val, false); + ret = access_v3_redist_reg(v.gic_fd, 2, GICR_TYPER, &val, false); TEST_ASSERT(!ret && val == 0x210, "read typer of rdist #1, last properly returned"); @@ -417,7 +420,7 @@ static void test_typer_accesses(void) * rdist region #2 @0x200000 2 rdist capacity * rdists: 1, 2 */ 
-static void test_last_bit_redist_regions(void) +static void test_v3_last_bit_redist_regions(void) { uint32_t vcpuids[] = { 0, 3, 5, 4, 1, 2 }; struct vm_gic v; @@ -444,29 +447,29 @@ static void test_last_bit_redist_regions(void) kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true); - ret = access_redist_reg(v.gic_fd, 0, GICR_TYPER, &val, false); + ret = access_v3_redist_reg(v.gic_fd, 0, GICR_TYPER, &val, false); TEST_ASSERT(!ret && val == 0x000, "read typer of rdist #0"); - ret = access_redist_reg(v.gic_fd, 1, GICR_TYPER, &val, false); + ret = access_v3_redist_reg(v.gic_fd, 1, GICR_TYPER, &val, false); TEST_ASSERT(!ret && val == 0x100, "read typer of rdist #1"); - ret = access_redist_reg(v.gic_fd, 2, GICR_TYPER, &val, false); + ret = access_v3_redist_reg(v.gic_fd, 2, GICR_TYPER, &val, false); TEST_ASSERT(!ret && val == 0x200, "read typer of rdist #2"); - ret = access_redist_reg(v.gic_fd, 3, GICR_TYPER, &val, false); + ret = access_v3_redist_reg(v.gic_fd, 3, GICR_TYPER, &val, false); TEST_ASSERT(!ret && val == 0x310, "read typer of rdist #3"); - ret = access_redist_reg(v.gic_fd, 5, GICR_TYPER, &val, false); + ret = access_v3_redist_reg(v.gic_fd, 5, GICR_TYPER, &val, false); TEST_ASSERT(!ret && val == 0x500, "read typer of rdist #5"); - ret = access_redist_reg(v.gic_fd, 4, GICR_TYPER, &val, false); + ret = access_v3_redist_reg(v.gic_fd, 4, GICR_TYPER, &val, false); TEST_ASSERT(!ret && val == 0x410, "read typer of rdist #4"); vm_gic_destroy(&v); } /* Test last bit with legacy region */ -static void test_last_bit_single_rdist(void) +static void test_v3_last_bit_single_rdist(void) { uint32_t vcpuids[] = { 0, 3, 5, 4, 1, 2 }; struct vm_gic v; @@ -485,28 +488,32 @@ static void test_last_bit_single_rdist(void) kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, KVM_VGIC_V3_ADDR_TYPE_REDIST, &addr, true); - ret = access_redist_reg(v.gic_fd, 0, GICR_TYPER, &val, false); + ret = access_v3_redist_reg(v.gic_fd, 0, GICR_TYPER, &val, false); TEST_ASSERT(!ret && val == 0x000, "read typer of rdist #0"); - ret = access_redist_reg(v.gic_fd, 3, GICR_TYPER, &val, false); + ret = access_v3_redist_reg(v.gic_fd, 3, GICR_TYPER, &val, false); TEST_ASSERT(!ret && val == 0x300, "read typer of rdist #1"); - ret = access_redist_reg(v.gic_fd, 5, GICR_TYPER, &val, false); + ret = access_v3_redist_reg(v.gic_fd, 5, GICR_TYPER, &val, false); TEST_ASSERT(!ret && val == 0x500, "read typer of rdist #2"); - ret = access_redist_reg(v.gic_fd, 1, GICR_TYPER, &val, false); + ret = access_v3_redist_reg(v.gic_fd, 1, GICR_TYPER, &val, false); TEST_ASSERT(!ret && val == 0x100, "read typer of rdist #3"); - ret = access_redist_reg(v.gic_fd, 2, GICR_TYPER, &val, false); + ret = access_v3_redist_reg(v.gic_fd, 2, GICR_TYPER, &val, false); TEST_ASSERT(!ret && val == 0x210, "read typer of rdist #3"); vm_gic_destroy(&v); } -void test_kvm_device(void) +/* + * Returns 0 if it's possible to create GIC device of a given type (V2 or V3). 
+ */ +int test_kvm_device(uint32_t gic_dev_type) { struct vm_gic v; int ret, fd; + uint32_t other; v.vm = vm_create_default_with_vcpus(NR_VCPUS, 0, 0, guest_code, NULL); @@ -514,38 +521,65 @@ void test_kvm_device(void) ret = _kvm_create_device(v.vm, 0, true, &fd); TEST_ASSERT(ret && errno == ENODEV, "unsupported device"); - /* trial mode with VGIC_V3 device */ - ret = _kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_V3, true, &fd); - if (ret) { - print_skip("GICv3 not supported"); - exit(KSFT_SKIP); - } - v.gic_fd = kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_V3, false); + /* trial mode */ + ret = _kvm_create_device(v.vm, gic_dev_type, true, &fd); + if (ret) + return ret; + v.gic_fd = kvm_create_device(v.vm, gic_dev_type, false); + + ret = _kvm_create_device(v.vm, gic_dev_type, false, &fd); + TEST_ASSERT(ret && errno == EEXIST, "create GIC device twice"); - ret = _kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_V3, false, &fd); - TEST_ASSERT(ret && errno == EEXIST, "create GICv3 device twice"); + kvm_create_device(v.vm, gic_dev_type, true); - kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_V3, true); + /* try to create the other gic_dev_type */ + other = VGIC_DEV_IS_V2(gic_dev_type) ? KVM_DEV_TYPE_ARM_VGIC_V3 + : KVM_DEV_TYPE_ARM_VGIC_V2; - if (!_kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_V2, true, &fd)) { - ret = _kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_V2, false, &fd); - TEST_ASSERT(ret && errno == EINVAL, "create GICv2 while v3 exists"); + if (!_kvm_create_device(v.vm, other, true, &fd)) { + ret = _kvm_create_device(v.vm, other, false, &fd); + TEST_ASSERT(ret && errno == EINVAL, + "create GIC device while other version exists"); } vm_gic_destroy(&v); + + return 0; +} + +void run_tests(uint32_t gic_dev_type) +{ + if (VGIC_DEV_IS_V3(gic_dev_type)) { + test_v3_vcpus_then_vgic(gic_dev_type); + test_v3_vgic_then_vcpus(gic_dev_type); + test_v3_new_redist_regions(); + test_v3_typer_accesses(); + test_v3_last_bit_redist_regions(); + test_v3_last_bit_single_rdist(); + } } int main(int ac, char **av) { + int ret; + max_ipa_bits = kvm_check_cap(KVM_CAP_ARM_VM_IPA_SIZE); - test_kvm_device(); - test_vcpus_then_vgic(); - test_vgic_then_vcpus(); - test_new_redist_regions(); - test_typer_accesses(); - test_last_bit_redist_regions(); - test_last_bit_single_rdist(); + ret = test_kvm_device(KVM_DEV_TYPE_ARM_VGIC_V3); + if (!ret) { + pr_info("Running GIC_v3 tests.\n"); + run_tests(KVM_DEV_TYPE_ARM_VGIC_V3); + return 0; + } + + ret = test_kvm_device(KVM_DEV_TYPE_ARM_VGIC_V2); + if (!ret) { + pr_info("Running GIC_v2 tests.\n"); + run_tests(KVM_DEV_TYPE_ARM_VGIC_V2); + return 0; + } + print_skip("No GICv2 nor GICv3 support"); + exit(KSFT_SKIP); return 0; } -- cgit v1.2.3 From 46fb941bc04d3541776c09c2bf641e7f34e62a01 Mon Sep 17 00:00:00 2001 From: Ricardo Koller Date: Mon, 4 Oct 2021 18:19:17 -0700 Subject: KVM: arm64: selftests: Make vgic_init/vm_gic_create version agnostic Make vm_gic_create GIC version agnostic in the vgic_init test. Also add a nr_vcpus arg into it instead of defaulting to NR_VCPUS. No functional change. 
Reviewed-by: Eric Auger Signed-off-by: Ricardo Koller Reviewed-by: Andrew Jones Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211005011921.437353-8-ricarkol@google.com --- tools/testing/selftests/kvm/aarch64/vgic_init.c | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/tools/testing/selftests/kvm/aarch64/vgic_init.c b/tools/testing/selftests/kvm/aarch64/vgic_init.c index 896a29f2503d..7521dc80cf23 100644 --- a/tools/testing/selftests/kvm/aarch64/vgic_init.c +++ b/tools/testing/selftests/kvm/aarch64/vgic_init.c @@ -28,6 +28,7 @@ struct vm_gic { struct kvm_vm *vm; int gic_fd; + uint32_t gic_dev_type; }; static int max_ipa_bits; @@ -61,12 +62,13 @@ static int run_vcpu(struct kvm_vm *vm, uint32_t vcpuid) return 0; } -static struct vm_gic vm_gic_v3_create(void) +static struct vm_gic vm_gic_create_with_vcpus(uint32_t gic_dev_type, uint32_t nr_vcpus) { struct vm_gic v; - v.vm = vm_create_default_with_vcpus(NR_VCPUS, 0, 0, guest_code, NULL); - v.gic_fd = kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_V3, false); + v.gic_dev_type = gic_dev_type; + v.vm = vm_create_default_with_vcpus(nr_vcpus, 0, 0, guest_code, NULL); + v.gic_fd = kvm_create_device(v.vm, gic_dev_type, false); return v; } @@ -257,8 +259,7 @@ static void test_v3_vgic_then_vcpus(uint32_t gic_dev_type) struct vm_gic v; int ret, i; - v.vm = vm_create_default(0, 0, guest_code); - v.gic_fd = kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_V3, false); + v = vm_gic_create_with_vcpus(gic_dev_type, 1); subtest_v3_dist_rdist(&v); @@ -278,7 +279,7 @@ static void test_v3_vcpus_then_vgic(uint32_t gic_dev_type) struct vm_gic v; int ret; - v = vm_gic_v3_create(); + v = vm_gic_create_with_vcpus(gic_dev_type, NR_VCPUS); subtest_v3_dist_rdist(&v); @@ -295,7 +296,7 @@ static void test_v3_new_redist_regions(void) uint64_t addr; int ret; - v = vm_gic_v3_create(); + v = vm_gic_create_with_vcpus(KVM_DEV_TYPE_ARM_VGIC_V3, NR_VCPUS); subtest_v3_redist_regions(&v); kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL, KVM_DEV_ARM_VGIC_CTRL_INIT, NULL, true); @@ -306,7 +307,7 @@ static void test_v3_new_redist_regions(void) /* step2 */ - v = vm_gic_v3_create(); + v = vm_gic_create_with_vcpus(KVM_DEV_TYPE_ARM_VGIC_V3, NR_VCPUS); subtest_v3_redist_regions(&v); addr = REDIST_REGION_ATTR_ADDR(1, 0x280000, 0, 2); @@ -320,7 +321,7 @@ static void test_v3_new_redist_regions(void) /* step 3 */ - v = vm_gic_v3_create(); + v = vm_gic_create_with_vcpus(KVM_DEV_TYPE_ARM_VGIC_V3, NR_VCPUS); subtest_v3_redist_regions(&v); _kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, -- cgit v1.2.3 From c44df5f9ff31daaa72b3a673422d5cca03a1fd02 Mon Sep 17 00:00:00 2001 From: Ricardo Koller Date: Mon, 4 Oct 2021 18:19:18 -0700 Subject: KVM: arm64: selftests: Add some tests for GICv2 in vgic_init Add some GICv2 tests: general KVM device tests and DIST/CPUIF overlap tests. Do this by making test_vcpus_then_vgic and test_vgic_then_vcpus in vgic_init GIC version agnostic. 
Signed-off-by: Ricardo Koller Reviewed-by: Eric Auger Reviewed-by: Andrew Jones Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211005011921.437353-9-ricarkol@google.com --- tools/testing/selftests/kvm/aarch64/vgic_init.c | 111 +++++++++++++++++------- 1 file changed, 79 insertions(+), 32 deletions(-) diff --git a/tools/testing/selftests/kvm/aarch64/vgic_init.c b/tools/testing/selftests/kvm/aarch64/vgic_init.c index 7521dc80cf23..cb69e195ad1d 100644 --- a/tools/testing/selftests/kvm/aarch64/vgic_init.c +++ b/tools/testing/selftests/kvm/aarch64/vgic_init.c @@ -79,74 +79,120 @@ static void vm_gic_destroy(struct vm_gic *v) kvm_vm_free(v->vm); } +struct vgic_region_attr { + uint64_t attr; + uint64_t size; + uint64_t alignment; +}; + +struct vgic_region_attr gic_v3_dist_region = { + .attr = KVM_VGIC_V3_ADDR_TYPE_DIST, + .size = 0x10000, + .alignment = 0x10000, +}; + +struct vgic_region_attr gic_v3_redist_region = { + .attr = KVM_VGIC_V3_ADDR_TYPE_REDIST, + .size = NR_VCPUS * 0x20000, + .alignment = 0x10000, +}; + +struct vgic_region_attr gic_v2_dist_region = { + .attr = KVM_VGIC_V2_ADDR_TYPE_DIST, + .size = 0x1000, + .alignment = 0x1000, +}; + +struct vgic_region_attr gic_v2_cpu_region = { + .attr = KVM_VGIC_V2_ADDR_TYPE_CPU, + .size = 0x2000, + .alignment = 0x1000, +}; + /** - * Helper routine that performs KVM device tests in general and - * especially ARM_VGIC_V3 ones. Eventually the ARM_VGIC_V3 - * device gets created, a legacy RDIST region is set at @0x0 - * and a DIST region is set @0x60000 + * Helper routine that performs KVM device tests in general. Eventually the + * ARM_VGIC (GICv2 or GICv3) device gets created with an overlapping + * DIST/REDIST (or DIST/CPUIF for GICv2). Assumption is 4 vcpus are going to be + * used hence the overlap. In the case of GICv3, A RDIST region is set at @0x0 + * and a DIST region is set @0x70000. The GICv2 case sets a CPUIF @0x0 and a + * DIST region @0x1000. */ -static void subtest_v3_dist_rdist(struct vm_gic *v) +static void subtest_dist_rdist(struct vm_gic *v) { int ret; uint64_t addr; + struct vgic_region_attr rdist; /* CPU interface in GICv2*/ + struct vgic_region_attr dist; + + rdist = VGIC_DEV_IS_V3(v->gic_dev_type) ? gic_v3_redist_region + : gic_v2_cpu_region; + dist = VGIC_DEV_IS_V3(v->gic_dev_type) ? 
gic_v3_dist_region + : gic_v2_dist_region; /* Check existing group/attributes */ kvm_device_check_attr(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, - KVM_VGIC_V3_ADDR_TYPE_DIST); + dist.attr); kvm_device_check_attr(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, - KVM_VGIC_V3_ADDR_TYPE_REDIST); + rdist.attr); /* check non existing attribute */ - ret = _kvm_device_check_attr(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, 0); + ret = _kvm_device_check_attr(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, -1); TEST_ASSERT(ret && errno == ENXIO, "attribute not supported"); /* misaligned DIST and REDIST address settings */ - addr = 0x1000; + addr = dist.alignment / 0x10; ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, - KVM_VGIC_V3_ADDR_TYPE_DIST, &addr, true); - TEST_ASSERT(ret && errno == EINVAL, "GICv3 dist base not 64kB aligned"); + dist.attr, &addr, true); + TEST_ASSERT(ret && errno == EINVAL, "GIC dist base not aligned"); + addr = rdist.alignment / 0x10; ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, - KVM_VGIC_V3_ADDR_TYPE_REDIST, &addr, true); - TEST_ASSERT(ret && errno == EINVAL, "GICv3 redist base not 64kB aligned"); + rdist.attr, &addr, true); + TEST_ASSERT(ret && errno == EINVAL, "GIC redist/cpu base not aligned"); /* out of range address */ if (max_ipa_bits) { addr = 1ULL << max_ipa_bits; ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, - KVM_VGIC_V3_ADDR_TYPE_DIST, &addr, true); + dist.attr, &addr, true); TEST_ASSERT(ret && errno == E2BIG, "dist address beyond IPA limit"); ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, - KVM_VGIC_V3_ADDR_TYPE_REDIST, &addr, true); + rdist.attr, &addr, true); TEST_ASSERT(ret && errno == E2BIG, "redist address beyond IPA limit"); } /* set REDIST base address @0x0*/ addr = 0x00000; kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, - KVM_VGIC_V3_ADDR_TYPE_REDIST, &addr, true); + rdist.attr, &addr, true); /* Attempt to create a second legacy redistributor region */ addr = 0xE0000; ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, - KVM_VGIC_V3_ADDR_TYPE_REDIST, &addr, true); - TEST_ASSERT(ret && errno == EEXIST, "GICv3 redist base set again"); + rdist.attr, &addr, true); + TEST_ASSERT(ret && errno == EEXIST, "GIC redist base set again"); - /* Attempt to mix legacy and new redistributor regions */ - addr = REDIST_REGION_ATTR_ADDR(NR_VCPUS, 0x100000, 0, 0); - ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, - KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true); - TEST_ASSERT(ret && errno == EINVAL, "attempt to mix GICv3 REDIST and REDIST_REGION"); + ret = _kvm_device_check_attr(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, + KVM_VGIC_V3_ADDR_TYPE_REDIST); + if (!ret) { + /* Attempt to mix legacy and new redistributor regions */ + addr = REDIST_REGION_ATTR_ADDR(NR_VCPUS, 0x100000, 0, 0); + ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, + KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, + &addr, true); + TEST_ASSERT(ret && errno == EINVAL, + "attempt to mix GICv3 REDIST and REDIST_REGION"); + } /* * Set overlapping DIST / REDIST, cannot be detected here. Will be detected * on first vcpu run instead. 
*/ - addr = 3 * 2 * 0x10000; - kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, KVM_VGIC_V3_ADDR_TYPE_DIST, - &addr, true); + addr = rdist.size - rdist.alignment; + kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, + dist.attr, &addr, true); } /* Test the new REDIST region API */ @@ -254,14 +300,14 @@ static void subtest_v3_redist_regions(struct vm_gic *v) * VGIC KVM device is created and initialized before the secondary CPUs * get created */ -static void test_v3_vgic_then_vcpus(uint32_t gic_dev_type) +static void test_vgic_then_vcpus(uint32_t gic_dev_type) { struct vm_gic v; int ret, i; v = vm_gic_create_with_vcpus(gic_dev_type, 1); - subtest_v3_dist_rdist(&v); + subtest_dist_rdist(&v); /* Add the rest of the VCPUs */ for (i = 1; i < NR_VCPUS; ++i) @@ -274,14 +320,14 @@ static void test_v3_vgic_then_vcpus(uint32_t gic_dev_type) } /* All the VCPUs are created before the VGIC KVM device gets initialized */ -static void test_v3_vcpus_then_vgic(uint32_t gic_dev_type) +static void test_vcpus_then_vgic(uint32_t gic_dev_type) { struct vm_gic v; int ret; v = vm_gic_create_with_vcpus(gic_dev_type, NR_VCPUS); - subtest_v3_dist_rdist(&v); + subtest_dist_rdist(&v); ret = run_vcpu(v.vm, 3); TEST_ASSERT(ret == -EINVAL, "dist/rdist overlap detected on 1st vcpu run"); @@ -550,9 +596,10 @@ int test_kvm_device(uint32_t gic_dev_type) void run_tests(uint32_t gic_dev_type) { + test_vcpus_then_vgic(gic_dev_type); + test_vgic_then_vcpus(gic_dev_type); + if (VGIC_DEV_IS_V3(gic_dev_type)) { - test_v3_vcpus_then_vgic(gic_dev_type); - test_v3_vgic_then_vcpus(gic_dev_type); test_v3_new_redist_regions(); test_v3_typer_accesses(); test_v3_last_bit_redist_regions(); -- cgit v1.2.3 From 2dcd9aa1c3a5d3c90047d67efd08f0518f915449 Mon Sep 17 00:00:00 2001 From: Ricardo Koller Date: Mon, 4 Oct 2021 18:19:19 -0700 Subject: KVM: arm64: selftests: Add tests for GIC redist/cpuif partially above IPA range Add tests for checking that KVM returns the right error when trying to set GICv2 CPU interfaces or GICv3 Redistributors partially above the addressable IPA range. Also tighten the IPA range by replacing KVM_CAP_ARM_VM_IPA_SIZE with the IPA range currently configured for the guest (i.e., the default). The check for the GICv3 redistributor created using the REDIST legacy API is not sufficient as this new test only checks the check done using vcpus already created when setting the base. The next commit will add the missing test which verifies that the KVM check is done at first vcpu run. 
Signed-off-by: Ricardo Koller Reviewed-by: Eric Auger Reviewed-by: Andrew Jones Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211005011921.437353-10-ricarkol@google.com --- tools/testing/selftests/kvm/aarch64/vgic_init.c | 38 +++++++++++++++++-------- 1 file changed, 26 insertions(+), 12 deletions(-) diff --git a/tools/testing/selftests/kvm/aarch64/vgic_init.c b/tools/testing/selftests/kvm/aarch64/vgic_init.c index cb69e195ad1d..eadd448b3a96 100644 --- a/tools/testing/selftests/kvm/aarch64/vgic_init.c +++ b/tools/testing/selftests/kvm/aarch64/vgic_init.c @@ -31,7 +31,7 @@ struct vm_gic { uint32_t gic_dev_type; }; -static int max_ipa_bits; +static uint64_t max_phys_size; /* helper to access a redistributor register */ static int access_v3_redist_reg(int gicv3_fd, int vcpu, int offset, @@ -152,16 +152,21 @@ static void subtest_dist_rdist(struct vm_gic *v) TEST_ASSERT(ret && errno == EINVAL, "GIC redist/cpu base not aligned"); /* out of range address */ - if (max_ipa_bits) { - addr = 1ULL << max_ipa_bits; - ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, - dist.attr, &addr, true); - TEST_ASSERT(ret && errno == E2BIG, "dist address beyond IPA limit"); + addr = max_phys_size; + ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, + dist.attr, &addr, true); + TEST_ASSERT(ret && errno == E2BIG, "dist address beyond IPA limit"); - ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, - rdist.attr, &addr, true); - TEST_ASSERT(ret && errno == E2BIG, "redist address beyond IPA limit"); - } + ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, + rdist.attr, &addr, true); + TEST_ASSERT(ret && errno == E2BIG, "redist address beyond IPA limit"); + + /* Space for half a rdist (a rdist is: 2 * rdist.alignment). */ + addr = max_phys_size - dist.alignment; + ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, + rdist.attr, &addr, true); + TEST_ASSERT(ret && errno == E2BIG, + "half of the redist is beyond IPA limit"); /* set REDIST base address @0x0*/ addr = 0x00000; @@ -250,12 +255,19 @@ static void subtest_v3_redist_regions(struct vm_gic *v) kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true); - addr = REDIST_REGION_ATTR_ADDR(1, 1ULL << max_ipa_bits, 0, 2); + addr = REDIST_REGION_ATTR_ADDR(1, max_phys_size, 0, 2); ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true); TEST_ASSERT(ret && errno == E2BIG, "register redist region with base address beyond IPA range"); + /* The last redist is above the pa range. 
*/ + addr = REDIST_REGION_ATTR_ADDR(2, max_phys_size - 0x30000, 0, 2); + ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, + KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true); + TEST_ASSERT(ret && errno == E2BIG, + "register redist region with top address beyond IPA range"); + addr = 0x260000; ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, KVM_VGIC_V3_ADDR_TYPE_REDIST, &addr, true); @@ -610,8 +622,10 @@ void run_tests(uint32_t gic_dev_type) int main(int ac, char **av) { int ret; + int pa_bits; - max_ipa_bits = kvm_check_cap(KVM_CAP_ARM_VM_IPA_SIZE); + pa_bits = vm_guest_mode_params[VM_MODE_DEFAULT].pa_bits; + max_phys_size = 1ULL << pa_bits; ret = test_kvm_device(KVM_DEV_TYPE_ARM_VGIC_V3); if (!ret) { -- cgit v1.2.3 From 1883458638979531fc4fcbc26d15fec3e51e1734 Mon Sep 17 00:00:00 2001 From: Ricardo Koller Date: Mon, 4 Oct 2021 18:19:20 -0700 Subject: KVM: arm64: selftests: Add test for legacy GICv3 REDIST base partially above IPA range Add a new test into vgic_init which checks that the first vcpu fails to run if there is not sufficient REDIST space below the addressable IPA range. This only applies to the KVM_VGIC_V3_ADDR_TYPE_REDIST legacy API as the required REDIST space is not know when setting the DIST region. Note that using the REDIST_REGION API results in a different check at first vcpu run: that the number of redist regions is enough for all vcpus. And there is already a test for that case in, the first step of test_v3_new_redist_regions. Reviewed-by: Eric Auger Signed-off-by: Ricardo Koller Reviewed-by: Andrew Jones Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211005011921.437353-11-ricarkol@google.com --- tools/testing/selftests/kvm/aarch64/vgic_init.c | 34 +++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/tools/testing/selftests/kvm/aarch64/vgic_init.c b/tools/testing/selftests/kvm/aarch64/vgic_init.c index eadd448b3a96..80be1940d2ad 100644 --- a/tools/testing/selftests/kvm/aarch64/vgic_init.c +++ b/tools/testing/selftests/kvm/aarch64/vgic_init.c @@ -565,6 +565,39 @@ static void test_v3_last_bit_single_rdist(void) vm_gic_destroy(&v); } +/* Uses the legacy REDIST region API. */ +static void test_v3_redist_ipa_range_check_at_vcpu_run(void) +{ + struct vm_gic v; + int ret, i; + uint64_t addr; + + v = vm_gic_create_with_vcpus(KVM_DEV_TYPE_ARM_VGIC_V3, 1); + + /* Set space for 3 redists, we have 1 vcpu, so this succeeds. */ + addr = max_phys_size - (3 * 2 * 0x10000); + kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, + KVM_VGIC_V3_ADDR_TYPE_REDIST, &addr, true); + + addr = 0x00000; + kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, + KVM_VGIC_V3_ADDR_TYPE_DIST, &addr, true); + + /* Add the rest of the VCPUs */ + for (i = 1; i < NR_VCPUS; ++i) + vm_vcpu_add_default(v.vm, i, guest_code); + + kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL, + KVM_DEV_ARM_VGIC_CTRL_INIT, NULL, true); + + /* Attempt to run a vcpu without enough redist space. */ + ret = run_vcpu(v.vm, 2); + TEST_ASSERT(ret && errno == EINVAL, + "redist base+size above PA range detected on 1st vcpu run"); + + vm_gic_destroy(&v); +} + /* * Returns 0 if it's possible to create GIC device of a given type (V2 or V3). 
*/ @@ -616,6 +649,7 @@ void run_tests(uint32_t gic_dev_type) test_v3_typer_accesses(); test_v3_last_bit_redist_regions(); test_v3_last_bit_single_rdist(); + test_v3_redist_ipa_range_check_at_vcpu_run(); } } -- cgit v1.2.3 From 3e197f17b23ba7c1a3c7cb1d27f7494444aa42e3 Mon Sep 17 00:00:00 2001 From: Ricardo Koller Date: Mon, 4 Oct 2021 18:19:21 -0700 Subject: KVM: arm64: selftests: Add init ITS device test Add some ITS device init tests: general KVM device tests (address not defined already, address aligned) and tests for the ITS region being within the addressable IPA range. Signed-off-by: Ricardo Koller Reviewed-by: Eric Auger Reviewed-by: Andrew Jones Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211005011921.437353-12-ricarkol@google.com --- tools/testing/selftests/kvm/aarch64/vgic_init.c | 42 +++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/tools/testing/selftests/kvm/aarch64/vgic_init.c b/tools/testing/selftests/kvm/aarch64/vgic_init.c index 80be1940d2ad..c563489ff760 100644 --- a/tools/testing/selftests/kvm/aarch64/vgic_init.c +++ b/tools/testing/selftests/kvm/aarch64/vgic_init.c @@ -598,6 +598,47 @@ static void test_v3_redist_ipa_range_check_at_vcpu_run(void) vm_gic_destroy(&v); } +static void test_v3_its_region(void) +{ + struct vm_gic v; + uint64_t addr; + int its_fd, ret; + + v = vm_gic_create_with_vcpus(KVM_DEV_TYPE_ARM_VGIC_V3, NR_VCPUS); + its_fd = kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_ITS, false); + + addr = 0x401000; + ret = _kvm_device_access(its_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, + KVM_VGIC_ITS_ADDR_TYPE, &addr, true); + TEST_ASSERT(ret && errno == EINVAL, + "ITS region with misaligned address"); + + addr = max_phys_size; + ret = _kvm_device_access(its_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, + KVM_VGIC_ITS_ADDR_TYPE, &addr, true); + TEST_ASSERT(ret && errno == E2BIG, + "register ITS region with base address beyond IPA range"); + + addr = max_phys_size - 0x10000; + ret = _kvm_device_access(its_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, + KVM_VGIC_ITS_ADDR_TYPE, &addr, true); + TEST_ASSERT(ret && errno == E2BIG, + "Half of ITS region is beyond IPA range"); + + /* This one succeeds setting the ITS base */ + addr = 0x400000; + kvm_device_access(its_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, + KVM_VGIC_ITS_ADDR_TYPE, &addr, true); + + addr = 0x300000; + ret = _kvm_device_access(its_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, + KVM_VGIC_ITS_ADDR_TYPE, &addr, true); + TEST_ASSERT(ret && errno == EEXIST, "ITS base set again"); + + close(its_fd); + vm_gic_destroy(&v); +} + /* * Returns 0 if it's possible to create GIC device of a given type (V2 or V3). */ @@ -650,6 +691,7 @@ void run_tests(uint32_t gic_dev_type) test_v3_last_bit_redist_regions(); test_v3_last_bit_single_rdist(); test_v3_redist_ipa_range_check_at_vcpu_run(); + test_v3_its_region(); } } -- cgit v1.2.3 From b6a68b97af23cc75781bed38221ce73144ac2e39 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Fri, 1 Oct 2021 18:05:53 +0100 Subject: KVM: arm64: Allow KVM to be disabled from the command line Although KVM can be compiled out of the kernel, it cannot be disabled at runtime. Allow this possibility by introducing a new mode that will prevent KVM from initialising. This is useful in the (limited) circumstances where you don't want KVM to be available (what is wrong with you?), or when you want to install another hypervisor instead (good luck with that). 
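Since kvm-arm.mode=none makes kvm_arch_init() return -ENODEV, KVM's /dev/kvm device is never registered. A small probe for this condition is sketched below; treating a failed open() of /dev/kvm as "KVM disabled or absent" is an assumption about typical setups, and the exact errno depends on how the device node is managed::

  /*
   * Probe for KVM availability. Assumption: with kvm-arm.mode=none the
   * misc device is never registered, so /dev/kvm is absent and open()
   * fails (typically with ENOENT; the exact errno is system dependent).
   */
  #include <errno.h>
  #include <fcntl.h>
  #include <stdio.h>
  #include <string.h>
  #include <unistd.h>

  int main(void)
  {
      int fd = open("/dev/kvm", O_RDWR | O_CLOEXEC);

      if (fd < 0) {
          printf("KVM unavailable: %s\n", strerror(errno));
          return 1;
      }
      printf("KVM available\n");
      close(fd);
      return 0;
  }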
Reviewed-by: David Brazdil Acked-by: Will Deacon Acked-by: Suzuki K Poulose Signed-off-by: Marc Zyngier Reviewed-by: Andrew Scull Link: https://lore.kernel.org/r/20211001170553.3062988-1-maz@kernel.org --- Documentation/admin-guide/kernel-parameters.txt | 2 ++ arch/arm64/include/asm/kvm_host.h | 1 + arch/arm64/kvm/arm.c | 14 +++++++++++++- 3 files changed, 16 insertions(+), 1 deletion(-) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 91ba391f9b32..f268731a3d4d 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -2365,6 +2365,8 @@ kvm-arm.mode= [KVM,ARM] Select one of KVM/arm64's modes of operation. + none: Forcefully disable KVM. + nvhe: Standard nVHE-based mode, without support for protected guests. diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index f8be56d5342b..019490c67976 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -58,6 +58,7 @@ enum kvm_mode { KVM_MODE_DEFAULT, KVM_MODE_PROTECTED, + KVM_MODE_NONE, }; enum kvm_mode kvm_get_mode(void); diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index fe102cd2e518..658171231af9 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -2064,6 +2064,11 @@ int kvm_arch_init(void *opaque) return -ENODEV; } + if (kvm_get_mode() == KVM_MODE_NONE) { + kvm_info("KVM disabled from command line\n"); + return -ENODEV; + } + in_hyp_mode = is_kernel_in_hyp_mode(); if (cpus_have_final_cap(ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) || @@ -2137,8 +2142,15 @@ static int __init early_kvm_mode_cfg(char *arg) return 0; } - if (strcmp(arg, "nvhe") == 0 && !WARN_ON(is_kernel_in_hyp_mode())) + if (strcmp(arg, "nvhe") == 0 && !WARN_ON(is_kernel_in_hyp_mode())) { + kvm_mode = KVM_MODE_DEFAULT; return 0; + } + + if (strcmp(arg, "none") == 0) { + kvm_mode = KVM_MODE_NONE; + return 0; + } return -EINVAL; } -- cgit v1.2.3 From c8f1e96734069c788b10545f4fd82bcbb6b55dfa Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 21 Sep 2021 15:22:30 -0700 Subject: KVM: arm64: Unconditionally include generic KVM's Kconfig Unconditionally "source" the generic KVM Kconfig instead of wrapping it with KVM=y. A future patch will select HAVE_KVM so that referencing HAVE_KVM in common kernel code doesn't break, and because KVM=y and HAVE_KVM=n is weird. Source the generic KVM Kconfig unconditionally so that HAVE_KVM and KVM don't end up with a circular dependency. Note, all but one of generic KVM's "configs" are of the HAVE_XYZ nature, and the one outlier correctly takes a dependency on CONFIG_KVM, i.e. the generic Kconfig is intended to be included unconditionally. No functional change intended. Signed-off-by: Sean Christopherson [maz: made NVHE_EL2_DEBUG depend on KVM] Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20210921222231.518092-2-seanjc@google.com --- arch/arm64/kvm/Kconfig | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig index d7eec0b43744..1170b20d68a7 100644 --- a/arch/arm64/kvm/Kconfig +++ b/arch/arm64/kvm/Kconfig @@ -4,6 +4,7 @@ # source "virt/lib/Kconfig" +source "virt/kvm/Kconfig" menuconfig VIRTUALIZATION bool "Virtualization" @@ -43,12 +44,9 @@ menuconfig KVM If unsure, say N. 
-if KVM - -source "virt/kvm/Kconfig" - config NVHE_EL2_DEBUG bool "Debug mode for non-VHE EL2 object" + depends on KVM help Say Y here to enable the debug mode for the non-VHE KVM EL2 object. Failure reports will BUG() in the hypervisor. This is intended for @@ -56,6 +54,4 @@ config NVHE_EL2_DEBUG If unsure, say N. -endif # KVM - endif # VIRTUALIZATION -- cgit v1.2.3 From e26bb75aa2f17fc079e6a24dff653b098e1f5d37 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 21 Sep 2021 15:22:31 -0700 Subject: KVM: arm64: Depend on HAVE_KVM instead of OF Select HAVE_KVM at all times on arm64, as the OF requirement is always there (even in the case of an ACPI system, we still depend on some of the OF infrastructure), and won't fo away. No functional change intended. Signed-off-by: Sean Christopherson Acked-by: Will Deacon [maz: Drop the "HAVE_KVM if OF" dependency, as OF is always there on arm64, new commit message] Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20210921222231.518092-3-seanjc@google.com --- arch/arm64/Kconfig | 1 + arch/arm64/kvm/Kconfig | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 5c7ae4c3954b..0d921d21fd35 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -186,6 +186,7 @@ config ARM64 select HAVE_GCC_PLUGINS select HAVE_HW_BREAKPOINT if PERF_EVENTS select HAVE_IRQ_TIME_ACCOUNTING + select HAVE_KVM select HAVE_NMI select HAVE_PATA_PLATFORM select HAVE_PERF_EVENTS diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig index 1170b20d68a7..8ffcbe29395e 100644 --- a/arch/arm64/kvm/Kconfig +++ b/arch/arm64/kvm/Kconfig @@ -20,7 +20,7 @@ if VIRTUALIZATION menuconfig KVM bool "Kernel-based Virtual Machine (KVM) support" - depends on OF + depends on HAVE_KVM select MMU_NOTIFIER select PREEMPT_NOTIFIERS select HAVE_KVM_CPU_RELAX_INTERCEPT -- cgit v1.2.3 From 00d5101b254b77c35a8d55fe46331b19192866f3 Mon Sep 17 00:00:00 2001 From: Alexandru Elisei Date: Mon, 11 Oct 2021 11:58:38 +0100 Subject: KVM: arm64: Return early from read_id_reg() if register is RAZ If read_id_reg() is called for an ID register which is Read-As-Zero (RAZ), it initializes the return value to zero, then goes through a list of registers which require special handling before returning the final value. By not returning as soon as it checks that the register should be RAZ, the function creates the opportunity for bugs, if, for example, a patch changes a register to RAZ (like has happened with PMSWINC_EL0 in commit 11663111cd49), but doesn't remove the special handling from read_id_reg(); or if a register is RAZ in certain situations, but readable in others. Return early to make it impossible for a RAZ register to be anything other than zero. Reviewed-by: Andrew Jones Signed-off-by: Alexandru Elisei Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211011105840.155815-2-alexandru.elisei@arm.com --- arch/arm64/kvm/sys_regs.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 1d46e185f31e..4adda8bf3168 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1064,7 +1064,12 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu, struct sys_reg_desc const *r, bool raz) { u32 id = reg_to_encoding(r); - u64 val = raz ? 
0 : read_sanitised_ftr_reg(id); + u64 val; + + if (raz) + return 0; + + val = read_sanitised_ftr_reg(id); switch (id) { case SYS_ID_AA64PFR0_EL1: -- cgit v1.2.3 From 5a4309762356f05df4c92629e5df15ab75c42c0d Mon Sep 17 00:00:00 2001 From: Alexandru Elisei Date: Mon, 11 Oct 2021 11:58:39 +0100 Subject: KVM: arm64: Use get_raz_reg() for userspace reads of PMSWINC_EL0 PMSWINC_EL0 is a write-only register and was initially part of the VCPU register state, but was later removed in commit 7a3ba3095a32 ("KVM: arm64: Remove PMSWINC_EL0 shadow register"). To prevent regressions, the register was kept accessible from userspace as Read-As-Zero (RAZ). The read function that is used to handle userspace reads of this register is get_raz_id_reg(), which, while technically correct, as it returns 0, it is not semantically correct, as PMSWINC_EL0 is not an ID register as the function name suggests. Add a new function, get_raz_reg(), to use it as the accessor for PMSWINC_EL0, as to not conflate get_raz_id_reg() to handle other types of registers. No functional change intended. Signed-off-by: Alexandru Elisei Reviewed-by: Andrew Jones Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211011105840.155815-3-alexandru.elisei@arm.com --- arch/arm64/kvm/sys_regs.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 4adda8bf3168..1be827740f87 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1285,6 +1285,15 @@ static int set_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, return __set_id_reg(vcpu, rd, uaddr, true); } +static int get_raz_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, + const struct kvm_one_reg *reg, void __user *uaddr) +{ + const u64 id = sys_reg_to_index(rd); + const u64 val = 0; + + return reg_to_user(uaddr, &val, id); +} + static int set_wi_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, const struct kvm_one_reg *reg, void __user *uaddr) { @@ -1647,7 +1656,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { * previously (and pointlessly) advertised in the past... */ { PMU_SYS_REG(SYS_PMSWINC_EL0), - .get_user = get_raz_id_reg, .set_user = set_wi_reg, + .get_user = get_raz_reg, .set_user = set_wi_reg, .access = access_pmswinc, .reset = NULL }, { PMU_SYS_REG(SYS_PMSELR_EL0), .access = access_pmselr, .reset = reset_pmselr, .reg = PMSELR_EL0 }, -- cgit v1.2.3 From ebf6aa8c047352fe43b1e20e580f12d5564da28e Mon Sep 17 00:00:00 2001 From: Alexandru Elisei Date: Mon, 11 Oct 2021 11:58:40 +0100 Subject: KVM: arm64: Replace get_raz_id_reg() with get_raz_reg() Reading a RAZ ID register isn't different from reading any other RAZ register, so get rid of get_raz_id_reg() and replace it with get_raz_reg(), which does the same thing, but does it without going through two layers of indirection. No functional change. 
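From userspace, a RAZ register is simply one whose KVM_GET_ONE_REG value is always zero; whether that zero comes from get_raz_id_reg() or get_raz_reg() is invisible to the caller, which is why the extra indirection can go. A hedged illustration of that view (the register ID and vCPU file descriptor are placeholders, not taken from this patch):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Fetch one register; for a RAZ register *val is 0 on success. */
static int read_one_reg(int vcpu_fd, uint64_t reg_id, uint64_t *val)
{
	struct kvm_one_reg reg = {
		.id   = reg_id,
		.addr = (uint64_t)(unsigned long)val,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}
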
Suggested-by: Andrew Jones Signed-off-by: Alexandru Elisei Reviewed-by: Andrew Jones Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211011105840.155815-4-alexandru.elisei@arm.com --- arch/arm64/kvm/sys_regs.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 1be827740f87..3aff06aafd0c 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1273,12 +1273,6 @@ static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, return __set_id_reg(vcpu, rd, uaddr, raz); } -static int get_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, - const struct kvm_one_reg *reg, void __user *uaddr) -{ - return __get_id_reg(vcpu, rd, uaddr, true); -} - static int set_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, const struct kvm_one_reg *reg, void __user *uaddr) { @@ -1402,7 +1396,7 @@ static unsigned int mte_visibility(const struct kvm_vcpu *vcpu, #define ID_UNALLOCATED(crm, op2) { \ Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2), \ .access = access_raz_id_reg, \ - .get_user = get_raz_id_reg, \ + .get_user = get_raz_reg, \ .set_user = set_raz_id_reg, \ } @@ -1414,7 +1408,7 @@ static unsigned int mte_visibility(const struct kvm_vcpu *vcpu, #define ID_HIDDEN(name) { \ SYS_DESC(SYS_##name), \ .access = access_raz_id_reg, \ - .get_user = get_raz_id_reg, \ + .get_user = get_raz_reg, \ .set_user = set_raz_id_reg, \ } -- cgit v1.2.3 From 7dd9b5a157485ae8c48f76f087b1867ace016613 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sun, 10 Oct 2021 15:56:26 +0100 Subject: KVM: arm64: Move __get_fault_info() and co into their own include file In order to avoid including the whole of the switching helpers in unrelated files, move the __get_fault_info() and related helpers into their own include file. Signed-off-by: Marc Zyngier Signed-off-by: Fuad Tabba Link: https://lore.kernel.org/r/20211010145636.1950948-2-tabba@google.com --- arch/arm64/kvm/hyp/include/hyp/fault.h | 75 +++++++++++++++++++++++++++++++++ arch/arm64/kvm/hyp/include/hyp/switch.h | 61 +-------------------------- arch/arm64/kvm/hyp/nvhe/mem_protect.c | 2 +- 3 files changed, 77 insertions(+), 61 deletions(-) create mode 100644 arch/arm64/kvm/hyp/include/hyp/fault.h diff --git a/arch/arm64/kvm/hyp/include/hyp/fault.h b/arch/arm64/kvm/hyp/include/hyp/fault.h new file mode 100644 index 000000000000..1b8a2dcd712f --- /dev/null +++ b/arch/arm64/kvm/hyp/include/hyp/fault.h @@ -0,0 +1,75 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2015 - ARM Ltd + * Author: Marc Zyngier + */ + +#ifndef __ARM64_KVM_HYP_FAULT_H__ +#define __ARM64_KVM_HYP_FAULT_H__ + +#include +#include +#include +#include + +static inline bool __translate_far_to_hpfar(u64 far, u64 *hpfar) +{ + u64 par, tmp; + + /* + * Resolve the IPA the hard way using the guest VA. + * + * Stage-1 translation already validated the memory access + * rights. As such, we can use the EL1 translation regime, and + * don't have to distinguish between EL0 and EL1 access. + * + * We do need to save/restore PAR_EL1 though, as we haven't + * saved the guest context yet, and we may return early... 
+ */ + par = read_sysreg_par(); + if (!__kvm_at("s1e1r", far)) + tmp = read_sysreg_par(); + else + tmp = SYS_PAR_EL1_F; /* back to the guest */ + write_sysreg(par, par_el1); + + if (unlikely(tmp & SYS_PAR_EL1_F)) + return false; /* Translation failed, back to guest */ + + /* Convert PAR to HPFAR format */ + *hpfar = PAR_TO_HPFAR(tmp); + return true; +} + +static inline bool __get_fault_info(u64 esr, struct kvm_vcpu_fault_info *fault) +{ + u64 hpfar, far; + + far = read_sysreg_el2(SYS_FAR); + + /* + * The HPFAR can be invalid if the stage 2 fault did not + * happen during a stage 1 page table walk (the ESR_EL2.S1PTW + * bit is clear) and one of the two following cases are true: + * 1. The fault was due to a permission fault + * 2. The processor carries errata 834220 + * + * Therefore, for all non S1PTW faults where we either have a + * permission fault or the errata workaround is enabled, we + * resolve the IPA using the AT instruction. + */ + if (!(esr & ESR_ELx_S1PTW) && + (cpus_have_final_cap(ARM64_WORKAROUND_834220) || + (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) { + if (!__translate_far_to_hpfar(far, &hpfar)) + return false; + } else { + hpfar = read_sysreg(hpfar_el2); + } + + fault->far_el2 = far; + fault->hpfar_el2 = hpfar; + return true; +} + +#endif diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index a0e78a6027be..54abc8298ec3 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -8,6 +8,7 @@ #define __ARM64_KVM_HYP_SWITCH_H__ #include +#include #include #include @@ -133,66 +134,6 @@ static inline void ___deactivate_traps(struct kvm_vcpu *vcpu) } } -static inline bool __translate_far_to_hpfar(u64 far, u64 *hpfar) -{ - u64 par, tmp; - - /* - * Resolve the IPA the hard way using the guest VA. - * - * Stage-1 translation already validated the memory access - * rights. As such, we can use the EL1 translation regime, and - * don't have to distinguish between EL0 and EL1 access. - * - * We do need to save/restore PAR_EL1 though, as we haven't - * saved the guest context yet, and we may return early... - */ - par = read_sysreg_par(); - if (!__kvm_at("s1e1r", far)) - tmp = read_sysreg_par(); - else - tmp = SYS_PAR_EL1_F; /* back to the guest */ - write_sysreg(par, par_el1); - - if (unlikely(tmp & SYS_PAR_EL1_F)) - return false; /* Translation failed, back to guest */ - - /* Convert PAR to HPFAR format */ - *hpfar = PAR_TO_HPFAR(tmp); - return true; -} - -static inline bool __get_fault_info(u64 esr, struct kvm_vcpu_fault_info *fault) -{ - u64 hpfar, far; - - far = read_sysreg_el2(SYS_FAR); - - /* - * The HPFAR can be invalid if the stage 2 fault did not - * happen during a stage 1 page table walk (the ESR_EL2.S1PTW - * bit is clear) and one of the two following cases are true: - * 1. The fault was due to a permission fault - * 2. The processor carries errata 834220 - * - * Therefore, for all non S1PTW faults where we either have a - * permission fault or the errata workaround is enabled, we - * resolve the IPA using the AT instruction. 
- */ - if (!(esr & ESR_ELx_S1PTW) && - (cpus_have_final_cap(ARM64_WORKAROUND_834220) || - (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) { - if (!__translate_far_to_hpfar(far, &hpfar)) - return false; - } else { - hpfar = read_sysreg(hpfar_el2); - } - - fault->far_el2 = far; - fault->hpfar_el2 = hpfar; - return true; -} - static inline bool __populate_fault_info(struct kvm_vcpu *vcpu) { u8 ec; diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c index bacd493a4eac..2a07d63b8498 100644 --- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c +++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c @@ -11,7 +11,7 @@ #include #include -#include +#include #include #include -- cgit v1.2.3 From cc1e6fdfa92b82902883b70dafa729d3bd427b80 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sun, 10 Oct 2021 15:56:27 +0100 Subject: KVM: arm64: Don't include switch.h into nvhe/kvm-main.c hyp-main.c includes switch.h while it only requires adjust-pc.h. Fix it to remove an unnecessary dependency. Signed-off-by: Marc Zyngier Signed-off-by: Fuad Tabba Link: https://lore.kernel.org/r/20211010145636.1950948-3-tabba@google.com --- arch/arm64/kvm/hyp/nvhe/hyp-main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c index 2da6aa8da868..8ca1104f4774 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c +++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c @@ -4,7 +4,7 @@ * Author: Andrew Scull */ -#include +#include #include #include -- cgit v1.2.3 From 8fb2046180a0ad347f2e5bcae760dca67e65aa73 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sun, 10 Oct 2021 15:56:28 +0100 Subject: KVM: arm64: Move early handlers to per-EC handlers Simplify the early exception handling by slicing the gigantic decoding tree into a more manageable set of functions, similar to what we have in handle_exit.c. This will also make the structure reusable for pKVM's own early exit handling. Signed-off-by: Marc Zyngier Signed-off-by: Fuad Tabba Link: https://lore.kernel.org/r/20211010145636.1950948-4-tabba@google.com --- arch/arm64/kvm/hyp/include/hyp/switch.h | 160 ++++++++++++++++++-------------- arch/arm64/kvm/hyp/nvhe/switch.c | 16 ++++ arch/arm64/kvm/hyp/vhe/switch.c | 16 ++++ 3 files changed, 124 insertions(+), 68 deletions(-) diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index 54abc8298ec3..1e4177322be7 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -136,16 +136,7 @@ static inline void ___deactivate_traps(struct kvm_vcpu *vcpu) static inline bool __populate_fault_info(struct kvm_vcpu *vcpu) { - u8 ec; - u64 esr; - - esr = vcpu->arch.fault.esr_el2; - ec = ESR_ELx_EC(esr); - - if (ec != ESR_ELx_EC_DABT_LOW && ec != ESR_ELx_EC_IABT_LOW) - return true; - - return __get_fault_info(esr, &vcpu->arch.fault); + return __get_fault_info(vcpu->arch.fault.esr_el2, &vcpu->arch.fault); } static inline void __hyp_sve_save_host(struct kvm_vcpu *vcpu) @@ -166,8 +157,13 @@ static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu) write_sysreg_el1(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR); } -/* Check for an FPSIMD/SVE trap and handle as appropriate */ -static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu) +/* + * We trap the first access to the FP/SIMD to save the host context and + * restore the guest context lazily. + * If FP/SIMD is not implemented, handle the trap and inject an undefined + * instruction exception to the guest. 
Similarly for trapped SVE accesses. + */ +static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code) { bool sve_guest, sve_host; u8 esr_ec; @@ -185,9 +181,6 @@ static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu) } esr_ec = kvm_vcpu_trap_get_class(vcpu); - if (esr_ec != ESR_ELx_EC_FP_ASIMD && - esr_ec != ESR_ELx_EC_SVE) - return false; /* Don't handle SVE traps for non-SVE vcpus here: */ if (!sve_guest && esr_ec != ESR_ELx_EC_FP_ASIMD) @@ -325,7 +318,7 @@ static inline bool esr_is_ptrauth_trap(u32 esr) DECLARE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt); -static inline bool __hyp_handle_ptrauth(struct kvm_vcpu *vcpu) +static bool kvm_hyp_handle_ptrauth(struct kvm_vcpu *vcpu, u64 *exit_code) { struct kvm_cpu_context *ctxt; u64 val; @@ -350,6 +343,87 @@ static inline bool __hyp_handle_ptrauth(struct kvm_vcpu *vcpu) return true; } +static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code) +{ + if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) && + handle_tx2_tvm(vcpu)) + return true; + + if (static_branch_unlikely(&vgic_v3_cpuif_trap) && + __vgic_v3_perform_cpuif_access(vcpu) == 1) + return true; + + return false; +} + +static bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code) +{ + if (static_branch_unlikely(&vgic_v3_cpuif_trap) && + __vgic_v3_perform_cpuif_access(vcpu) == 1) + return true; + + return false; +} + +static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code) +{ + if (!__populate_fault_info(vcpu)) + return true; + + return false; +} + +static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code) +{ + if (!__populate_fault_info(vcpu)) + return true; + + if (static_branch_unlikely(&vgic_v2_cpuif_trap)) { + bool valid; + + valid = kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT && + kvm_vcpu_dabt_isvalid(vcpu) && + !kvm_vcpu_abt_issea(vcpu) && + !kvm_vcpu_abt_iss1tw(vcpu); + + if (valid) { + int ret = __vgic_v2_perform_cpuif_access(vcpu); + + if (ret == 1) + return true; + + /* Promote an illegal access to an SError.*/ + if (ret == -1) + *exit_code = ARM_EXCEPTION_EL1_SERROR; + } + } + + return false; +} + +typedef bool (*exit_handler_fn)(struct kvm_vcpu *, u64 *); + +static const exit_handler_fn *kvm_get_exit_handler_array(void); + +/* + * Allow the hypervisor to handle the exit with an exit handler if it has one. + * + * Returns true if the hypervisor handled the exit, and control should go back + * to the guest, or false if it hasn't. + */ +static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code) +{ + const exit_handler_fn *handlers = kvm_get_exit_handler_array(); + exit_handler_fn fn; + + fn = handlers[kvm_vcpu_trap_get_class(vcpu)]; + + if (fn) + return fn(vcpu, exit_code); + + return false; +} + /* * Return true when we were able to fixup the guest exit and should return to * the guest, false when we should restore the host state and return to the @@ -384,59 +458,9 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code) if (*exit_code != ARM_EXCEPTION_TRAP) goto exit; - if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) && - kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 && - handle_tx2_tvm(vcpu)) + /* Check if there's an exit handler and allow it to handle the exit. */ + if (kvm_hyp_handle_exit(vcpu, exit_code)) goto guest; - - /* - * We trap the first access to the FP/SIMD to save the host context - * and restore the guest context lazily. 
- * If FP/SIMD is not implemented, handle the trap and inject an - * undefined instruction exception to the guest. - * Similarly for trapped SVE accesses. - */ - if (__hyp_handle_fpsimd(vcpu)) - goto guest; - - if (__hyp_handle_ptrauth(vcpu)) - goto guest; - - if (!__populate_fault_info(vcpu)) - goto guest; - - if (static_branch_unlikely(&vgic_v2_cpuif_trap)) { - bool valid; - - valid = kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_DABT_LOW && - kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT && - kvm_vcpu_dabt_isvalid(vcpu) && - !kvm_vcpu_abt_issea(vcpu) && - !kvm_vcpu_abt_iss1tw(vcpu); - - if (valid) { - int ret = __vgic_v2_perform_cpuif_access(vcpu); - - if (ret == 1) - goto guest; - - /* Promote an illegal access to an SError.*/ - if (ret == -1) - *exit_code = ARM_EXCEPTION_EL1_SERROR; - - goto exit; - } - } - - if (static_branch_unlikely(&vgic_v3_cpuif_trap) && - (kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 || - kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_CP15_32)) { - int ret = __vgic_v3_perform_cpuif_access(vcpu); - - if (ret == 1) - goto guest; - } - exit: /* Return to the host kernel and handle the exit */ return false; diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c index a34b01cc8ab9..4f3992a1aabd 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -158,6 +158,22 @@ static void __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt) write_sysreg(pmu->events_host, pmcntenset_el0); } +static const exit_handler_fn hyp_exit_handlers[] = { + [0 ... ESR_ELx_EC_MAX] = NULL, + [ESR_ELx_EC_CP15_32] = kvm_hyp_handle_cp15_32, + [ESR_ELx_EC_SYS64] = kvm_hyp_handle_sysreg, + [ESR_ELx_EC_SVE] = kvm_hyp_handle_fpsimd, + [ESR_ELx_EC_FP_ASIMD] = kvm_hyp_handle_fpsimd, + [ESR_ELx_EC_IABT_LOW] = kvm_hyp_handle_iabt_low, + [ESR_ELx_EC_DABT_LOW] = kvm_hyp_handle_dabt_low, + [ESR_ELx_EC_PAC] = kvm_hyp_handle_ptrauth, +}; + +static const exit_handler_fn *kvm_get_exit_handler_array(void) +{ + return hyp_exit_handlers; +} + /* Switch to the guest for legacy non-VHE systems */ int __kvm_vcpu_run(struct kvm_vcpu *vcpu) { diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c index ded2c66675f0..9aedc8afc8b9 100644 --- a/arch/arm64/kvm/hyp/vhe/switch.c +++ b/arch/arm64/kvm/hyp/vhe/switch.c @@ -96,6 +96,22 @@ void deactivate_traps_vhe_put(struct kvm_vcpu *vcpu) __deactivate_traps_common(vcpu); } +static const exit_handler_fn hyp_exit_handlers[] = { + [0 ... ESR_ELx_EC_MAX] = NULL, + [ESR_ELx_EC_CP15_32] = kvm_hyp_handle_cp15_32, + [ESR_ELx_EC_SYS64] = kvm_hyp_handle_sysreg, + [ESR_ELx_EC_SVE] = kvm_hyp_handle_fpsimd, + [ESR_ELx_EC_FP_ASIMD] = kvm_hyp_handle_fpsimd, + [ESR_ELx_EC_IABT_LOW] = kvm_hyp_handle_iabt_low, + [ESR_ELx_EC_DABT_LOW] = kvm_hyp_handle_dabt_low, + [ESR_ELx_EC_PAC] = kvm_hyp_handle_ptrauth, +}; + +static const exit_handler_fn *kvm_get_exit_handler_array(void) +{ + return hyp_exit_handlers; +} + /* Switch to the guest for VHE systems running in EL2 */ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) { -- cgit v1.2.3 From 3b1a690eda0dc1891e8fc93991b122bff6fabf8c Mon Sep 17 00:00:00 2001 From: Fuad Tabba Date: Sun, 10 Oct 2021 15:56:29 +0100 Subject: KVM: arm64: Pass struct kvm to per-EC handlers We need struct kvm to check for protected VMs to be able to pick the right handlers for them in subsequent patches. 
Signed-off-by: Fuad Tabba Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211010145636.1950948-5-tabba@google.com --- arch/arm64/kvm/hyp/include/hyp/switch.h | 4 ++-- arch/arm64/kvm/hyp/nvhe/switch.c | 2 +- arch/arm64/kvm/hyp/vhe/switch.c | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index 1e4177322be7..481399bf9b94 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -403,7 +403,7 @@ static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code) typedef bool (*exit_handler_fn)(struct kvm_vcpu *, u64 *); -static const exit_handler_fn *kvm_get_exit_handler_array(void); +static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm *kvm); /* * Allow the hypervisor to handle the exit with an exit handler if it has one. @@ -413,7 +413,7 @@ static const exit_handler_fn *kvm_get_exit_handler_array(void); */ static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code) { - const exit_handler_fn *handlers = kvm_get_exit_handler_array(); + const exit_handler_fn *handlers = kvm_get_exit_handler_array(kern_hyp_va(vcpu->kvm)); exit_handler_fn fn; fn = handlers[kvm_vcpu_trap_get_class(vcpu)]; diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c index 4f3992a1aabd..8c9a0464be00 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -169,7 +169,7 @@ static const exit_handler_fn hyp_exit_handlers[] = { [ESR_ELx_EC_PAC] = kvm_hyp_handle_ptrauth, }; -static const exit_handler_fn *kvm_get_exit_handler_array(void) +static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm *kvm) { return hyp_exit_handlers; } diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c index 9aedc8afc8b9..f6fb97accf65 100644 --- a/arch/arm64/kvm/hyp/vhe/switch.c +++ b/arch/arm64/kvm/hyp/vhe/switch.c @@ -107,7 +107,7 @@ static const exit_handler_fn hyp_exit_handlers[] = { [ESR_ELx_EC_PAC] = kvm_hyp_handle_ptrauth, }; -static const exit_handler_fn *kvm_get_exit_handler_array(void) +static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm *kvm) { return hyp_exit_handlers; } -- cgit v1.2.3 From 53868390778270f2890621f4498a53587719a3ff Mon Sep 17 00:00:00 2001 From: Fuad Tabba Date: Sun, 10 Oct 2021 15:56:30 +0100 Subject: KVM: arm64: Add missing field descriptor for MDCR_EL2 It's not currently used. Added for completeness. No functional change intended. 
Suggested-by: Marc Zyngier Signed-off-by: Fuad Tabba Reviewed-by: Andrew Jones Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211010145636.1950948-6-tabba@google.com --- arch/arm64/include/asm/kvm_arm.h | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index 327120c0089f..a39fcf318c77 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -295,6 +295,7 @@ #define MDCR_EL2_HPMFZO (UL(1) << 29) #define MDCR_EL2_MTPME (UL(1) << 28) #define MDCR_EL2_TDCC (UL(1) << 27) +#define MDCR_EL2_HLP (UL(1) << 26) #define MDCR_EL2_HCCD (UL(1) << 23) #define MDCR_EL2_TTRF (UL(1) << 19) #define MDCR_EL2_HPMD (UL(1) << 17) -- cgit v1.2.3 From 16dd1fbb12f72effcd3539561c2a94aed3ab6581 Mon Sep 17 00:00:00 2001 From: Fuad Tabba Date: Sun, 10 Oct 2021 15:56:31 +0100 Subject: KVM: arm64: Simplify masking out MTE in feature id reg Simplify code for hiding MTE support in feature id register when MTE is not enabled/supported by KVM. No functional change intended. Signed-off-by: Fuad Tabba Reviewed-by: Andrew Jones Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211010145636.1950948-7-tabba@google.com --- arch/arm64/kvm/sys_regs.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 1d46e185f31e..447acce9ca84 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1077,14 +1077,8 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu, val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3); break; case SYS_ID_AA64PFR1_EL1: - val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_MTE); - if (kvm_has_mte(vcpu->kvm)) { - u64 pfr, mte; - - pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1); - mte = cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR1_MTE_SHIFT); - val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR1_MTE), mte); - } + if (!kvm_has_mte(vcpu->kvm)) + val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_MTE); break; case SYS_ID_AA64ISAR1_EL1: if (!vcpu_has_ptrauth(vcpu)) -- cgit v1.2.3 From 6c30bfb18d0b7d09593f204c936493cfcd153956 Mon Sep 17 00:00:00 2001 From: Fuad Tabba Date: Sun, 10 Oct 2021 15:56:32 +0100 Subject: KVM: arm64: Add handlers for protected VM System Registers Add system register handlers for protected VMs. These cover Sys64 registers (including feature id registers), and debug. No functional change intended as these are not hooked in yet to the guest exit handlers introduced earlier. So when trapping is triggered, the exit handlers let the host handle it, as before. 
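The masking scheme used throughout the new hyp/nvhe/sys_regs.c can be exercised on its own; the following is a standalone model of get_restricted_features_unsigned() with made-up sample values (no kernel dependencies, four-bit fields as in the ID registers):

#include <stdint.h>
#include <stdio.h>

#define FIELD_BITS 4	/* ID registers are split into 4-bit fields */

/* Clamp each unsigned field to min(host value, allowed ceiling). */
static uint64_t restrict_unsigned_fields(uint64_t sys_val, uint64_t ceiling)
{
	uint64_t out = 0;
	uint64_t mask = (1ULL << FIELD_BITS) - 1;

	while (sys_val && ceiling) {
		uint64_t host = sys_val & mask;
		uint64_t cap  = ceiling & mask;

		out |= host < cap ? host : cap;
		sys_val &= ~mask;
		ceiling &= ~mask;
		mask <<= FIELD_BITS;
	}

	return out;
}

int main(void)
{
	/* Host advertises 0x21, the policy allows at most 0x11: prints 0x11. */
	printf("0x%llx\n",
	       (unsigned long long)restrict_unsigned_fields(0x21, 0x11));
	return 0;
}

The ALLOW masks in kvm_fixed_config.h complement this by simply ANDing away any field that is not whitelisted, so a protected guest reads those fields as zero.
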
Reviewed-by: Andrew Jones Signed-off-by: Fuad Tabba Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211010145636.1950948-8-tabba@google.com --- arch/arm64/include/asm/kvm_fixed_config.h | 195 +++++++++++ arch/arm64/include/asm/kvm_hyp.h | 5 + arch/arm64/kvm/arm.c | 5 + arch/arm64/kvm/hyp/include/nvhe/sys_regs.h | 29 ++ arch/arm64/kvm/hyp/nvhe/Makefile | 2 +- arch/arm64/kvm/hyp/nvhe/setup.c | 3 + arch/arm64/kvm/hyp/nvhe/switch.c | 1 + arch/arm64/kvm/hyp/nvhe/sys_regs.c | 498 +++++++++++++++++++++++++++++ 8 files changed, 737 insertions(+), 1 deletion(-) create mode 100644 arch/arm64/include/asm/kvm_fixed_config.h create mode 100644 arch/arm64/kvm/hyp/include/nvhe/sys_regs.h create mode 100644 arch/arm64/kvm/hyp/nvhe/sys_regs.c diff --git a/arch/arm64/include/asm/kvm_fixed_config.h b/arch/arm64/include/asm/kvm_fixed_config.h new file mode 100644 index 000000000000..0ed06923f7e9 --- /dev/null +++ b/arch/arm64/include/asm/kvm_fixed_config.h @@ -0,0 +1,195 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2021 Google LLC + * Author: Fuad Tabba + */ + +#ifndef __ARM64_KVM_FIXED_CONFIG_H__ +#define __ARM64_KVM_FIXED_CONFIG_H__ + +#include + +/* + * This file contains definitions for features to be allowed or restricted for + * guest virtual machines, depending on the mode KVM is running in and on the + * type of guest that is running. + * + * The ALLOW masks represent a bitmask of feature fields that are allowed + * without any restrictions as long as they are supported by the system. + * + * The RESTRICT_UNSIGNED masks, if present, represent unsigned fields for + * features that are restricted to support at most the specified feature. + * + * If a feature field is not present in either, than it is not supported. + * + * The approach taken for protected VMs is to allow features that are: + * - Needed by common Linux distributions (e.g., floating point) + * - Trivial to support, e.g., supporting the feature does not introduce or + * require tracking of additional state in KVM + * - Cannot be trapped or prevent the guest from using anyway + */ + +/* + * Allow for protected VMs: + * - Floating-point and Advanced SIMD + * - Data Independent Timing + */ +#define PVM_ID_AA64PFR0_ALLOW (\ + ARM64_FEATURE_MASK(ID_AA64PFR0_FP) | \ + ARM64_FEATURE_MASK(ID_AA64PFR0_ASIMD) | \ + ARM64_FEATURE_MASK(ID_AA64PFR0_DIT) \ + ) + +/* + * Restrict to the following *unsigned* features for protected VMs: + * - AArch64 guests only (no support for AArch32 guests): + * AArch32 adds complexity in trap handling, emulation, condition codes, + * etc... 
+ * - RAS (v1) + * Supported by KVM + */ +#define PVM_ID_AA64PFR0_RESTRICT_UNSIGNED (\ + FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL0), ID_AA64PFR0_ELx_64BIT_ONLY) | \ + FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1), ID_AA64PFR0_ELx_64BIT_ONLY) | \ + FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL2), ID_AA64PFR0_ELx_64BIT_ONLY) | \ + FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL3), ID_AA64PFR0_ELx_64BIT_ONLY) | \ + FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_RAS), ID_AA64PFR0_RAS_V1) \ + ) + +/* + * Allow for protected VMs: + * - Branch Target Identification + * - Speculative Store Bypassing + */ +#define PVM_ID_AA64PFR1_ALLOW (\ + ARM64_FEATURE_MASK(ID_AA64PFR1_BT) | \ + ARM64_FEATURE_MASK(ID_AA64PFR1_SSBS) \ + ) + +/* + * Allow for protected VMs: + * - Mixed-endian + * - Distinction between Secure and Non-secure Memory + * - Mixed-endian at EL0 only + * - Non-context synchronizing exception entry and exit + */ +#define PVM_ID_AA64MMFR0_ALLOW (\ + ARM64_FEATURE_MASK(ID_AA64MMFR0_BIGENDEL) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR0_SNSMEM) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR0_BIGENDEL0) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR0_EXS) \ + ) + +/* + * Restrict to the following *unsigned* features for protected VMs: + * - 40-bit IPA + * - 16-bit ASID + */ +#define PVM_ID_AA64MMFR0_RESTRICT_UNSIGNED (\ + FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64MMFR0_PARANGE), ID_AA64MMFR0_PARANGE_40) | \ + FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64MMFR0_ASID), ID_AA64MMFR0_ASID_16) \ + ) + +/* + * Allow for protected VMs: + * - Hardware translation table updates to Access flag and Dirty state + * - Number of VMID bits from CPU + * - Hierarchical Permission Disables + * - Privileged Access Never + * - SError interrupt exceptions from speculative reads + * - Enhanced Translation Synchronization + */ +#define PVM_ID_AA64MMFR1_ALLOW (\ + ARM64_FEATURE_MASK(ID_AA64MMFR1_HADBS) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR1_VMIDBITS) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR1_HPD) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR1_PAN) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR1_SPECSEI) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR1_ETS) \ + ) + +/* + * Allow for protected VMs: + * - Common not Private translations + * - User Access Override + * - IESB bit in the SCTLR_ELx registers + * - Unaligned single-copy atomicity and atomic functions + * - ESR_ELx.EC value on an exception by read access to feature ID space + * - TTL field in address operations. + * - Break-before-make sequences when changing translation block size + * - E0PDx mechanism + */ +#define PVM_ID_AA64MMFR2_ALLOW (\ + ARM64_FEATURE_MASK(ID_AA64MMFR2_CNP) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR2_UAO) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR2_IESB) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR2_AT) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR2_IDS) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR2_TTL) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR2_BBM) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR2_E0PD) \ + ) + +/* + * No support for Scalable Vectors for protected VMs: + * Requires additional support from KVM, e.g., context-switching and + * trapping at EL2 + */ +#define PVM_ID_AA64ZFR0_ALLOW (0ULL) + +/* + * No support for debug, including breakpoints, and watchpoints for protected + * VMs: + * The Arm architecture mandates support for at least the Armv8 debug + * architecture, which would include at least 2 hardware breakpoints and + * watchpoints. Providing that support to protected guests adds + * considerable state and complexity. Therefore, the reserved value of 0 is + * used for debug-related fields. 
+ */ +#define PVM_ID_AA64DFR0_ALLOW (0ULL) +#define PVM_ID_AA64DFR1_ALLOW (0ULL) + +/* + * No support for implementation defined features. + */ +#define PVM_ID_AA64AFR0_ALLOW (0ULL) +#define PVM_ID_AA64AFR1_ALLOW (0ULL) + +/* + * No restrictions on instructions implemented in AArch64. + */ +#define PVM_ID_AA64ISAR0_ALLOW (\ + ARM64_FEATURE_MASK(ID_AA64ISAR0_AES) | \ + ARM64_FEATURE_MASK(ID_AA64ISAR0_SHA1) | \ + ARM64_FEATURE_MASK(ID_AA64ISAR0_SHA2) | \ + ARM64_FEATURE_MASK(ID_AA64ISAR0_CRC32) | \ + ARM64_FEATURE_MASK(ID_AA64ISAR0_ATOMICS) | \ + ARM64_FEATURE_MASK(ID_AA64ISAR0_RDM) | \ + ARM64_FEATURE_MASK(ID_AA64ISAR0_SHA3) | \ + ARM64_FEATURE_MASK(ID_AA64ISAR0_SM3) | \ + ARM64_FEATURE_MASK(ID_AA64ISAR0_SM4) | \ + ARM64_FEATURE_MASK(ID_AA64ISAR0_DP) | \ + ARM64_FEATURE_MASK(ID_AA64ISAR0_FHM) | \ + ARM64_FEATURE_MASK(ID_AA64ISAR0_TS) | \ + ARM64_FEATURE_MASK(ID_AA64ISAR0_TLB) | \ + ARM64_FEATURE_MASK(ID_AA64ISAR0_RNDR) \ + ) + +#define PVM_ID_AA64ISAR1_ALLOW (\ + ARM64_FEATURE_MASK(ID_AA64ISAR1_DPB) | \ + ARM64_FEATURE_MASK(ID_AA64ISAR1_APA) | \ + ARM64_FEATURE_MASK(ID_AA64ISAR1_API) | \ + ARM64_FEATURE_MASK(ID_AA64ISAR1_JSCVT) | \ + ARM64_FEATURE_MASK(ID_AA64ISAR1_FCMA) | \ + ARM64_FEATURE_MASK(ID_AA64ISAR1_LRCPC) | \ + ARM64_FEATURE_MASK(ID_AA64ISAR1_GPA) | \ + ARM64_FEATURE_MASK(ID_AA64ISAR1_GPI) | \ + ARM64_FEATURE_MASK(ID_AA64ISAR1_FRINTTS) | \ + ARM64_FEATURE_MASK(ID_AA64ISAR1_SB) | \ + ARM64_FEATURE_MASK(ID_AA64ISAR1_SPECRES) | \ + ARM64_FEATURE_MASK(ID_AA64ISAR1_BF16) | \ + ARM64_FEATURE_MASK(ID_AA64ISAR1_DGH) | \ + ARM64_FEATURE_MASK(ID_AA64ISAR1_I8MM) \ + ) + +#endif /* __ARM64_KVM_FIXED_CONFIG_H__ */ diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h index 657d0c94cf82..5afd14ab15b9 100644 --- a/arch/arm64/include/asm/kvm_hyp.h +++ b/arch/arm64/include/asm/kvm_hyp.h @@ -115,7 +115,12 @@ int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus, void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt); #endif +extern u64 kvm_nvhe_sym(id_aa64pfr0_el1_sys_val); +extern u64 kvm_nvhe_sym(id_aa64pfr1_el1_sys_val); +extern u64 kvm_nvhe_sym(id_aa64isar0_el1_sys_val); +extern u64 kvm_nvhe_sym(id_aa64isar1_el1_sys_val); extern u64 kvm_nvhe_sym(id_aa64mmfr0_el1_sys_val); extern u64 kvm_nvhe_sym(id_aa64mmfr1_el1_sys_val); +extern u64 kvm_nvhe_sym(id_aa64mmfr2_el1_sys_val); #endif /* __ARM64_KVM_HYP_H__ */ diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index fe102cd2e518..6aa7b0c5bf21 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -1802,8 +1802,13 @@ static int kvm_hyp_init_protection(u32 hyp_va_bits) void *addr = phys_to_virt(hyp_mem_base); int ret; + kvm_nvhe_sym(id_aa64pfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); + kvm_nvhe_sym(id_aa64pfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1); + kvm_nvhe_sym(id_aa64isar0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR0_EL1); + kvm_nvhe_sym(id_aa64isar1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR1_EL1); kvm_nvhe_sym(id_aa64mmfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1); kvm_nvhe_sym(id_aa64mmfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1); + kvm_nvhe_sym(id_aa64mmfr2_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR2_EL1); ret = create_hyp_mappings(addr, addr + hyp_mem_size, PAGE_HYP); if (ret) diff --git a/arch/arm64/kvm/hyp/include/nvhe/sys_regs.h b/arch/arm64/kvm/hyp/include/nvhe/sys_regs.h new file mode 100644 index 000000000000..3288128738aa --- /dev/null +++ 
b/arch/arm64/kvm/hyp/include/nvhe/sys_regs.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2021 Google LLC + * Author: Fuad Tabba + */ + +#ifndef __ARM64_KVM_NVHE_SYS_REGS_H__ +#define __ARM64_KVM_NVHE_SYS_REGS_H__ + +#include + +u64 get_pvm_id_aa64pfr0(const struct kvm_vcpu *vcpu); +u64 get_pvm_id_aa64pfr1(const struct kvm_vcpu *vcpu); +u64 get_pvm_id_aa64zfr0(const struct kvm_vcpu *vcpu); +u64 get_pvm_id_aa64dfr0(const struct kvm_vcpu *vcpu); +u64 get_pvm_id_aa64dfr1(const struct kvm_vcpu *vcpu); +u64 get_pvm_id_aa64afr0(const struct kvm_vcpu *vcpu); +u64 get_pvm_id_aa64afr1(const struct kvm_vcpu *vcpu); +u64 get_pvm_id_aa64isar0(const struct kvm_vcpu *vcpu); +u64 get_pvm_id_aa64isar1(const struct kvm_vcpu *vcpu); +u64 get_pvm_id_aa64mmfr0(const struct kvm_vcpu *vcpu); +u64 get_pvm_id_aa64mmfr1(const struct kvm_vcpu *vcpu); +u64 get_pvm_id_aa64mmfr2(const struct kvm_vcpu *vcpu); + +bool kvm_handle_pvm_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code); +int kvm_check_pvm_sysreg_table(void); +void inject_undef64(struct kvm_vcpu *vcpu); + +#endif /* __ARM64_KVM_NVHE_SYS_REGS_H__ */ diff --git a/arch/arm64/kvm/hyp/nvhe/Makefile b/arch/arm64/kvm/hyp/nvhe/Makefile index 8d741f71377f..0bbe37a18d5d 100644 --- a/arch/arm64/kvm/hyp/nvhe/Makefile +++ b/arch/arm64/kvm/hyp/nvhe/Makefile @@ -14,7 +14,7 @@ lib-objs := $(addprefix ../../../lib/, $(lib-objs)) obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o \ hyp-main.o hyp-smp.o psci-relay.o early_alloc.o stub.o page_alloc.o \ - cache.o setup.o mm.o mem_protect.o + cache.o setup.o mm.o mem_protect.o sys_regs.o obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \ ../fpsimd.o ../hyp-entry.o ../exception.o ../pgtable.o obj-y += $(lib-objs) diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c index 57c27846320f..c85ff64e63f2 100644 --- a/arch/arm64/kvm/hyp/nvhe/setup.c +++ b/arch/arm64/kvm/hyp/nvhe/setup.c @@ -14,6 +14,7 @@ #include #include #include +#include #include struct hyp_pool hpool; @@ -260,6 +261,8 @@ int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus, void (*fn)(phys_addr_t params_pa, void *finalize_fn_va); int ret; + BUG_ON(kvm_check_pvm_sysreg_table()); + if (!PAGE_ALIGNED(phys) || !PAGE_ALIGNED(size)) return -EINVAL; diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c index 8c9a0464be00..17d1a9512507 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -28,6 +28,7 @@ #include #include +#include /* Non-VHE specific context */ DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data); diff --git a/arch/arm64/kvm/hyp/nvhe/sys_regs.c b/arch/arm64/kvm/hyp/nvhe/sys_regs.c new file mode 100644 index 000000000000..6e3ea49af302 --- /dev/null +++ b/arch/arm64/kvm/hyp/nvhe/sys_regs.c @@ -0,0 +1,498 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2021 Google LLC + * Author: Fuad Tabba + */ + +#include +#include +#include + +#include + +#include + +#include "../../sys_regs.h" + +/* + * Copies of the host's CPU features registers holding sanitized values at hyp. + */ +u64 id_aa64pfr0_el1_sys_val; +u64 id_aa64pfr1_el1_sys_val; +u64 id_aa64isar0_el1_sys_val; +u64 id_aa64isar1_el1_sys_val; +u64 id_aa64mmfr2_el1_sys_val; + +/* + * Inject an unknown/undefined exception to an AArch64 guest while most of its + * sysregs are live. 
+ */ +void inject_undef64(struct kvm_vcpu *vcpu) +{ + u32 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT); + + *vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR); + *vcpu_cpsr(vcpu) = read_sysreg_el2(SYS_SPSR); + + vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA64_EL1 | + KVM_ARM64_EXCEPT_AA64_ELx_SYNC | + KVM_ARM64_PENDING_EXCEPTION); + + __kvm_adjust_pc(vcpu); + + write_sysreg_el1(esr, SYS_ESR); + write_sysreg_el1(read_sysreg_el2(SYS_ELR), SYS_ELR); + write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR); + write_sysreg_el2(*vcpu_cpsr(vcpu), SYS_SPSR); +} + +/* + * Returns the restricted features values of the feature register based on the + * limitations in restrict_fields. + * A feature id field value of 0b0000 does not impose any restrictions. + * Note: Use only for unsigned feature field values. + */ +static u64 get_restricted_features_unsigned(u64 sys_reg_val, + u64 restrict_fields) +{ + u64 value = 0UL; + u64 mask = GENMASK_ULL(ARM64_FEATURE_FIELD_BITS - 1, 0); + + /* + * According to the Arm Architecture Reference Manual, feature fields + * use increasing values to indicate increases in functionality. + * Iterate over the restricted feature fields and calculate the minimum + * unsigned value between the one supported by the system, and what the + * value is being restricted to. + */ + while (sys_reg_val && restrict_fields) { + value |= min(sys_reg_val & mask, restrict_fields & mask); + sys_reg_val &= ~mask; + restrict_fields &= ~mask; + mask <<= ARM64_FEATURE_FIELD_BITS; + } + + return value; +} + +/* + * Functions that return the value of feature id registers for protected VMs + * based on allowed features, system features, and KVM support. + */ + +u64 get_pvm_id_aa64pfr0(const struct kvm_vcpu *vcpu) +{ + const struct kvm *kvm = (const struct kvm *)kern_hyp_va(vcpu->kvm); + u64 set_mask = 0; + u64 allow_mask = PVM_ID_AA64PFR0_ALLOW; + + if (!vcpu_has_sve(vcpu)) + allow_mask &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_SVE); + + set_mask |= get_restricted_features_unsigned(id_aa64pfr0_el1_sys_val, + PVM_ID_AA64PFR0_RESTRICT_UNSIGNED); + + /* Spectre and Meltdown mitigation in KVM */ + set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2), + (u64)kvm->arch.pfr0_csv2); + set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3), + (u64)kvm->arch.pfr0_csv3); + + return (id_aa64pfr0_el1_sys_val & allow_mask) | set_mask; +} + +u64 get_pvm_id_aa64pfr1(const struct kvm_vcpu *vcpu) +{ + const struct kvm *kvm = (const struct kvm *)kern_hyp_va(vcpu->kvm); + u64 allow_mask = PVM_ID_AA64PFR1_ALLOW; + + if (!kvm_has_mte(kvm)) + allow_mask &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_MTE); + + return id_aa64pfr1_el1_sys_val & allow_mask; +} + +u64 get_pvm_id_aa64zfr0(const struct kvm_vcpu *vcpu) +{ + /* + * No support for Scalable Vectors, therefore, hyp has no sanitized + * copy of the feature id register. + */ + BUILD_BUG_ON(PVM_ID_AA64ZFR0_ALLOW != 0ULL); + return 0; +} + +u64 get_pvm_id_aa64dfr0(const struct kvm_vcpu *vcpu) +{ + /* + * No support for debug, including breakpoints, and watchpoints, + * therefore, pKVM has no sanitized copy of the feature id register. + */ + BUILD_BUG_ON(PVM_ID_AA64DFR0_ALLOW != 0ULL); + return 0; +} + +u64 get_pvm_id_aa64dfr1(const struct kvm_vcpu *vcpu) +{ + /* + * No support for debug, therefore, hyp has no sanitized copy of the + * feature id register. 
+ */ + BUILD_BUG_ON(PVM_ID_AA64DFR1_ALLOW != 0ULL); + return 0; +} + +u64 get_pvm_id_aa64afr0(const struct kvm_vcpu *vcpu) +{ + /* + * No support for implementation defined features, therefore, hyp has no + * sanitized copy of the feature id register. + */ + BUILD_BUG_ON(PVM_ID_AA64AFR0_ALLOW != 0ULL); + return 0; +} + +u64 get_pvm_id_aa64afr1(const struct kvm_vcpu *vcpu) +{ + /* + * No support for implementation defined features, therefore, hyp has no + * sanitized copy of the feature id register. + */ + BUILD_BUG_ON(PVM_ID_AA64AFR1_ALLOW != 0ULL); + return 0; +} + +u64 get_pvm_id_aa64isar0(const struct kvm_vcpu *vcpu) +{ + return id_aa64isar0_el1_sys_val & PVM_ID_AA64ISAR0_ALLOW; +} + +u64 get_pvm_id_aa64isar1(const struct kvm_vcpu *vcpu) +{ + u64 allow_mask = PVM_ID_AA64ISAR1_ALLOW; + + if (!vcpu_has_ptrauth(vcpu)) + allow_mask &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_APA) | + ARM64_FEATURE_MASK(ID_AA64ISAR1_API) | + ARM64_FEATURE_MASK(ID_AA64ISAR1_GPA) | + ARM64_FEATURE_MASK(ID_AA64ISAR1_GPI)); + + return id_aa64isar1_el1_sys_val & allow_mask; +} + +u64 get_pvm_id_aa64mmfr0(const struct kvm_vcpu *vcpu) +{ + u64 set_mask; + + set_mask = get_restricted_features_unsigned(id_aa64mmfr0_el1_sys_val, + PVM_ID_AA64MMFR0_RESTRICT_UNSIGNED); + + return (id_aa64mmfr0_el1_sys_val & PVM_ID_AA64MMFR0_ALLOW) | set_mask; +} + +u64 get_pvm_id_aa64mmfr1(const struct kvm_vcpu *vcpu) +{ + return id_aa64mmfr1_el1_sys_val & PVM_ID_AA64MMFR1_ALLOW; +} + +u64 get_pvm_id_aa64mmfr2(const struct kvm_vcpu *vcpu) +{ + return id_aa64mmfr2_el1_sys_val & PVM_ID_AA64MMFR2_ALLOW; +} + +/* Read a sanitized cpufeature ID register by its sys_reg_desc. */ +static u64 read_id_reg(const struct kvm_vcpu *vcpu, + struct sys_reg_desc const *r) +{ + u32 id = reg_to_encoding(r); + + switch (id) { + case SYS_ID_AA64PFR0_EL1: + return get_pvm_id_aa64pfr0(vcpu); + case SYS_ID_AA64PFR1_EL1: + return get_pvm_id_aa64pfr1(vcpu); + case SYS_ID_AA64ZFR0_EL1: + return get_pvm_id_aa64zfr0(vcpu); + case SYS_ID_AA64DFR0_EL1: + return get_pvm_id_aa64dfr0(vcpu); + case SYS_ID_AA64DFR1_EL1: + return get_pvm_id_aa64dfr1(vcpu); + case SYS_ID_AA64AFR0_EL1: + return get_pvm_id_aa64afr0(vcpu); + case SYS_ID_AA64AFR1_EL1: + return get_pvm_id_aa64afr1(vcpu); + case SYS_ID_AA64ISAR0_EL1: + return get_pvm_id_aa64isar0(vcpu); + case SYS_ID_AA64ISAR1_EL1: + return get_pvm_id_aa64isar1(vcpu); + case SYS_ID_AA64MMFR0_EL1: + return get_pvm_id_aa64mmfr0(vcpu); + case SYS_ID_AA64MMFR1_EL1: + return get_pvm_id_aa64mmfr1(vcpu); + case SYS_ID_AA64MMFR2_EL1: + return get_pvm_id_aa64mmfr2(vcpu); + default: + /* + * Should never happen because all cases are covered in + * pvm_sys_reg_descs[]. + */ + WARN_ON(1); + break; + } + + return 0; +} + +/* + * Accessor for AArch32 feature id registers. + * + * The value of these registers is "unknown" according to the spec if AArch32 + * isn't supported. + */ +static bool pvm_access_id_aarch32(struct kvm_vcpu *vcpu, + struct sys_reg_params *p, + const struct sys_reg_desc *r) +{ + if (p->is_write) { + inject_undef64(vcpu); + return false; + } + + /* + * No support for AArch32 guests, therefore, pKVM has no sanitized copy + * of AArch32 feature id registers. + */ + BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1), + PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) > ID_AA64PFR0_ELx_64BIT_ONLY); + + /* Use 0 for architecturally "unknown" values. */ + p->regval = 0; + return true; +} + +/* + * Accessor for AArch64 feature id registers. 
+ * + * If access is allowed, set the regval to the protected VM's view of the + * register and return true. + * Otherwise, inject an undefined exception and return false. + */ +static bool pvm_access_id_aarch64(struct kvm_vcpu *vcpu, + struct sys_reg_params *p, + const struct sys_reg_desc *r) +{ + if (p->is_write) { + inject_undef64(vcpu); + return false; + } + + p->regval = read_id_reg(vcpu, r); + return true; +} + +/* Mark the specified system register as an AArch32 feature id register. */ +#define AARCH32(REG) { SYS_DESC(REG), .access = pvm_access_id_aarch32 } + +/* Mark the specified system register as an AArch64 feature id register. */ +#define AARCH64(REG) { SYS_DESC(REG), .access = pvm_access_id_aarch64 } + +/* Mark the specified system register as not being handled in hyp. */ +#define HOST_HANDLED(REG) { SYS_DESC(REG), .access = NULL } + +/* + * Architected system registers. + * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2 + * + * NOTE: Anything not explicitly listed here is *restricted by default*, i.e., + * it will lead to injecting an exception into the guest. + */ +static const struct sys_reg_desc pvm_sys_reg_descs[] = { + /* Cache maintenance by set/way operations are restricted. */ + + /* Debug and Trace Registers are restricted. */ + + /* AArch64 mappings of the AArch32 ID registers */ + /* CRm=1 */ + AARCH32(SYS_ID_PFR0_EL1), + AARCH32(SYS_ID_PFR1_EL1), + AARCH32(SYS_ID_DFR0_EL1), + AARCH32(SYS_ID_AFR0_EL1), + AARCH32(SYS_ID_MMFR0_EL1), + AARCH32(SYS_ID_MMFR1_EL1), + AARCH32(SYS_ID_MMFR2_EL1), + AARCH32(SYS_ID_MMFR3_EL1), + + /* CRm=2 */ + AARCH32(SYS_ID_ISAR0_EL1), + AARCH32(SYS_ID_ISAR1_EL1), + AARCH32(SYS_ID_ISAR2_EL1), + AARCH32(SYS_ID_ISAR3_EL1), + AARCH32(SYS_ID_ISAR4_EL1), + AARCH32(SYS_ID_ISAR5_EL1), + AARCH32(SYS_ID_MMFR4_EL1), + AARCH32(SYS_ID_ISAR6_EL1), + + /* CRm=3 */ + AARCH32(SYS_MVFR0_EL1), + AARCH32(SYS_MVFR1_EL1), + AARCH32(SYS_MVFR2_EL1), + AARCH32(SYS_ID_PFR2_EL1), + AARCH32(SYS_ID_DFR1_EL1), + AARCH32(SYS_ID_MMFR5_EL1), + + /* AArch64 ID registers */ + /* CRm=4 */ + AARCH64(SYS_ID_AA64PFR0_EL1), + AARCH64(SYS_ID_AA64PFR1_EL1), + AARCH64(SYS_ID_AA64ZFR0_EL1), + AARCH64(SYS_ID_AA64DFR0_EL1), + AARCH64(SYS_ID_AA64DFR1_EL1), + AARCH64(SYS_ID_AA64AFR0_EL1), + AARCH64(SYS_ID_AA64AFR1_EL1), + AARCH64(SYS_ID_AA64ISAR0_EL1), + AARCH64(SYS_ID_AA64ISAR1_EL1), + AARCH64(SYS_ID_AA64MMFR0_EL1), + AARCH64(SYS_ID_AA64MMFR1_EL1), + AARCH64(SYS_ID_AA64MMFR2_EL1), + + HOST_HANDLED(SYS_SCTLR_EL1), + HOST_HANDLED(SYS_ACTLR_EL1), + HOST_HANDLED(SYS_CPACR_EL1), + + HOST_HANDLED(SYS_RGSR_EL1), + HOST_HANDLED(SYS_GCR_EL1), + + /* Scalable Vector Registers are restricted. 
*/ + + HOST_HANDLED(SYS_TTBR0_EL1), + HOST_HANDLED(SYS_TTBR1_EL1), + HOST_HANDLED(SYS_TCR_EL1), + + HOST_HANDLED(SYS_APIAKEYLO_EL1), + HOST_HANDLED(SYS_APIAKEYHI_EL1), + HOST_HANDLED(SYS_APIBKEYLO_EL1), + HOST_HANDLED(SYS_APIBKEYHI_EL1), + HOST_HANDLED(SYS_APDAKEYLO_EL1), + HOST_HANDLED(SYS_APDAKEYHI_EL1), + HOST_HANDLED(SYS_APDBKEYLO_EL1), + HOST_HANDLED(SYS_APDBKEYHI_EL1), + HOST_HANDLED(SYS_APGAKEYLO_EL1), + HOST_HANDLED(SYS_APGAKEYHI_EL1), + + HOST_HANDLED(SYS_AFSR0_EL1), + HOST_HANDLED(SYS_AFSR1_EL1), + HOST_HANDLED(SYS_ESR_EL1), + + HOST_HANDLED(SYS_ERRIDR_EL1), + HOST_HANDLED(SYS_ERRSELR_EL1), + HOST_HANDLED(SYS_ERXFR_EL1), + HOST_HANDLED(SYS_ERXCTLR_EL1), + HOST_HANDLED(SYS_ERXSTATUS_EL1), + HOST_HANDLED(SYS_ERXADDR_EL1), + HOST_HANDLED(SYS_ERXMISC0_EL1), + HOST_HANDLED(SYS_ERXMISC1_EL1), + + HOST_HANDLED(SYS_TFSR_EL1), + HOST_HANDLED(SYS_TFSRE0_EL1), + + HOST_HANDLED(SYS_FAR_EL1), + HOST_HANDLED(SYS_PAR_EL1), + + /* Performance Monitoring Registers are restricted. */ + + HOST_HANDLED(SYS_MAIR_EL1), + HOST_HANDLED(SYS_AMAIR_EL1), + + /* Limited Ordering Regions Registers are restricted. */ + + HOST_HANDLED(SYS_VBAR_EL1), + HOST_HANDLED(SYS_DISR_EL1), + + /* GIC CPU Interface registers are restricted. */ + + HOST_HANDLED(SYS_CONTEXTIDR_EL1), + HOST_HANDLED(SYS_TPIDR_EL1), + + HOST_HANDLED(SYS_SCXTNUM_EL1), + + HOST_HANDLED(SYS_CNTKCTL_EL1), + + HOST_HANDLED(SYS_CCSIDR_EL1), + HOST_HANDLED(SYS_CLIDR_EL1), + HOST_HANDLED(SYS_CSSELR_EL1), + HOST_HANDLED(SYS_CTR_EL0), + + /* Performance Monitoring Registers are restricted. */ + + HOST_HANDLED(SYS_TPIDR_EL0), + HOST_HANDLED(SYS_TPIDRRO_EL0), + + HOST_HANDLED(SYS_SCXTNUM_EL0), + + /* Activity Monitoring Registers are restricted. */ + + HOST_HANDLED(SYS_CNTP_TVAL_EL0), + HOST_HANDLED(SYS_CNTP_CTL_EL0), + HOST_HANDLED(SYS_CNTP_CVAL_EL0), + + /* Performance Monitoring Registers are restricted. */ + + HOST_HANDLED(SYS_DACR32_EL2), + HOST_HANDLED(SYS_IFSR32_EL2), + HOST_HANDLED(SYS_FPEXC32_EL2), +}; + +/* + * Checks that the sysreg table is unique and in-order. + * + * Returns 0 if the table is consistent, or 1 otherwise. + */ +int kvm_check_pvm_sysreg_table(void) +{ + unsigned int i; + + for (i = 1; i < ARRAY_SIZE(pvm_sys_reg_descs); i++) { + if (cmp_sys_reg(&pvm_sys_reg_descs[i-1], &pvm_sys_reg_descs[i]) >= 0) + return 1; + } + + return 0; +} + +/* + * Handler for protected VM MSR, MRS or System instruction execution. + * + * Returns true if the hypervisor has handled the exit, and control should go + * back to the guest, or false if it hasn't, to be handled by the host. + */ +bool kvm_handle_pvm_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code) +{ + const struct sys_reg_desc *r; + struct sys_reg_params params; + unsigned long esr = kvm_vcpu_get_esr(vcpu); + int Rt = kvm_vcpu_sys_get_rt(vcpu); + + params = esr_sys64_to_params(esr); + params.regval = vcpu_get_reg(vcpu, Rt); + + r = find_reg(¶ms, pvm_sys_reg_descs, ARRAY_SIZE(pvm_sys_reg_descs)); + + /* Undefined (RESTRICTED). */ + if (r == NULL) { + inject_undef64(vcpu); + return true; + } + + /* Handled by the host (HOST_HANDLED) */ + if (r->access == NULL) + return false; + + /* Handled by hyp: skip instruction if instructed to do so. 
*/ + if (r->access(vcpu, ¶ms, r)) + __kvm_skip_instr(vcpu); + + if (!params.is_write) + vcpu_set_reg(vcpu, Rt, params.regval); + + return true; +} -- cgit v1.2.3 From 2a0c343386ae1a6826e1b9d751bfc14f4711c2de Mon Sep 17 00:00:00 2001 From: Fuad Tabba Date: Sun, 10 Oct 2021 15:56:33 +0100 Subject: KVM: arm64: Initialize trap registers for protected VMs Protected VMs have more restricted features that need to be trapped. Moreover, the host should not be trusted to set the appropriate trapping registers and their values. Initialize the trapping registers, i.e., hcr_el2, mdcr_el2, and cptr_el2 at EL2 for protected guests, based on the values of the guest's feature id registers. No functional change intended as trap handlers introduced in the previous patch are still not hooked in to the guest exit handlers. Reviewed-by: Andrew Jones Signed-off-by: Fuad Tabba Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211010145636.1950948-9-tabba@google.com --- arch/arm64/include/asm/kvm_asm.h | 1 + arch/arm64/include/asm/kvm_host.h | 2 + arch/arm64/kvm/arm.c | 8 ++ arch/arm64/kvm/hyp/include/nvhe/trap_handler.h | 2 + arch/arm64/kvm/hyp/nvhe/Makefile | 2 +- arch/arm64/kvm/hyp/nvhe/hyp-main.c | 9 ++ arch/arm64/kvm/hyp/nvhe/pkvm.c | 186 +++++++++++++++++++++++++ 7 files changed, 209 insertions(+), 1 deletion(-) create mode 100644 arch/arm64/kvm/hyp/nvhe/pkvm.c diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index e86045ac43ba..a460e1243cef 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -64,6 +64,7 @@ #define __KVM_HOST_SMCCC_FUNC___pkvm_cpu_set_vector 18 #define __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize 19 #define __KVM_HOST_SMCCC_FUNC___kvm_adjust_pc 20 +#define __KVM_HOST_SMCCC_FUNC___pkvm_vcpu_init_traps 21 #ifndef __ASSEMBLY__ diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index f8be56d5342b..4a323aa27a6b 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -780,6 +780,8 @@ static inline bool kvm_vm_is_protected(struct kvm *kvm) return false; } +void kvm_init_protected_traps(struct kvm_vcpu *vcpu); + int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature); bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu); diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 6aa7b0c5bf21..3af6d59d1919 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -620,6 +620,14 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu) ret = kvm_arm_pmu_v3_enable(vcpu); + /* + * Initialize traps for protected VMs. + * NOTE: Move to run in EL2 directly, rather than via a hypercall, once + * the code is in place for first run initialization at EL2. 
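The hypercall path used here is the standard one: the host passes a function ID plus arguments in a cpu context, and the hyp side indexes a function-pointer table (see the host_hcall[] additions further down). A stripped-down sketch of that dispatch pattern, with invented demo_* names rather than the real SMCCC plumbing:

#include <stdio.h>

/* Toy context: only the registers the demo handlers care about. */
struct demo_ctxt {
	unsigned long regs[4];
};

typedef void (*hcall_fn)(struct demo_ctxt *ctxt);

static void handle_vcpu_init_traps(struct demo_ctxt *ctxt)
{
	/* In the real code this would call __pkvm_vcpu_init_traps(vcpu). */
	printf("init traps for vcpu token %lu\n", ctxt->regs[1]);
}

/* Function ID -> handler, like the host_hcall[] table in hyp-main.c. */
static const hcall_fn demo_hcall[] = {
	[0] = handle_vcpu_init_traps,
};

static void demo_dispatch(struct demo_ctxt *ctxt)
{
	unsigned long id = ctxt->regs[0];

	if (id < sizeof(demo_hcall) / sizeof(demo_hcall[0]) && demo_hcall[id])
		demo_hcall[id](ctxt);
	else
		printf("unknown hypercall %lu\n", id);
}

int main(void)
{
	struct demo_ctxt ctxt = { .regs = { 0, 42 } };

	demo_dispatch(&ctxt);
	return 0;
}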
+ */ + if (kvm_vm_is_protected(kvm)) + kvm_call_hyp_nvhe(__pkvm_vcpu_init_traps, vcpu); + return ret; } diff --git a/arch/arm64/kvm/hyp/include/nvhe/trap_handler.h b/arch/arm64/kvm/hyp/include/nvhe/trap_handler.h index 1e6d995968a1..45a84f0ade04 100644 --- a/arch/arm64/kvm/hyp/include/nvhe/trap_handler.h +++ b/arch/arm64/kvm/hyp/include/nvhe/trap_handler.h @@ -15,4 +15,6 @@ #define DECLARE_REG(type, name, ctxt, reg) \ type name = (type)cpu_reg(ctxt, (reg)) +void __pkvm_vcpu_init_traps(struct kvm_vcpu *vcpu); + #endif /* __ARM64_KVM_NVHE_TRAP_HANDLER_H__ */ diff --git a/arch/arm64/kvm/hyp/nvhe/Makefile b/arch/arm64/kvm/hyp/nvhe/Makefile index 0bbe37a18d5d..c3c11974fa3b 100644 --- a/arch/arm64/kvm/hyp/nvhe/Makefile +++ b/arch/arm64/kvm/hyp/nvhe/Makefile @@ -14,7 +14,7 @@ lib-objs := $(addprefix ../../../lib/, $(lib-objs)) obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o \ hyp-main.o hyp-smp.o psci-relay.o early_alloc.o stub.o page_alloc.o \ - cache.o setup.o mm.o mem_protect.o sys_regs.o + cache.o setup.o mm.o mem_protect.o sys_regs.o pkvm.o obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \ ../fpsimd.o ../hyp-entry.o ../exception.o ../pgtable.o obj-y += $(lib-objs) diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c index 8ca1104f4774..a6303db09cd6 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c +++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c @@ -160,6 +160,14 @@ static void handle___pkvm_prot_finalize(struct kvm_cpu_context *host_ctxt) { cpu_reg(host_ctxt, 1) = __pkvm_prot_finalize(); } + +static void handle___pkvm_vcpu_init_traps(struct kvm_cpu_context *host_ctxt) +{ + DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1); + + __pkvm_vcpu_init_traps(kern_hyp_va(vcpu)); +} + typedef void (*hcall_t)(struct kvm_cpu_context *); #define HANDLE_FUNC(x) [__KVM_HOST_SMCCC_FUNC_##x] = (hcall_t)handle_##x @@ -185,6 +193,7 @@ static const hcall_t host_hcall[] = { HANDLE_FUNC(__pkvm_host_share_hyp), HANDLE_FUNC(__pkvm_create_private_mapping), HANDLE_FUNC(__pkvm_prot_finalize), + HANDLE_FUNC(__pkvm_vcpu_init_traps), }; static void handle_host_hcall(struct kvm_cpu_context *host_ctxt) diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c new file mode 100644 index 000000000000..633547cc1033 --- /dev/null +++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c @@ -0,0 +1,186 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2021 Google LLC + * Author: Fuad Tabba + */ + +#include +#include +#include +#include +#include + +/* + * Set trap register values based on features in ID_AA64PFR0. + */ +static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu) +{ + const u64 feature_ids = get_pvm_id_aa64pfr0(vcpu); + u64 hcr_set = HCR_RW; + u64 hcr_clear = 0; + u64 cptr_set = 0; + + /* Protected KVM does not support AArch32 guests. */ + BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL0), + PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_ELx_64BIT_ONLY); + BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1), + PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_ELx_64BIT_ONLY); + + /* + * Linux guests assume support for floating-point and Advanced SIMD. Do + * not change the trapping behavior for these from the KVM default. 
+ */ + BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_FP), + PVM_ID_AA64PFR0_ALLOW)); + BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_ASIMD), + PVM_ID_AA64PFR0_ALLOW)); + + /* Trap RAS unless all current versions are supported */ + if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_RAS), feature_ids) < + ID_AA64PFR0_RAS_V1P1) { + hcr_set |= HCR_TERR | HCR_TEA; + hcr_clear |= HCR_FIEN; + } + + /* Trap AMU */ + if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_AMU), feature_ids)) { + hcr_clear |= HCR_AMVOFFEN; + cptr_set |= CPTR_EL2_TAM; + } + + /* Trap SVE */ + if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_SVE), feature_ids)) + cptr_set |= CPTR_EL2_TZ; + + vcpu->arch.hcr_el2 |= hcr_set; + vcpu->arch.hcr_el2 &= ~hcr_clear; + vcpu->arch.cptr_el2 |= cptr_set; +} + +/* + * Set trap register values based on features in ID_AA64PFR1. + */ +static void pvm_init_traps_aa64pfr1(struct kvm_vcpu *vcpu) +{ + const u64 feature_ids = get_pvm_id_aa64pfr1(vcpu); + u64 hcr_set = 0; + u64 hcr_clear = 0; + + /* Memory Tagging: Trap and Treat as Untagged if not supported. */ + if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_MTE), feature_ids)) { + hcr_set |= HCR_TID5; + hcr_clear |= HCR_DCT | HCR_ATA; + } + + vcpu->arch.hcr_el2 |= hcr_set; + vcpu->arch.hcr_el2 &= ~hcr_clear; +} + +/* + * Set trap register values based on features in ID_AA64DFR0. + */ +static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu) +{ + const u64 feature_ids = get_pvm_id_aa64dfr0(vcpu); + u64 mdcr_set = 0; + u64 mdcr_clear = 0; + u64 cptr_set = 0; + + /* Trap/constrain PMU */ + if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_PMUVER), feature_ids)) { + mdcr_set |= MDCR_EL2_TPM | MDCR_EL2_TPMCR; + mdcr_clear |= MDCR_EL2_HPME | MDCR_EL2_MTPME | + MDCR_EL2_HPMN_MASK; + } + + /* Trap Debug */ + if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER), feature_ids)) + mdcr_set |= MDCR_EL2_TDRA | MDCR_EL2_TDA | MDCR_EL2_TDE; + + /* Trap OS Double Lock */ + if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_DOUBLELOCK), feature_ids)) + mdcr_set |= MDCR_EL2_TDOSA; + + /* Trap SPE */ + if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_PMSVER), feature_ids)) { + mdcr_set |= MDCR_EL2_TPMS; + mdcr_clear |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT; + } + + /* Trap Trace Filter */ + if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_TRACE_FILT), feature_ids)) + mdcr_set |= MDCR_EL2_TTRF; + + /* Trap Trace */ + if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_TRACEVER), feature_ids)) + cptr_set |= CPTR_EL2_TTA; + + vcpu->arch.mdcr_el2 |= mdcr_set; + vcpu->arch.mdcr_el2 &= ~mdcr_clear; + vcpu->arch.cptr_el2 |= cptr_set; +} + +/* + * Set trap register values based on features in ID_AA64MMFR0. + */ +static void pvm_init_traps_aa64mmfr0(struct kvm_vcpu *vcpu) +{ + const u64 feature_ids = get_pvm_id_aa64mmfr0(vcpu); + u64 mdcr_set = 0; + + /* Trap Debug Communications Channel registers */ + if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_FGT), feature_ids)) + mdcr_set |= MDCR_EL2_TDCC; + + vcpu->arch.mdcr_el2 |= mdcr_set; +} + +/* + * Set trap register values based on features in ID_AA64MMFR1. + */ +static void pvm_init_traps_aa64mmfr1(struct kvm_vcpu *vcpu) +{ + const u64 feature_ids = get_pvm_id_aa64mmfr1(vcpu); + u64 hcr_set = 0; + + /* Trap LOR */ + if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR1_LOR), feature_ids)) + hcr_set |= HCR_TLOR; + + vcpu->arch.hcr_el2 |= hcr_set; +} + +/* + * Set baseline trap register values. 
+ */ +static void pvm_init_trap_regs(struct kvm_vcpu *vcpu) +{ + const u64 hcr_trap_feat_regs = HCR_TID3; + const u64 hcr_trap_impdef = HCR_TACR | HCR_TIDCP | HCR_TID1; + + /* + * Always trap: + * - Feature id registers: to control features exposed to guests + * - Implementation-defined features + */ + vcpu->arch.hcr_el2 |= hcr_trap_feat_regs | hcr_trap_impdef; + + /* Clear res0 and set res1 bits to trap potential new features. */ + vcpu->arch.hcr_el2 &= ~(HCR_RES0); + vcpu->arch.mdcr_el2 &= ~(MDCR_EL2_RES0); + vcpu->arch.cptr_el2 |= CPTR_NVHE_EL2_RES1; + vcpu->arch.cptr_el2 &= ~(CPTR_NVHE_EL2_RES0); +} + +/* + * Initialize trap register values for protected VMs. + */ +void __pkvm_vcpu_init_traps(struct kvm_vcpu *vcpu) +{ + pvm_init_trap_regs(vcpu); + pvm_init_traps_aa64pfr0(vcpu); + pvm_init_traps_aa64pfr1(vcpu); + pvm_init_traps_aa64dfr0(vcpu); + pvm_init_traps_aa64mmfr0(vcpu); + pvm_init_traps_aa64mmfr1(vcpu); +} -- cgit v1.2.3 From 72e1be120eaaf82a58c81fcf173cdb1d7a5dcfbb Mon Sep 17 00:00:00 2001 From: Fuad Tabba Date: Sun, 10 Oct 2021 15:56:34 +0100 Subject: KVM: arm64: Move sanitized copies of CPU features Move the sanitized copies of the CPU feature registers to the recently created sys_regs.c. This consolidates all copies in a more relevant file. No functional change intended. Acked-by: Will Deacon Signed-off-by: Fuad Tabba Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211010145636.1950948-10-tabba@google.com --- arch/arm64/kvm/hyp/nvhe/mem_protect.c | 6 ------ arch/arm64/kvm/hyp/nvhe/sys_regs.c | 2 ++ 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c index 2a07d63b8498..f6d96e60b323 100644 --- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c +++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c @@ -25,12 +25,6 @@ struct host_kvm host_kvm; static struct hyp_pool host_s2_pool; -/* - * Copies of the host's CPU features registers holding sanitized values. - */ -u64 id_aa64mmfr0_el1_sys_val; -u64 id_aa64mmfr1_el1_sys_val; - const u8 pkvm_hyp_id = 1; static void *host_s2_zalloc_pages_exact(size_t size) diff --git a/arch/arm64/kvm/hyp/nvhe/sys_regs.c b/arch/arm64/kvm/hyp/nvhe/sys_regs.c index 6e3ea49af302..6bde2dc5205c 100644 --- a/arch/arm64/kvm/hyp/nvhe/sys_regs.c +++ b/arch/arm64/kvm/hyp/nvhe/sys_regs.c @@ -21,6 +21,8 @@ u64 id_aa64pfr0_el1_sys_val; u64 id_aa64pfr1_el1_sys_val; u64 id_aa64isar0_el1_sys_val; u64 id_aa64isar1_el1_sys_val; +u64 id_aa64mmfr0_el1_sys_val; +u64 id_aa64mmfr1_el1_sys_val; u64 id_aa64mmfr2_el1_sys_val; /* -- cgit v1.2.3 From 1423afcb411780c7a6a68f801fdcfb6920ad6f06 Mon Sep 17 00:00:00 2001 From: Fuad Tabba Date: Sun, 10 Oct 2021 15:56:35 +0100 Subject: KVM: arm64: Trap access to pVM restricted features Trap accesses to restricted features for VMs running in protected mode. Access to feature registers are emulated, and only supported features are exposed to protected VMs. Accesses to restricted registers as well as restricted instructions are trapped, and an undefined exception is injected into the protected guests, i.e., with EC = 0x0 (unknown reason). This EC is the one used, according to the Arm Architecture Reference Manual, for unallocated or undefined system registers or instructions. Only affects the functionality of protected VMs. Otherwise, should not affect non-protected VMs when KVM is running in protected mode. 
Signed-off-by: Fuad Tabba Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211010145636.1950948-11-tabba@google.com --- arch/arm64/kvm/hyp/nvhe/switch.c | 57 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 57 insertions(+) diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c index 17d1a9512507..2c72c31e516e 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -159,6 +160,49 @@ static void __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt) write_sysreg(pmu->events_host, pmcntenset_el0); } +/** + * Handler for protected VM restricted exceptions. + * + * Inject an undefined exception into the guest and return true to indicate that + * the hypervisor has handled the exit, and control should go back to the guest. + */ +static bool kvm_handle_pvm_restricted(struct kvm_vcpu *vcpu, u64 *exit_code) +{ + inject_undef64(vcpu); + return true; +} + +/** + * Handler for protected VM MSR, MRS or System instruction execution in AArch64. + * + * Returns true if the hypervisor has handled the exit, and control should go + * back to the guest, or false if it hasn't. + */ +static bool kvm_handle_pvm_sys64(struct kvm_vcpu *vcpu, u64 *exit_code) +{ + if (kvm_handle_pvm_sysreg(vcpu, exit_code)) + return true; + + return kvm_hyp_handle_sysreg(vcpu, exit_code); +} + +/** + * Handler for protected floating-point and Advanced SIMD accesses. + * + * Returns true if the hypervisor has handled the exit, and control should go + * back to the guest, or false if it hasn't. + */ +static bool kvm_handle_pvm_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code) +{ + /* Linux guests assume support for floating-point and Advanced SIMD. */ + BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_FP), + PVM_ID_AA64PFR0_ALLOW)); + BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_ASIMD), + PVM_ID_AA64PFR0_ALLOW)); + + return kvm_hyp_handle_fpsimd(vcpu, exit_code); +} + static const exit_handler_fn hyp_exit_handlers[] = { [0 ... ESR_ELx_EC_MAX] = NULL, [ESR_ELx_EC_CP15_32] = kvm_hyp_handle_cp15_32, @@ -170,8 +214,21 @@ static const exit_handler_fn hyp_exit_handlers[] = { [ESR_ELx_EC_PAC] = kvm_hyp_handle_ptrauth, }; +static const exit_handler_fn pvm_exit_handlers[] = { + [0 ... ESR_ELx_EC_MAX] = NULL, + [ESR_ELx_EC_SYS64] = kvm_handle_pvm_sys64, + [ESR_ELx_EC_SVE] = kvm_handle_pvm_restricted, + [ESR_ELx_EC_FP_ASIMD] = kvm_handle_pvm_fpsimd, + [ESR_ELx_EC_IABT_LOW] = kvm_hyp_handle_iabt_low, + [ESR_ELx_EC_DABT_LOW] = kvm_hyp_handle_dabt_low, + [ESR_ELx_EC_PAC] = kvm_hyp_handle_ptrauth, +}; + static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm *kvm) { + if (unlikely(kvm_vm_is_protected(kvm))) + return pvm_exit_handlers; + return hyp_exit_handlers; } -- cgit v1.2.3 From 5f39efc42052b042c4d7ba6fd77934e8de43e10c Mon Sep 17 00:00:00 2001 From: Fuad Tabba Date: Sun, 10 Oct 2021 15:56:36 +0100 Subject: KVM: arm64: Handle protected guests at 32 bits Protected KVM does not support protected AArch32 guests. However, it is possible for the guest to force run AArch32, potentially causing problems. Add an extra check so that if the hypervisor catches the guest doing that, it can prevent the guest from running again by resetting vcpu->arch.target and returning ARM_EXCEPTION_IL. If this were to happen, The VMM can try and fix it by re- initializing the vcpu with KVM_ARM_VCPU_INIT, however, this is likely not possible for protected VMs. 
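Concretely, the check boils down to inspecting the saved PSTATE on exit: if a protected guest is caught executing in AArch32, the vcpu is invalidated and the run loop bails out with an illegal-exit code. A condensed sketch of that decision, assuming bit 4 of PSTATE marks the 32-bit execution state as PSR_MODE32_BIT does in the kernel headers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_PSR_MODE32_BIT  (1U << 4)   /* AArch32 execution state        */
#define DEMO_EXCEPTION_IL    1           /* stand-in for ARM_EXCEPTION_IL  */

struct demo_vcpu {
	uint32_t pstate;
	int target;                          /* -1 marks the vcpu as unusable */
	bool protected_vm;
};

/* Returns false when the guest must not be resumed. */
static bool demo_handle_aarch32_guest(struct demo_vcpu *vcpu, uint64_t *exit_code)
{
	if (vcpu->protected_vm && (vcpu->pstate & DEMO_PSR_MODE32_BIT)) {
		vcpu->target = -1;           /* refuse to run this vcpu again */
		*exit_code = DEMO_EXCEPTION_IL;
		return false;
	}
	return true;
}

int main(void)
{
	struct demo_vcpu vcpu = { .pstate = DEMO_PSR_MODE32_BIT,
				  .target = 0, .protected_vm = true };
	uint64_t exit_code = 0;

	if (!demo_handle_aarch32_guest(&vcpu, &exit_code))
		printf("exit to host, code=%llu\n", (unsigned long long)exit_code);
	return 0;
}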
Adapted from commit 22f553842b14 ("KVM: arm64: Handle Asymmetric AArch32 systems") Signed-off-by: Fuad Tabba Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211010145636.1950948-12-tabba@google.com --- arch/arm64/kvm/hyp/nvhe/switch.c | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c index 2c72c31e516e..f25b6353a598 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -232,6 +232,37 @@ static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm *kvm) return hyp_exit_handlers; } +/* + * Some guests (e.g., protected VMs) are not be allowed to run in AArch32. + * The ARMv8 architecture does not give the hypervisor a mechanism to prevent a + * guest from dropping to AArch32 EL0 if implemented by the CPU. If the + * hypervisor spots a guest in such a state ensure it is handled, and don't + * trust the host to spot or fix it. The check below is based on the one in + * kvm_arch_vcpu_ioctl_run(). + * + * Returns false if the guest ran in AArch32 when it shouldn't have, and + * thus should exit to the host, or true if a the guest run loop can continue. + */ +static bool handle_aarch32_guest(struct kvm_vcpu *vcpu, u64 *exit_code) +{ + struct kvm *kvm = kern_hyp_va(vcpu->kvm); + + if (kvm_vm_is_protected(kvm) && vcpu_mode_is_32bit(vcpu)) { + /* + * As we have caught the guest red-handed, decide that it isn't + * fit for purpose anymore by making the vcpu invalid. The VMM + * can try and fix it by re-initializing the vcpu with + * KVM_ARM_VCPU_INIT, however, this is likely not possible for + * protected VMs. + */ + vcpu->arch.target = -1; + *exit_code = ARM_EXCEPTION_IL; + return false; + } + + return true; +} + /* Switch to the guest for legacy non-VHE systems */ int __kvm_vcpu_run(struct kvm_vcpu *vcpu) { @@ -294,6 +325,9 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu) /* Jump in the fire! */ exit_code = __guest_enter(vcpu); + if (unlikely(!handle_aarch32_guest(vcpu, &exit_code))) + break; + /* And we're baaack! */ } while (fixup_guest_exit(vcpu, &exit_code)); -- cgit v1.2.3 From 53e8ce137f7b34bd7a54429d18e0d0e5f56f54e8 Mon Sep 17 00:00:00 2001 From: Alexandru Elisei Date: Mon, 11 Oct 2021 16:38:35 +0100 Subject: Documentation: admin-guide: Document side effects when pKVM is enabled Recent changes to KVM for arm64 has made it impossible for the host to hibernate or use kexec when protected mode is enabled via the kernel command line. There are people who rely on kexec (for example, developers who use kexec as a quick way to test a new kernel), let's document this change in behaviour, so it doesn't catch them by surprise and we have a place to point people to if it does. Signed-off-by: Alexandru Elisei Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211011153835.291147-1-alexandru.elisei@arm.com --- Documentation/admin-guide/kernel-parameters.txt | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 91ba391f9b32..741e33fd444a 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -2372,7 +2372,9 @@ state is kept private from the host. Not valid if the kernel is running in EL2. - Defaults to VHE/nVHE based on hardware support. + Defaults to VHE/nVHE based on hardware support. 
Setting + mode to "protected" will disable kexec and hibernation + for the host. kvm-arm.vgic_v3_group0_trap= [KVM,ARM] Trap guest accesses to GICv3 group-0 -- cgit v1.2.3 From 69adec18e94ff3ca20447916a3bd23ab1d06b878 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 12 Oct 2021 12:23:12 +0100 Subject: KVM: arm64: Fix reporting of endianess when the access originates at EL0 We currently check SCTLR_EL1.EE when computing the address of a faulting guest access. However, the fault could have occured at EL0, in which case the right bit to check would be SCTLR_EL1.E0E. This is pretty unlikely to cause any issue in practice: You'd have to have a guest with a LE EL1 and a BE EL0 (or the other way around), and have mapped a device into the EL0 page tables. Good luck with that! Signed-off-by: Marc Zyngier Reviewed-by: Andrew Jones Link: https://lore.kernel.org/r/20211012112312.1247467-1-maz@kernel.org --- arch/arm64/include/asm/kvm_emulate.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index fd418955e31e..f4871e47b2d0 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -396,7 +396,10 @@ static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu) if (vcpu_mode_is_32bit(vcpu)) return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT); - return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & (1 << 25)); + if (vcpu_mode_priv(vcpu)) + return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & SCTLR_ELx_EE); + else + return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & SCTLR_EL1_E0E); } static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu, -- cgit v1.2.3 From 562e530fd7707aad7fed953692d1835612238966 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sun, 10 Oct 2021 16:09:06 +0100 Subject: KVM: arm64: Force ID_AA64PFR0_EL1.GIC=1 when exposing a virtual GICv3 Until now, we always let ID_AA64PFR0_EL1.GIC reflect the value visible on the host, even if we were running a GICv2-enabled VM on a GICv3+compat host. That's fine, but we also now have the case of a host that does not expose ID_AA64PFR0_EL1.GIC==1 despite having a vGIC. Yes, this is confusing. Thank you M1. Let's go back to first principles and expose ID_AA64PFR0_EL1.GIC=1 when a GICv3 is exposed to the guest. This also hides a GICv4.1 CPU interface from the guest which has no business knowing about the v4.1 extension. 
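The change amounts to the usual clear-then-set rewrite of a 4-bit ID register field: mask out whatever GIC value the host reports and substitute 1. A quick stand-alone illustration of that FIELD_PREP-style rewrite, with a hand-rolled mask at an illustrative bit position in place of the kernel macros:

#include <stdint.h>
#include <stdio.h>

/* 4-bit field at an illustrative position; the real one is ID_AA64PFR0.GIC. */
#define DEMO_GIC_SHIFT  24
#define DEMO_GIC_MASK   (0xFULL << DEMO_GIC_SHIFT)

static uint64_t force_gic_to_one(uint64_t val)
{
	val &= ~DEMO_GIC_MASK;                 /* drop whatever the host reports */
	val |= 1ULL << DEMO_GIC_SHIFT;         /* advertise plain GICv3 support  */
	return val;
}

int main(void)
{
	uint64_t host_val = 0x3ULL << DEMO_GIC_SHIFT;  /* e.g. a fancier CPU i/f */

	printf("%#llx -> %#llx\n",
	       (unsigned long long)host_val,
	       (unsigned long long)force_gic_to_one(host_val));
	return 0;
}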
Reviewed-by: Alexandru Elisei Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211010150910.2911495-2-maz@kernel.org --- arch/arm64/kvm/sys_regs.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 1d46e185f31e..0e8fc29df19c 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1075,6 +1075,11 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu, val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2); val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3); val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3); + if (irqchip_in_kernel(vcpu->kvm) && + vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) { + val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_GIC); + val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_GIC), 1); + } break; case SYS_ID_AA64PFR1_EL1: val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_MTE); -- cgit v1.2.3 From df652bcf1136db7f16e486a204ba4b4fc4181759 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sun, 10 Oct 2021 16:09:07 +0100 Subject: KVM: arm64: vgic-v3: Work around GICv3 locally generated SErrors The infamous M1 has a feature nobody else ever implemented, in the form of the "GIC locally generated SError interrupts", also known as SEIS for short. These SErrors are generated when a guest does something that violates the GIC state machine. It would have been simpler to just *ignore* the damned thing, but that's not what this HW does. Oh well. This part of of the architecture is also amazingly under-specified. There is a whole 10 lines that describe the feature in a spec that is 930 pages long, and some of these lines are factually wrong. Oh, and it is deprecated, so the insentive to clarify it is low. Now, the spec says that this should be a *virtual* SError when HCR_EL2.AMO is set. As it turns out, that's not always the case on this CPU, and the SError sometimes fires on the host as a physical SError. Goodbye, cruel world. This clearly is a HW bug, and it means that a guest can easily take the host down, on demand. Thankfully, we have seen systems that were just as broken in the past, and we have the perfect vaccine for it. Apple M1, please meet the Cavium ThunderX workaround. All your GIC accesses will be trapped, sanitised, and emulated. Only the signalling aspect of the HW will be used. It won't be super speedy, but it will at least be safe. You're most welcome. Given that this has only ever been seen on this single implementation, that the spec is unclear at best and that we cannot trust it to ever be implemented correctly, gate the workaround solely on ICH_VTR_EL2.SEIS being set. Tested-by: Joey Gouly Reviewed-by: Alexandru Elisei Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211010150910.2911495-3-maz@kernel.org --- arch/arm64/kvm/vgic/vgic-v3.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c index 21a6207fb2ee..ae59e2580bf5 100644 --- a/arch/arm64/kvm/vgic/vgic-v3.c +++ b/arch/arm64/kvm/vgic/vgic-v3.c @@ -671,6 +671,14 @@ int vgic_v3_probe(const struct gic_kvm_info *info) group1_trap = true; } + if (kvm_vgic_global_state.ich_vtr_el2 & ICH_VTR_SEIS_MASK) { + kvm_info("GICv3 with locally generated SEI\n"); + + group0_trap = true; + group1_trap = true; + common_trap = true; + } + if (group0_trap || group1_trap || common_trap) { kvm_info("GICv3 sysreg trapping enabled ([%s%s%s], reduced performance)\n", group0_trap ? 
"G0" : "", -- cgit v1.2.3 From 0924729b21bffdd0e13f29ea6256d299fc807cff Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sun, 10 Oct 2021 16:09:08 +0100 Subject: KVM: arm64: vgic-v3: Reduce common group trapping to ICV_DIR_EL1 when possible On systems that advertise ICH_VTR_EL2.SEIS, we trap all GICv3 sysreg accesses from the guest. From a performance perspective, this is OK as long as the guest doesn't hammer the GICv3 CPU interface. In most cases, this is fine, unless the guest actively uses priorities and switches PMR_EL1 very often. Which is exactly what happens when a Linux guest runs with irqchip.gicv3_pseudo_nmi=1. In these condition, the performance plumets as we hit PMR each time we mask/unmask interrupts. Not good. There is however an opportunity for improvement. Careful reading of the architecture specification indicates that the only GICv3 sysreg belonging to the common group (which contains the SGI registers, PMR, DIR, CTLR and RPR) that is allowed to generate a SError is DIR. Everything else is safe. It is thus possible to substitute the trapping of all the common group with just that of DIR if it supported by the implementation. Yes, that's yet another optional bit of the architecture. So let's just do that, as it leads to some impressive result on the M1: Without this change: bash-5.1# /host/home/maz/hackbench 100 process 1000 Running with 100*40 (== 4000) tasks. Time: 56.596 With this change: bash-5.1# /host/home/maz/hackbench 100 process 1000 Running with 100*40 (== 4000) tasks. Time: 8.649 which is a pretty convincing result. Signed-off-by: Marc Zyngier Reviewed-by: Alexandru Elisei Link: https://lore.kernel.org/r/20211010150910.2911495-4-maz@kernel.org --- arch/arm64/include/asm/sysreg.h | 3 +++ arch/arm64/kvm/vgic/vgic-v3.c | 15 +++++++++++---- 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index b268082d67ed..9412a645a1c0 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -1152,6 +1152,7 @@ #define ICH_HCR_TC (1 << 10) #define ICH_HCR_TALL0 (1 << 11) #define ICH_HCR_TALL1 (1 << 12) +#define ICH_HCR_TDIR (1 << 14) #define ICH_HCR_EOIcount_SHIFT 27 #define ICH_HCR_EOIcount_MASK (0x1f << ICH_HCR_EOIcount_SHIFT) @@ -1184,6 +1185,8 @@ #define ICH_VTR_SEIS_MASK (1 << ICH_VTR_SEIS_SHIFT) #define ICH_VTR_A3V_SHIFT 21 #define ICH_VTR_A3V_MASK (1 << ICH_VTR_A3V_SHIFT) +#define ICH_VTR_TDS_SHIFT 19 +#define ICH_VTR_TDS_MASK (1 << ICH_VTR_TDS_SHIFT) #define ARM64_FEATURE_FIELD_BITS 4 diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c index ae59e2580bf5..467c22bbade6 100644 --- a/arch/arm64/kvm/vgic/vgic-v3.c +++ b/arch/arm64/kvm/vgic/vgic-v3.c @@ -15,6 +15,7 @@ static bool group0_trap; static bool group1_trap; static bool common_trap; +static bool dir_trap; static bool gicv4_enable; void vgic_v3_set_underflow(struct kvm_vcpu *vcpu) @@ -296,6 +297,8 @@ void vgic_v3_enable(struct kvm_vcpu *vcpu) vgic_v3->vgic_hcr |= ICH_HCR_TALL1; if (common_trap) vgic_v3->vgic_hcr |= ICH_HCR_TC; + if (dir_trap) + vgic_v3->vgic_hcr |= ICH_HCR_TDIR; } int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq) @@ -676,14 +679,18 @@ int vgic_v3_probe(const struct gic_kvm_info *info) group0_trap = true; group1_trap = true; - common_trap = true; + if (ich_vtr_el2 & ICH_VTR_TDS_MASK) + dir_trap = true; + else + common_trap = true; } - if (group0_trap || group1_trap || common_trap) { - kvm_info("GICv3 sysreg trapping enabled ([%s%s%s], reduced 
performance)\n", + if (group0_trap || group1_trap || common_trap | dir_trap) { + kvm_info("GICv3 sysreg trapping enabled ([%s%s%s%s], reduced performance)\n", group0_trap ? "G0" : "", group1_trap ? "G1" : "", - common_trap ? "C" : ""); + common_trap ? "C" : "", + dir_trap ? "D" : ""); static_branch_enable(&vgic_v3_cpuif_trap); } -- cgit v1.2.3 From f87ab682722299cddf8cf5f7bc17053d70300ee0 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sun, 10 Oct 2021 16:09:09 +0100 Subject: KVM: arm64: vgic-v3: Don't advertise ICC_CTLR_EL1.SEIS Since we are trapping all sysreg accesses when ICH_VTR_EL2.SEIS is set, and that we never deliver an SError when emulating any of the GICv3 sysregs, don't advertise ICC_CTLR_EL1.SEIS. Reviewed-by: Alexandru Elisei Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211010150910.2911495-5-maz@kernel.org --- arch/arm64/kvm/hyp/vgic-v3-sr.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c index 39f8f7f9227c..b3b50de496a3 100644 --- a/arch/arm64/kvm/hyp/vgic-v3-sr.c +++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c @@ -987,8 +987,6 @@ static void __vgic_v3_read_ctlr(struct kvm_vcpu *vcpu, u32 vmcr, int rt) val = ((vtr >> 29) & 7) << ICC_CTLR_EL1_PRI_BITS_SHIFT; /* IDbits */ val |= ((vtr >> 23) & 7) << ICC_CTLR_EL1_ID_BITS_SHIFT; - /* SEIS */ - val |= ((vtr >> 22) & 1) << ICC_CTLR_EL1_SEIS_SHIFT; /* A3V */ val |= ((vtr >> 21) & 1) << ICC_CTLR_EL1_A3V_SHIFT; /* EOImode */ -- cgit v1.2.3 From 9d449c71bd8f74282e84213c8f0b8328293ab0a7 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sun, 10 Oct 2021 16:09:10 +0100 Subject: KVM: arm64: vgic-v3: Align emulated cpuif LPI state machine with the pseudocode Having realised that a virtual LPI does transition through an active state that does not exist on bare metal, align the CPU interface emulation with the behaviour specified in the architecture pseudocode. The LPIs now transition to active on IAR read, and to inactive on EOI write. Special care is taken not to increment the EOIcount for an LPI that isn't present in the LRs. 
Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211010150910.2911495-6-maz@kernel.org --- arch/arm64/kvm/hyp/vgic-v3-sr.c | 20 ++++++++------------ 1 file changed, 8 insertions(+), 12 deletions(-) diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c index b3b50de496a3..20db2f281cf2 100644 --- a/arch/arm64/kvm/hyp/vgic-v3-sr.c +++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c @@ -695,9 +695,7 @@ static void __vgic_v3_read_iar(struct kvm_vcpu *vcpu, u32 vmcr, int rt) goto spurious; lr_val &= ~ICH_LR_STATE; - /* No active state for LPIs */ - if ((lr_val & ICH_LR_VIRTUAL_ID_MASK) <= VGIC_MAX_SPI) - lr_val |= ICH_LR_ACTIVE_BIT; + lr_val |= ICH_LR_ACTIVE_BIT; __gic_v3_set_lr(lr_val, lr); __vgic_v3_set_active_priority(lr_prio, vmcr, grp); vcpu_set_reg(vcpu, rt, lr_val & ICH_LR_VIRTUAL_ID_MASK); @@ -764,20 +762,18 @@ static void __vgic_v3_write_eoir(struct kvm_vcpu *vcpu, u32 vmcr, int rt) /* Drop priority in any case */ act_prio = __vgic_v3_clear_highest_active_priority(); - /* If EOIing an LPI, no deactivate to be performed */ - if (vid >= VGIC_MIN_LPI) - return; - - /* EOImode == 1, nothing to be done here */ - if (vmcr & ICH_VMCR_EOIM_MASK) - return; - lr = __vgic_v3_find_active_lr(vcpu, vid, &lr_val); if (lr == -1) { - __vgic_v3_bump_eoicount(); + /* Do not bump EOIcount for LPIs that aren't in the LRs */ + if (!(vid >= VGIC_MIN_LPI)) + __vgic_v3_bump_eoicount(); return; } + /* EOImode == 1 and not an LPI, nothing to be done here */ + if ((vmcr & ICH_VMCR_EOIM_MASK) && !(vid >= VGIC_MIN_LPI)) + return; + lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT; /* If priorities or group do not match, the guest has fscked-up. */ -- cgit v1.2.3 From 88ec7e258b70eed5e532d32115fccd11ea2a6287 Mon Sep 17 00:00:00 2001 From: Raghavendra Rao Ananta Date: Thu, 7 Oct 2021 23:34:25 +0000 Subject: KVM: arm64: selftests: Add MMIO readl/writel support Define the readl() and writel() functions for the guests to access (4-byte) the MMIO region. The routines, and their dependents, are inspired from the kernel's arch/arm64/include/asm/io.h and arch/arm64/include/asm/barrier.h. Signed-off-by: Raghavendra Rao Ananta Reviewed-by: Oliver Upton Reviewed-by: Andrew Jones Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211007233439.1826892-2-rananta@google.com --- .../selftests/kvm/include/aarch64/processor.h | 46 +++++++++++++++++++++- 1 file changed, 45 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/kvm/include/aarch64/processor.h b/tools/testing/selftests/kvm/include/aarch64/processor.h index c0273aefa63d..96578bd46a85 100644 --- a/tools/testing/selftests/kvm/include/aarch64/processor.h +++ b/tools/testing/selftests/kvm/include/aarch64/processor.h @@ -9,6 +9,7 @@ #include "kvm_util.h" #include +#include #define ARM64_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \ @@ -130,6 +131,49 @@ void vm_install_sync_handler(struct kvm_vm *vm, val; \ }) -#define isb() asm volatile("isb" : : : "memory") +#define isb() asm volatile("isb" : : : "memory") +#define dsb(opt) asm volatile("dsb " #opt : : : "memory") +#define dmb(opt) asm volatile("dmb " #opt : : : "memory") + +#define dma_wmb() dmb(oshst) +#define __iowmb() dma_wmb() + +#define dma_rmb() dmb(oshld) + +#define __iormb(v) \ +({ \ + unsigned long tmp; \ + \ + dma_rmb(); \ + \ + /* \ + * Courtesy of arch/arm64/include/asm/io.h: \ + * Create a dummy control dependency from the IO read to any \ + * later instructions. 
This ensures that a subsequent call \ + * to udelay() will be ordered due to the ISB in __delay(). \ + */ \ + asm volatile("eor %0, %1, %1\n" \ + "cbnz %0, ." \ + : "=r" (tmp) : "r" ((unsigned long)(v)) \ + : "memory"); \ +}) + +static __always_inline void __raw_writel(u32 val, volatile void *addr) +{ + asm volatile("str %w0, [%1]" : : "rZ" (val), "r" (addr)); +} + +static __always_inline u32 __raw_readl(const volatile void *addr) +{ + u32 val; + asm volatile("ldr %w0, [%1]" : "=r" (val) : "r" (addr)); + return val; +} + +#define writel_relaxed(v,c) ((void)__raw_writel((__force u32)cpu_to_le32(v),(c))) +#define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32)__raw_readl(c)); __r; }) + +#define writel(v,c) ({ __iowmb(); writel_relaxed((v),(c));}) +#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(__v); __v; }) #endif /* SELFTEST_KVM_PROCESSOR_H */ -- cgit v1.2.3 From 272a067df3c89f6f2176a350f88661625a2c8b3b Mon Sep 17 00:00:00 2001 From: Raghavendra Rao Ananta Date: Thu, 7 Oct 2021 23:34:26 +0000 Subject: tools: arm64: Import sysreg.h Bring-in the kernel's arch/arm64/include/asm/sysreg.h into tools/ for arm64 to make use of all the standard register definitions in consistence with the kernel. Make use of the register read/write definitions from sysreg.h, instead of the existing definitions. A syntax correction is needed for the files that use write_sysreg() to make it compliant with the new (kernel's) syntax. Reviewed-by: Andrew Jones Reviewed-by: Oliver Upton Signed-off-by: Raghavendra Rao Ananta [maz: squashed two commits in order to keep the series bisectable] Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211007233439.1826892-3-rananta@google.com Link: https://lore.kernel.org/r/20211007233439.1826892-4-rananta@google.com --- tools/arch/arm64/include/asm/sysreg.h | 1296 ++++++++++++++++++++ .../selftests/kvm/aarch64/debug-exceptions.c | 28 +- .../selftests/kvm/include/aarch64/processor.h | 13 +- 3 files changed, 1311 insertions(+), 26 deletions(-) create mode 100644 tools/arch/arm64/include/asm/sysreg.h diff --git a/tools/arch/arm64/include/asm/sysreg.h b/tools/arch/arm64/include/asm/sysreg.h new file mode 100644 index 000000000000..7640fa27be94 --- /dev/null +++ b/tools/arch/arm64/include/asm/sysreg.h @@ -0,0 +1,1296 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Macros for accessing system registers with older binutils. + * + * Copyright (C) 2014 ARM Ltd. 
+ * Author: Catalin Marinas + */ + +#ifndef __ASM_SYSREG_H +#define __ASM_SYSREG_H + +#include +#include + +/* + * ARMv8 ARM reserves the following encoding for system registers: + * (Ref: ARMv8 ARM, Section: "System instruction class encoding overview", + * C5.2, version:ARM DDI 0487A.f) + * [20-19] : Op0 + * [18-16] : Op1 + * [15-12] : CRn + * [11-8] : CRm + * [7-5] : Op2 + */ +#define Op0_shift 19 +#define Op0_mask 0x3 +#define Op1_shift 16 +#define Op1_mask 0x7 +#define CRn_shift 12 +#define CRn_mask 0xf +#define CRm_shift 8 +#define CRm_mask 0xf +#define Op2_shift 5 +#define Op2_mask 0x7 + +#define sys_reg(op0, op1, crn, crm, op2) \ + (((op0) << Op0_shift) | ((op1) << Op1_shift) | \ + ((crn) << CRn_shift) | ((crm) << CRm_shift) | \ + ((op2) << Op2_shift)) + +#define sys_insn sys_reg + +#define sys_reg_Op0(id) (((id) >> Op0_shift) & Op0_mask) +#define sys_reg_Op1(id) (((id) >> Op1_shift) & Op1_mask) +#define sys_reg_CRn(id) (((id) >> CRn_shift) & CRn_mask) +#define sys_reg_CRm(id) (((id) >> CRm_shift) & CRm_mask) +#define sys_reg_Op2(id) (((id) >> Op2_shift) & Op2_mask) + +#ifndef CONFIG_BROKEN_GAS_INST + +#ifdef __ASSEMBLY__ +// The space separator is omitted so that __emit_inst(x) can be parsed as +// either an assembler directive or an assembler macro argument. +#define __emit_inst(x) .inst(x) +#else +#define __emit_inst(x) ".inst " __stringify((x)) "\n\t" +#endif + +#else /* CONFIG_BROKEN_GAS_INST */ + +#ifndef CONFIG_CPU_BIG_ENDIAN +#define __INSTR_BSWAP(x) (x) +#else /* CONFIG_CPU_BIG_ENDIAN */ +#define __INSTR_BSWAP(x) ((((x) << 24) & 0xff000000) | \ + (((x) << 8) & 0x00ff0000) | \ + (((x) >> 8) & 0x0000ff00) | \ + (((x) >> 24) & 0x000000ff)) +#endif /* CONFIG_CPU_BIG_ENDIAN */ + +#ifdef __ASSEMBLY__ +#define __emit_inst(x) .long __INSTR_BSWAP(x) +#else /* __ASSEMBLY__ */ +#define __emit_inst(x) ".long " __stringify(__INSTR_BSWAP(x)) "\n\t" +#endif /* __ASSEMBLY__ */ + +#endif /* CONFIG_BROKEN_GAS_INST */ + +/* + * Instructions for modifying PSTATE fields. + * As per Arm ARM for v8-A, Section "C.5.1.3 op0 == 0b00, architectural hints, + * barriers and CLREX, and PSTATE access", ARM DDI 0487 C.a, system instructions + * for accessing PSTATE fields have the following encoding: + * Op0 = 0, CRn = 4 + * Op1, Op2 encodes the PSTATE field modified and defines the constraints. + * CRm = Imm4 for the instruction. 
+ * Rt = 0x1f + */ +#define pstate_field(op1, op2) ((op1) << Op1_shift | (op2) << Op2_shift) +#define PSTATE_Imm_shift CRm_shift + +#define PSTATE_PAN pstate_field(0, 4) +#define PSTATE_UAO pstate_field(0, 3) +#define PSTATE_SSBS pstate_field(3, 1) +#define PSTATE_TCO pstate_field(3, 4) + +#define SET_PSTATE_PAN(x) __emit_inst(0xd500401f | PSTATE_PAN | ((!!x) << PSTATE_Imm_shift)) +#define SET_PSTATE_UAO(x) __emit_inst(0xd500401f | PSTATE_UAO | ((!!x) << PSTATE_Imm_shift)) +#define SET_PSTATE_SSBS(x) __emit_inst(0xd500401f | PSTATE_SSBS | ((!!x) << PSTATE_Imm_shift)) +#define SET_PSTATE_TCO(x) __emit_inst(0xd500401f | PSTATE_TCO | ((!!x) << PSTATE_Imm_shift)) + +#define set_pstate_pan(x) asm volatile(SET_PSTATE_PAN(x)) +#define set_pstate_uao(x) asm volatile(SET_PSTATE_UAO(x)) +#define set_pstate_ssbs(x) asm volatile(SET_PSTATE_SSBS(x)) + +#define __SYS_BARRIER_INSN(CRm, op2, Rt) \ + __emit_inst(0xd5000000 | sys_insn(0, 3, 3, (CRm), (op2)) | ((Rt) & 0x1f)) + +#define SB_BARRIER_INSN __SYS_BARRIER_INSN(0, 7, 31) + +#define SYS_DC_ISW sys_insn(1, 0, 7, 6, 2) +#define SYS_DC_CSW sys_insn(1, 0, 7, 10, 2) +#define SYS_DC_CISW sys_insn(1, 0, 7, 14, 2) + +/* + * System registers, organised loosely by encoding but grouped together + * where the architected name contains an index. e.g. ID_MMFR_EL1. + */ +#define SYS_OSDTRRX_EL1 sys_reg(2, 0, 0, 0, 2) +#define SYS_MDCCINT_EL1 sys_reg(2, 0, 0, 2, 0) +#define SYS_MDSCR_EL1 sys_reg(2, 0, 0, 2, 2) +#define SYS_OSDTRTX_EL1 sys_reg(2, 0, 0, 3, 2) +#define SYS_OSECCR_EL1 sys_reg(2, 0, 0, 6, 2) +#define SYS_DBGBVRn_EL1(n) sys_reg(2, 0, 0, n, 4) +#define SYS_DBGBCRn_EL1(n) sys_reg(2, 0, 0, n, 5) +#define SYS_DBGWVRn_EL1(n) sys_reg(2, 0, 0, n, 6) +#define SYS_DBGWCRn_EL1(n) sys_reg(2, 0, 0, n, 7) +#define SYS_MDRAR_EL1 sys_reg(2, 0, 1, 0, 0) +#define SYS_OSLAR_EL1 sys_reg(2, 0, 1, 0, 4) +#define SYS_OSLSR_EL1 sys_reg(2, 0, 1, 1, 4) +#define SYS_OSDLR_EL1 sys_reg(2, 0, 1, 3, 4) +#define SYS_DBGPRCR_EL1 sys_reg(2, 0, 1, 4, 4) +#define SYS_DBGCLAIMSET_EL1 sys_reg(2, 0, 7, 8, 6) +#define SYS_DBGCLAIMCLR_EL1 sys_reg(2, 0, 7, 9, 6) +#define SYS_DBGAUTHSTATUS_EL1 sys_reg(2, 0, 7, 14, 6) +#define SYS_MDCCSR_EL0 sys_reg(2, 3, 0, 1, 0) +#define SYS_DBGDTR_EL0 sys_reg(2, 3, 0, 4, 0) +#define SYS_DBGDTRRX_EL0 sys_reg(2, 3, 0, 5, 0) +#define SYS_DBGDTRTX_EL0 sys_reg(2, 3, 0, 5, 0) +#define SYS_DBGVCR32_EL2 sys_reg(2, 4, 0, 7, 0) + +#define SYS_MIDR_EL1 sys_reg(3, 0, 0, 0, 0) +#define SYS_MPIDR_EL1 sys_reg(3, 0, 0, 0, 5) +#define SYS_REVIDR_EL1 sys_reg(3, 0, 0, 0, 6) + +#define SYS_ID_PFR0_EL1 sys_reg(3, 0, 0, 1, 0) +#define SYS_ID_PFR1_EL1 sys_reg(3, 0, 0, 1, 1) +#define SYS_ID_PFR2_EL1 sys_reg(3, 0, 0, 3, 4) +#define SYS_ID_DFR0_EL1 sys_reg(3, 0, 0, 1, 2) +#define SYS_ID_DFR1_EL1 sys_reg(3, 0, 0, 3, 5) +#define SYS_ID_AFR0_EL1 sys_reg(3, 0, 0, 1, 3) +#define SYS_ID_MMFR0_EL1 sys_reg(3, 0, 0, 1, 4) +#define SYS_ID_MMFR1_EL1 sys_reg(3, 0, 0, 1, 5) +#define SYS_ID_MMFR2_EL1 sys_reg(3, 0, 0, 1, 6) +#define SYS_ID_MMFR3_EL1 sys_reg(3, 0, 0, 1, 7) +#define SYS_ID_MMFR4_EL1 sys_reg(3, 0, 0, 2, 6) +#define SYS_ID_MMFR5_EL1 sys_reg(3, 0, 0, 3, 6) + +#define SYS_ID_ISAR0_EL1 sys_reg(3, 0, 0, 2, 0) +#define SYS_ID_ISAR1_EL1 sys_reg(3, 0, 0, 2, 1) +#define SYS_ID_ISAR2_EL1 sys_reg(3, 0, 0, 2, 2) +#define SYS_ID_ISAR3_EL1 sys_reg(3, 0, 0, 2, 3) +#define SYS_ID_ISAR4_EL1 sys_reg(3, 0, 0, 2, 4) +#define SYS_ID_ISAR5_EL1 sys_reg(3, 0, 0, 2, 5) +#define SYS_ID_ISAR6_EL1 sys_reg(3, 0, 0, 2, 7) + +#define SYS_MVFR0_EL1 sys_reg(3, 0, 0, 3, 0) +#define SYS_MVFR1_EL1 sys_reg(3, 0, 0, 3, 1) 
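Tying back to the encoding comment at the top of this header: sys_reg() simply packs the five operands into one word, and the sys_reg_Op0()/sys_reg_CRm()/... accessors unpack them again. A quick round-trip check of that packing, using the same shifts and the SYS_ID_AA64PFR0_EL1 operands defined just below (demo macros only, not part of the imported header):

#include <assert.h>
#include <stdio.h>

/* Same layout as above: Op0[20:19] Op1[18:16] CRn[15:12] CRm[11:8] Op2[7:5]. */
#define DEMO_SYS_REG(op0, op1, crn, crm, op2) \
	(((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5))

#define DEMO_OP0(id)  (((id) >> 19) & 0x3)
#define DEMO_CRM(id)  (((id) >> 8) & 0xf)

int main(void)
{
	/* SYS_ID_AA64PFR0_EL1 is sys_reg(3, 0, 0, 4, 0) in this header. */
	unsigned int id = DEMO_SYS_REG(3, 0, 0, 4, 0);

	assert(DEMO_OP0(id) == 3);
	assert(DEMO_CRM(id) == 4);
	printf("ID_AA64PFR0_EL1 encoding: %#x\n", id);
	return 0;
}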
+#define SYS_MVFR2_EL1 sys_reg(3, 0, 0, 3, 2) + +#define SYS_ID_AA64PFR0_EL1 sys_reg(3, 0, 0, 4, 0) +#define SYS_ID_AA64PFR1_EL1 sys_reg(3, 0, 0, 4, 1) +#define SYS_ID_AA64ZFR0_EL1 sys_reg(3, 0, 0, 4, 4) + +#define SYS_ID_AA64DFR0_EL1 sys_reg(3, 0, 0, 5, 0) +#define SYS_ID_AA64DFR1_EL1 sys_reg(3, 0, 0, 5, 1) + +#define SYS_ID_AA64AFR0_EL1 sys_reg(3, 0, 0, 5, 4) +#define SYS_ID_AA64AFR1_EL1 sys_reg(3, 0, 0, 5, 5) + +#define SYS_ID_AA64ISAR0_EL1 sys_reg(3, 0, 0, 6, 0) +#define SYS_ID_AA64ISAR1_EL1 sys_reg(3, 0, 0, 6, 1) + +#define SYS_ID_AA64MMFR0_EL1 sys_reg(3, 0, 0, 7, 0) +#define SYS_ID_AA64MMFR1_EL1 sys_reg(3, 0, 0, 7, 1) +#define SYS_ID_AA64MMFR2_EL1 sys_reg(3, 0, 0, 7, 2) + +#define SYS_SCTLR_EL1 sys_reg(3, 0, 1, 0, 0) +#define SYS_ACTLR_EL1 sys_reg(3, 0, 1, 0, 1) +#define SYS_CPACR_EL1 sys_reg(3, 0, 1, 0, 2) +#define SYS_RGSR_EL1 sys_reg(3, 0, 1, 0, 5) +#define SYS_GCR_EL1 sys_reg(3, 0, 1, 0, 6) + +#define SYS_ZCR_EL1 sys_reg(3, 0, 1, 2, 0) +#define SYS_TRFCR_EL1 sys_reg(3, 0, 1, 2, 1) + +#define SYS_TTBR0_EL1 sys_reg(3, 0, 2, 0, 0) +#define SYS_TTBR1_EL1 sys_reg(3, 0, 2, 0, 1) +#define SYS_TCR_EL1 sys_reg(3, 0, 2, 0, 2) + +#define SYS_APIAKEYLO_EL1 sys_reg(3, 0, 2, 1, 0) +#define SYS_APIAKEYHI_EL1 sys_reg(3, 0, 2, 1, 1) +#define SYS_APIBKEYLO_EL1 sys_reg(3, 0, 2, 1, 2) +#define SYS_APIBKEYHI_EL1 sys_reg(3, 0, 2, 1, 3) + +#define SYS_APDAKEYLO_EL1 sys_reg(3, 0, 2, 2, 0) +#define SYS_APDAKEYHI_EL1 sys_reg(3, 0, 2, 2, 1) +#define SYS_APDBKEYLO_EL1 sys_reg(3, 0, 2, 2, 2) +#define SYS_APDBKEYHI_EL1 sys_reg(3, 0, 2, 2, 3) + +#define SYS_APGAKEYLO_EL1 sys_reg(3, 0, 2, 3, 0) +#define SYS_APGAKEYHI_EL1 sys_reg(3, 0, 2, 3, 1) + +#define SYS_SPSR_EL1 sys_reg(3, 0, 4, 0, 0) +#define SYS_ELR_EL1 sys_reg(3, 0, 4, 0, 1) + +#define SYS_ICC_PMR_EL1 sys_reg(3, 0, 4, 6, 0) + +#define SYS_AFSR0_EL1 sys_reg(3, 0, 5, 1, 0) +#define SYS_AFSR1_EL1 sys_reg(3, 0, 5, 1, 1) +#define SYS_ESR_EL1 sys_reg(3, 0, 5, 2, 0) + +#define SYS_ERRIDR_EL1 sys_reg(3, 0, 5, 3, 0) +#define SYS_ERRSELR_EL1 sys_reg(3, 0, 5, 3, 1) +#define SYS_ERXFR_EL1 sys_reg(3, 0, 5, 4, 0) +#define SYS_ERXCTLR_EL1 sys_reg(3, 0, 5, 4, 1) +#define SYS_ERXSTATUS_EL1 sys_reg(3, 0, 5, 4, 2) +#define SYS_ERXADDR_EL1 sys_reg(3, 0, 5, 4, 3) +#define SYS_ERXMISC0_EL1 sys_reg(3, 0, 5, 5, 0) +#define SYS_ERXMISC1_EL1 sys_reg(3, 0, 5, 5, 1) +#define SYS_TFSR_EL1 sys_reg(3, 0, 5, 6, 0) +#define SYS_TFSRE0_EL1 sys_reg(3, 0, 5, 6, 1) + +#define SYS_FAR_EL1 sys_reg(3, 0, 6, 0, 0) +#define SYS_PAR_EL1 sys_reg(3, 0, 7, 4, 0) + +#define SYS_PAR_EL1_F BIT(0) +#define SYS_PAR_EL1_FST GENMASK(6, 1) + +/*** Statistical Profiling Extension ***/ +/* ID registers */ +#define SYS_PMSIDR_EL1 sys_reg(3, 0, 9, 9, 7) +#define SYS_PMSIDR_EL1_FE_SHIFT 0 +#define SYS_PMSIDR_EL1_FT_SHIFT 1 +#define SYS_PMSIDR_EL1_FL_SHIFT 2 +#define SYS_PMSIDR_EL1_ARCHINST_SHIFT 3 +#define SYS_PMSIDR_EL1_LDS_SHIFT 4 +#define SYS_PMSIDR_EL1_ERND_SHIFT 5 +#define SYS_PMSIDR_EL1_INTERVAL_SHIFT 8 +#define SYS_PMSIDR_EL1_INTERVAL_MASK 0xfUL +#define SYS_PMSIDR_EL1_MAXSIZE_SHIFT 12 +#define SYS_PMSIDR_EL1_MAXSIZE_MASK 0xfUL +#define SYS_PMSIDR_EL1_COUNTSIZE_SHIFT 16 +#define SYS_PMSIDR_EL1_COUNTSIZE_MASK 0xfUL + +#define SYS_PMBIDR_EL1 sys_reg(3, 0, 9, 10, 7) +#define SYS_PMBIDR_EL1_ALIGN_SHIFT 0 +#define SYS_PMBIDR_EL1_ALIGN_MASK 0xfU +#define SYS_PMBIDR_EL1_P_SHIFT 4 +#define SYS_PMBIDR_EL1_F_SHIFT 5 + +/* Sampling controls */ +#define SYS_PMSCR_EL1 sys_reg(3, 0, 9, 9, 0) +#define SYS_PMSCR_EL1_E0SPE_SHIFT 0 +#define SYS_PMSCR_EL1_E1SPE_SHIFT 1 +#define SYS_PMSCR_EL1_CX_SHIFT 3 +#define 
SYS_PMSCR_EL1_PA_SHIFT 4 +#define SYS_PMSCR_EL1_TS_SHIFT 5 +#define SYS_PMSCR_EL1_PCT_SHIFT 6 + +#define SYS_PMSCR_EL2 sys_reg(3, 4, 9, 9, 0) +#define SYS_PMSCR_EL2_E0HSPE_SHIFT 0 +#define SYS_PMSCR_EL2_E2SPE_SHIFT 1 +#define SYS_PMSCR_EL2_CX_SHIFT 3 +#define SYS_PMSCR_EL2_PA_SHIFT 4 +#define SYS_PMSCR_EL2_TS_SHIFT 5 +#define SYS_PMSCR_EL2_PCT_SHIFT 6 + +#define SYS_PMSICR_EL1 sys_reg(3, 0, 9, 9, 2) + +#define SYS_PMSIRR_EL1 sys_reg(3, 0, 9, 9, 3) +#define SYS_PMSIRR_EL1_RND_SHIFT 0 +#define SYS_PMSIRR_EL1_INTERVAL_SHIFT 8 +#define SYS_PMSIRR_EL1_INTERVAL_MASK 0xffffffUL + +/* Filtering controls */ +#define SYS_PMSNEVFR_EL1 sys_reg(3, 0, 9, 9, 1) + +#define SYS_PMSFCR_EL1 sys_reg(3, 0, 9, 9, 4) +#define SYS_PMSFCR_EL1_FE_SHIFT 0 +#define SYS_PMSFCR_EL1_FT_SHIFT 1 +#define SYS_PMSFCR_EL1_FL_SHIFT 2 +#define SYS_PMSFCR_EL1_B_SHIFT 16 +#define SYS_PMSFCR_EL1_LD_SHIFT 17 +#define SYS_PMSFCR_EL1_ST_SHIFT 18 + +#define SYS_PMSEVFR_EL1 sys_reg(3, 0, 9, 9, 5) +#define SYS_PMSEVFR_EL1_RES0_8_2 \ + (GENMASK_ULL(47, 32) | GENMASK_ULL(23, 16) | GENMASK_ULL(11, 8) |\ + BIT_ULL(6) | BIT_ULL(4) | BIT_ULL(2) | BIT_ULL(0)) +#define SYS_PMSEVFR_EL1_RES0_8_3 \ + (SYS_PMSEVFR_EL1_RES0_8_2 & ~(BIT_ULL(18) | BIT_ULL(17) | BIT_ULL(11))) + +#define SYS_PMSLATFR_EL1 sys_reg(3, 0, 9, 9, 6) +#define SYS_PMSLATFR_EL1_MINLAT_SHIFT 0 + +/* Buffer controls */ +#define SYS_PMBLIMITR_EL1 sys_reg(3, 0, 9, 10, 0) +#define SYS_PMBLIMITR_EL1_E_SHIFT 0 +#define SYS_PMBLIMITR_EL1_FM_SHIFT 1 +#define SYS_PMBLIMITR_EL1_FM_MASK 0x3UL +#define SYS_PMBLIMITR_EL1_FM_STOP_IRQ (0 << SYS_PMBLIMITR_EL1_FM_SHIFT) + +#define SYS_PMBPTR_EL1 sys_reg(3, 0, 9, 10, 1) + +/* Buffer error reporting */ +#define SYS_PMBSR_EL1 sys_reg(3, 0, 9, 10, 3) +#define SYS_PMBSR_EL1_COLL_SHIFT 16 +#define SYS_PMBSR_EL1_S_SHIFT 17 +#define SYS_PMBSR_EL1_EA_SHIFT 18 +#define SYS_PMBSR_EL1_DL_SHIFT 19 +#define SYS_PMBSR_EL1_EC_SHIFT 26 +#define SYS_PMBSR_EL1_EC_MASK 0x3fUL + +#define SYS_PMBSR_EL1_EC_BUF (0x0UL << SYS_PMBSR_EL1_EC_SHIFT) +#define SYS_PMBSR_EL1_EC_FAULT_S1 (0x24UL << SYS_PMBSR_EL1_EC_SHIFT) +#define SYS_PMBSR_EL1_EC_FAULT_S2 (0x25UL << SYS_PMBSR_EL1_EC_SHIFT) + +#define SYS_PMBSR_EL1_FAULT_FSC_SHIFT 0 +#define SYS_PMBSR_EL1_FAULT_FSC_MASK 0x3fUL + +#define SYS_PMBSR_EL1_BUF_BSC_SHIFT 0 +#define SYS_PMBSR_EL1_BUF_BSC_MASK 0x3fUL + +#define SYS_PMBSR_EL1_BUF_BSC_FULL (0x1UL << SYS_PMBSR_EL1_BUF_BSC_SHIFT) + +/*** End of Statistical Profiling Extension ***/ + +/* + * TRBE Registers + */ +#define SYS_TRBLIMITR_EL1 sys_reg(3, 0, 9, 11, 0) +#define SYS_TRBPTR_EL1 sys_reg(3, 0, 9, 11, 1) +#define SYS_TRBBASER_EL1 sys_reg(3, 0, 9, 11, 2) +#define SYS_TRBSR_EL1 sys_reg(3, 0, 9, 11, 3) +#define SYS_TRBMAR_EL1 sys_reg(3, 0, 9, 11, 4) +#define SYS_TRBTRG_EL1 sys_reg(3, 0, 9, 11, 6) +#define SYS_TRBIDR_EL1 sys_reg(3, 0, 9, 11, 7) + +#define TRBLIMITR_LIMIT_MASK GENMASK_ULL(51, 0) +#define TRBLIMITR_LIMIT_SHIFT 12 +#define TRBLIMITR_NVM BIT(5) +#define TRBLIMITR_TRIG_MODE_MASK GENMASK(1, 0) +#define TRBLIMITR_TRIG_MODE_SHIFT 3 +#define TRBLIMITR_FILL_MODE_MASK GENMASK(1, 0) +#define TRBLIMITR_FILL_MODE_SHIFT 1 +#define TRBLIMITR_ENABLE BIT(0) +#define TRBPTR_PTR_MASK GENMASK_ULL(63, 0) +#define TRBPTR_PTR_SHIFT 0 +#define TRBBASER_BASE_MASK GENMASK_ULL(51, 0) +#define TRBBASER_BASE_SHIFT 12 +#define TRBSR_EC_MASK GENMASK(5, 0) +#define TRBSR_EC_SHIFT 26 +#define TRBSR_IRQ BIT(22) +#define TRBSR_TRG BIT(21) +#define TRBSR_WRAP BIT(20) +#define TRBSR_ABORT BIT(18) +#define TRBSR_STOP BIT(17) +#define TRBSR_MSS_MASK GENMASK(15, 0) +#define TRBSR_MSS_SHIFT 0 
+#define TRBSR_BSC_MASK GENMASK(5, 0) +#define TRBSR_BSC_SHIFT 0 +#define TRBSR_FSC_MASK GENMASK(5, 0) +#define TRBSR_FSC_SHIFT 0 +#define TRBMAR_SHARE_MASK GENMASK(1, 0) +#define TRBMAR_SHARE_SHIFT 8 +#define TRBMAR_OUTER_MASK GENMASK(3, 0) +#define TRBMAR_OUTER_SHIFT 4 +#define TRBMAR_INNER_MASK GENMASK(3, 0) +#define TRBMAR_INNER_SHIFT 0 +#define TRBTRG_TRG_MASK GENMASK(31, 0) +#define TRBTRG_TRG_SHIFT 0 +#define TRBIDR_FLAG BIT(5) +#define TRBIDR_PROG BIT(4) +#define TRBIDR_ALIGN_MASK GENMASK(3, 0) +#define TRBIDR_ALIGN_SHIFT 0 + +#define SYS_PMINTENSET_EL1 sys_reg(3, 0, 9, 14, 1) +#define SYS_PMINTENCLR_EL1 sys_reg(3, 0, 9, 14, 2) + +#define SYS_PMMIR_EL1 sys_reg(3, 0, 9, 14, 6) + +#define SYS_MAIR_EL1 sys_reg(3, 0, 10, 2, 0) +#define SYS_AMAIR_EL1 sys_reg(3, 0, 10, 3, 0) + +#define SYS_LORSA_EL1 sys_reg(3, 0, 10, 4, 0) +#define SYS_LOREA_EL1 sys_reg(3, 0, 10, 4, 1) +#define SYS_LORN_EL1 sys_reg(3, 0, 10, 4, 2) +#define SYS_LORC_EL1 sys_reg(3, 0, 10, 4, 3) +#define SYS_LORID_EL1 sys_reg(3, 0, 10, 4, 7) + +#define SYS_VBAR_EL1 sys_reg(3, 0, 12, 0, 0) +#define SYS_DISR_EL1 sys_reg(3, 0, 12, 1, 1) + +#define SYS_ICC_IAR0_EL1 sys_reg(3, 0, 12, 8, 0) +#define SYS_ICC_EOIR0_EL1 sys_reg(3, 0, 12, 8, 1) +#define SYS_ICC_HPPIR0_EL1 sys_reg(3, 0, 12, 8, 2) +#define SYS_ICC_BPR0_EL1 sys_reg(3, 0, 12, 8, 3) +#define SYS_ICC_AP0Rn_EL1(n) sys_reg(3, 0, 12, 8, 4 | n) +#define SYS_ICC_AP0R0_EL1 SYS_ICC_AP0Rn_EL1(0) +#define SYS_ICC_AP0R1_EL1 SYS_ICC_AP0Rn_EL1(1) +#define SYS_ICC_AP0R2_EL1 SYS_ICC_AP0Rn_EL1(2) +#define SYS_ICC_AP0R3_EL1 SYS_ICC_AP0Rn_EL1(3) +#define SYS_ICC_AP1Rn_EL1(n) sys_reg(3, 0, 12, 9, n) +#define SYS_ICC_AP1R0_EL1 SYS_ICC_AP1Rn_EL1(0) +#define SYS_ICC_AP1R1_EL1 SYS_ICC_AP1Rn_EL1(1) +#define SYS_ICC_AP1R2_EL1 SYS_ICC_AP1Rn_EL1(2) +#define SYS_ICC_AP1R3_EL1 SYS_ICC_AP1Rn_EL1(3) +#define SYS_ICC_DIR_EL1 sys_reg(3, 0, 12, 11, 1) +#define SYS_ICC_RPR_EL1 sys_reg(3, 0, 12, 11, 3) +#define SYS_ICC_SGI1R_EL1 sys_reg(3, 0, 12, 11, 5) +#define SYS_ICC_ASGI1R_EL1 sys_reg(3, 0, 12, 11, 6) +#define SYS_ICC_SGI0R_EL1 sys_reg(3, 0, 12, 11, 7) +#define SYS_ICC_IAR1_EL1 sys_reg(3, 0, 12, 12, 0) +#define SYS_ICC_EOIR1_EL1 sys_reg(3, 0, 12, 12, 1) +#define SYS_ICC_HPPIR1_EL1 sys_reg(3, 0, 12, 12, 2) +#define SYS_ICC_BPR1_EL1 sys_reg(3, 0, 12, 12, 3) +#define SYS_ICC_CTLR_EL1 sys_reg(3, 0, 12, 12, 4) +#define SYS_ICC_SRE_EL1 sys_reg(3, 0, 12, 12, 5) +#define SYS_ICC_IGRPEN0_EL1 sys_reg(3, 0, 12, 12, 6) +#define SYS_ICC_IGRPEN1_EL1 sys_reg(3, 0, 12, 12, 7) + +#define SYS_CONTEXTIDR_EL1 sys_reg(3, 0, 13, 0, 1) +#define SYS_TPIDR_EL1 sys_reg(3, 0, 13, 0, 4) + +#define SYS_SCXTNUM_EL1 sys_reg(3, 0, 13, 0, 7) + +#define SYS_CNTKCTL_EL1 sys_reg(3, 0, 14, 1, 0) + +#define SYS_CCSIDR_EL1 sys_reg(3, 1, 0, 0, 0) +#define SYS_CLIDR_EL1 sys_reg(3, 1, 0, 0, 1) +#define SYS_GMID_EL1 sys_reg(3, 1, 0, 0, 4) +#define SYS_AIDR_EL1 sys_reg(3, 1, 0, 0, 7) + +#define SYS_CSSELR_EL1 sys_reg(3, 2, 0, 0, 0) + +#define SYS_CTR_EL0 sys_reg(3, 3, 0, 0, 1) +#define SYS_DCZID_EL0 sys_reg(3, 3, 0, 0, 7) + +#define SYS_RNDR_EL0 sys_reg(3, 3, 2, 4, 0) +#define SYS_RNDRRS_EL0 sys_reg(3, 3, 2, 4, 1) + +#define SYS_PMCR_EL0 sys_reg(3, 3, 9, 12, 0) +#define SYS_PMCNTENSET_EL0 sys_reg(3, 3, 9, 12, 1) +#define SYS_PMCNTENCLR_EL0 sys_reg(3, 3, 9, 12, 2) +#define SYS_PMOVSCLR_EL0 sys_reg(3, 3, 9, 12, 3) +#define SYS_PMSWINC_EL0 sys_reg(3, 3, 9, 12, 4) +#define SYS_PMSELR_EL0 sys_reg(3, 3, 9, 12, 5) +#define SYS_PMCEID0_EL0 sys_reg(3, 3, 9, 12, 6) +#define SYS_PMCEID1_EL0 sys_reg(3, 3, 9, 12, 7) +#define SYS_PMCCNTR_EL0 sys_reg(3, 3, 9, 
13, 0) +#define SYS_PMXEVTYPER_EL0 sys_reg(3, 3, 9, 13, 1) +#define SYS_PMXEVCNTR_EL0 sys_reg(3, 3, 9, 13, 2) +#define SYS_PMUSERENR_EL0 sys_reg(3, 3, 9, 14, 0) +#define SYS_PMOVSSET_EL0 sys_reg(3, 3, 9, 14, 3) + +#define SYS_TPIDR_EL0 sys_reg(3, 3, 13, 0, 2) +#define SYS_TPIDRRO_EL0 sys_reg(3, 3, 13, 0, 3) + +#define SYS_SCXTNUM_EL0 sys_reg(3, 3, 13, 0, 7) + +/* Definitions for system register interface to AMU for ARMv8.4 onwards */ +#define SYS_AM_EL0(crm, op2) sys_reg(3, 3, 13, (crm), (op2)) +#define SYS_AMCR_EL0 SYS_AM_EL0(2, 0) +#define SYS_AMCFGR_EL0 SYS_AM_EL0(2, 1) +#define SYS_AMCGCR_EL0 SYS_AM_EL0(2, 2) +#define SYS_AMUSERENR_EL0 SYS_AM_EL0(2, 3) +#define SYS_AMCNTENCLR0_EL0 SYS_AM_EL0(2, 4) +#define SYS_AMCNTENSET0_EL0 SYS_AM_EL0(2, 5) +#define SYS_AMCNTENCLR1_EL0 SYS_AM_EL0(3, 0) +#define SYS_AMCNTENSET1_EL0 SYS_AM_EL0(3, 1) + +/* + * Group 0 of activity monitors (architected): + * op0 op1 CRn CRm op2 + * Counter: 11 011 1101 010:n<3> n<2:0> + * Type: 11 011 1101 011:n<3> n<2:0> + * n: 0-15 + * + * Group 1 of activity monitors (auxiliary): + * op0 op1 CRn CRm op2 + * Counter: 11 011 1101 110:n<3> n<2:0> + * Type: 11 011 1101 111:n<3> n<2:0> + * n: 0-15 + */ + +#define SYS_AMEVCNTR0_EL0(n) SYS_AM_EL0(4 + ((n) >> 3), (n) & 7) +#define SYS_AMEVTYPER0_EL0(n) SYS_AM_EL0(6 + ((n) >> 3), (n) & 7) +#define SYS_AMEVCNTR1_EL0(n) SYS_AM_EL0(12 + ((n) >> 3), (n) & 7) +#define SYS_AMEVTYPER1_EL0(n) SYS_AM_EL0(14 + ((n) >> 3), (n) & 7) + +/* AMU v1: Fixed (architecturally defined) activity monitors */ +#define SYS_AMEVCNTR0_CORE_EL0 SYS_AMEVCNTR0_EL0(0) +#define SYS_AMEVCNTR0_CONST_EL0 SYS_AMEVCNTR0_EL0(1) +#define SYS_AMEVCNTR0_INST_RET_EL0 SYS_AMEVCNTR0_EL0(2) +#define SYS_AMEVCNTR0_MEM_STALL SYS_AMEVCNTR0_EL0(3) + +#define SYS_CNTFRQ_EL0 sys_reg(3, 3, 14, 0, 0) + +#define SYS_CNTP_TVAL_EL0 sys_reg(3, 3, 14, 2, 0) +#define SYS_CNTP_CTL_EL0 sys_reg(3, 3, 14, 2, 1) +#define SYS_CNTP_CVAL_EL0 sys_reg(3, 3, 14, 2, 2) + +#define SYS_CNTV_CTL_EL0 sys_reg(3, 3, 14, 3, 1) +#define SYS_CNTV_CVAL_EL0 sys_reg(3, 3, 14, 3, 2) + +#define SYS_AARCH32_CNTP_TVAL sys_reg(0, 0, 14, 2, 0) +#define SYS_AARCH32_CNTP_CTL sys_reg(0, 0, 14, 2, 1) +#define SYS_AARCH32_CNTP_CVAL sys_reg(0, 2, 0, 14, 0) + +#define __PMEV_op2(n) ((n) & 0x7) +#define __CNTR_CRm(n) (0x8 | (((n) >> 3) & 0x3)) +#define SYS_PMEVCNTRn_EL0(n) sys_reg(3, 3, 14, __CNTR_CRm(n), __PMEV_op2(n)) +#define __TYPER_CRm(n) (0xc | (((n) >> 3) & 0x3)) +#define SYS_PMEVTYPERn_EL0(n) sys_reg(3, 3, 14, __TYPER_CRm(n), __PMEV_op2(n)) + +#define SYS_PMCCFILTR_EL0 sys_reg(3, 3, 14, 15, 7) + +#define SYS_SCTLR_EL2 sys_reg(3, 4, 1, 0, 0) +#define SYS_HFGRTR_EL2 sys_reg(3, 4, 1, 1, 4) +#define SYS_HFGWTR_EL2 sys_reg(3, 4, 1, 1, 5) +#define SYS_HFGITR_EL2 sys_reg(3, 4, 1, 1, 6) +#define SYS_ZCR_EL2 sys_reg(3, 4, 1, 2, 0) +#define SYS_TRFCR_EL2 sys_reg(3, 4, 1, 2, 1) +#define SYS_DACR32_EL2 sys_reg(3, 4, 3, 0, 0) +#define SYS_HDFGRTR_EL2 sys_reg(3, 4, 3, 1, 4) +#define SYS_HDFGWTR_EL2 sys_reg(3, 4, 3, 1, 5) +#define SYS_HAFGRTR_EL2 sys_reg(3, 4, 3, 1, 6) +#define SYS_SPSR_EL2 sys_reg(3, 4, 4, 0, 0) +#define SYS_ELR_EL2 sys_reg(3, 4, 4, 0, 1) +#define SYS_IFSR32_EL2 sys_reg(3, 4, 5, 0, 1) +#define SYS_ESR_EL2 sys_reg(3, 4, 5, 2, 0) +#define SYS_VSESR_EL2 sys_reg(3, 4, 5, 2, 3) +#define SYS_FPEXC32_EL2 sys_reg(3, 4, 5, 3, 0) +#define SYS_TFSR_EL2 sys_reg(3, 4, 5, 6, 0) +#define SYS_FAR_EL2 sys_reg(3, 4, 6, 0, 0) + +#define SYS_VDISR_EL2 sys_reg(3, 4, 12, 1, 1) +#define __SYS__AP0Rx_EL2(x) sys_reg(3, 4, 12, 8, x) +#define SYS_ICH_AP0R0_EL2 __SYS__AP0Rx_EL2(0) 
+#define SYS_ICH_AP0R1_EL2 __SYS__AP0Rx_EL2(1) +#define SYS_ICH_AP0R2_EL2 __SYS__AP0Rx_EL2(2) +#define SYS_ICH_AP0R3_EL2 __SYS__AP0Rx_EL2(3) + +#define __SYS__AP1Rx_EL2(x) sys_reg(3, 4, 12, 9, x) +#define SYS_ICH_AP1R0_EL2 __SYS__AP1Rx_EL2(0) +#define SYS_ICH_AP1R1_EL2 __SYS__AP1Rx_EL2(1) +#define SYS_ICH_AP1R2_EL2 __SYS__AP1Rx_EL2(2) +#define SYS_ICH_AP1R3_EL2 __SYS__AP1Rx_EL2(3) + +#define SYS_ICH_VSEIR_EL2 sys_reg(3, 4, 12, 9, 4) +#define SYS_ICC_SRE_EL2 sys_reg(3, 4, 12, 9, 5) +#define SYS_ICH_HCR_EL2 sys_reg(3, 4, 12, 11, 0) +#define SYS_ICH_VTR_EL2 sys_reg(3, 4, 12, 11, 1) +#define SYS_ICH_MISR_EL2 sys_reg(3, 4, 12, 11, 2) +#define SYS_ICH_EISR_EL2 sys_reg(3, 4, 12, 11, 3) +#define SYS_ICH_ELRSR_EL2 sys_reg(3, 4, 12, 11, 5) +#define SYS_ICH_VMCR_EL2 sys_reg(3, 4, 12, 11, 7) + +#define __SYS__LR0_EL2(x) sys_reg(3, 4, 12, 12, x) +#define SYS_ICH_LR0_EL2 __SYS__LR0_EL2(0) +#define SYS_ICH_LR1_EL2 __SYS__LR0_EL2(1) +#define SYS_ICH_LR2_EL2 __SYS__LR0_EL2(2) +#define SYS_ICH_LR3_EL2 __SYS__LR0_EL2(3) +#define SYS_ICH_LR4_EL2 __SYS__LR0_EL2(4) +#define SYS_ICH_LR5_EL2 __SYS__LR0_EL2(5) +#define SYS_ICH_LR6_EL2 __SYS__LR0_EL2(6) +#define SYS_ICH_LR7_EL2 __SYS__LR0_EL2(7) + +#define __SYS__LR8_EL2(x) sys_reg(3, 4, 12, 13, x) +#define SYS_ICH_LR8_EL2 __SYS__LR8_EL2(0) +#define SYS_ICH_LR9_EL2 __SYS__LR8_EL2(1) +#define SYS_ICH_LR10_EL2 __SYS__LR8_EL2(2) +#define SYS_ICH_LR11_EL2 __SYS__LR8_EL2(3) +#define SYS_ICH_LR12_EL2 __SYS__LR8_EL2(4) +#define SYS_ICH_LR13_EL2 __SYS__LR8_EL2(5) +#define SYS_ICH_LR14_EL2 __SYS__LR8_EL2(6) +#define SYS_ICH_LR15_EL2 __SYS__LR8_EL2(7) + +/* VHE encodings for architectural EL0/1 system registers */ +#define SYS_SCTLR_EL12 sys_reg(3, 5, 1, 0, 0) +#define SYS_CPACR_EL12 sys_reg(3, 5, 1, 0, 2) +#define SYS_ZCR_EL12 sys_reg(3, 5, 1, 2, 0) +#define SYS_TTBR0_EL12 sys_reg(3, 5, 2, 0, 0) +#define SYS_TTBR1_EL12 sys_reg(3, 5, 2, 0, 1) +#define SYS_TCR_EL12 sys_reg(3, 5, 2, 0, 2) +#define SYS_SPSR_EL12 sys_reg(3, 5, 4, 0, 0) +#define SYS_ELR_EL12 sys_reg(3, 5, 4, 0, 1) +#define SYS_AFSR0_EL12 sys_reg(3, 5, 5, 1, 0) +#define SYS_AFSR1_EL12 sys_reg(3, 5, 5, 1, 1) +#define SYS_ESR_EL12 sys_reg(3, 5, 5, 2, 0) +#define SYS_TFSR_EL12 sys_reg(3, 5, 5, 6, 0) +#define SYS_FAR_EL12 sys_reg(3, 5, 6, 0, 0) +#define SYS_MAIR_EL12 sys_reg(3, 5, 10, 2, 0) +#define SYS_AMAIR_EL12 sys_reg(3, 5, 10, 3, 0) +#define SYS_VBAR_EL12 sys_reg(3, 5, 12, 0, 0) +#define SYS_CONTEXTIDR_EL12 sys_reg(3, 5, 13, 0, 1) +#define SYS_CNTKCTL_EL12 sys_reg(3, 5, 14, 1, 0) +#define SYS_CNTP_TVAL_EL02 sys_reg(3, 5, 14, 2, 0) +#define SYS_CNTP_CTL_EL02 sys_reg(3, 5, 14, 2, 1) +#define SYS_CNTP_CVAL_EL02 sys_reg(3, 5, 14, 2, 2) +#define SYS_CNTV_TVAL_EL02 sys_reg(3, 5, 14, 3, 0) +#define SYS_CNTV_CTL_EL02 sys_reg(3, 5, 14, 3, 1) +#define SYS_CNTV_CVAL_EL02 sys_reg(3, 5, 14, 3, 2) + +/* Common SCTLR_ELx flags. 
*/ +#define SCTLR_ELx_DSSBS (BIT(44)) +#define SCTLR_ELx_ATA (BIT(43)) + +#define SCTLR_ELx_TCF_SHIFT 40 +#define SCTLR_ELx_TCF_NONE (UL(0x0) << SCTLR_ELx_TCF_SHIFT) +#define SCTLR_ELx_TCF_SYNC (UL(0x1) << SCTLR_ELx_TCF_SHIFT) +#define SCTLR_ELx_TCF_ASYNC (UL(0x2) << SCTLR_ELx_TCF_SHIFT) +#define SCTLR_ELx_TCF_MASK (UL(0x3) << SCTLR_ELx_TCF_SHIFT) + +#define SCTLR_ELx_ENIA_SHIFT 31 + +#define SCTLR_ELx_ITFSB (BIT(37)) +#define SCTLR_ELx_ENIA (BIT(SCTLR_ELx_ENIA_SHIFT)) +#define SCTLR_ELx_ENIB (BIT(30)) +#define SCTLR_ELx_ENDA (BIT(27)) +#define SCTLR_ELx_EE (BIT(25)) +#define SCTLR_ELx_IESB (BIT(21)) +#define SCTLR_ELx_WXN (BIT(19)) +#define SCTLR_ELx_ENDB (BIT(13)) +#define SCTLR_ELx_I (BIT(12)) +#define SCTLR_ELx_SA (BIT(3)) +#define SCTLR_ELx_C (BIT(2)) +#define SCTLR_ELx_A (BIT(1)) +#define SCTLR_ELx_M (BIT(0)) + +/* SCTLR_EL2 specific flags. */ +#define SCTLR_EL2_RES1 ((BIT(4)) | (BIT(5)) | (BIT(11)) | (BIT(16)) | \ + (BIT(18)) | (BIT(22)) | (BIT(23)) | (BIT(28)) | \ + (BIT(29))) + +#ifdef CONFIG_CPU_BIG_ENDIAN +#define ENDIAN_SET_EL2 SCTLR_ELx_EE +#else +#define ENDIAN_SET_EL2 0 +#endif + +#define INIT_SCTLR_EL2_MMU_ON \ + (SCTLR_ELx_M | SCTLR_ELx_C | SCTLR_ELx_SA | SCTLR_ELx_I | \ + SCTLR_ELx_IESB | SCTLR_ELx_WXN | ENDIAN_SET_EL2 | \ + SCTLR_ELx_ITFSB | SCTLR_EL2_RES1) + +#define INIT_SCTLR_EL2_MMU_OFF \ + (SCTLR_EL2_RES1 | ENDIAN_SET_EL2) + +/* SCTLR_EL1 specific flags. */ +#define SCTLR_EL1_EPAN (BIT(57)) +#define SCTLR_EL1_ATA0 (BIT(42)) + +#define SCTLR_EL1_TCF0_SHIFT 38 +#define SCTLR_EL1_TCF0_NONE (UL(0x0) << SCTLR_EL1_TCF0_SHIFT) +#define SCTLR_EL1_TCF0_SYNC (UL(0x1) << SCTLR_EL1_TCF0_SHIFT) +#define SCTLR_EL1_TCF0_ASYNC (UL(0x2) << SCTLR_EL1_TCF0_SHIFT) +#define SCTLR_EL1_TCF0_MASK (UL(0x3) << SCTLR_EL1_TCF0_SHIFT) + +#define SCTLR_EL1_BT1 (BIT(36)) +#define SCTLR_EL1_BT0 (BIT(35)) +#define SCTLR_EL1_UCI (BIT(26)) +#define SCTLR_EL1_E0E (BIT(24)) +#define SCTLR_EL1_SPAN (BIT(23)) +#define SCTLR_EL1_NTWE (BIT(18)) +#define SCTLR_EL1_NTWI (BIT(16)) +#define SCTLR_EL1_UCT (BIT(15)) +#define SCTLR_EL1_DZE (BIT(14)) +#define SCTLR_EL1_UMA (BIT(9)) +#define SCTLR_EL1_SED (BIT(8)) +#define SCTLR_EL1_ITD (BIT(7)) +#define SCTLR_EL1_CP15BEN (BIT(5)) +#define SCTLR_EL1_SA0 (BIT(4)) + +#define SCTLR_EL1_RES1 ((BIT(11)) | (BIT(20)) | (BIT(22)) | (BIT(28)) | \ + (BIT(29))) + +#ifdef CONFIG_CPU_BIG_ENDIAN +#define ENDIAN_SET_EL1 (SCTLR_EL1_E0E | SCTLR_ELx_EE) +#else +#define ENDIAN_SET_EL1 0 +#endif + +#define INIT_SCTLR_EL1_MMU_OFF \ + (ENDIAN_SET_EL1 | SCTLR_EL1_RES1) + +#define INIT_SCTLR_EL1_MMU_ON \ + (SCTLR_ELx_M | SCTLR_ELx_C | SCTLR_ELx_SA | SCTLR_EL1_SA0 | \ + SCTLR_EL1_SED | SCTLR_ELx_I | SCTLR_EL1_DZE | SCTLR_EL1_UCT | \ + SCTLR_EL1_NTWE | SCTLR_ELx_IESB | SCTLR_EL1_SPAN | SCTLR_ELx_ITFSB | \ + SCTLR_ELx_ATA | SCTLR_EL1_ATA0 | ENDIAN_SET_EL1 | SCTLR_EL1_UCI | \ + SCTLR_EL1_EPAN | SCTLR_EL1_RES1) + +/* MAIR_ELx memory attributes (used by Linux) */ +#define MAIR_ATTR_DEVICE_nGnRnE UL(0x00) +#define MAIR_ATTR_DEVICE_nGnRE UL(0x04) +#define MAIR_ATTR_NORMAL_NC UL(0x44) +#define MAIR_ATTR_NORMAL_TAGGED UL(0xf0) +#define MAIR_ATTR_NORMAL UL(0xff) +#define MAIR_ATTR_MASK UL(0xff) + +/* Position the attr at the correct index */ +#define MAIR_ATTRIDX(attr, idx) ((attr) << ((idx) * 8)) + +/* id_aa64isar0 */ +#define ID_AA64ISAR0_RNDR_SHIFT 60 +#define ID_AA64ISAR0_TLB_SHIFT 56 +#define ID_AA64ISAR0_TS_SHIFT 52 +#define ID_AA64ISAR0_FHM_SHIFT 48 +#define ID_AA64ISAR0_DP_SHIFT 44 +#define ID_AA64ISAR0_SM4_SHIFT 40 +#define ID_AA64ISAR0_SM3_SHIFT 36 +#define ID_AA64ISAR0_SHA3_SHIFT 32 
+#define ID_AA64ISAR0_RDM_SHIFT 28 +#define ID_AA64ISAR0_ATOMICS_SHIFT 20 +#define ID_AA64ISAR0_CRC32_SHIFT 16 +#define ID_AA64ISAR0_SHA2_SHIFT 12 +#define ID_AA64ISAR0_SHA1_SHIFT 8 +#define ID_AA64ISAR0_AES_SHIFT 4 + +#define ID_AA64ISAR0_TLB_RANGE_NI 0x0 +#define ID_AA64ISAR0_TLB_RANGE 0x2 + +/* id_aa64isar1 */ +#define ID_AA64ISAR1_I8MM_SHIFT 52 +#define ID_AA64ISAR1_DGH_SHIFT 48 +#define ID_AA64ISAR1_BF16_SHIFT 44 +#define ID_AA64ISAR1_SPECRES_SHIFT 40 +#define ID_AA64ISAR1_SB_SHIFT 36 +#define ID_AA64ISAR1_FRINTTS_SHIFT 32 +#define ID_AA64ISAR1_GPI_SHIFT 28 +#define ID_AA64ISAR1_GPA_SHIFT 24 +#define ID_AA64ISAR1_LRCPC_SHIFT 20 +#define ID_AA64ISAR1_FCMA_SHIFT 16 +#define ID_AA64ISAR1_JSCVT_SHIFT 12 +#define ID_AA64ISAR1_API_SHIFT 8 +#define ID_AA64ISAR1_APA_SHIFT 4 +#define ID_AA64ISAR1_DPB_SHIFT 0 + +#define ID_AA64ISAR1_APA_NI 0x0 +#define ID_AA64ISAR1_APA_ARCHITECTED 0x1 +#define ID_AA64ISAR1_APA_ARCH_EPAC 0x2 +#define ID_AA64ISAR1_APA_ARCH_EPAC2 0x3 +#define ID_AA64ISAR1_APA_ARCH_EPAC2_FPAC 0x4 +#define ID_AA64ISAR1_APA_ARCH_EPAC2_FPAC_CMB 0x5 +#define ID_AA64ISAR1_API_NI 0x0 +#define ID_AA64ISAR1_API_IMP_DEF 0x1 +#define ID_AA64ISAR1_API_IMP_DEF_EPAC 0x2 +#define ID_AA64ISAR1_API_IMP_DEF_EPAC2 0x3 +#define ID_AA64ISAR1_API_IMP_DEF_EPAC2_FPAC 0x4 +#define ID_AA64ISAR1_API_IMP_DEF_EPAC2_FPAC_CMB 0x5 +#define ID_AA64ISAR1_GPA_NI 0x0 +#define ID_AA64ISAR1_GPA_ARCHITECTED 0x1 +#define ID_AA64ISAR1_GPI_NI 0x0 +#define ID_AA64ISAR1_GPI_IMP_DEF 0x1 + +/* id_aa64pfr0 */ +#define ID_AA64PFR0_CSV3_SHIFT 60 +#define ID_AA64PFR0_CSV2_SHIFT 56 +#define ID_AA64PFR0_DIT_SHIFT 48 +#define ID_AA64PFR0_AMU_SHIFT 44 +#define ID_AA64PFR0_MPAM_SHIFT 40 +#define ID_AA64PFR0_SEL2_SHIFT 36 +#define ID_AA64PFR0_SVE_SHIFT 32 +#define ID_AA64PFR0_RAS_SHIFT 28 +#define ID_AA64PFR0_GIC_SHIFT 24 +#define ID_AA64PFR0_ASIMD_SHIFT 20 +#define ID_AA64PFR0_FP_SHIFT 16 +#define ID_AA64PFR0_EL3_SHIFT 12 +#define ID_AA64PFR0_EL2_SHIFT 8 +#define ID_AA64PFR0_EL1_SHIFT 4 +#define ID_AA64PFR0_EL0_SHIFT 0 + +#define ID_AA64PFR0_AMU 0x1 +#define ID_AA64PFR0_SVE 0x1 +#define ID_AA64PFR0_RAS_V1 0x1 +#define ID_AA64PFR0_RAS_V1P1 0x2 +#define ID_AA64PFR0_FP_NI 0xf +#define ID_AA64PFR0_FP_SUPPORTED 0x0 +#define ID_AA64PFR0_ASIMD_NI 0xf +#define ID_AA64PFR0_ASIMD_SUPPORTED 0x0 +#define ID_AA64PFR0_ELx_64BIT_ONLY 0x1 +#define ID_AA64PFR0_ELx_32BIT_64BIT 0x2 + +/* id_aa64pfr1 */ +#define ID_AA64PFR1_MPAMFRAC_SHIFT 16 +#define ID_AA64PFR1_RASFRAC_SHIFT 12 +#define ID_AA64PFR1_MTE_SHIFT 8 +#define ID_AA64PFR1_SSBS_SHIFT 4 +#define ID_AA64PFR1_BT_SHIFT 0 + +#define ID_AA64PFR1_SSBS_PSTATE_NI 0 +#define ID_AA64PFR1_SSBS_PSTATE_ONLY 1 +#define ID_AA64PFR1_SSBS_PSTATE_INSNS 2 +#define ID_AA64PFR1_BT_BTI 0x1 + +#define ID_AA64PFR1_MTE_NI 0x0 +#define ID_AA64PFR1_MTE_EL0 0x1 +#define ID_AA64PFR1_MTE 0x2 + +/* id_aa64zfr0 */ +#define ID_AA64ZFR0_F64MM_SHIFT 56 +#define ID_AA64ZFR0_F32MM_SHIFT 52 +#define ID_AA64ZFR0_I8MM_SHIFT 44 +#define ID_AA64ZFR0_SM4_SHIFT 40 +#define ID_AA64ZFR0_SHA3_SHIFT 32 +#define ID_AA64ZFR0_BF16_SHIFT 20 +#define ID_AA64ZFR0_BITPERM_SHIFT 16 +#define ID_AA64ZFR0_AES_SHIFT 4 +#define ID_AA64ZFR0_SVEVER_SHIFT 0 + +#define ID_AA64ZFR0_F64MM 0x1 +#define ID_AA64ZFR0_F32MM 0x1 +#define ID_AA64ZFR0_I8MM 0x1 +#define ID_AA64ZFR0_BF16 0x1 +#define ID_AA64ZFR0_SM4 0x1 +#define ID_AA64ZFR0_SHA3 0x1 +#define ID_AA64ZFR0_BITPERM 0x1 +#define ID_AA64ZFR0_AES 0x1 +#define ID_AA64ZFR0_AES_PMULL 0x2 +#define ID_AA64ZFR0_SVEVER_SVE2 0x1 + +/* id_aa64mmfr0 */ +#define ID_AA64MMFR0_ECV_SHIFT 60 +#define ID_AA64MMFR0_FGT_SHIFT 56 
+#define ID_AA64MMFR0_EXS_SHIFT 44 +#define ID_AA64MMFR0_TGRAN4_2_SHIFT 40 +#define ID_AA64MMFR0_TGRAN64_2_SHIFT 36 +#define ID_AA64MMFR0_TGRAN16_2_SHIFT 32 +#define ID_AA64MMFR0_TGRAN4_SHIFT 28 +#define ID_AA64MMFR0_TGRAN64_SHIFT 24 +#define ID_AA64MMFR0_TGRAN16_SHIFT 20 +#define ID_AA64MMFR0_BIGENDEL0_SHIFT 16 +#define ID_AA64MMFR0_SNSMEM_SHIFT 12 +#define ID_AA64MMFR0_BIGENDEL_SHIFT 8 +#define ID_AA64MMFR0_ASID_SHIFT 4 +#define ID_AA64MMFR0_PARANGE_SHIFT 0 + +#define ID_AA64MMFR0_ASID_8 0x0 +#define ID_AA64MMFR0_ASID_16 0x2 + +#define ID_AA64MMFR0_TGRAN4_NI 0xf +#define ID_AA64MMFR0_TGRAN4_SUPPORTED_MIN 0x0 +#define ID_AA64MMFR0_TGRAN4_SUPPORTED_MAX 0x7 +#define ID_AA64MMFR0_TGRAN64_NI 0xf +#define ID_AA64MMFR0_TGRAN64_SUPPORTED_MIN 0x0 +#define ID_AA64MMFR0_TGRAN64_SUPPORTED_MAX 0x7 +#define ID_AA64MMFR0_TGRAN16_NI 0x0 +#define ID_AA64MMFR0_TGRAN16_SUPPORTED_MIN 0x1 +#define ID_AA64MMFR0_TGRAN16_SUPPORTED_MAX 0xf + +#define ID_AA64MMFR0_PARANGE_32 0x0 +#define ID_AA64MMFR0_PARANGE_36 0x1 +#define ID_AA64MMFR0_PARANGE_40 0x2 +#define ID_AA64MMFR0_PARANGE_42 0x3 +#define ID_AA64MMFR0_PARANGE_44 0x4 +#define ID_AA64MMFR0_PARANGE_48 0x5 +#define ID_AA64MMFR0_PARANGE_52 0x6 + +#define ARM64_MIN_PARANGE_BITS 32 + +#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_DEFAULT 0x0 +#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_NONE 0x1 +#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_MIN 0x2 +#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_MAX 0x7 + +#ifdef CONFIG_ARM64_PA_BITS_52 +#define ID_AA64MMFR0_PARANGE_MAX ID_AA64MMFR0_PARANGE_52 +#else +#define ID_AA64MMFR0_PARANGE_MAX ID_AA64MMFR0_PARANGE_48 +#endif + +/* id_aa64mmfr1 */ +#define ID_AA64MMFR1_ETS_SHIFT 36 +#define ID_AA64MMFR1_TWED_SHIFT 32 +#define ID_AA64MMFR1_XNX_SHIFT 28 +#define ID_AA64MMFR1_SPECSEI_SHIFT 24 +#define ID_AA64MMFR1_PAN_SHIFT 20 +#define ID_AA64MMFR1_LOR_SHIFT 16 +#define ID_AA64MMFR1_HPD_SHIFT 12 +#define ID_AA64MMFR1_VHE_SHIFT 8 +#define ID_AA64MMFR1_VMIDBITS_SHIFT 4 +#define ID_AA64MMFR1_HADBS_SHIFT 0 + +#define ID_AA64MMFR1_VMIDBITS_8 0 +#define ID_AA64MMFR1_VMIDBITS_16 2 + +/* id_aa64mmfr2 */ +#define ID_AA64MMFR2_E0PD_SHIFT 60 +#define ID_AA64MMFR2_EVT_SHIFT 56 +#define ID_AA64MMFR2_BBM_SHIFT 52 +#define ID_AA64MMFR2_TTL_SHIFT 48 +#define ID_AA64MMFR2_FWB_SHIFT 40 +#define ID_AA64MMFR2_IDS_SHIFT 36 +#define ID_AA64MMFR2_AT_SHIFT 32 +#define ID_AA64MMFR2_ST_SHIFT 28 +#define ID_AA64MMFR2_NV_SHIFT 24 +#define ID_AA64MMFR2_CCIDX_SHIFT 20 +#define ID_AA64MMFR2_LVA_SHIFT 16 +#define ID_AA64MMFR2_IESB_SHIFT 12 +#define ID_AA64MMFR2_LSM_SHIFT 8 +#define ID_AA64MMFR2_UAO_SHIFT 4 +#define ID_AA64MMFR2_CNP_SHIFT 0 + +/* id_aa64dfr0 */ +#define ID_AA64DFR0_MTPMU_SHIFT 48 +#define ID_AA64DFR0_TRBE_SHIFT 44 +#define ID_AA64DFR0_TRACE_FILT_SHIFT 40 +#define ID_AA64DFR0_DOUBLELOCK_SHIFT 36 +#define ID_AA64DFR0_PMSVER_SHIFT 32 +#define ID_AA64DFR0_CTX_CMPS_SHIFT 28 +#define ID_AA64DFR0_WRPS_SHIFT 20 +#define ID_AA64DFR0_BRPS_SHIFT 12 +#define ID_AA64DFR0_PMUVER_SHIFT 8 +#define ID_AA64DFR0_TRACEVER_SHIFT 4 +#define ID_AA64DFR0_DEBUGVER_SHIFT 0 + +#define ID_AA64DFR0_PMUVER_8_0 0x1 +#define ID_AA64DFR0_PMUVER_8_1 0x4 +#define ID_AA64DFR0_PMUVER_8_4 0x5 +#define ID_AA64DFR0_PMUVER_8_5 0x6 +#define ID_AA64DFR0_PMUVER_IMP_DEF 0xf + +#define ID_AA64DFR0_PMSVER_8_2 0x1 +#define ID_AA64DFR0_PMSVER_8_3 0x2 + +#define ID_DFR0_PERFMON_SHIFT 24 + +#define ID_DFR0_PERFMON_8_0 0x3 +#define ID_DFR0_PERFMON_8_1 0x4 +#define ID_DFR0_PERFMON_8_4 0x5 +#define ID_DFR0_PERFMON_8_5 0x6 + +#define ID_ISAR4_SWP_FRAC_SHIFT 28 +#define ID_ISAR4_PSR_M_SHIFT 24 +#define 
ID_ISAR4_SYNCH_PRIM_FRAC_SHIFT 20 +#define ID_ISAR4_BARRIER_SHIFT 16 +#define ID_ISAR4_SMC_SHIFT 12 +#define ID_ISAR4_WRITEBACK_SHIFT 8 +#define ID_ISAR4_WITHSHIFTS_SHIFT 4 +#define ID_ISAR4_UNPRIV_SHIFT 0 + +#define ID_DFR1_MTPMU_SHIFT 0 + +#define ID_ISAR0_DIVIDE_SHIFT 24 +#define ID_ISAR0_DEBUG_SHIFT 20 +#define ID_ISAR0_COPROC_SHIFT 16 +#define ID_ISAR0_CMPBRANCH_SHIFT 12 +#define ID_ISAR0_BITFIELD_SHIFT 8 +#define ID_ISAR0_BITCOUNT_SHIFT 4 +#define ID_ISAR0_SWAP_SHIFT 0 + +#define ID_ISAR5_RDM_SHIFT 24 +#define ID_ISAR5_CRC32_SHIFT 16 +#define ID_ISAR5_SHA2_SHIFT 12 +#define ID_ISAR5_SHA1_SHIFT 8 +#define ID_ISAR5_AES_SHIFT 4 +#define ID_ISAR5_SEVL_SHIFT 0 + +#define ID_ISAR6_I8MM_SHIFT 24 +#define ID_ISAR6_BF16_SHIFT 20 +#define ID_ISAR6_SPECRES_SHIFT 16 +#define ID_ISAR6_SB_SHIFT 12 +#define ID_ISAR6_FHM_SHIFT 8 +#define ID_ISAR6_DP_SHIFT 4 +#define ID_ISAR6_JSCVT_SHIFT 0 + +#define ID_MMFR0_INNERSHR_SHIFT 28 +#define ID_MMFR0_FCSE_SHIFT 24 +#define ID_MMFR0_AUXREG_SHIFT 20 +#define ID_MMFR0_TCM_SHIFT 16 +#define ID_MMFR0_SHARELVL_SHIFT 12 +#define ID_MMFR0_OUTERSHR_SHIFT 8 +#define ID_MMFR0_PMSA_SHIFT 4 +#define ID_MMFR0_VMSA_SHIFT 0 + +#define ID_MMFR4_EVT_SHIFT 28 +#define ID_MMFR4_CCIDX_SHIFT 24 +#define ID_MMFR4_LSM_SHIFT 20 +#define ID_MMFR4_HPDS_SHIFT 16 +#define ID_MMFR4_CNP_SHIFT 12 +#define ID_MMFR4_XNX_SHIFT 8 +#define ID_MMFR4_AC2_SHIFT 4 +#define ID_MMFR4_SPECSEI_SHIFT 0 + +#define ID_MMFR5_ETS_SHIFT 0 + +#define ID_PFR0_DIT_SHIFT 24 +#define ID_PFR0_CSV2_SHIFT 16 +#define ID_PFR0_STATE3_SHIFT 12 +#define ID_PFR0_STATE2_SHIFT 8 +#define ID_PFR0_STATE1_SHIFT 4 +#define ID_PFR0_STATE0_SHIFT 0 + +#define ID_DFR0_PERFMON_SHIFT 24 +#define ID_DFR0_MPROFDBG_SHIFT 20 +#define ID_DFR0_MMAPTRC_SHIFT 16 +#define ID_DFR0_COPTRC_SHIFT 12 +#define ID_DFR0_MMAPDBG_SHIFT 8 +#define ID_DFR0_COPSDBG_SHIFT 4 +#define ID_DFR0_COPDBG_SHIFT 0 + +#define ID_PFR2_SSBS_SHIFT 4 +#define ID_PFR2_CSV3_SHIFT 0 + +#define MVFR0_FPROUND_SHIFT 28 +#define MVFR0_FPSHVEC_SHIFT 24 +#define MVFR0_FPSQRT_SHIFT 20 +#define MVFR0_FPDIVIDE_SHIFT 16 +#define MVFR0_FPTRAP_SHIFT 12 +#define MVFR0_FPDP_SHIFT 8 +#define MVFR0_FPSP_SHIFT 4 +#define MVFR0_SIMD_SHIFT 0 + +#define MVFR1_SIMDFMAC_SHIFT 28 +#define MVFR1_FPHP_SHIFT 24 +#define MVFR1_SIMDHP_SHIFT 20 +#define MVFR1_SIMDSP_SHIFT 16 +#define MVFR1_SIMDINT_SHIFT 12 +#define MVFR1_SIMDLS_SHIFT 8 +#define MVFR1_FPDNAN_SHIFT 4 +#define MVFR1_FPFTZ_SHIFT 0 + +#define ID_PFR1_GIC_SHIFT 28 +#define ID_PFR1_VIRT_FRAC_SHIFT 24 +#define ID_PFR1_SEC_FRAC_SHIFT 20 +#define ID_PFR1_GENTIMER_SHIFT 16 +#define ID_PFR1_VIRTUALIZATION_SHIFT 12 +#define ID_PFR1_MPROGMOD_SHIFT 8 +#define ID_PFR1_SECURITY_SHIFT 4 +#define ID_PFR1_PROGMOD_SHIFT 0 + +#if defined(CONFIG_ARM64_4K_PAGES) +#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN4_SHIFT +#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN4_SUPPORTED_MIN +#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_TGRAN4_SUPPORTED_MAX +#define ID_AA64MMFR0_TGRAN_2_SHIFT ID_AA64MMFR0_TGRAN4_2_SHIFT +#elif defined(CONFIG_ARM64_16K_PAGES) +#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN16_SHIFT +#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN16_SUPPORTED_MIN +#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_TGRAN16_SUPPORTED_MAX +#define ID_AA64MMFR0_TGRAN_2_SHIFT ID_AA64MMFR0_TGRAN16_2_SHIFT +#elif defined(CONFIG_ARM64_64K_PAGES) +#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN64_SHIFT +#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN64_SUPPORTED_MIN +#define 
ID_AA64MMFR0_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_TGRAN64_SUPPORTED_MAX +#define ID_AA64MMFR0_TGRAN_2_SHIFT ID_AA64MMFR0_TGRAN64_2_SHIFT +#endif + +#define MVFR2_FPMISC_SHIFT 4 +#define MVFR2_SIMDMISC_SHIFT 0 + +#define DCZID_DZP_SHIFT 4 +#define DCZID_BS_SHIFT 0 + +/* + * The ZCR_ELx_LEN_* definitions intentionally include bits [8:4] which + * are reserved by the SVE architecture for future expansion of the LEN + * field, with compatible semantics. + */ +#define ZCR_ELx_LEN_SHIFT 0 +#define ZCR_ELx_LEN_SIZE 9 +#define ZCR_ELx_LEN_MASK 0x1ff + +#define CPACR_EL1_ZEN_EL1EN (BIT(16)) /* enable EL1 access */ +#define CPACR_EL1_ZEN_EL0EN (BIT(17)) /* enable EL0 access, if EL1EN set */ +#define CPACR_EL1_ZEN (CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN) + +/* TCR EL1 Bit Definitions */ +#define SYS_TCR_EL1_TCMA1 (BIT(58)) +#define SYS_TCR_EL1_TCMA0 (BIT(57)) + +/* GCR_EL1 Definitions */ +#define SYS_GCR_EL1_RRND (BIT(16)) +#define SYS_GCR_EL1_EXCL_MASK 0xffffUL + +/* RGSR_EL1 Definitions */ +#define SYS_RGSR_EL1_TAG_MASK 0xfUL +#define SYS_RGSR_EL1_SEED_SHIFT 8 +#define SYS_RGSR_EL1_SEED_MASK 0xffffUL + +/* GMID_EL1 field definitions */ +#define SYS_GMID_EL1_BS_SHIFT 0 +#define SYS_GMID_EL1_BS_SIZE 4 + +/* TFSR{,E0}_EL1 bit definitions */ +#define SYS_TFSR_EL1_TF0_SHIFT 0 +#define SYS_TFSR_EL1_TF1_SHIFT 1 +#define SYS_TFSR_EL1_TF0 (UL(1) << SYS_TFSR_EL1_TF0_SHIFT) +#define SYS_TFSR_EL1_TF1 (UL(1) << SYS_TFSR_EL1_TF1_SHIFT) + +/* Safe value for MPIDR_EL1: Bit31:RES1, Bit30:U:0, Bit24:MT:0 */ +#define SYS_MPIDR_SAFE_VAL (BIT(31)) + +#define TRFCR_ELx_TS_SHIFT 5 +#define TRFCR_ELx_TS_VIRTUAL ((0x1UL) << TRFCR_ELx_TS_SHIFT) +#define TRFCR_ELx_TS_GUEST_PHYSICAL ((0x2UL) << TRFCR_ELx_TS_SHIFT) +#define TRFCR_ELx_TS_PHYSICAL ((0x3UL) << TRFCR_ELx_TS_SHIFT) +#define TRFCR_EL2_CX BIT(3) +#define TRFCR_ELx_ExTRE BIT(1) +#define TRFCR_ELx_E0TRE BIT(0) + + +/* GIC Hypervisor interface registers */ +/* ICH_MISR_EL2 bit definitions */ +#define ICH_MISR_EOI (1 << 0) +#define ICH_MISR_U (1 << 1) + +/* ICH_LR*_EL2 bit definitions */ +#define ICH_LR_VIRTUAL_ID_MASK ((1ULL << 32) - 1) + +#define ICH_LR_EOI (1ULL << 41) +#define ICH_LR_GROUP (1ULL << 60) +#define ICH_LR_HW (1ULL << 61) +#define ICH_LR_STATE (3ULL << 62) +#define ICH_LR_PENDING_BIT (1ULL << 62) +#define ICH_LR_ACTIVE_BIT (1ULL << 63) +#define ICH_LR_PHYS_ID_SHIFT 32 +#define ICH_LR_PHYS_ID_MASK (0x3ffULL << ICH_LR_PHYS_ID_SHIFT) +#define ICH_LR_PRIORITY_SHIFT 48 +#define ICH_LR_PRIORITY_MASK (0xffULL << ICH_LR_PRIORITY_SHIFT) + +/* ICH_HCR_EL2 bit definitions */ +#define ICH_HCR_EN (1 << 0) +#define ICH_HCR_UIE (1 << 1) +#define ICH_HCR_NPIE (1 << 3) +#define ICH_HCR_TC (1 << 10) +#define ICH_HCR_TALL0 (1 << 11) +#define ICH_HCR_TALL1 (1 << 12) +#define ICH_HCR_EOIcount_SHIFT 27 +#define ICH_HCR_EOIcount_MASK (0x1f << ICH_HCR_EOIcount_SHIFT) + +/* ICH_VMCR_EL2 bit definitions */ +#define ICH_VMCR_ACK_CTL_SHIFT 2 +#define ICH_VMCR_ACK_CTL_MASK (1 << ICH_VMCR_ACK_CTL_SHIFT) +#define ICH_VMCR_FIQ_EN_SHIFT 3 +#define ICH_VMCR_FIQ_EN_MASK (1 << ICH_VMCR_FIQ_EN_SHIFT) +#define ICH_VMCR_CBPR_SHIFT 4 +#define ICH_VMCR_CBPR_MASK (1 << ICH_VMCR_CBPR_SHIFT) +#define ICH_VMCR_EOIM_SHIFT 9 +#define ICH_VMCR_EOIM_MASK (1 << ICH_VMCR_EOIM_SHIFT) +#define ICH_VMCR_BPR1_SHIFT 18 +#define ICH_VMCR_BPR1_MASK (7 << ICH_VMCR_BPR1_SHIFT) +#define ICH_VMCR_BPR0_SHIFT 21 +#define ICH_VMCR_BPR0_MASK (7 << ICH_VMCR_BPR0_SHIFT) +#define ICH_VMCR_PMR_SHIFT 24 +#define ICH_VMCR_PMR_MASK (0xffUL << ICH_VMCR_PMR_SHIFT) +#define ICH_VMCR_ENG0_SHIFT 0 +#define ICH_VMCR_ENG0_MASK (1 << 
ICH_VMCR_ENG0_SHIFT) +#define ICH_VMCR_ENG1_SHIFT 1 +#define ICH_VMCR_ENG1_MASK (1 << ICH_VMCR_ENG1_SHIFT) + +/* ICH_VTR_EL2 bit definitions */ +#define ICH_VTR_PRI_BITS_SHIFT 29 +#define ICH_VTR_PRI_BITS_MASK (7 << ICH_VTR_PRI_BITS_SHIFT) +#define ICH_VTR_ID_BITS_SHIFT 23 +#define ICH_VTR_ID_BITS_MASK (7 << ICH_VTR_ID_BITS_SHIFT) +#define ICH_VTR_SEIS_SHIFT 22 +#define ICH_VTR_SEIS_MASK (1 << ICH_VTR_SEIS_SHIFT) +#define ICH_VTR_A3V_SHIFT 21 +#define ICH_VTR_A3V_MASK (1 << ICH_VTR_A3V_SHIFT) + +#define ARM64_FEATURE_FIELD_BITS 4 + +/* Create a mask for the feature bits of the specified feature. */ +#define ARM64_FEATURE_MASK(x) (GENMASK_ULL(x##_SHIFT + ARM64_FEATURE_FIELD_BITS - 1, x##_SHIFT)) + +#ifdef __ASSEMBLY__ + + .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30 + .equ .L__reg_num_x\num, \num + .endr + .equ .L__reg_num_xzr, 31 + + .macro mrs_s, rt, sreg + __emit_inst(0xd5200000|(\sreg)|(.L__reg_num_\rt)) + .endm + + .macro msr_s, sreg, rt + __emit_inst(0xd5000000|(\sreg)|(.L__reg_num_\rt)) + .endm + +#else + +#include +#include +#include + +#define __DEFINE_MRS_MSR_S_REGNUM \ +" .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30\n" \ +" .equ .L__reg_num_x\\num, \\num\n" \ +" .endr\n" \ +" .equ .L__reg_num_xzr, 31\n" + +#define DEFINE_MRS_S \ + __DEFINE_MRS_MSR_S_REGNUM \ +" .macro mrs_s, rt, sreg\n" \ + __emit_inst(0xd5200000|(\\sreg)|(.L__reg_num_\\rt)) \ +" .endm\n" + +#define DEFINE_MSR_S \ + __DEFINE_MRS_MSR_S_REGNUM \ +" .macro msr_s, sreg, rt\n" \ + __emit_inst(0xd5000000|(\\sreg)|(.L__reg_num_\\rt)) \ +" .endm\n" + +#define UNDEFINE_MRS_S \ +" .purgem mrs_s\n" + +#define UNDEFINE_MSR_S \ +" .purgem msr_s\n" + +#define __mrs_s(v, r) \ + DEFINE_MRS_S \ +" mrs_s " v ", " __stringify(r) "\n" \ + UNDEFINE_MRS_S + +#define __msr_s(r, v) \ + DEFINE_MSR_S \ +" msr_s " __stringify(r) ", " v "\n" \ + UNDEFINE_MSR_S + +/* + * Unlike read_cpuid, calls to read_sysreg are never expected to be + * optimized away or replaced with synthetic values. + */ +#define read_sysreg(r) ({ \ + u64 __val; \ + asm volatile("mrs %0, " __stringify(r) : "=r" (__val)); \ + __val; \ +}) + +/* + * The "Z" constraint normally means a zero immediate, but when combined with + * the "%x0" template means XZR. + */ +#define write_sysreg(v, r) do { \ + u64 __val = (u64)(v); \ + asm volatile("msr " __stringify(r) ", %x0" \ + : : "rZ" (__val)); \ +} while (0) + +/* + * For registers without architectural names, or simply unsupported by + * GAS. + */ +#define read_sysreg_s(r) ({ \ + u64 __val; \ + asm volatile(__mrs_s("%0", r) : "=r" (__val)); \ + __val; \ +}) + +#define write_sysreg_s(v, r) do { \ + u64 __val = (u64)(v); \ + asm volatile(__msr_s(r, "%x0") : : "rZ" (__val)); \ +} while (0) + +/* + * Modify bits in a sysreg. Bits in the clear mask are zeroed, then bits in the + * set mask are set. Other bits are left as-is. 
+ */ +#define sysreg_clear_set(sysreg, clear, set) do { \ + u64 __scs_val = read_sysreg(sysreg); \ + u64 __scs_new = (__scs_val & ~(u64)(clear)) | (set); \ + if (__scs_new != __scs_val) \ + write_sysreg(__scs_new, sysreg); \ +} while (0) + +#define sysreg_clear_set_s(sysreg, clear, set) do { \ + u64 __scs_val = read_sysreg_s(sysreg); \ + u64 __scs_new = (__scs_val & ~(u64)(clear)) | (set); \ + if (__scs_new != __scs_val) \ + write_sysreg_s(__scs_new, sysreg); \ +} while (0) + +#define read_sysreg_par() ({ \ + u64 par; \ + asm(ALTERNATIVE("nop", "dmb sy", ARM64_WORKAROUND_1508412)); \ + par = read_sysreg(par_el1); \ + asm(ALTERNATIVE("nop", "dmb sy", ARM64_WORKAROUND_1508412)); \ + par; \ +}) + +#endif + +#endif /* __ASM_SYSREG_H */ diff --git a/tools/testing/selftests/kvm/aarch64/debug-exceptions.c b/tools/testing/selftests/kvm/aarch64/debug-exceptions.c index e5e6c92b60da..11fd23e21cb4 100644 --- a/tools/testing/selftests/kvm/aarch64/debug-exceptions.c +++ b/tools/testing/selftests/kvm/aarch64/debug-exceptions.c @@ -34,16 +34,16 @@ static void reset_debug_state(void) { asm volatile("msr daifset, #8"); - write_sysreg(osdlr_el1, 0); - write_sysreg(oslar_el1, 0); + write_sysreg(0, osdlr_el1); + write_sysreg(0, oslar_el1); isb(); - write_sysreg(mdscr_el1, 0); + write_sysreg(0, mdscr_el1); /* This test only uses the first bp and wp slot. */ - write_sysreg(dbgbvr0_el1, 0); - write_sysreg(dbgbcr0_el1, 0); - write_sysreg(dbgwcr0_el1, 0); - write_sysreg(dbgwvr0_el1, 0); + write_sysreg(0, dbgbvr0_el1); + write_sysreg(0, dbgbcr0_el1); + write_sysreg(0, dbgwcr0_el1); + write_sysreg(0, dbgwvr0_el1); isb(); } @@ -53,14 +53,14 @@ static void install_wp(uint64_t addr) uint32_t mdscr; wcr = DBGWCR_LEN8 | DBGWCR_RD | DBGWCR_WR | DBGWCR_EL1 | DBGWCR_E; - write_sysreg(dbgwcr0_el1, wcr); - write_sysreg(dbgwvr0_el1, addr); + write_sysreg(wcr, dbgwcr0_el1); + write_sysreg(addr, dbgwvr0_el1); isb(); asm volatile("msr daifclr, #8"); mdscr = read_sysreg(mdscr_el1) | MDSCR_KDE | MDSCR_MDE; - write_sysreg(mdscr_el1, mdscr); + write_sysreg(mdscr, mdscr_el1); isb(); } @@ -70,14 +70,14 @@ static void install_hw_bp(uint64_t addr) uint32_t mdscr; bcr = DBGBCR_LEN8 | DBGBCR_EXEC | DBGBCR_EL1 | DBGBCR_E; - write_sysreg(dbgbcr0_el1, bcr); - write_sysreg(dbgbvr0_el1, addr); + write_sysreg(bcr, dbgbcr0_el1); + write_sysreg(addr, dbgbvr0_el1); isb(); asm volatile("msr daifclr, #8"); mdscr = read_sysreg(mdscr_el1) | MDSCR_KDE | MDSCR_MDE; - write_sysreg(mdscr_el1, mdscr); + write_sysreg(mdscr, mdscr_el1); isb(); } @@ -88,7 +88,7 @@ static void install_ss(void) asm volatile("msr daifclr, #8"); mdscr = read_sysreg(mdscr_el1) | MDSCR_KDE | MDSCR_SS; - write_sysreg(mdscr_el1, mdscr); + write_sysreg(mdscr, mdscr_el1); isb(); } diff --git a/tools/testing/selftests/kvm/include/aarch64/processor.h b/tools/testing/selftests/kvm/include/aarch64/processor.h index 96578bd46a85..7989e832cafb 100644 --- a/tools/testing/selftests/kvm/include/aarch64/processor.h +++ b/tools/testing/selftests/kvm/include/aarch64/processor.h @@ -10,6 +10,7 @@ #include "kvm_util.h" #include #include +#include #define ARM64_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \ @@ -119,18 +120,6 @@ void vm_install_exception_handler(struct kvm_vm *vm, void vm_install_sync_handler(struct kvm_vm *vm, int vector, int ec, handler_fn handler); -#define write_sysreg(reg, val) \ -({ \ - u64 __val = (u64)(val); \ - asm volatile("msr " __stringify(reg) ", %x0" : : "rZ" (__val)); \ -}) - -#define read_sysreg(reg) \ -({ u64 val; \ - asm volatile("mrs %0, "__stringify(reg) : 
"=r"(val) : : "memory");\ - val; \ -}) - #define isb() asm volatile("isb" : : : "memory") #define dsb(opt) asm volatile("dsb " #opt : : : "memory") #define dmb(opt) asm volatile("dmb " #opt : : : "memory") -- cgit v1.2.3 From b3c79c6130bcfdb0ff3819077deaddce981a0718 Mon Sep 17 00:00:00 2001 From: Raghavendra Rao Ananta Date: Thu, 7 Oct 2021 23:34:28 +0000 Subject: KVM: arm64: selftests: Introduce ARM64_SYS_KVM_REG With the inclusion of sysreg.h, that brings in system register encodings, it would be redundant to re-define register encodings again in processor.h to use it with ARM64_SYS_REG for the KVM functions such as set_reg() or get_reg(). Hence, add helper macro, ARM64_SYS_KVM_REG, that converts SYS_* definitions in sysreg.h into ARM64_SYS_REG definitions. Also replace all the users of ARM64_SYS_REG, relying on the encodings created in processor.h, with ARM64_SYS_KVM_REG and remove the definitions. Signed-off-by: Raghavendra Rao Ananta Reviewed-by: Ricardo Koller Reviewed-by: Andrew Jones Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211007233439.1826892-5-rananta@google.com --- .../testing/selftests/kvm/aarch64/debug-exceptions.c | 2 +- .../testing/selftests/kvm/aarch64/psci_cpu_on_test.c | 2 +- .../selftests/kvm/include/aarch64/processor.h | 20 +++++++++++--------- tools/testing/selftests/kvm/lib/aarch64/processor.c | 16 ++++++++-------- 4 files changed, 21 insertions(+), 19 deletions(-) diff --git a/tools/testing/selftests/kvm/aarch64/debug-exceptions.c b/tools/testing/selftests/kvm/aarch64/debug-exceptions.c index 11fd23e21cb4..ea189d83abf7 100644 --- a/tools/testing/selftests/kvm/aarch64/debug-exceptions.c +++ b/tools/testing/selftests/kvm/aarch64/debug-exceptions.c @@ -190,7 +190,7 @@ static int debug_version(struct kvm_vm *vm) { uint64_t id_aa64dfr0; - get_reg(vm, VCPU_ID, ARM64_SYS_REG(ID_AA64DFR0_EL1), &id_aa64dfr0); + get_reg(vm, VCPU_ID, KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1), &id_aa64dfr0); return id_aa64dfr0 & 0xf; } diff --git a/tools/testing/selftests/kvm/aarch64/psci_cpu_on_test.c b/tools/testing/selftests/kvm/aarch64/psci_cpu_on_test.c index 018c269990e1..4c5f6814030f 100644 --- a/tools/testing/selftests/kvm/aarch64/psci_cpu_on_test.c +++ b/tools/testing/selftests/kvm/aarch64/psci_cpu_on_test.c @@ -91,7 +91,7 @@ int main(void) init.features[0] |= (1 << KVM_ARM_VCPU_POWER_OFF); aarch64_vcpu_add_default(vm, VCPU_ID_TARGET, &init, guest_main); - get_reg(vm, VCPU_ID_TARGET, ARM64_SYS_REG(MPIDR_EL1), &target_mpidr); + get_reg(vm, VCPU_ID_TARGET, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1), &target_mpidr); vcpu_args_set(vm, VCPU_ID_SOURCE, 1, target_mpidr & MPIDR_HWID_BITMASK); vcpu_run(vm, VCPU_ID_SOURCE); diff --git a/tools/testing/selftests/kvm/include/aarch64/processor.h b/tools/testing/selftests/kvm/include/aarch64/processor.h index 7989e832cafb..93797783abad 100644 --- a/tools/testing/selftests/kvm/include/aarch64/processor.h +++ b/tools/testing/selftests/kvm/include/aarch64/processor.h @@ -16,15 +16,17 @@ #define ARM64_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \ KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x)) -#define CPACR_EL1 3, 0, 1, 0, 2 -#define TCR_EL1 3, 0, 2, 0, 2 -#define MAIR_EL1 3, 0, 10, 2, 0 -#define MPIDR_EL1 3, 0, 0, 0, 5 -#define TTBR0_EL1 3, 0, 2, 0, 0 -#define SCTLR_EL1 3, 0, 1, 0, 0 -#define VBAR_EL1 3, 0, 12, 0, 0 - -#define ID_AA64DFR0_EL1 3, 0, 0, 5, 0 +/* + * KVM_ARM64_SYS_REG(sys_reg_id): Helper macro to convert + * SYS_* register definitions in asm/sysreg.h to use in KVM + * calls such as get_reg() and set_reg(). 
+ */ +#define KVM_ARM64_SYS_REG(sys_reg_id) \ + ARM64_SYS_REG(sys_reg_Op0(sys_reg_id), \ + sys_reg_Op1(sys_reg_id), \ + sys_reg_CRn(sys_reg_id), \ + sys_reg_CRm(sys_reg_id), \ + sys_reg_Op2(sys_reg_id)) /* * Default MAIR diff --git a/tools/testing/selftests/kvm/lib/aarch64/processor.c b/tools/testing/selftests/kvm/lib/aarch64/processor.c index 632b74d6b3ca..db64ee206064 100644 --- a/tools/testing/selftests/kvm/lib/aarch64/processor.c +++ b/tools/testing/selftests/kvm/lib/aarch64/processor.c @@ -232,10 +232,10 @@ void aarch64_vcpu_setup(struct kvm_vm *vm, int vcpuid, struct kvm_vcpu_init *ini * Enable FP/ASIMD to avoid trapping when accessing Q0-Q15 * registers, which the variable argument list macros do. */ - set_reg(vm, vcpuid, ARM64_SYS_REG(CPACR_EL1), 3 << 20); + set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_CPACR_EL1), 3 << 20); - get_reg(vm, vcpuid, ARM64_SYS_REG(SCTLR_EL1), &sctlr_el1); - get_reg(vm, vcpuid, ARM64_SYS_REG(TCR_EL1), &tcr_el1); + get_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), &sctlr_el1); + get_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_TCR_EL1), &tcr_el1); switch (vm->mode) { case VM_MODE_P52V48_4K: @@ -273,10 +273,10 @@ void aarch64_vcpu_setup(struct kvm_vm *vm, int vcpuid, struct kvm_vcpu_init *ini tcr_el1 |= (1 << 8) | (1 << 10) | (3 << 12); tcr_el1 |= (64 - vm->va_bits) /* T0SZ */; - set_reg(vm, vcpuid, ARM64_SYS_REG(SCTLR_EL1), sctlr_el1); - set_reg(vm, vcpuid, ARM64_SYS_REG(TCR_EL1), tcr_el1); - set_reg(vm, vcpuid, ARM64_SYS_REG(MAIR_EL1), DEFAULT_MAIR_EL1); - set_reg(vm, vcpuid, ARM64_SYS_REG(TTBR0_EL1), vm->pgd); + set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), sctlr_el1); + set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_TCR_EL1), tcr_el1); + set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_MAIR_EL1), DEFAULT_MAIR_EL1); + set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_TTBR0_EL1), vm->pgd); } void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent) @@ -362,7 +362,7 @@ void vcpu_init_descriptor_tables(struct kvm_vm *vm, uint32_t vcpuid) { extern char vectors; - set_reg(vm, vcpuid, ARM64_SYS_REG(VBAR_EL1), (uint64_t)&vectors); + set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_VBAR_EL1), (uint64_t)&vectors); } void route_exception(struct ex_regs *regs, int vector) -- cgit v1.2.3 From 740826ec02a65a5b25335fddfe8bce4ac99c7a11 Mon Sep 17 00:00:00 2001 From: Raghavendra Rao Ananta Date: Thu, 7 Oct 2021 23:34:29 +0000 Subject: KVM: arm64: selftests: Add support for cpu_relax Implement the guest helper routine, cpu_relax(), to yield the processor to other tasks. The function was derived from arch/arm64/include/asm/vdso/processor.h. 
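For illustration, a guest busy-wait built on the new helper might look like the sketch below; the flag variable and the polling loop are hypothetical, and only cpu_relax() itself comes from this patch. The "yield" hint tells the CPU (and, under virtualization, potentially the hypervisor) that the vCPU is just spinning.

/* Hypothetical guest-side wait loop using cpu_relax(). */
static volatile int flag;

static void guest_wait_for_flag(void)
{
        while (!flag)
                cpu_relax();    /* emits a "yield" hint on each iteration */
}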
Signed-off-by: Raghavendra Rao Ananta Reviewed-by: Oliver Upton Reviewed-by: Andrew Jones Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211007233439.1826892-6-rananta@google.com --- tools/testing/selftests/kvm/include/aarch64/processor.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tools/testing/selftests/kvm/include/aarch64/processor.h b/tools/testing/selftests/kvm/include/aarch64/processor.h index 93797783abad..265054c24481 100644 --- a/tools/testing/selftests/kvm/include/aarch64/processor.h +++ b/tools/testing/selftests/kvm/include/aarch64/processor.h @@ -122,6 +122,11 @@ void vm_install_exception_handler(struct kvm_vm *vm, void vm_install_sync_handler(struct kvm_vm *vm, int vector, int ec, handler_fn handler); +static inline void cpu_relax(void) +{ + asm volatile("yield" ::: "memory"); +} + #define isb() asm volatile("isb" : : : "memory") #define dsb(opt) asm volatile("dsb " #opt : : : "memory") #define dmb(opt) asm volatile("dmb " #opt : : : "memory") -- cgit v1.2.3 From d977ed39940231839f6856637fe24f41860f7969 Mon Sep 17 00:00:00 2001 From: Raghavendra Rao Ananta Date: Thu, 7 Oct 2021 23:34:30 +0000 Subject: KVM: arm64: selftests: Add basic support for arch_timers Add a minimalistic library support to access the virtual timers, that can be used for simple timing functionalities, such as introducing delays in the guest. Signed-off-by: Raghavendra Rao Ananta Reviewed-by: Andrew Jones Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211007233439.1826892-7-rananta@google.com --- .../selftests/kvm/include/aarch64/arch_timer.h | 142 +++++++++++++++++++++ 1 file changed, 142 insertions(+) create mode 100644 tools/testing/selftests/kvm/include/aarch64/arch_timer.h diff --git a/tools/testing/selftests/kvm/include/aarch64/arch_timer.h b/tools/testing/selftests/kvm/include/aarch64/arch_timer.h new file mode 100644 index 000000000000..cb7c03de3a21 --- /dev/null +++ b/tools/testing/selftests/kvm/include/aarch64/arch_timer.h @@ -0,0 +1,142 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * ARM Generic Timer specific interface + */ + +#ifndef SELFTEST_KVM_ARCH_TIMER_H +#define SELFTEST_KVM_ARCH_TIMER_H + +#include "processor.h" + +enum arch_timer { + VIRTUAL, + PHYSICAL, +}; + +#define CTL_ENABLE (1 << 0) +#define CTL_IMASK (1 << 1) +#define CTL_ISTATUS (1 << 2) + +#define msec_to_cycles(msec) \ + (timer_get_cntfrq() * (uint64_t)(msec) / 1000) + +#define usec_to_cycles(usec) \ + (timer_get_cntfrq() * (uint64_t)(usec) / 1000000) + +#define cycles_to_usec(cycles) \ + ((uint64_t)(cycles) * 1000000 / timer_get_cntfrq()) + +static inline uint32_t timer_get_cntfrq(void) +{ + return read_sysreg(cntfrq_el0); +} + +static inline uint64_t timer_get_cntct(enum arch_timer timer) +{ + isb(); + + switch (timer) { + case VIRTUAL: + return read_sysreg(cntvct_el0); + case PHYSICAL: + return read_sysreg(cntpct_el0); + default: + GUEST_ASSERT_1(0, timer); + } + + /* We should not reach here */ + return 0; +} + +static inline void timer_set_cval(enum arch_timer timer, uint64_t cval) +{ + switch (timer) { + case VIRTUAL: + write_sysreg(cval, cntv_cval_el0); + break; + case PHYSICAL: + write_sysreg(cval, cntp_cval_el0); + break; + default: + GUEST_ASSERT_1(0, timer); + } + + isb(); +} + +static inline uint64_t timer_get_cval(enum arch_timer timer) +{ + switch (timer) { + case VIRTUAL: + return read_sysreg(cntv_cval_el0); + case PHYSICAL: + return read_sysreg(cntp_cval_el0); + default: + GUEST_ASSERT_1(0, timer); + } + + /* We should not reach here */ + return 0; +} + +static inline 
void timer_set_tval(enum arch_timer timer, uint32_t tval) +{ + switch (timer) { + case VIRTUAL: + write_sysreg(tval, cntv_tval_el0); + break; + case PHYSICAL: + write_sysreg(tval, cntp_tval_el0); + break; + default: + GUEST_ASSERT_1(0, timer); + } + + isb(); +} + +static inline void timer_set_ctl(enum arch_timer timer, uint32_t ctl) +{ + switch (timer) { + case VIRTUAL: + write_sysreg(ctl, cntv_ctl_el0); + break; + case PHYSICAL: + write_sysreg(ctl, cntp_ctl_el0); + break; + default: + GUEST_ASSERT_1(0, timer); + } + + isb(); +} + +static inline uint32_t timer_get_ctl(enum arch_timer timer) +{ + switch (timer) { + case VIRTUAL: + return read_sysreg(cntv_ctl_el0); + case PHYSICAL: + return read_sysreg(cntp_ctl_el0); + default: + GUEST_ASSERT_1(0, timer); + } + + /* We should not reach here */ + return 0; +} + +static inline void timer_set_next_cval_ms(enum arch_timer timer, uint32_t msec) +{ + uint64_t now_ct = timer_get_cntct(timer); + uint64_t next_ct = now_ct + msec_to_cycles(msec); + + timer_set_cval(timer, next_ct); +} + +static inline void timer_set_next_tval_ms(enum arch_timer timer, uint32_t msec) +{ + timer_set_tval(timer, msec_to_cycles(msec)); +} + +#endif /* SELFTEST_KVM_ARCH_TIMER_H */ -- cgit v1.2.3 From 80166904655976bb9babc48fd283c2bba5799920 Mon Sep 17 00:00:00 2001 From: Raghavendra Rao Ananta Date: Thu, 7 Oct 2021 23:34:31 +0000 Subject: KVM: arm64: selftests: Add basic support to generate delays Add udelay() support to generate a delay in the guest. The routines are derived and simplified from kernel's arch/arm64/lib/delay.c. Signed-off-by: Raghavendra Rao Ananta Reviewed-by: Andrew Jones Reviewed-by: Oliver Upton Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211007233439.1826892-8-rananta@google.com --- .../testing/selftests/kvm/include/aarch64/delay.h | 25 ++++++++++++++++++++++ 1 file changed, 25 insertions(+) create mode 100644 tools/testing/selftests/kvm/include/aarch64/delay.h diff --git a/tools/testing/selftests/kvm/include/aarch64/delay.h b/tools/testing/selftests/kvm/include/aarch64/delay.h new file mode 100644 index 000000000000..329e4f5079ea --- /dev/null +++ b/tools/testing/selftests/kvm/include/aarch64/delay.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * ARM simple delay routines + */ + +#ifndef SELFTEST_KVM_ARM_DELAY_H +#define SELFTEST_KVM_ARM_DELAY_H + +#include "arch_timer.h" + +static inline void __delay(uint64_t cycles) +{ + enum arch_timer timer = VIRTUAL; + uint64_t start = timer_get_cntct(timer); + + while ((timer_get_cntct(timer) - start) < cycles) + cpu_relax(); +} + +static inline void udelay(unsigned long usec) +{ + __delay(usec_to_cycles(usec)); +} + +#endif /* SELFTEST_KVM_ARM_DELAY_H */ -- cgit v1.2.3 From 5c636d585cfd0d01a89b18fced77a07ab2ef386a Mon Sep 17 00:00:00 2001 From: Raghavendra Rao Ananta Date: Thu, 7 Oct 2021 23:34:32 +0000 Subject: KVM: arm64: selftests: Add support to disable and enable local IRQs Add functions local_irq_enable() and local_irq_disable() to enable and disable the IRQs from the guest, respectively. 
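As a hypothetical usage sketch (not part of the patch), a guest can bracket a non-reentrant update with these helpers so that its own interrupt handler cannot run in the middle; the DAIF immediate #3 used by the implementation masks and unmasks both IRQs and FIQs.

/* Hypothetical guest code: update state shared with the guest's own
 * IRQ handler without being interrupted part-way through. */
static uint64_t events_handled;

static void account_event(void)
{
        local_irq_disable();    /* msr daifset, #3: mask IRQ and FIQ */
        events_handled++;
        local_irq_enable();     /* msr daifclr, #3: unmask IRQ and FIQ */
}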
Signed-off-by: Raghavendra Rao Ananta Reviewed-by: Oliver Upton Reviewed-by: Andrew Jones Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211007233439.1826892-9-rananta@google.com --- tools/testing/selftests/kvm/include/aarch64/processor.h | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/tools/testing/selftests/kvm/include/aarch64/processor.h b/tools/testing/selftests/kvm/include/aarch64/processor.h index 265054c24481..515d04a3c27d 100644 --- a/tools/testing/selftests/kvm/include/aarch64/processor.h +++ b/tools/testing/selftests/kvm/include/aarch64/processor.h @@ -172,4 +172,14 @@ static __always_inline u32 __raw_readl(const volatile void *addr) #define writel(v,c) ({ __iowmb(); writel_relaxed((v),(c));}) #define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(__v); __v; }) +static inline void local_irq_enable(void) +{ + asm volatile("msr daifclr, #3" : : : "memory"); +} + +static inline void local_irq_disable(void) +{ + asm volatile("msr daifset, #3" : : : "memory"); +} + #endif /* SELFTEST_KVM_PROCESSOR_H */ -- cgit v1.2.3 From 0226cd531c587e0cd51e5ce5622051d319182506 Mon Sep 17 00:00:00 2001 From: Raghavendra Rao Ananta Date: Thu, 7 Oct 2021 23:34:33 +0000 Subject: KVM: arm64: selftests: Maintain consistency for vcpuid type The prototype of aarch64_vcpu_setup() accepts vcpuid as 'int', while the rest of the aarch64 (and struct vcpu) carries it as 'uint32_t'. Hence, change the prototype to make it consistent throughout the board. Signed-off-by: Raghavendra Rao Ananta Reviewed-by: Andrew Jones Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211007233439.1826892-10-rananta@google.com --- tools/testing/selftests/kvm/include/aarch64/processor.h | 2 +- tools/testing/selftests/kvm/lib/aarch64/processor.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/kvm/include/aarch64/processor.h b/tools/testing/selftests/kvm/include/aarch64/processor.h index 515d04a3c27d..27d8e1bb5b36 100644 --- a/tools/testing/selftests/kvm/include/aarch64/processor.h +++ b/tools/testing/selftests/kvm/include/aarch64/processor.h @@ -63,7 +63,7 @@ static inline void set_reg(struct kvm_vm *vm, uint32_t vcpuid, uint64_t id, uint vcpu_ioctl(vm, vcpuid, KVM_SET_ONE_REG, ®); } -void aarch64_vcpu_setup(struct kvm_vm *vm, int vcpuid, struct kvm_vcpu_init *init); +void aarch64_vcpu_setup(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_vcpu_init *init); void aarch64_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_vcpu_init *init, void *guest_code); diff --git a/tools/testing/selftests/kvm/lib/aarch64/processor.c b/tools/testing/selftests/kvm/lib/aarch64/processor.c index db64ee206064..34f6bd47661f 100644 --- a/tools/testing/selftests/kvm/lib/aarch64/processor.c +++ b/tools/testing/selftests/kvm/lib/aarch64/processor.c @@ -212,7 +212,7 @@ void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) } } -void aarch64_vcpu_setup(struct kvm_vm *vm, int vcpuid, struct kvm_vcpu_init *init) +void aarch64_vcpu_setup(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_vcpu_init *init) { struct kvm_vcpu_init default_init = { .target = -1, }; uint64_t sctlr_el1, tcr_el1; -- cgit v1.2.3 From 17229bdc86c9e618e8832b5ca8451e367e07511b Mon Sep 17 00:00:00 2001 From: Raghavendra Rao Ananta Date: Thu, 7 Oct 2021 23:34:34 +0000 Subject: KVM: arm64: selftests: Add guest support to get the vcpuid At times, such as when in the interrupt handler, the guest wants to get the vcpuid that it's running on to pull the per-cpu private data. 
As a result, introduce guest_get_vcpuid() that returns the vcpuid of the calling vcpu. The interface is architecture independent, but defined only for arm64 as of now. Suggested-by: Reiji Watanabe Signed-off-by: Raghavendra Rao Ananta Reviewed-by: Ricardo Koller Reviewed-by: Reiji Watanabe Reviewed-by: Andrew Jones Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211007233439.1826892-11-rananta@google.com --- tools/testing/selftests/kvm/include/kvm_util.h | 2 ++ tools/testing/selftests/kvm/lib/aarch64/processor.c | 6 ++++++ 2 files changed, 8 insertions(+) diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h index 010b59b13917..bcf05f5381ed 100644 --- a/tools/testing/selftests/kvm/include/kvm_util.h +++ b/tools/testing/selftests/kvm/include/kvm_util.h @@ -400,4 +400,6 @@ uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc); int vm_get_stats_fd(struct kvm_vm *vm); int vcpu_get_stats_fd(struct kvm_vm *vm, uint32_t vcpuid); +uint32_t guest_get_vcpuid(void); + #endif /* SELFTEST_KVM_UTIL_H */ diff --git a/tools/testing/selftests/kvm/lib/aarch64/processor.c b/tools/testing/selftests/kvm/lib/aarch64/processor.c index 34f6bd47661f..b4eeeafd2a70 100644 --- a/tools/testing/selftests/kvm/lib/aarch64/processor.c +++ b/tools/testing/selftests/kvm/lib/aarch64/processor.c @@ -277,6 +277,7 @@ void aarch64_vcpu_setup(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_vcpu_init set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_TCR_EL1), tcr_el1); set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_MAIR_EL1), DEFAULT_MAIR_EL1); set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_TTBR0_EL1), vm->pgd); + set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_TPIDR_EL1), vcpuid); } void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent) @@ -426,3 +427,8 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector, assert(vector < VECTOR_NUM); handlers->exception_handlers[vector][0] = handler; } + +uint32_t guest_get_vcpuid(void) +{ + return read_sysreg(tpidr_el1); +} -- cgit v1.2.3 From 414de89df1ec453ff4adb9d77ffd596096cb44bd Mon Sep 17 00:00:00 2001 From: Raghavendra Rao Ananta Date: Thu, 7 Oct 2021 23:34:35 +0000 Subject: KVM: arm64: selftests: Add light-weight spinlock support Add a simpler version of spinlock support for ARM64 for the guests to use. The implementation is loosely based on the spinlock implementation in kvm-unit-tests. 
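A hypothetical usage sketch follows; the lock and counter below are illustrative, and only spin_lock()/spin_unlock() come from this patch. A zero-initialized struct spinlock starts out unlocked, which matches the ldaxr/stlr implementation shown in the diff.

/* Hypothetical multi-vCPU guest counter protected by the new lock. */
static struct spinlock counter_lock;    /* zero-initialized: unlocked */
static uint64_t shared_counter;

static void bump_shared_counter(void)
{
        spin_lock(&counter_lock);       /* acquire */
        shared_counter++;
        spin_unlock(&counter_lock);     /* release */
}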
Signed-off-by: Raghavendra Rao Ananta Reviewed-by: Oliver Upton Reviewed-by: Andrew Jones Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211007233439.1826892-12-rananta@google.com --- tools/testing/selftests/kvm/Makefile | 2 +- .../selftests/kvm/include/aarch64/spinlock.h | 13 +++++++++++ tools/testing/selftests/kvm/lib/aarch64/spinlock.c | 27 ++++++++++++++++++++++ 3 files changed, 41 insertions(+), 1 deletion(-) create mode 100644 tools/testing/selftests/kvm/include/aarch64/spinlock.h create mode 100644 tools/testing/selftests/kvm/lib/aarch64/spinlock.c diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index d1774f461393..d8fb91a5ea7b 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -35,7 +35,7 @@ endif LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/rbtree.c lib/sparsebit.c lib/test_util.c lib/guest_modes.c lib/perf_test_util.c LIBKVM_x86_64 = lib/x86_64/apic.c lib/x86_64/processor.c lib/x86_64/vmx.c lib/x86_64/svm.c lib/x86_64/ucall.c lib/x86_64/handlers.S -LIBKVM_aarch64 = lib/aarch64/processor.c lib/aarch64/ucall.c lib/aarch64/handlers.S +LIBKVM_aarch64 = lib/aarch64/processor.c lib/aarch64/ucall.c lib/aarch64/handlers.S lib/aarch64/spinlock.c LIBKVM_s390x = lib/s390x/processor.c lib/s390x/ucall.c lib/s390x/diag318_test_handler.c TEST_GEN_PROGS_x86_64 = x86_64/cr4_cpuid_sync_test diff --git a/tools/testing/selftests/kvm/include/aarch64/spinlock.h b/tools/testing/selftests/kvm/include/aarch64/spinlock.h new file mode 100644 index 000000000000..cf0984106d14 --- /dev/null +++ b/tools/testing/selftests/kvm/include/aarch64/spinlock.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef SELFTEST_KVM_ARM64_SPINLOCK_H +#define SELFTEST_KVM_ARM64_SPINLOCK_H + +struct spinlock { + int v; +}; + +extern void spin_lock(struct spinlock *lock); +extern void spin_unlock(struct spinlock *lock); + +#endif /* SELFTEST_KVM_ARM64_SPINLOCK_H */ diff --git a/tools/testing/selftests/kvm/lib/aarch64/spinlock.c b/tools/testing/selftests/kvm/lib/aarch64/spinlock.c new file mode 100644 index 000000000000..a076e780be5d --- /dev/null +++ b/tools/testing/selftests/kvm/lib/aarch64/spinlock.c @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ARM64 Spinlock support + */ +#include + +#include "spinlock.h" + +void spin_lock(struct spinlock *lock) +{ + int val, res; + + asm volatile( + "1: ldaxr %w0, [%2]\n" + " cbnz %w0, 1b\n" + " mov %w0, #1\n" + " stxr %w1, %w0, [%2]\n" + " cbnz %w1, 1b\n" + : "=&r" (val), "=&r" (res) + : "r" (&lock->v) + : "memory"); +} + +void spin_unlock(struct spinlock *lock) +{ + asm volatile("stlr wzr, [%0]\n" : : "r" (&lock->v) : "memory"); +} -- cgit v1.2.3 From 28281652f90acc138f8b4bae8a3bf8cf1ce0d29e Mon Sep 17 00:00:00 2001 From: Raghavendra Rao Ananta Date: Thu, 7 Oct 2021 23:34:36 +0000 Subject: KVM: arm64: selftests: Add basic GICv3 support Add basic support for ARM Generic Interrupt Controller v3. The support provides guests to setup interrupts. The work is inspired from kvm-unit-tests and the kernel's GIC driver (drivers/irqchip/irq-gic-v3.c). 
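A rough guest-side sketch of how the interface added below might be exercised: the base addresses, vCPU count and interrupt number are placeholders a real test would choose when it creates the vGIC, and the handler would be wired up from the host with vm_install_exception_handler(); only gic_init(), gic_irq_enable(), gic_get_and_ack_irq() and gic_set_eoi() come from this patch.

/* Hypothetical guest code using the new GIC library. GICD_BASE_GPA,
 * GICR_BASE_GPA, NR_VCPUS and TEST_SPI_INTID are placeholders, not
 * definitions from this series. */
#define GICD_BASE_GPA   0x08000000ULL
#define GICR_BASE_GPA   0x080a0000ULL
#define NR_VCPUS        2
#define TEST_SPI_INTID  64

/* Installed from the host via vm_install_exception_handler(). */
static void guest_irq_handler(struct ex_regs *regs)
{
        unsigned int intid = gic_get_and_ack_irq();

        /* ... record or check the interrupt here ... */

        gic_set_eoi(intid);
}

static void guest_code(void)
{
        gic_init(GIC_V3, NR_VCPUS,
                 (void *)GICD_BASE_GPA, (void *)GICR_BASE_GPA);
        gic_irq_enable(TEST_SPI_INTID);
        local_irq_enable();

        while (1)
                cpu_relax();    /* wait for the interrupt to arrive */
}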
Signed-off-by: Raghavendra Rao Ananta Reviewed-by: Andrew Jones Reviewed-by: Ricardo Koller Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211007233439.1826892-13-rananta@google.com --- tools/testing/selftests/kvm/Makefile | 2 +- tools/testing/selftests/kvm/include/aarch64/gic.h | 21 ++ tools/testing/selftests/kvm/lib/aarch64/gic.c | 95 ++++++++ .../selftests/kvm/lib/aarch64/gic_private.h | 21 ++ tools/testing/selftests/kvm/lib/aarch64/gic_v3.c | 240 +++++++++++++++++++++ tools/testing/selftests/kvm/lib/aarch64/gic_v3.h | 70 ++++++ 6 files changed, 448 insertions(+), 1 deletion(-) create mode 100644 tools/testing/selftests/kvm/include/aarch64/gic.h create mode 100644 tools/testing/selftests/kvm/lib/aarch64/gic.c create mode 100644 tools/testing/selftests/kvm/lib/aarch64/gic_private.h create mode 100644 tools/testing/selftests/kvm/lib/aarch64/gic_v3.c create mode 100644 tools/testing/selftests/kvm/lib/aarch64/gic_v3.h diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index d8fb91a5ea7b..b8441dc33b78 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -35,7 +35,7 @@ endif LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/rbtree.c lib/sparsebit.c lib/test_util.c lib/guest_modes.c lib/perf_test_util.c LIBKVM_x86_64 = lib/x86_64/apic.c lib/x86_64/processor.c lib/x86_64/vmx.c lib/x86_64/svm.c lib/x86_64/ucall.c lib/x86_64/handlers.S -LIBKVM_aarch64 = lib/aarch64/processor.c lib/aarch64/ucall.c lib/aarch64/handlers.S lib/aarch64/spinlock.c +LIBKVM_aarch64 = lib/aarch64/processor.c lib/aarch64/ucall.c lib/aarch64/handlers.S lib/aarch64/spinlock.c lib/aarch64/gic.c lib/aarch64/gic_v3.c LIBKVM_s390x = lib/s390x/processor.c lib/s390x/ucall.c lib/s390x/diag318_test_handler.c TEST_GEN_PROGS_x86_64 = x86_64/cr4_cpuid_sync_test diff --git a/tools/testing/selftests/kvm/include/aarch64/gic.h b/tools/testing/selftests/kvm/include/aarch64/gic.h new file mode 100644 index 000000000000..85dd1e53048e --- /dev/null +++ b/tools/testing/selftests/kvm/include/aarch64/gic.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * ARM Generic Interrupt Controller (GIC) specific defines + */ + +#ifndef SELFTEST_KVM_GIC_H +#define SELFTEST_KVM_GIC_H + +enum gic_type { + GIC_V3, + GIC_TYPE_MAX, +}; + +void gic_init(enum gic_type type, unsigned int nr_cpus, + void *dist_base, void *redist_base); +void gic_irq_enable(unsigned int intid); +void gic_irq_disable(unsigned int intid); +unsigned int gic_get_and_ack_irq(void); +void gic_set_eoi(unsigned int intid); + +#endif /* SELFTEST_KVM_GIC_H */ diff --git a/tools/testing/selftests/kvm/lib/aarch64/gic.c b/tools/testing/selftests/kvm/lib/aarch64/gic.c new file mode 100644 index 000000000000..fff4fc27504d --- /dev/null +++ b/tools/testing/selftests/kvm/lib/aarch64/gic.c @@ -0,0 +1,95 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ARM Generic Interrupt Controller (GIC) support + */ + +#include +#include +#include + +#include "kvm_util.h" + +#include +#include "gic_private.h" +#include "processor.h" +#include "spinlock.h" + +static const struct gic_common_ops *gic_common_ops; +static struct spinlock gic_lock; + +static void gic_cpu_init(unsigned int cpu, void *redist_base) +{ + gic_common_ops->gic_cpu_init(cpu, redist_base); +} + +static void +gic_dist_init(enum gic_type type, unsigned int nr_cpus, void *dist_base) +{ + const struct gic_common_ops *gic_ops = NULL; + + spin_lock(&gic_lock); + + /* Distributor initialization is needed only once per VM */ + if 
(gic_common_ops) { + spin_unlock(&gic_lock); + return; + } + + if (type == GIC_V3) + gic_ops = &gicv3_ops; + + GUEST_ASSERT(gic_ops); + + gic_ops->gic_init(nr_cpus, dist_base); + gic_common_ops = gic_ops; + + /* Make sure that the initialized data is visible to all the vCPUs */ + dsb(sy); + + spin_unlock(&gic_lock); +} + +void gic_init(enum gic_type type, unsigned int nr_cpus, + void *dist_base, void *redist_base) +{ + uint32_t cpu = guest_get_vcpuid(); + + GUEST_ASSERT(type < GIC_TYPE_MAX); + GUEST_ASSERT(dist_base); + GUEST_ASSERT(redist_base); + GUEST_ASSERT(nr_cpus); + + gic_dist_init(type, nr_cpus, dist_base); + gic_cpu_init(cpu, redist_base); +} + +void gic_irq_enable(unsigned int intid) +{ + GUEST_ASSERT(gic_common_ops); + gic_common_ops->gic_irq_enable(intid); +} + +void gic_irq_disable(unsigned int intid) +{ + GUEST_ASSERT(gic_common_ops); + gic_common_ops->gic_irq_disable(intid); +} + +unsigned int gic_get_and_ack_irq(void) +{ + uint64_t irqstat; + unsigned int intid; + + GUEST_ASSERT(gic_common_ops); + + irqstat = gic_common_ops->gic_read_iar(); + intid = irqstat & GENMASK(23, 0); + + return intid; +} + +void gic_set_eoi(unsigned int intid) +{ + GUEST_ASSERT(gic_common_ops); + gic_common_ops->gic_write_eoir(intid); +} diff --git a/tools/testing/selftests/kvm/lib/aarch64/gic_private.h b/tools/testing/selftests/kvm/lib/aarch64/gic_private.h new file mode 100644 index 000000000000..d81d739433dc --- /dev/null +++ b/tools/testing/selftests/kvm/lib/aarch64/gic_private.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * ARM Generic Interrupt Controller (GIC) private defines that's only + * shared among the GIC library code. + */ + +#ifndef SELFTEST_KVM_GIC_PRIVATE_H +#define SELFTEST_KVM_GIC_PRIVATE_H + +struct gic_common_ops { + void (*gic_init)(unsigned int nr_cpus, void *dist_base); + void (*gic_cpu_init)(unsigned int cpu, void *redist_base); + void (*gic_irq_enable)(unsigned int intid); + void (*gic_irq_disable)(unsigned int intid); + uint64_t (*gic_read_iar)(void); + void (*gic_write_eoir)(uint32_t irq); +}; + +extern const struct gic_common_ops gicv3_ops; + +#endif /* SELFTEST_KVM_GIC_PRIVATE_H */ diff --git a/tools/testing/selftests/kvm/lib/aarch64/gic_v3.c b/tools/testing/selftests/kvm/lib/aarch64/gic_v3.c new file mode 100644 index 000000000000..2dbf3339b62e --- /dev/null +++ b/tools/testing/selftests/kvm/lib/aarch64/gic_v3.c @@ -0,0 +1,240 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ARM Generic Interrupt Controller (GIC) v3 support + */ + +#include + +#include "kvm_util.h" +#include "processor.h" +#include "delay.h" + +#include "gic_v3.h" +#include "gic_private.h" + +struct gicv3_data { + void *dist_base; + void *redist_base[GICV3_MAX_CPUS]; + unsigned int nr_cpus; + unsigned int nr_spis; +}; + +#define sgi_base_from_redist(redist_base) (redist_base + SZ_64K) + +enum gicv3_intid_range { + SGI_RANGE, + PPI_RANGE, + SPI_RANGE, + INVALID_RANGE, +}; + +static struct gicv3_data gicv3_data; + +static void gicv3_gicd_wait_for_rwp(void) +{ + unsigned int count = 100000; /* 1s */ + + while (readl(gicv3_data.dist_base + GICD_CTLR) & GICD_CTLR_RWP) { + GUEST_ASSERT(count--); + udelay(10); + } +} + +static void gicv3_gicr_wait_for_rwp(void *redist_base) +{ + unsigned int count = 100000; /* 1s */ + + while (readl(redist_base + GICR_CTLR) & GICR_CTLR_RWP) { + GUEST_ASSERT(count--); + udelay(10); + } +} + +static enum gicv3_intid_range get_intid_range(unsigned int intid) +{ + switch (intid) { + case 0 ... 15: + return SGI_RANGE; + case 16 ... 
31: + return PPI_RANGE; + case 32 ... 1019: + return SPI_RANGE; + } + + /* We should not be reaching here */ + GUEST_ASSERT(0); + + return INVALID_RANGE; +} + +static uint64_t gicv3_read_iar(void) +{ + uint64_t irqstat = read_sysreg_s(SYS_ICC_IAR1_EL1); + + dsb(sy); + return irqstat; +} + +static void gicv3_write_eoir(uint32_t irq) +{ + write_sysreg_s(irq, SYS_ICC_EOIR1_EL1); + isb(); +} + +static void +gicv3_config_irq(unsigned int intid, unsigned int offset) +{ + uint32_t cpu = guest_get_vcpuid(); + uint32_t mask = 1 << (intid % 32); + enum gicv3_intid_range intid_range = get_intid_range(intid); + void *reg; + + /* We care about 'cpu' only for SGIs or PPIs */ + if (intid_range == SGI_RANGE || intid_range == PPI_RANGE) { + GUEST_ASSERT(cpu < gicv3_data.nr_cpus); + + reg = sgi_base_from_redist(gicv3_data.redist_base[cpu]) + + offset; + writel(mask, reg); + gicv3_gicr_wait_for_rwp(gicv3_data.redist_base[cpu]); + } else if (intid_range == SPI_RANGE) { + reg = gicv3_data.dist_base + offset + (intid / 32) * 4; + writel(mask, reg); + gicv3_gicd_wait_for_rwp(); + } else { + GUEST_ASSERT(0); + } +} + +static void gicv3_irq_enable(unsigned int intid) +{ + gicv3_config_irq(intid, GICD_ISENABLER); +} + +static void gicv3_irq_disable(unsigned int intid) +{ + gicv3_config_irq(intid, GICD_ICENABLER); +} + +static void gicv3_enable_redist(void *redist_base) +{ + uint32_t val = readl(redist_base + GICR_WAKER); + unsigned int count = 100000; /* 1s */ + + val &= ~GICR_WAKER_ProcessorSleep; + writel(val, redist_base + GICR_WAKER); + + /* Wait until the processor is 'active' */ + while (readl(redist_base + GICR_WAKER) & GICR_WAKER_ChildrenAsleep) { + GUEST_ASSERT(count--); + udelay(10); + } +} + +static inline void *gicr_base_cpu(void *redist_base, uint32_t cpu) +{ + /* Align all the redistributors sequentially */ + return redist_base + cpu * SZ_64K * 2; +} + +static void gicv3_cpu_init(unsigned int cpu, void *redist_base) +{ + void *sgi_base; + unsigned int i; + void *redist_base_cpu; + + GUEST_ASSERT(cpu < gicv3_data.nr_cpus); + + redist_base_cpu = gicr_base_cpu(redist_base, cpu); + sgi_base = sgi_base_from_redist(redist_base_cpu); + + gicv3_enable_redist(redist_base_cpu); + + /* + * Mark all the SGI and PPI interrupts as non-secure Group-1. + * Also, deactivate and disable them. + */ + writel(~0, sgi_base + GICR_IGROUPR0); + writel(~0, sgi_base + GICR_ICACTIVER0); + writel(~0, sgi_base + GICR_ICENABLER0); + + /* Set a default priority for all the SGIs and PPIs */ + for (i = 0; i < 32; i += 4) + writel(GICD_INT_DEF_PRI_X4, + sgi_base + GICR_IPRIORITYR0 + i); + + gicv3_gicr_wait_for_rwp(redist_base_cpu); + + /* Enable the GIC system register (ICC_*) access */ + write_sysreg_s(read_sysreg_s(SYS_ICC_SRE_EL1) | ICC_SRE_EL1_SRE, + SYS_ICC_SRE_EL1); + + /* Set a default priority threshold */ + write_sysreg_s(ICC_PMR_DEF_PRIO, SYS_ICC_PMR_EL1); + + /* Enable non-secure Group-1 interrupts */ + write_sysreg_s(ICC_IGRPEN1_EL1_ENABLE, SYS_ICC_GRPEN1_EL1); + + gicv3_data.redist_base[cpu] = redist_base_cpu; +} + +static void gicv3_dist_init(void) +{ + void *dist_base = gicv3_data.dist_base; + unsigned int i; + + /* Disable the distributor until we set things up */ + writel(0, dist_base + GICD_CTLR); + gicv3_gicd_wait_for_rwp(); + + /* + * Mark all the SPI interrupts as non-secure Group-1. + * Also, deactivate and disable them. 
+ */ + for (i = 32; i < gicv3_data.nr_spis; i += 32) { + writel(~0, dist_base + GICD_IGROUPR + i / 8); + writel(~0, dist_base + GICD_ICACTIVER + i / 8); + writel(~0, dist_base + GICD_ICENABLER + i / 8); + } + + /* Set a default priority for all the SPIs */ + for (i = 32; i < gicv3_data.nr_spis; i += 4) + writel(GICD_INT_DEF_PRI_X4, + dist_base + GICD_IPRIORITYR + i); + + /* Wait for the settings to sync-in */ + gicv3_gicd_wait_for_rwp(); + + /* Finally, enable the distributor globally with ARE */ + writel(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | + GICD_CTLR_ENABLE_G1, dist_base + GICD_CTLR); + gicv3_gicd_wait_for_rwp(); +} + +static void gicv3_init(unsigned int nr_cpus, void *dist_base) +{ + GUEST_ASSERT(nr_cpus <= GICV3_MAX_CPUS); + + gicv3_data.nr_cpus = nr_cpus; + gicv3_data.dist_base = dist_base; + gicv3_data.nr_spis = GICD_TYPER_SPIS( + readl(gicv3_data.dist_base + GICD_TYPER)); + if (gicv3_data.nr_spis > 1020) + gicv3_data.nr_spis = 1020; + + /* + * Initialize only the distributor for now. + * The redistributor and CPU interfaces are initialized + * later for every PE. + */ + gicv3_dist_init(); +} + +const struct gic_common_ops gicv3_ops = { + .gic_init = gicv3_init, + .gic_cpu_init = gicv3_cpu_init, + .gic_irq_enable = gicv3_irq_enable, + .gic_irq_disable = gicv3_irq_disable, + .gic_read_iar = gicv3_read_iar, + .gic_write_eoir = gicv3_write_eoir, +}; diff --git a/tools/testing/selftests/kvm/lib/aarch64/gic_v3.h b/tools/testing/selftests/kvm/lib/aarch64/gic_v3.h new file mode 100644 index 000000000000..b51536d469a6 --- /dev/null +++ b/tools/testing/selftests/kvm/lib/aarch64/gic_v3.h @@ -0,0 +1,70 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * ARM Generic Interrupt Controller (GIC) v3 specific defines + */ + +#ifndef SELFTEST_KVM_GICV3_H +#define SELFTEST_KVM_GICV3_H + +#include + +/* + * Distributor registers + */ +#define GICD_CTLR 0x0000 +#define GICD_TYPER 0x0004 +#define GICD_IGROUPR 0x0080 +#define GICD_ISENABLER 0x0100 +#define GICD_ICENABLER 0x0180 +#define GICD_ICACTIVER 0x0380 +#define GICD_IPRIORITYR 0x0400 + +/* + * The assumption is that the guest runs in a non-secure mode. + * The following bits of GICD_CTLR are defined accordingly. 
+ */ +#define GICD_CTLR_RWP (1U << 31) +#define GICD_CTLR_nASSGIreq (1U << 8) +#define GICD_CTLR_ARE_NS (1U << 4) +#define GICD_CTLR_ENABLE_G1A (1U << 1) +#define GICD_CTLR_ENABLE_G1 (1U << 0) + +#define GICD_TYPER_SPIS(typer) ((((typer) & 0x1f) + 1) * 32) +#define GICD_INT_DEF_PRI_X4 0xa0a0a0a0 + +/* + * Redistributor registers + */ +#define GICR_CTLR 0x000 +#define GICR_WAKER 0x014 + +#define GICR_CTLR_RWP (1U << 3) + +#define GICR_WAKER_ProcessorSleep (1U << 1) +#define GICR_WAKER_ChildrenAsleep (1U << 2) + +/* + * Redistributor registers, offsets from SGI base + */ +#define GICR_IGROUPR0 GICD_IGROUPR +#define GICR_ISENABLER0 GICD_ISENABLER +#define GICR_ICENABLER0 GICD_ICENABLER +#define GICR_ICACTIVER0 GICD_ICACTIVER +#define GICR_IPRIORITYR0 GICD_IPRIORITYR + +/* CPU interface registers */ +#define SYS_ICC_PMR_EL1 sys_reg(3, 0, 4, 6, 0) +#define SYS_ICC_IAR1_EL1 sys_reg(3, 0, 12, 12, 0) +#define SYS_ICC_EOIR1_EL1 sys_reg(3, 0, 12, 12, 1) +#define SYS_ICC_SRE_EL1 sys_reg(3, 0, 12, 12, 5) +#define SYS_ICC_GRPEN1_EL1 sys_reg(3, 0, 12, 12, 7) + +#define ICC_PMR_DEF_PRIO 0xf0 + +#define ICC_SRE_EL1_SRE (1U << 0) + +#define ICC_IGRPEN1_EL1_ENABLE (1U << 0) + +#define GICV3_MAX_CPUS 512 + +#endif /* SELFTEST_KVM_GICV3_H */ -- cgit v1.2.3 From 250b8d6cb3b0312341304fa323b82355d656c018 Mon Sep 17 00:00:00 2001 From: Raghavendra Rao Ananta Date: Thu, 7 Oct 2021 23:34:37 +0000 Subject: KVM: arm64: selftests: Add host support for vGIC Implement a simple library to perform vGIC-v3 setup from a host point of view. This includes creating a vGIC device, setting up distributor and redistributor attributes, and mapping the guest physical addresses. The definition of REDIST_REGION_ATTR_ADDR is taken from aarch64/vgic_init test. Hence, replace the definition by including vgic.h in the test file. 
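For illustration, a minimal caller of the new helper could look like the sketch below. The GPA constants and the vm/nr_vcpus variables are assumptions made for this example; the only hard requirement is that all vCPUs have already been added to the VM before the call:

        #include "kvm_util.h"
        #include "vgic.h"

        #define GICD_BASE_GPA   0x8000000ULL
        #define GICR_BASE_GPA   0x80A0000ULL

        /* Create and map the vGIC-v3 after all vCPUs have been added. */
        int gic_fd = vgic_v3_setup(vm, nr_vcpus, GICD_BASE_GPA, GICR_BASE_GPA);

        TEST_ASSERT(gic_fd >= 0, "Failed to set up vGIC-v3");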
Signed-off-by: Raghavendra Rao Ananta Reviewed-by: Ricardo Koller Reviewed-by: Andrew Jones Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211007233439.1826892-14-rananta@google.com --- tools/testing/selftests/kvm/Makefile | 2 +- tools/testing/selftests/kvm/aarch64/vgic_init.c | 3 +- tools/testing/selftests/kvm/include/aarch64/vgic.h | 20 +++++++ tools/testing/selftests/kvm/lib/aarch64/vgic.c | 70 ++++++++++++++++++++++ 4 files changed, 92 insertions(+), 3 deletions(-) create mode 100644 tools/testing/selftests/kvm/include/aarch64/vgic.h create mode 100644 tools/testing/selftests/kvm/lib/aarch64/vgic.c diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index b8441dc33b78..cb5bdd4f0694 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -35,7 +35,7 @@ endif LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/rbtree.c lib/sparsebit.c lib/test_util.c lib/guest_modes.c lib/perf_test_util.c LIBKVM_x86_64 = lib/x86_64/apic.c lib/x86_64/processor.c lib/x86_64/vmx.c lib/x86_64/svm.c lib/x86_64/ucall.c lib/x86_64/handlers.S -LIBKVM_aarch64 = lib/aarch64/processor.c lib/aarch64/ucall.c lib/aarch64/handlers.S lib/aarch64/spinlock.c lib/aarch64/gic.c lib/aarch64/gic_v3.c +LIBKVM_aarch64 = lib/aarch64/processor.c lib/aarch64/ucall.c lib/aarch64/handlers.S lib/aarch64/spinlock.c lib/aarch64/gic.c lib/aarch64/gic_v3.c lib/aarch64/vgic.c LIBKVM_s390x = lib/s390x/processor.c lib/s390x/ucall.c lib/s390x/diag318_test_handler.c TEST_GEN_PROGS_x86_64 = x86_64/cr4_cpuid_sync_test diff --git a/tools/testing/selftests/kvm/aarch64/vgic_init.c b/tools/testing/selftests/kvm/aarch64/vgic_init.c index 623f31a14326..157fc24f39c5 100644 --- a/tools/testing/selftests/kvm/aarch64/vgic_init.c +++ b/tools/testing/selftests/kvm/aarch64/vgic_init.c @@ -13,11 +13,10 @@ #include "test_util.h" #include "kvm_util.h" #include "processor.h" +#include "vgic.h" #define NR_VCPUS 4 -#define REDIST_REGION_ATTR_ADDR(count, base, flags, index) (((uint64_t)(count) << 52) | \ - ((uint64_t)((base) >> 16) << 16) | ((uint64_t)(flags) << 12) | index) #define REG_OFFSET(vcpu, offset) (((uint64_t)vcpu << 32) | offset) #define GICR_TYPER 0x8 diff --git a/tools/testing/selftests/kvm/include/aarch64/vgic.h b/tools/testing/selftests/kvm/include/aarch64/vgic.h new file mode 100644 index 000000000000..0ecfb253893c --- /dev/null +++ b/tools/testing/selftests/kvm/include/aarch64/vgic.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * ARM Generic Interrupt Controller (GIC) host specific defines + */ + +#ifndef SELFTEST_KVM_VGIC_H +#define SELFTEST_KVM_VGIC_H + +#include + +#define REDIST_REGION_ATTR_ADDR(count, base, flags, index) \ + (((uint64_t)(count) << 52) | \ + ((uint64_t)((base) >> 16) << 16) | \ + ((uint64_t)(flags) << 12) | \ + index) + +int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, + uint64_t gicd_base_gpa, uint64_t gicr_base_gpa); + +#endif /* SELFTEST_KVM_VGIC_H */ diff --git a/tools/testing/selftests/kvm/lib/aarch64/vgic.c b/tools/testing/selftests/kvm/lib/aarch64/vgic.c new file mode 100644 index 000000000000..b9b271ff520d --- /dev/null +++ b/tools/testing/selftests/kvm/lib/aarch64/vgic.c @@ -0,0 +1,70 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ARM Generic Interrupt Controller (GIC) v3 host support + */ + +#include +#include +#include + +#include "kvm_util.h" +#include "../kvm_util_internal.h" +#include "vgic.h" + +/* + * vGIC-v3 default host setup + * + * Input args: + * vm - KVM VM + * nr_vcpus - 
Number of vCPUs supported by this VM + * gicd_base_gpa - Guest Physical Address of the Distributor region + * gicr_base_gpa - Guest Physical Address of the Redistributor region + * + * Output args: None + * + * Return: GIC file-descriptor or negative error code upon failure + * + * The function creates a vGIC-v3 device and maps the distributor and + * redistributor regions of the guest. Since it depends on the number of + * vCPUs for the VM, it must be called after all the vCPUs have been created. + */ +int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, + uint64_t gicd_base_gpa, uint64_t gicr_base_gpa) +{ + int gic_fd; + uint64_t redist_attr; + struct list_head *iter; + unsigned int nr_gic_pages, nr_vcpus_created = 0; + + TEST_ASSERT(nr_vcpus, "Number of vCPUs cannot be empty\n"); + + /* + * Make sure that the caller is infact calling this + * function after all the vCPUs are added. + */ + list_for_each(iter, &vm->vcpus) + nr_vcpus_created++; + TEST_ASSERT(nr_vcpus == nr_vcpus_created, + "Number of vCPUs requested (%u) doesn't match with the ones created for the VM (%u)\n", + nr_vcpus, nr_vcpus_created); + + /* Distributor setup */ + gic_fd = kvm_create_device(vm, KVM_DEV_TYPE_ARM_VGIC_V3, false); + kvm_device_access(gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, + KVM_VGIC_V3_ADDR_TYPE_DIST, &gicd_base_gpa, true); + nr_gic_pages = vm_calc_num_guest_pages(vm->mode, KVM_VGIC_V3_DIST_SIZE); + virt_map(vm, gicd_base_gpa, gicd_base_gpa, nr_gic_pages); + + /* Redistributor setup */ + redist_attr = REDIST_REGION_ATTR_ADDR(nr_vcpus, gicr_base_gpa, 0, 0); + kvm_device_access(gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, + KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &redist_attr, true); + nr_gic_pages = vm_calc_num_guest_pages(vm->mode, + KVM_VGIC_V3_REDIST_SIZE * nr_vcpus); + virt_map(vm, gicr_base_gpa, gicr_base_gpa, nr_gic_pages); + + kvm_device_access(gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL, + KVM_DEV_ARM_VGIC_CTRL_INIT, NULL, true); + + return gic_fd; +} -- cgit v1.2.3 From 4959d8650e9f4095a5df6e578377d850f1b94d2f Mon Sep 17 00:00:00 2001 From: Raghavendra Rao Ananta Date: Thu, 7 Oct 2021 23:34:38 +0000 Subject: KVM: arm64: selftests: Add arch_timer test Add a KVM selftest to validate the arch_timer functionality. Primarily, the test sets up periodic timer interrupts and validates the basic architectural expectations upon its receipt. The test provides command-line options to configure the period of the timer, number of iterations, and number of vCPUs. 
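As an example, an invocation such as the following (values chosen arbitrarily, only to illustrate the options) runs the test with 4 vCPUs, 10 iterations per stage and a 20 ms timer period:

        ./arch_timer -n 4 -i 10 -p 20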
Signed-off-by: Raghavendra Rao Ananta Reviewed-by: Andrew Jones Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211007233439.1826892-15-rananta@google.com --- tools/testing/selftests/kvm/.gitignore | 1 + tools/testing/selftests/kvm/Makefile | 1 + tools/testing/selftests/kvm/aarch64/arch_timer.c | 366 +++++++++++++++++++++++ 3 files changed, 368 insertions(+) create mode 100644 tools/testing/selftests/kvm/aarch64/arch_timer.c diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore index b8dbabe24ac2..02444fc69bae 100644 --- a/tools/testing/selftests/kvm/.gitignore +++ b/tools/testing/selftests/kvm/.gitignore @@ -1,4 +1,5 @@ # SPDX-License-Identifier: GPL-2.0-only +/aarch64/arch_timer /aarch64/debug-exceptions /aarch64/get-reg-list /aarch64/psci_cpu_on_test diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index cb5bdd4f0694..79947dde0b66 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -86,6 +86,7 @@ TEST_GEN_PROGS_x86_64 += set_memory_region_test TEST_GEN_PROGS_x86_64 += steal_time TEST_GEN_PROGS_x86_64 += kvm_binary_stats_test +TEST_GEN_PROGS_aarch64 += aarch64/arch_timer TEST_GEN_PROGS_aarch64 += aarch64/debug-exceptions TEST_GEN_PROGS_aarch64 += aarch64/get-reg-list TEST_GEN_PROGS_aarch64 += aarch64/psci_cpu_on_test diff --git a/tools/testing/selftests/kvm/aarch64/arch_timer.c b/tools/testing/selftests/kvm/aarch64/arch_timer.c new file mode 100644 index 000000000000..3b6ea6a462f4 --- /dev/null +++ b/tools/testing/selftests/kvm/aarch64/arch_timer.c @@ -0,0 +1,366 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * arch_timer.c - Tests the aarch64 timer IRQ functionality + * + * The test validates both the virtual and physical timer IRQs using + * CVAL and TVAL registers. This consitutes the four stages in the test. + * The guest's main thread configures the timer interrupt for a stage + * and waits for it to fire, with a timeout equal to the timer period. + * It asserts that the timeout doesn't exceed the timer period. + * + * On the other hand, upon receipt of an interrupt, the guest's interrupt + * handler validates the interrupt by checking if the architectural state + * is in compliance with the specifications. + * + * The test provides command-line options to configure the timer's + * period (-p), number of vCPUs (-n), and iterations per stage (-i). + * + * Copyright (c) 2021, Google LLC. 
+ */ + +#define _GNU_SOURCE + +#include +#include +#include +#include + +#include "kvm_util.h" +#include "processor.h" +#include "delay.h" +#include "arch_timer.h" +#include "gic.h" +#include "vgic.h" + +#define NR_VCPUS_DEF 4 +#define NR_TEST_ITERS_DEF 5 +#define TIMER_TEST_PERIOD_MS_DEF 10 +#define TIMER_TEST_ERR_MARGIN_US 100 + +struct test_args { + int nr_vcpus; + int nr_iter; + int timer_period_ms; +}; + +static struct test_args test_args = { + .nr_vcpus = NR_VCPUS_DEF, + .nr_iter = NR_TEST_ITERS_DEF, + .timer_period_ms = TIMER_TEST_PERIOD_MS_DEF, +}; + +#define msecs_to_usecs(msec) ((msec) * 1000LL) + +#define GICD_BASE_GPA 0x8000000ULL +#define GICR_BASE_GPA 0x80A0000ULL + +enum guest_stage { + GUEST_STAGE_VTIMER_CVAL = 1, + GUEST_STAGE_VTIMER_TVAL, + GUEST_STAGE_PTIMER_CVAL, + GUEST_STAGE_PTIMER_TVAL, + GUEST_STAGE_MAX, +}; + +/* Shared variables between host and guest */ +struct test_vcpu_shared_data { + int nr_iter; + enum guest_stage guest_stage; + uint64_t xcnt; +}; + +struct test_vcpu { + uint32_t vcpuid; + pthread_t pt_vcpu_run; + struct kvm_vm *vm; +}; + +static struct test_vcpu test_vcpu[KVM_MAX_VCPUS]; +static struct test_vcpu_shared_data vcpu_shared_data[KVM_MAX_VCPUS]; + +static int vtimer_irq, ptimer_irq; + +static void +guest_configure_timer_action(struct test_vcpu_shared_data *shared_data) +{ + switch (shared_data->guest_stage) { + case GUEST_STAGE_VTIMER_CVAL: + timer_set_next_cval_ms(VIRTUAL, test_args.timer_period_ms); + shared_data->xcnt = timer_get_cntct(VIRTUAL); + timer_set_ctl(VIRTUAL, CTL_ENABLE); + break; + case GUEST_STAGE_VTIMER_TVAL: + timer_set_next_tval_ms(VIRTUAL, test_args.timer_period_ms); + shared_data->xcnt = timer_get_cntct(VIRTUAL); + timer_set_ctl(VIRTUAL, CTL_ENABLE); + break; + case GUEST_STAGE_PTIMER_CVAL: + timer_set_next_cval_ms(PHYSICAL, test_args.timer_period_ms); + shared_data->xcnt = timer_get_cntct(PHYSICAL); + timer_set_ctl(PHYSICAL, CTL_ENABLE); + break; + case GUEST_STAGE_PTIMER_TVAL: + timer_set_next_tval_ms(PHYSICAL, test_args.timer_period_ms); + shared_data->xcnt = timer_get_cntct(PHYSICAL); + timer_set_ctl(PHYSICAL, CTL_ENABLE); + break; + default: + GUEST_ASSERT(0); + } +} + +static void guest_validate_irq(unsigned int intid, + struct test_vcpu_shared_data *shared_data) +{ + enum guest_stage stage = shared_data->guest_stage; + uint64_t xcnt = 0, xcnt_diff_us, cval = 0; + unsigned long xctl = 0; + unsigned int timer_irq = 0; + + if (stage == GUEST_STAGE_VTIMER_CVAL || + stage == GUEST_STAGE_VTIMER_TVAL) { + xctl = timer_get_ctl(VIRTUAL); + timer_set_ctl(VIRTUAL, CTL_IMASK); + xcnt = timer_get_cntct(VIRTUAL); + cval = timer_get_cval(VIRTUAL); + timer_irq = vtimer_irq; + } else if (stage == GUEST_STAGE_PTIMER_CVAL || + stage == GUEST_STAGE_PTIMER_TVAL) { + xctl = timer_get_ctl(PHYSICAL); + timer_set_ctl(PHYSICAL, CTL_IMASK); + xcnt = timer_get_cntct(PHYSICAL); + cval = timer_get_cval(PHYSICAL); + timer_irq = ptimer_irq; + } else { + GUEST_ASSERT(0); + } + + xcnt_diff_us = cycles_to_usec(xcnt - shared_data->xcnt); + + /* Make sure we are dealing with the correct timer IRQ */ + GUEST_ASSERT_2(intid == timer_irq, intid, timer_irq); + + /* Basic 'timer condition met' check */ + GUEST_ASSERT_3(xcnt >= cval, xcnt, cval, xcnt_diff_us); + GUEST_ASSERT_1(xctl & CTL_ISTATUS, xctl); +} + +static void guest_irq_handler(struct ex_regs *regs) +{ + unsigned int intid = gic_get_and_ack_irq(); + uint32_t cpu = guest_get_vcpuid(); + struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu]; + + guest_validate_irq(intid, shared_data); + + 
WRITE_ONCE(shared_data->nr_iter, shared_data->nr_iter + 1); + + gic_set_eoi(intid); +} + +static void guest_run_stage(struct test_vcpu_shared_data *shared_data, + enum guest_stage stage) +{ + uint32_t irq_iter, config_iter; + + shared_data->guest_stage = stage; + shared_data->nr_iter = 0; + + for (config_iter = 0; config_iter < test_args.nr_iter; config_iter++) { + /* Setup the next interrupt */ + guest_configure_timer_action(shared_data); + + /* Setup a timeout for the interrupt to arrive */ + udelay(msecs_to_usecs(test_args.timer_period_ms) + + TIMER_TEST_ERR_MARGIN_US); + + irq_iter = READ_ONCE(shared_data->nr_iter); + GUEST_ASSERT_2(config_iter + 1 == irq_iter, + config_iter + 1, irq_iter); + } +} + +static void guest_code(void) +{ + uint32_t cpu = guest_get_vcpuid(); + struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu]; + + local_irq_disable(); + + gic_init(GIC_V3, test_args.nr_vcpus, + (void *)GICD_BASE_GPA, (void *)GICR_BASE_GPA); + + timer_set_ctl(VIRTUAL, CTL_IMASK); + timer_set_ctl(PHYSICAL, CTL_IMASK); + + gic_irq_enable(vtimer_irq); + gic_irq_enable(ptimer_irq); + local_irq_enable(); + + guest_run_stage(shared_data, GUEST_STAGE_VTIMER_CVAL); + guest_run_stage(shared_data, GUEST_STAGE_VTIMER_TVAL); + guest_run_stage(shared_data, GUEST_STAGE_PTIMER_CVAL); + guest_run_stage(shared_data, GUEST_STAGE_PTIMER_TVAL); + + GUEST_DONE(); +} + +static void *test_vcpu_run(void *arg) +{ + struct ucall uc; + struct test_vcpu *vcpu = arg; + struct kvm_vm *vm = vcpu->vm; + uint32_t vcpuid = vcpu->vcpuid; + struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[vcpuid]; + + vcpu_run(vm, vcpuid); + + switch (get_ucall(vm, vcpuid, &uc)) { + case UCALL_SYNC: + case UCALL_DONE: + break; + case UCALL_ABORT: + sync_global_from_guest(vm, *shared_data); + TEST_FAIL("%s at %s:%ld\n\tvalues: %lu, %lu; %lu, vcpu: %u; stage: %u; iter: %u", + (const char *)uc.args[0], __FILE__, uc.args[1], + uc.args[2], uc.args[3], uc.args[4], vcpuid, + shared_data->guest_stage, shared_data->nr_iter); + break; + default: + TEST_FAIL("Unexpected guest exit\n"); + } + + return NULL; +} + +static void test_run(struct kvm_vm *vm) +{ + int i, ret; + + for (i = 0; i < test_args.nr_vcpus; i++) { + ret = pthread_create(&test_vcpu[i].pt_vcpu_run, NULL, + test_vcpu_run, &test_vcpu[i]); + TEST_ASSERT(!ret, "Failed to create vCPU-%d pthread\n", i); + } + + for (i = 0; i < test_args.nr_vcpus; i++) + pthread_join(test_vcpu[i].pt_vcpu_run, NULL); +} + +static void test_init_timer_irq(struct kvm_vm *vm) +{ + /* Timer initid should be same for all the vCPUs, so query only vCPU-0 */ + int vcpu0_fd = vcpu_get_fd(vm, 0); + + kvm_device_access(vcpu0_fd, KVM_ARM_VCPU_TIMER_CTRL, + KVM_ARM_VCPU_TIMER_IRQ_PTIMER, &ptimer_irq, false); + kvm_device_access(vcpu0_fd, KVM_ARM_VCPU_TIMER_CTRL, + KVM_ARM_VCPU_TIMER_IRQ_VTIMER, &vtimer_irq, false); + + sync_global_to_guest(vm, ptimer_irq); + sync_global_to_guest(vm, vtimer_irq); + + pr_debug("ptimer_irq: %d; vtimer_irq: %d\n", ptimer_irq, vtimer_irq); +} + +static struct kvm_vm *test_vm_create(void) +{ + struct kvm_vm *vm; + unsigned int i; + int nr_vcpus = test_args.nr_vcpus; + + vm = vm_create_default_with_vcpus(nr_vcpus, 0, 0, guest_code, NULL); + + vm_init_descriptor_tables(vm); + vm_install_exception_handler(vm, VECTOR_IRQ_CURRENT, guest_irq_handler); + + for (i = 0; i < nr_vcpus; i++) { + vcpu_init_descriptor_tables(vm, i); + + test_vcpu[i].vcpuid = i; + test_vcpu[i].vm = vm; + } + + ucall_init(vm, NULL); + test_init_timer_irq(vm); + vgic_v3_setup(vm, nr_vcpus, GICD_BASE_GPA, 
GICR_BASE_GPA); + + /* Make all the test's cmdline args visible to the guest */ + sync_global_to_guest(vm, test_args); + + return vm; +} + +static void test_print_help(char *name) +{ + pr_info("Usage: %s [-h] [-n nr_vcpus] [-i iterations] [-p timer_period_ms]\n", + name); + pr_info("\t-n: Number of vCPUs to configure (default: %u; max: %u)\n", + NR_VCPUS_DEF, KVM_MAX_VCPUS); + pr_info("\t-i: Number of iterations per stage (default: %u)\n", + NR_TEST_ITERS_DEF); + pr_info("\t-p: Periodicity (in ms) of the guest timer (default: %u)\n", + TIMER_TEST_PERIOD_MS_DEF); + pr_info("\t-h: print this help screen\n"); +} + +static bool parse_args(int argc, char *argv[]) +{ + int opt; + + while ((opt = getopt(argc, argv, "hn:i:p:")) != -1) { + switch (opt) { + case 'n': + test_args.nr_vcpus = atoi(optarg); + if (test_args.nr_vcpus <= 0) { + pr_info("Positive value needed for -n\n"); + goto err; + } else if (test_args.nr_vcpus > KVM_MAX_VCPUS) { + pr_info("Max allowed vCPUs: %u\n", + KVM_MAX_VCPUS); + goto err; + } + break; + case 'i': + test_args.nr_iter = atoi(optarg); + if (test_args.nr_iter <= 0) { + pr_info("Positive value needed for -i\n"); + goto err; + } + break; + case 'p': + test_args.timer_period_ms = atoi(optarg); + if (test_args.timer_period_ms <= 0) { + pr_info("Positive value needed for -p\n"); + goto err; + } + break; + case 'h': + default: + goto err; + } + } + + return true; + +err: + test_print_help(argv[0]); + return false; +} + +int main(int argc, char *argv[]) +{ + struct kvm_vm *vm; + + /* Tell stdout not to buffer its content */ + setbuf(stdout, NULL); + + if (!parse_args(argc, argv)) + exit(KSFT_SKIP); + + vm = test_vm_create(); + test_run(vm); + kvm_vm_free(vm); + + return 0; +} -- cgit v1.2.3 From 61f6fadbf9bd6694c72e40d9fa186ceff730ef33 Mon Sep 17 00:00:00 2001 From: Raghavendra Rao Ananta Date: Thu, 7 Oct 2021 23:34:39 +0000 Subject: KVM: arm64: selftests: arch_timer: Support vCPU migration Since the timer stack (hardware and KVM) is per-CPU, there are potential chances for races to occur when the scheduler decides to migrate a vCPU thread to a different physical CPU. Hence, include an option to stress-test this part as well by forcing the vCPUs to migrate across physical CPUs in the system at a particular rate. Originally, the bug for the fix with commit 3134cc8beb69d0d ("KVM: arm64: vgic: Resample HW pending state on deactivation") was discovered using arch_timer test with vCPU migrations and can be easily reproduced. Signed-off-by: Raghavendra Rao Ananta Reviewed-by: Andrew Jones Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211007233439.1826892-16-rananta@google.com --- tools/testing/selftests/kvm/aarch64/arch_timer.c | 115 ++++++++++++++++++++++- 1 file changed, 114 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/kvm/aarch64/arch_timer.c b/tools/testing/selftests/kvm/aarch64/arch_timer.c index 3b6ea6a462f4..bf6a45b0b8dc 100644 --- a/tools/testing/selftests/kvm/aarch64/arch_timer.c +++ b/tools/testing/selftests/kvm/aarch64/arch_timer.c @@ -14,6 +14,8 @@ * * The test provides command-line options to configure the timer's * period (-p), number of vCPUs (-n), and iterations per stage (-i). + * To stress-test the timer stack even more, an option to migrate the + * vCPUs across pCPUs (-m), at a particular rate, is also provided. * * Copyright (c) 2021, Google LLC. 
*/ @@ -24,6 +26,8 @@ #include #include #include +#include +#include #include "kvm_util.h" #include "processor.h" @@ -36,17 +40,20 @@ #define NR_TEST_ITERS_DEF 5 #define TIMER_TEST_PERIOD_MS_DEF 10 #define TIMER_TEST_ERR_MARGIN_US 100 +#define TIMER_TEST_MIGRATION_FREQ_MS 2 struct test_args { int nr_vcpus; int nr_iter; int timer_period_ms; + int migration_freq_ms; }; static struct test_args test_args = { .nr_vcpus = NR_VCPUS_DEF, .nr_iter = NR_TEST_ITERS_DEF, .timer_period_ms = TIMER_TEST_PERIOD_MS_DEF, + .migration_freq_ms = TIMER_TEST_MIGRATION_FREQ_MS, }; #define msecs_to_usecs(msec) ((msec) * 1000LL) @@ -80,6 +87,9 @@ static struct test_vcpu_shared_data vcpu_shared_data[KVM_MAX_VCPUS]; static int vtimer_irq, ptimer_irq; +static unsigned long *vcpu_done_map; +static pthread_mutex_t vcpu_done_map_lock; + static void guest_configure_timer_action(struct test_vcpu_shared_data *shared_data) { @@ -215,6 +225,11 @@ static void *test_vcpu_run(void *arg) vcpu_run(vm, vcpuid); + /* Currently, any exit from guest is an indication of completion */ + pthread_mutex_lock(&vcpu_done_map_lock); + set_bit(vcpuid, vcpu_done_map); + pthread_mutex_unlock(&vcpu_done_map_lock); + switch (get_ucall(vm, vcpuid, &uc)) { case UCALL_SYNC: case UCALL_DONE: @@ -233,9 +248,78 @@ static void *test_vcpu_run(void *arg) return NULL; } +static uint32_t test_get_pcpu(void) +{ + uint32_t pcpu; + unsigned int nproc_conf; + cpu_set_t online_cpuset; + + nproc_conf = get_nprocs_conf(); + sched_getaffinity(0, sizeof(cpu_set_t), &online_cpuset); + + /* Randomly find an available pCPU to place a vCPU on */ + do { + pcpu = rand() % nproc_conf; + } while (!CPU_ISSET(pcpu, &online_cpuset)); + + return pcpu; +} + +static int test_migrate_vcpu(struct test_vcpu *vcpu) +{ + int ret; + cpu_set_t cpuset; + uint32_t new_pcpu = test_get_pcpu(); + + CPU_ZERO(&cpuset); + CPU_SET(new_pcpu, &cpuset); + + pr_debug("Migrating vCPU: %u to pCPU: %u\n", vcpu->vcpuid, new_pcpu); + + ret = pthread_setaffinity_np(vcpu->pt_vcpu_run, + sizeof(cpuset), &cpuset); + + /* Allow the error where the vCPU thread is already finished */ + TEST_ASSERT(ret == 0 || ret == ESRCH, + "Failed to migrate the vCPU:%u to pCPU: %u; ret: %d\n", + vcpu->vcpuid, new_pcpu, ret); + + return ret; +} + +static void *test_vcpu_migration(void *arg) +{ + unsigned int i, n_done; + bool vcpu_done; + + do { + usleep(msecs_to_usecs(test_args.migration_freq_ms)); + + for (n_done = 0, i = 0; i < test_args.nr_vcpus; i++) { + pthread_mutex_lock(&vcpu_done_map_lock); + vcpu_done = test_bit(i, vcpu_done_map); + pthread_mutex_unlock(&vcpu_done_map_lock); + + if (vcpu_done) { + n_done++; + continue; + } + + test_migrate_vcpu(&test_vcpu[i]); + } + } while (test_args.nr_vcpus != n_done); + + return NULL; +} + static void test_run(struct kvm_vm *vm) { int i, ret; + pthread_t pt_vcpu_migration; + + pthread_mutex_init(&vcpu_done_map_lock, NULL); + vcpu_done_map = bitmap_zalloc(test_args.nr_vcpus); + TEST_ASSERT(vcpu_done_map, "Failed to allocate vcpu done bitmap\n"); for (i = 0; i < test_args.nr_vcpus; i++) { ret = pthread_create(&test_vcpu[i].pt_vcpu_run, NULL, @@ -243,8 +327,23 @@ static void test_run(struct kvm_vm *vm) TEST_ASSERT(!ret, "Failed to create vCPU-%d pthread\n", i); } + /* Spawn a thread to control the vCPU migrations */ + if (test_args.migration_freq_ms) { + srand(time(NULL)); + + ret = pthread_create(&pt_vcpu_migration, NULL, + test_vcpu_migration, NULL); + TEST_ASSERT(!ret, "Failed to create the migration pthread\n"); + } + + for (i = 0; i < test_args.nr_vcpus; i++) 
pthread_join(test_vcpu[i].pt_vcpu_run, NULL); + + if (test_args.migration_freq_ms) + pthread_join(pt_vcpu_migration, NULL); + + bitmap_free(vcpu_done_map); } static void test_init_timer_irq(struct kvm_vm *vm) @@ -301,6 +400,8 @@ static void test_print_help(char *name) NR_TEST_ITERS_DEF); pr_info("\t-p: Periodicity (in ms) of the guest timer (default: %u)\n", TIMER_TEST_PERIOD_MS_DEF); + pr_info("\t-m: Frequency (in ms) of vCPUs to migrate to different pCPU. 0 to turn off (default: %u)\n", + TIMER_TEST_MIGRATION_FREQ_MS); pr_info("\t-h: print this help screen\n"); } @@ -308,7 +409,7 @@ static bool parse_args(int argc, char *argv[]) { int opt; - while ((opt = getopt(argc, argv, "hn:i:p:")) != -1) { + while ((opt = getopt(argc, argv, "hn:i:p:m:")) != -1) { switch (opt) { case 'n': test_args.nr_vcpus = atoi(optarg); @@ -335,6 +436,13 @@ static bool parse_args(int argc, char *argv[]) goto err; } break; + case 'm': + test_args.migration_freq_ms = atoi(optarg); + if (test_args.migration_freq_ms < 0) { + pr_info("0 or positive value needed for -m\n"); + goto err; + } + break; case 'h': default: goto err; @@ -358,6 +466,11 @@ int main(int argc, char *argv[]) if (!parse_args(argc, argv)) exit(KSFT_SKIP); + if (test_args.migration_freq_ms && get_nprocs() < 2) { + print_skip("At least two physical CPUs needed for vCPU migration"); + exit(KSFT_SKIP); + } + vm = test_vm_create(); test_run(vm); kvm_vm_free(vm); -- cgit v1.2.3 From 3ef231670b9e9001316a426e794b2c74b8f6b4f6 Mon Sep 17 00:00:00 2001 From: Jia He Date: Tue, 7 Sep 2021 20:31:11 +0800 Subject: KVM: arm64: vgic: Add memcg accounting to vgic allocations Inspired by commit 254272ce6505 ("kvm: x86: Add memcg accounting to KVM allocations"), it would be better to make arm64 vgic consistent with common kvm codes. The memory allocations of VM scope should be charged into VM process cgroup, hence change GFP_KERNEL to GFP_KERNEL_ACCOUNT. There remain a few cases since these allocations are global, not in VM scope. 
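The conversion itself is mechanical. For example, the VM-scoped SPI array allocation in kvm_vgic_dist_init()

        dist->spis = kcalloc(nr_spis, sizeof(struct vgic_irq), GFP_KERNEL);

becomes

        dist->spis = kcalloc(nr_spis, sizeof(struct vgic_irq), GFP_KERNEL_ACCOUNT);

so that the memory is charged to the memory cgroup of the VM process.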
Signed-off-by: Jia He Reviewed-by: Oliver Upton Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20210907123112.10232-2-justin.he@arm.com --- arch/arm64/kvm/vgic/vgic-init.c | 2 +- arch/arm64/kvm/vgic/vgic-irqfd.c | 2 +- arch/arm64/kvm/vgic/vgic-its.c | 14 +++++++------- arch/arm64/kvm/vgic/vgic-mmio-v3.c | 2 +- arch/arm64/kvm/vgic/vgic-v4.c | 2 +- 5 files changed, 11 insertions(+), 11 deletions(-) diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c index 340c51d87677..0a06d0648970 100644 --- a/arch/arm64/kvm/vgic/vgic-init.c +++ b/arch/arm64/kvm/vgic/vgic-init.c @@ -134,7 +134,7 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis) struct kvm_vcpu *vcpu0 = kvm_get_vcpu(kvm, 0); int i; - dist->spis = kcalloc(nr_spis, sizeof(struct vgic_irq), GFP_KERNEL); + dist->spis = kcalloc(nr_spis, sizeof(struct vgic_irq), GFP_KERNEL_ACCOUNT); if (!dist->spis) return -ENOMEM; diff --git a/arch/arm64/kvm/vgic/vgic-irqfd.c b/arch/arm64/kvm/vgic/vgic-irqfd.c index 79f8899b234c..475059bacedf 100644 --- a/arch/arm64/kvm/vgic/vgic-irqfd.c +++ b/arch/arm64/kvm/vgic/vgic-irqfd.c @@ -139,7 +139,7 @@ int kvm_vgic_setup_default_irq_routing(struct kvm *kvm) u32 nr = dist->nr_spis; int i, ret; - entries = kcalloc(nr, sizeof(*entries), GFP_KERNEL); + entries = kcalloc(nr, sizeof(*entries), GFP_KERNEL_ACCOUNT); if (!entries) return -ENOMEM; diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c index 61728c543eb9..b99f47103056 100644 --- a/arch/arm64/kvm/vgic/vgic-its.c +++ b/arch/arm64/kvm/vgic/vgic-its.c @@ -48,7 +48,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid, if (irq) return irq; - irq = kzalloc(sizeof(struct vgic_irq), GFP_KERNEL); + irq = kzalloc(sizeof(struct vgic_irq), GFP_KERNEL_ACCOUNT); if (!irq) return ERR_PTR(-ENOMEM); @@ -332,7 +332,7 @@ int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr) * we must be careful not to overrun the array. 
*/ irq_count = READ_ONCE(dist->lpi_list_count); - intids = kmalloc_array(irq_count, sizeof(intids[0]), GFP_KERNEL); + intids = kmalloc_array(irq_count, sizeof(intids[0]), GFP_KERNEL_ACCOUNT); if (!intids) return -ENOMEM; @@ -985,7 +985,7 @@ static int vgic_its_alloc_collection(struct vgic_its *its, if (!vgic_its_check_id(its, its->baser_coll_table, coll_id, NULL)) return E_ITS_MAPC_COLLECTION_OOR; - collection = kzalloc(sizeof(*collection), GFP_KERNEL); + collection = kzalloc(sizeof(*collection), GFP_KERNEL_ACCOUNT); if (!collection) return -ENOMEM; @@ -1029,7 +1029,7 @@ static struct its_ite *vgic_its_alloc_ite(struct its_device *device, { struct its_ite *ite; - ite = kzalloc(sizeof(*ite), GFP_KERNEL); + ite = kzalloc(sizeof(*ite), GFP_KERNEL_ACCOUNT); if (!ite) return ERR_PTR(-ENOMEM); @@ -1150,7 +1150,7 @@ static struct its_device *vgic_its_alloc_device(struct vgic_its *its, { struct its_device *device; - device = kzalloc(sizeof(*device), GFP_KERNEL); + device = kzalloc(sizeof(*device), GFP_KERNEL_ACCOUNT); if (!device) return ERR_PTR(-ENOMEM); @@ -1847,7 +1847,7 @@ void vgic_lpi_translation_cache_init(struct kvm *kvm) struct vgic_translation_cache_entry *cte; /* An allocation failure is not fatal */ - cte = kzalloc(sizeof(*cte), GFP_KERNEL); + cte = kzalloc(sizeof(*cte), GFP_KERNEL_ACCOUNT); if (WARN_ON(!cte)) break; @@ -1888,7 +1888,7 @@ static int vgic_its_create(struct kvm_device *dev, u32 type) if (type != KVM_DEV_TYPE_ARM_VGIC_ITS) return -ENODEV; - its = kzalloc(sizeof(struct vgic_its), GFP_KERNEL); + its = kzalloc(sizeof(struct vgic_its), GFP_KERNEL_ACCOUNT); if (!its) return -ENOMEM; diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c index a09cdc0b953c..d3fd8a6c0c9a 100644 --- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c +++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c @@ -834,7 +834,7 @@ static int vgic_v3_alloc_redist_region(struct kvm *kvm, uint32_t index, if (vgic_v3_rdist_overlap(kvm, base, size)) return -EINVAL; - rdreg = kzalloc(sizeof(*rdreg), GFP_KERNEL); + rdreg = kzalloc(sizeof(*rdreg), GFP_KERNEL_ACCOUNT); if (!rdreg) return -ENOMEM; diff --git a/arch/arm64/kvm/vgic/vgic-v4.c b/arch/arm64/kvm/vgic/vgic-v4.c index c1845d8f5f7e..772dd15a22c7 100644 --- a/arch/arm64/kvm/vgic/vgic-v4.c +++ b/arch/arm64/kvm/vgic/vgic-v4.c @@ -246,7 +246,7 @@ int vgic_v4_init(struct kvm *kvm) nr_vcpus = atomic_read(&kvm->online_vcpus); dist->its_vm.vpes = kcalloc(nr_vcpus, sizeof(*dist->its_vm.vpes), - GFP_KERNEL); + GFP_KERNEL_ACCOUNT); if (!dist->its_vm.vpes) return -ENOMEM; -- cgit v1.2.3 From 115bae923ac8bb29ee635e0ed6b4d5a3eec9371e Mon Sep 17 00:00:00 2001 From: Jia He Date: Tue, 7 Sep 2021 20:31:12 +0800 Subject: KVM: arm64: Add memcg accounting to KVM allocations Inspired by commit 254272ce6505 ("kvm: x86: Add memcg accounting to KVM allocations"), it would be better to make arm64 KVM consistent with common kvm codes. The memory allocations of VM scope should be charged into VM process cgroup, hence change GFP_KERNEL to GFP_KERNEL_ACCOUNT. There remain a few cases since these allocations are global, not in VM scope. 
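For the non-VHE case this is a straight GFP_KERNEL to GFP_KERNEL_ACCOUNT substitution; for the VHE case the vzalloc() call in kvm_arch_alloc_vm() is rewritten as an explicit __vmalloc() so that the accounting flag can be passed along:

        return __vmalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_HIGHMEM | __GFP_ZERO);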
Signed-off-by: Jia He Reviewed-by: Oliver Upton Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20210907123112.10232-3-justin.he@arm.com --- arch/arm64/kvm/arm.c | 6 ++++-- arch/arm64/kvm/mmu.c | 2 +- arch/arm64/kvm/pmu-emul.c | 2 +- arch/arm64/kvm/reset.c | 2 +- 4 files changed, 7 insertions(+), 5 deletions(-) diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index fe102cd2e518..e3d903e1b7da 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -291,10 +291,12 @@ long kvm_arch_dev_ioctl(struct file *filp, struct kvm *kvm_arch_alloc_vm(void) { + size_t sz = sizeof(struct kvm); + if (!has_vhe()) - return kzalloc(sizeof(struct kvm), GFP_KERNEL); + return kzalloc(sz, GFP_KERNEL_ACCOUNT); - return vzalloc(sizeof(struct kvm)); + return __vmalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_HIGHMEM | __GFP_ZERO); } void kvm_arch_free_vm(struct kvm *kvm) diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index 1a94a7ca48f2..da10996dcdf1 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -512,7 +512,7 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu) return -EINVAL; } - pgt = kzalloc(sizeof(*pgt), GFP_KERNEL); + pgt = kzalloc(sizeof(*pgt), GFP_KERNEL_ACCOUNT); if (!pgt) return -ENOMEM; diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c index 2af3c37445e0..a5e4bbf5e68f 100644 --- a/arch/arm64/kvm/pmu-emul.c +++ b/arch/arm64/kvm/pmu-emul.c @@ -978,7 +978,7 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) mutex_lock(&vcpu->kvm->lock); if (!vcpu->kvm->arch.pmu_filter) { - vcpu->kvm->arch.pmu_filter = bitmap_alloc(nr_events, GFP_KERNEL); + vcpu->kvm->arch.pmu_filter = bitmap_alloc(nr_events, GFP_KERNEL_ACCOUNT); if (!vcpu->kvm->arch.pmu_filter) { mutex_unlock(&vcpu->kvm->lock); return -ENOMEM; diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index 5ce36b0a3343..acad22ebac12 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -106,7 +106,7 @@ static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu) vl > SVE_VL_ARCH_MAX)) return -EIO; - buf = kzalloc(SVE_SIG_REGS_SIZE(sve_vq_from_vl(vl)), GFP_KERNEL); + buf = kzalloc(SVE_SIG_REGS_SIZE(sve_vq_from_vl(vl)), GFP_KERNEL_ACCOUNT); if (!buf) return -ENOMEM; -- cgit v1.2.3 From a7cc099f2ec3117678adeb69749bef7e9dde3148 Mon Sep 17 00:00:00 2001 From: Andrei Vagin Date: Fri, 15 Oct 2021 09:32:21 -0700 Subject: KVM: x86/mmu: kvm_faultin_pfn has to return false if pfh is returned This looks like a typo in 8f32d5e563cb. This change didn't intend to do any functional changes. The problem was caught by gVisor tests. 
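The fix adds the missing "return false" on the success path, so the caller continues handling the fault instead of falling through into the retry exit:

        fault->pfn = __gfn_to_pfn_memslot(slot, fault->gfn, false, NULL,
                                          fault->write, &fault->map_writable,
                                          &fault->hva);
        return false;

        out_retry:
                *r = RET_PF_RETRY;
                return true;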
Fixes: 8f32d5e563cb ("KVM: x86/mmu: allow kvm_faultin_pfn to return page fault handling code") Cc: Maxim Levitsky Cc: Paolo Bonzini Signed-off-by: Andrei Vagin Message-Id: <20211015163221.472508-1-avagin@gmail.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 24a9f4c3f5e7..29e7a4bb26e9 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -3879,6 +3879,7 @@ static bool kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, fault->pfn = __gfn_to_pfn_memslot(slot, fault->gfn, false, NULL, fault->write, &fault->map_writable, &fault->hva); + return false; out_retry: *r = RET_PF_RETRY; -- cgit v1.2.3 From 8a049862c38f0c78b0e01ab5d36db1bffc832675 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 13 Oct 2021 13:03:36 +0100 Subject: KVM: arm64: Fix early exit ptrauth handling The previous rework of the early exit code to provide an EC-based decoding tree missed the fact that we have two trap paths for ptrauth: the instructions (EC_PAC) and the sysregs (EC_SYS64). Rework the handlers to call the ptrauth handling code on both paths. Signed-off-by: Marc Zyngier Reviewed-by: Fuad Tabba Tested-by: Fuad Tabba Link: https://lore.kernel.org/r/20211013120346.2926621-2-maz@kernel.org --- arch/arm64/kvm/hyp/include/hyp/switch.h | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index 481399bf9b94..4126926c3e06 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -282,14 +282,6 @@ static inline bool handle_tx2_tvm(struct kvm_vcpu *vcpu) static inline bool esr_is_ptrauth_trap(u32 esr) { - u32 ec = ESR_ELx_EC(esr); - - if (ec == ESR_ELx_EC_PAC) - return true; - - if (ec != ESR_ELx_EC_SYS64) - return false; - switch (esr_sys64_to_sysreg(esr)) { case SYS_APIAKEYLO_EL1: case SYS_APIAKEYHI_EL1: @@ -323,8 +315,7 @@ static bool kvm_hyp_handle_ptrauth(struct kvm_vcpu *vcpu, u64 *exit_code) struct kvm_cpu_context *ctxt; u64 val; - if (!vcpu_has_ptrauth(vcpu) || - !esr_is_ptrauth_trap(kvm_vcpu_get_esr(vcpu))) + if (!vcpu_has_ptrauth(vcpu)) return false; ctxt = this_cpu_ptr(&kvm_hyp_ctxt); @@ -353,6 +344,9 @@ static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code) __vgic_v3_perform_cpuif_access(vcpu) == 1) return true; + if (esr_is_ptrauth_trap(kvm_vcpu_get_esr(vcpu))) + return kvm_hyp_handle_ptrauth(vcpu, exit_code); + return false; } -- cgit v1.2.3 From ce75916749b8cb5ec795f1157a5c426f6765a48c Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 13 Oct 2021 13:03:37 +0100 Subject: KVM: arm64: pkvm: Use a single function to expose all id-regs Rather than exposing a whole set of helper functions to retrieve individual ID registers, use the existing decoding tree and expose a single helper instead. This allow a number of functions to be made static, and we now have a single entry point to maintain. 
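For example, the pKVM trap-configuration code now goes through the single decoder instead of a per-register helper (before/after at one call site):

        /* before */
        const u64 feature_ids = get_pvm_id_aa64pfr0(vcpu);

        /* after */
        const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR0_EL1);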
Signed-off-by: Marc Zyngier Reviewed-by: Andrew Jones Reviewed-by: Fuad Tabba Tested-by: Fuad Tabba Link: https://lore.kernel.org/r/20211013120346.2926621-3-maz@kernel.org --- arch/arm64/kvm/hyp/include/nvhe/sys_regs.h | 14 +---------- arch/arm64/kvm/hyp/nvhe/pkvm.c | 10 ++++---- arch/arm64/kvm/hyp/nvhe/sys_regs.c | 37 ++++++++++++++++-------------- 3 files changed, 26 insertions(+), 35 deletions(-) diff --git a/arch/arm64/kvm/hyp/include/nvhe/sys_regs.h b/arch/arm64/kvm/hyp/include/nvhe/sys_regs.h index 3288128738aa..8adc13227b1a 100644 --- a/arch/arm64/kvm/hyp/include/nvhe/sys_regs.h +++ b/arch/arm64/kvm/hyp/include/nvhe/sys_regs.h @@ -9,19 +9,7 @@ #include -u64 get_pvm_id_aa64pfr0(const struct kvm_vcpu *vcpu); -u64 get_pvm_id_aa64pfr1(const struct kvm_vcpu *vcpu); -u64 get_pvm_id_aa64zfr0(const struct kvm_vcpu *vcpu); -u64 get_pvm_id_aa64dfr0(const struct kvm_vcpu *vcpu); -u64 get_pvm_id_aa64dfr1(const struct kvm_vcpu *vcpu); -u64 get_pvm_id_aa64afr0(const struct kvm_vcpu *vcpu); -u64 get_pvm_id_aa64afr1(const struct kvm_vcpu *vcpu); -u64 get_pvm_id_aa64isar0(const struct kvm_vcpu *vcpu); -u64 get_pvm_id_aa64isar1(const struct kvm_vcpu *vcpu); -u64 get_pvm_id_aa64mmfr0(const struct kvm_vcpu *vcpu); -u64 get_pvm_id_aa64mmfr1(const struct kvm_vcpu *vcpu); -u64 get_pvm_id_aa64mmfr2(const struct kvm_vcpu *vcpu); - +u64 pvm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id); bool kvm_handle_pvm_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code); int kvm_check_pvm_sysreg_table(void); void inject_undef64(struct kvm_vcpu *vcpu); diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c index 633547cc1033..62377fa8a4cb 100644 --- a/arch/arm64/kvm/hyp/nvhe/pkvm.c +++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c @@ -15,7 +15,7 @@ */ static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu) { - const u64 feature_ids = get_pvm_id_aa64pfr0(vcpu); + const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR0_EL1); u64 hcr_set = HCR_RW; u64 hcr_clear = 0; u64 cptr_set = 0; @@ -62,7 +62,7 @@ static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu) */ static void pvm_init_traps_aa64pfr1(struct kvm_vcpu *vcpu) { - const u64 feature_ids = get_pvm_id_aa64pfr1(vcpu); + const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR1_EL1); u64 hcr_set = 0; u64 hcr_clear = 0; @@ -81,7 +81,7 @@ static void pvm_init_traps_aa64pfr1(struct kvm_vcpu *vcpu) */ static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu) { - const u64 feature_ids = get_pvm_id_aa64dfr0(vcpu); + const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64DFR0_EL1); u64 mdcr_set = 0; u64 mdcr_clear = 0; u64 cptr_set = 0; @@ -125,7 +125,7 @@ static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu) */ static void pvm_init_traps_aa64mmfr0(struct kvm_vcpu *vcpu) { - const u64 feature_ids = get_pvm_id_aa64mmfr0(vcpu); + const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64MMFR0_EL1); u64 mdcr_set = 0; /* Trap Debug Communications Channel registers */ @@ -140,7 +140,7 @@ static void pvm_init_traps_aa64mmfr0(struct kvm_vcpu *vcpu) */ static void pvm_init_traps_aa64mmfr1(struct kvm_vcpu *vcpu) { - const u64 feature_ids = get_pvm_id_aa64mmfr1(vcpu); + const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64MMFR1_EL1); u64 hcr_set = 0; /* Trap LOR */ diff --git a/arch/arm64/kvm/hyp/nvhe/sys_regs.c b/arch/arm64/kvm/hyp/nvhe/sys_regs.c index 6bde2dc5205c..f125d6a52880 100644 --- a/arch/arm64/kvm/hyp/nvhe/sys_regs.c +++ b/arch/arm64/kvm/hyp/nvhe/sys_regs.c @@ -82,7 +82,7 @@ static u64 get_restricted_features_unsigned(u64 sys_reg_val, * based on 
allowed features, system features, and KVM support. */ -u64 get_pvm_id_aa64pfr0(const struct kvm_vcpu *vcpu) +static u64 get_pvm_id_aa64pfr0(const struct kvm_vcpu *vcpu) { const struct kvm *kvm = (const struct kvm *)kern_hyp_va(vcpu->kvm); u64 set_mask = 0; @@ -103,7 +103,7 @@ u64 get_pvm_id_aa64pfr0(const struct kvm_vcpu *vcpu) return (id_aa64pfr0_el1_sys_val & allow_mask) | set_mask; } -u64 get_pvm_id_aa64pfr1(const struct kvm_vcpu *vcpu) +static u64 get_pvm_id_aa64pfr1(const struct kvm_vcpu *vcpu) { const struct kvm *kvm = (const struct kvm *)kern_hyp_va(vcpu->kvm); u64 allow_mask = PVM_ID_AA64PFR1_ALLOW; @@ -114,7 +114,7 @@ u64 get_pvm_id_aa64pfr1(const struct kvm_vcpu *vcpu) return id_aa64pfr1_el1_sys_val & allow_mask; } -u64 get_pvm_id_aa64zfr0(const struct kvm_vcpu *vcpu) +static u64 get_pvm_id_aa64zfr0(const struct kvm_vcpu *vcpu) { /* * No support for Scalable Vectors, therefore, hyp has no sanitized @@ -124,7 +124,7 @@ u64 get_pvm_id_aa64zfr0(const struct kvm_vcpu *vcpu) return 0; } -u64 get_pvm_id_aa64dfr0(const struct kvm_vcpu *vcpu) +static u64 get_pvm_id_aa64dfr0(const struct kvm_vcpu *vcpu) { /* * No support for debug, including breakpoints, and watchpoints, @@ -134,7 +134,7 @@ u64 get_pvm_id_aa64dfr0(const struct kvm_vcpu *vcpu) return 0; } -u64 get_pvm_id_aa64dfr1(const struct kvm_vcpu *vcpu) +static u64 get_pvm_id_aa64dfr1(const struct kvm_vcpu *vcpu) { /* * No support for debug, therefore, hyp has no sanitized copy of the @@ -144,7 +144,7 @@ u64 get_pvm_id_aa64dfr1(const struct kvm_vcpu *vcpu) return 0; } -u64 get_pvm_id_aa64afr0(const struct kvm_vcpu *vcpu) +static u64 get_pvm_id_aa64afr0(const struct kvm_vcpu *vcpu) { /* * No support for implementation defined features, therefore, hyp has no @@ -154,7 +154,7 @@ u64 get_pvm_id_aa64afr0(const struct kvm_vcpu *vcpu) return 0; } -u64 get_pvm_id_aa64afr1(const struct kvm_vcpu *vcpu) +static u64 get_pvm_id_aa64afr1(const struct kvm_vcpu *vcpu) { /* * No support for implementation defined features, therefore, hyp has no @@ -164,12 +164,12 @@ u64 get_pvm_id_aa64afr1(const struct kvm_vcpu *vcpu) return 0; } -u64 get_pvm_id_aa64isar0(const struct kvm_vcpu *vcpu) +static u64 get_pvm_id_aa64isar0(const struct kvm_vcpu *vcpu) { return id_aa64isar0_el1_sys_val & PVM_ID_AA64ISAR0_ALLOW; } -u64 get_pvm_id_aa64isar1(const struct kvm_vcpu *vcpu) +static u64 get_pvm_id_aa64isar1(const struct kvm_vcpu *vcpu) { u64 allow_mask = PVM_ID_AA64ISAR1_ALLOW; @@ -182,7 +182,7 @@ u64 get_pvm_id_aa64isar1(const struct kvm_vcpu *vcpu) return id_aa64isar1_el1_sys_val & allow_mask; } -u64 get_pvm_id_aa64mmfr0(const struct kvm_vcpu *vcpu) +static u64 get_pvm_id_aa64mmfr0(const struct kvm_vcpu *vcpu) { u64 set_mask; @@ -192,22 +192,19 @@ u64 get_pvm_id_aa64mmfr0(const struct kvm_vcpu *vcpu) return (id_aa64mmfr0_el1_sys_val & PVM_ID_AA64MMFR0_ALLOW) | set_mask; } -u64 get_pvm_id_aa64mmfr1(const struct kvm_vcpu *vcpu) +static u64 get_pvm_id_aa64mmfr1(const struct kvm_vcpu *vcpu) { return id_aa64mmfr1_el1_sys_val & PVM_ID_AA64MMFR1_ALLOW; } -u64 get_pvm_id_aa64mmfr2(const struct kvm_vcpu *vcpu) +static u64 get_pvm_id_aa64mmfr2(const struct kvm_vcpu *vcpu) { return id_aa64mmfr2_el1_sys_val & PVM_ID_AA64MMFR2_ALLOW; } -/* Read a sanitized cpufeature ID register by its sys_reg_desc. 
*/ -static u64 read_id_reg(const struct kvm_vcpu *vcpu, - struct sys_reg_desc const *r) +/* Read a sanitized cpufeature ID register by its encoding */ +u64 pvm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id) { - u32 id = reg_to_encoding(r); - switch (id) { case SYS_ID_AA64PFR0_EL1: return get_pvm_id_aa64pfr0(vcpu); @@ -245,6 +242,12 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu, return 0; } +static u64 read_id_reg(const struct kvm_vcpu *vcpu, + struct sys_reg_desc const *r) +{ + return pvm_read_id_reg(vcpu, reg_to_encoding(r)); +} + /* * Accessor for AArch32 feature id registers. * -- cgit v1.2.3 From 8ffb41888334c1247bd9b4d6ff6c092a90e8d0b8 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 13 Oct 2021 13:03:38 +0100 Subject: KVM: arm64: pkvm: Make the ERR/ERX*_EL1 registers RAZ/WI The ERR*/ERX* registers should be handled as RAZ/WI, and there should be no need to involve EL1 for that. Add a helper that handles such registers, and repaint the sysreg table to declare these registers as RAZ/WI. Signed-off-by: Marc Zyngier Reviewed-by: Andrew Jones Reviewed-by: Fuad Tabba Tested-by: Fuad Tabba Link: https://lore.kernel.org/r/20211013120346.2926621-4-maz@kernel.org --- arch/arm64/kvm/hyp/nvhe/sys_regs.c | 33 ++++++++++++++++++++++----------- 1 file changed, 22 insertions(+), 11 deletions(-) diff --git a/arch/arm64/kvm/hyp/nvhe/sys_regs.c b/arch/arm64/kvm/hyp/nvhe/sys_regs.c index f125d6a52880..042a1c0be7e0 100644 --- a/arch/arm64/kvm/hyp/nvhe/sys_regs.c +++ b/arch/arm64/kvm/hyp/nvhe/sys_regs.c @@ -248,6 +248,16 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu, return pvm_read_id_reg(vcpu, reg_to_encoding(r)); } +/* Handler to RAZ/WI sysregs */ +static bool pvm_access_raz_wi(struct kvm_vcpu *vcpu, struct sys_reg_params *p, + const struct sys_reg_desc *r) +{ + if (!p->is_write) + p->regval = 0; + + return true; +} + /* * Accessor for AArch32 feature id registers. * @@ -270,9 +280,7 @@ static bool pvm_access_id_aarch32(struct kvm_vcpu *vcpu, BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1), PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) > ID_AA64PFR0_ELx_64BIT_ONLY); - /* Use 0 for architecturally "unknown" values. */ - p->regval = 0; - return true; + return pvm_access_raz_wi(vcpu, p, r); } /* @@ -301,6 +309,9 @@ static bool pvm_access_id_aarch64(struct kvm_vcpu *vcpu, /* Mark the specified system register as an AArch64 feature id register. */ #define AARCH64(REG) { SYS_DESC(REG), .access = pvm_access_id_aarch64 } +/* Mark the specified system register as Read-As-Zero/Write-Ignored */ +#define RAZ_WI(REG) { SYS_DESC(REG), .access = pvm_access_raz_wi } + /* Mark the specified system register as not being handled in hyp. 
*/ #define HOST_HANDLED(REG) { SYS_DESC(REG), .access = NULL } @@ -388,14 +399,14 @@ static const struct sys_reg_desc pvm_sys_reg_descs[] = { HOST_HANDLED(SYS_AFSR1_EL1), HOST_HANDLED(SYS_ESR_EL1), - HOST_HANDLED(SYS_ERRIDR_EL1), - HOST_HANDLED(SYS_ERRSELR_EL1), - HOST_HANDLED(SYS_ERXFR_EL1), - HOST_HANDLED(SYS_ERXCTLR_EL1), - HOST_HANDLED(SYS_ERXSTATUS_EL1), - HOST_HANDLED(SYS_ERXADDR_EL1), - HOST_HANDLED(SYS_ERXMISC0_EL1), - HOST_HANDLED(SYS_ERXMISC1_EL1), + RAZ_WI(SYS_ERRIDR_EL1), + RAZ_WI(SYS_ERRSELR_EL1), + RAZ_WI(SYS_ERXFR_EL1), + RAZ_WI(SYS_ERXCTLR_EL1), + RAZ_WI(SYS_ERXSTATUS_EL1), + RAZ_WI(SYS_ERXADDR_EL1), + RAZ_WI(SYS_ERXMISC0_EL1), + RAZ_WI(SYS_ERXMISC1_EL1), HOST_HANDLED(SYS_TFSR_EL1), HOST_HANDLED(SYS_TFSRE0_EL1), -- cgit v1.2.3 From 3c90cb15e2e66bcc526d25133747b2af747f6cd8 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 13 Oct 2021 13:03:39 +0100 Subject: KVM: arm64: pkvm: Drop AArch32-specific registers All the SYS_*32_EL2 registers are AArch32-specific. Since we forbid AArch32, there is no need to handle those in any way. Signed-off-by: Marc Zyngier Reviewed-by: Andrew Jones Reviewed-by: Fuad Tabba Tested-by: Fuad Tabba Link: https://lore.kernel.org/r/20211013120346.2926621-5-maz@kernel.org --- arch/arm64/kvm/hyp/nvhe/sys_regs.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/arch/arm64/kvm/hyp/nvhe/sys_regs.c b/arch/arm64/kvm/hyp/nvhe/sys_regs.c index 042a1c0be7e0..e2b3a9e167da 100644 --- a/arch/arm64/kvm/hyp/nvhe/sys_regs.c +++ b/arch/arm64/kvm/hyp/nvhe/sys_regs.c @@ -452,10 +452,6 @@ static const struct sys_reg_desc pvm_sys_reg_descs[] = { HOST_HANDLED(SYS_CNTP_CVAL_EL0), /* Performance Monitoring Registers are restricted. */ - - HOST_HANDLED(SYS_DACR32_EL2), - HOST_HANDLED(SYS_IFSR32_EL2), - HOST_HANDLED(SYS_FPEXC32_EL2), }; /* -- cgit v1.2.3 From f3d5ccabab20c1be5838831f460f320a12e5e2c9 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 13 Oct 2021 13:03:40 +0100 Subject: KVM: arm64: pkvm: Drop sysregs that should never be routed to the host A bunch of system registers (most of them MM related) should never trap to the host under any circumstance. Keep them close to our chest. Signed-off-by: Marc Zyngier Reviewed-by: Fuad Tabba Tested-by: Fuad Tabba Link: https://lore.kernel.org/r/20211013120346.2926621-6-maz@kernel.org --- arch/arm64/kvm/hyp/nvhe/sys_regs.c | 50 -------------------------------------- 1 file changed, 50 deletions(-) diff --git a/arch/arm64/kvm/hyp/nvhe/sys_regs.c b/arch/arm64/kvm/hyp/nvhe/sys_regs.c index e2b3a9e167da..eb4ee2589316 100644 --- a/arch/arm64/kvm/hyp/nvhe/sys_regs.c +++ b/arch/arm64/kvm/hyp/nvhe/sys_regs.c @@ -371,34 +371,8 @@ static const struct sys_reg_desc pvm_sys_reg_descs[] = { AARCH64(SYS_ID_AA64MMFR1_EL1), AARCH64(SYS_ID_AA64MMFR2_EL1), - HOST_HANDLED(SYS_SCTLR_EL1), - HOST_HANDLED(SYS_ACTLR_EL1), - HOST_HANDLED(SYS_CPACR_EL1), - - HOST_HANDLED(SYS_RGSR_EL1), - HOST_HANDLED(SYS_GCR_EL1), - /* Scalable Vector Registers are restricted. 
*/ - HOST_HANDLED(SYS_TTBR0_EL1), - HOST_HANDLED(SYS_TTBR1_EL1), - HOST_HANDLED(SYS_TCR_EL1), - - HOST_HANDLED(SYS_APIAKEYLO_EL1), - HOST_HANDLED(SYS_APIAKEYHI_EL1), - HOST_HANDLED(SYS_APIBKEYLO_EL1), - HOST_HANDLED(SYS_APIBKEYHI_EL1), - HOST_HANDLED(SYS_APDAKEYLO_EL1), - HOST_HANDLED(SYS_APDAKEYHI_EL1), - HOST_HANDLED(SYS_APDBKEYLO_EL1), - HOST_HANDLED(SYS_APDBKEYHI_EL1), - HOST_HANDLED(SYS_APGAKEYLO_EL1), - HOST_HANDLED(SYS_APGAKEYHI_EL1), - - HOST_HANDLED(SYS_AFSR0_EL1), - HOST_HANDLED(SYS_AFSR1_EL1), - HOST_HANDLED(SYS_ESR_EL1), - RAZ_WI(SYS_ERRIDR_EL1), RAZ_WI(SYS_ERRSELR_EL1), RAZ_WI(SYS_ERXFR_EL1), @@ -408,31 +382,12 @@ static const struct sys_reg_desc pvm_sys_reg_descs[] = { RAZ_WI(SYS_ERXMISC0_EL1), RAZ_WI(SYS_ERXMISC1_EL1), - HOST_HANDLED(SYS_TFSR_EL1), - HOST_HANDLED(SYS_TFSRE0_EL1), - - HOST_HANDLED(SYS_FAR_EL1), - HOST_HANDLED(SYS_PAR_EL1), - /* Performance Monitoring Registers are restricted. */ - HOST_HANDLED(SYS_MAIR_EL1), - HOST_HANDLED(SYS_AMAIR_EL1), - /* Limited Ordering Regions Registers are restricted. */ - HOST_HANDLED(SYS_VBAR_EL1), - HOST_HANDLED(SYS_DISR_EL1), - /* GIC CPU Interface registers are restricted. */ - HOST_HANDLED(SYS_CONTEXTIDR_EL1), - HOST_HANDLED(SYS_TPIDR_EL1), - - HOST_HANDLED(SYS_SCXTNUM_EL1), - - HOST_HANDLED(SYS_CNTKCTL_EL1), - HOST_HANDLED(SYS_CCSIDR_EL1), HOST_HANDLED(SYS_CLIDR_EL1), HOST_HANDLED(SYS_CSSELR_EL1), @@ -440,11 +395,6 @@ static const struct sys_reg_desc pvm_sys_reg_descs[] = { /* Performance Monitoring Registers are restricted. */ - HOST_HANDLED(SYS_TPIDR_EL0), - HOST_HANDLED(SYS_TPIDRRO_EL0), - - HOST_HANDLED(SYS_SCXTNUM_EL0), - /* Activity Monitoring Registers are restricted. */ HOST_HANDLED(SYS_CNTP_TVAL_EL0), -- cgit v1.2.3 From cbca19738472be8156d854663ed724b01255c932 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 13 Oct 2021 13:03:41 +0100 Subject: KVM: arm64: pkvm: Handle GICv3 traps as required Forward accesses to the ICV_*SGI*_EL1 registers to EL1, and emulate ICV_SRE_EL1 by returning a fixed value. This should be enough to support GICv3 in a protected guest. Signed-off-by: Marc Zyngier Reviewed-by: Fuad Tabba Tested-by: Fuad Tabba Link: https://lore.kernel.org/r/20211013120346.2926621-7-maz@kernel.org --- arch/arm64/kvm/hyp/nvhe/sys_regs.c | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kvm/hyp/nvhe/sys_regs.c b/arch/arm64/kvm/hyp/nvhe/sys_regs.c index eb4ee2589316..a341bd8ef252 100644 --- a/arch/arm64/kvm/hyp/nvhe/sys_regs.c +++ b/arch/arm64/kvm/hyp/nvhe/sys_regs.c @@ -4,6 +4,8 @@ * Author: Fuad Tabba */ +#include + #include #include #include @@ -303,6 +305,17 @@ static bool pvm_access_id_aarch64(struct kvm_vcpu *vcpu, return true; } +static bool pvm_gic_read_sre(struct kvm_vcpu *vcpu, + struct sys_reg_params *p, + const struct sys_reg_desc *r) +{ + /* pVMs only support GICv3. 'nuf said. */ + if (!p->is_write) + p->regval = ICC_SRE_EL1_DIB | ICC_SRE_EL1_DFB | ICC_SRE_EL1_SRE; + + return true; +} + /* Mark the specified system register as an AArch32 feature id register. */ #define AARCH32(REG) { SYS_DESC(REG), .access = pvm_access_id_aarch32 } @@ -386,7 +399,10 @@ static const struct sys_reg_desc pvm_sys_reg_descs[] = { /* Limited Ordering Regions Registers are restricted. */ - /* GIC CPU Interface registers are restricted. 
*/ + HOST_HANDLED(SYS_ICC_SGI1R_EL1), + HOST_HANDLED(SYS_ICC_ASGI1R_EL1), + HOST_HANDLED(SYS_ICC_SGI0R_EL1), + { SYS_DESC(SYS_ICC_SRE_EL1), .access = pvm_gic_read_sre, }, HOST_HANDLED(SYS_CCSIDR_EL1), HOST_HANDLED(SYS_CLIDR_EL1), -- cgit v1.2.3 From 271b7286058da636ab6f5f47722e098ca3a0478b Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 13 Oct 2021 13:03:42 +0100 Subject: KVM: arm64: pkvm: Preserve pending SError on exit from AArch32 Don't drop a potential SError when a guest gets caught red-handed running AArch32 code. Signed-off-by: Marc Zyngier Reviewed-by: Fuad Tabba Tested-by: Fuad Tabba Link: https://lore.kernel.org/r/20211013120346.2926621-8-maz@kernel.org --- arch/arm64/kvm/hyp/nvhe/switch.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c index f25b6353a598..481c365ef144 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -256,7 +256,8 @@ static bool handle_aarch32_guest(struct kvm_vcpu *vcpu, u64 *exit_code) * protected VMs. */ vcpu->arch.target = -1; - *exit_code = ARM_EXCEPTION_IL; + *exit_code &= BIT(ARM_EXIT_WITH_SERROR_BIT); + *exit_code |= ARM_EXCEPTION_IL; return false; } -- cgit v1.2.3 From 3061725d162cad0589b012fc6413c9dd0da8f02a Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 13 Oct 2021 13:03:43 +0100 Subject: KVM: arm64: pkvm: Consolidate include files kvm_fixed_config.h is pkvm specific, and would be better placed near its users. At the same time, include/nvhe/sys_regs.h is now almost empty. Merge the two into arch/arm64/kvm/hyp/include/nvhe/fixed_config.h. Signed-off-by: Marc Zyngier Reviewed-by: Fuad Tabba Tested-by: Fuad Tabba Link: https://lore.kernel.org/r/20211013120346.2926621-9-maz@kernel.org --- arch/arm64/include/asm/kvm_fixed_config.h | 195 ------------------------ arch/arm64/kvm/hyp/include/nvhe/fixed_config.h | 200 +++++++++++++++++++++++++ arch/arm64/kvm/hyp/include/nvhe/sys_regs.h | 17 --- arch/arm64/kvm/hyp/nvhe/pkvm.c | 3 +- arch/arm64/kvm/hyp/nvhe/setup.c | 2 +- arch/arm64/kvm/hyp/nvhe/switch.c | 3 +- arch/arm64/kvm/hyp/nvhe/sys_regs.c | 3 +- 7 files changed, 204 insertions(+), 219 deletions(-) delete mode 100644 arch/arm64/include/asm/kvm_fixed_config.h create mode 100644 arch/arm64/kvm/hyp/include/nvhe/fixed_config.h delete mode 100644 arch/arm64/kvm/hyp/include/nvhe/sys_regs.h diff --git a/arch/arm64/include/asm/kvm_fixed_config.h b/arch/arm64/include/asm/kvm_fixed_config.h deleted file mode 100644 index 0ed06923f7e9..000000000000 --- a/arch/arm64/include/asm/kvm_fixed_config.h +++ /dev/null @@ -1,195 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) 2021 Google LLC - * Author: Fuad Tabba - */ - -#ifndef __ARM64_KVM_FIXED_CONFIG_H__ -#define __ARM64_KVM_FIXED_CONFIG_H__ - -#include - -/* - * This file contains definitions for features to be allowed or restricted for - * guest virtual machines, depending on the mode KVM is running in and on the - * type of guest that is running. - * - * The ALLOW masks represent a bitmask of feature fields that are allowed - * without any restrictions as long as they are supported by the system. - * - * The RESTRICT_UNSIGNED masks, if present, represent unsigned fields for - * features that are restricted to support at most the specified feature. - * - * If a feature field is not present in either, than it is not supported. 
- * - * The approach taken for protected VMs is to allow features that are: - * - Needed by common Linux distributions (e.g., floating point) - * - Trivial to support, e.g., supporting the feature does not introduce or - * require tracking of additional state in KVM - * - Cannot be trapped or prevent the guest from using anyway - */ - -/* - * Allow for protected VMs: - * - Floating-point and Advanced SIMD - * - Data Independent Timing - */ -#define PVM_ID_AA64PFR0_ALLOW (\ - ARM64_FEATURE_MASK(ID_AA64PFR0_FP) | \ - ARM64_FEATURE_MASK(ID_AA64PFR0_ASIMD) | \ - ARM64_FEATURE_MASK(ID_AA64PFR0_DIT) \ - ) - -/* - * Restrict to the following *unsigned* features for protected VMs: - * - AArch64 guests only (no support for AArch32 guests): - * AArch32 adds complexity in trap handling, emulation, condition codes, - * etc... - * - RAS (v1) - * Supported by KVM - */ -#define PVM_ID_AA64PFR0_RESTRICT_UNSIGNED (\ - FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL0), ID_AA64PFR0_ELx_64BIT_ONLY) | \ - FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1), ID_AA64PFR0_ELx_64BIT_ONLY) | \ - FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL2), ID_AA64PFR0_ELx_64BIT_ONLY) | \ - FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL3), ID_AA64PFR0_ELx_64BIT_ONLY) | \ - FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_RAS), ID_AA64PFR0_RAS_V1) \ - ) - -/* - * Allow for protected VMs: - * - Branch Target Identification - * - Speculative Store Bypassing - */ -#define PVM_ID_AA64PFR1_ALLOW (\ - ARM64_FEATURE_MASK(ID_AA64PFR1_BT) | \ - ARM64_FEATURE_MASK(ID_AA64PFR1_SSBS) \ - ) - -/* - * Allow for protected VMs: - * - Mixed-endian - * - Distinction between Secure and Non-secure Memory - * - Mixed-endian at EL0 only - * - Non-context synchronizing exception entry and exit - */ -#define PVM_ID_AA64MMFR0_ALLOW (\ - ARM64_FEATURE_MASK(ID_AA64MMFR0_BIGENDEL) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR0_SNSMEM) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR0_BIGENDEL0) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR0_EXS) \ - ) - -/* - * Restrict to the following *unsigned* features for protected VMs: - * - 40-bit IPA - * - 16-bit ASID - */ -#define PVM_ID_AA64MMFR0_RESTRICT_UNSIGNED (\ - FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64MMFR0_PARANGE), ID_AA64MMFR0_PARANGE_40) | \ - FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64MMFR0_ASID), ID_AA64MMFR0_ASID_16) \ - ) - -/* - * Allow for protected VMs: - * - Hardware translation table updates to Access flag and Dirty state - * - Number of VMID bits from CPU - * - Hierarchical Permission Disables - * - Privileged Access Never - * - SError interrupt exceptions from speculative reads - * - Enhanced Translation Synchronization - */ -#define PVM_ID_AA64MMFR1_ALLOW (\ - ARM64_FEATURE_MASK(ID_AA64MMFR1_HADBS) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR1_VMIDBITS) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR1_HPD) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR1_PAN) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR1_SPECSEI) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR1_ETS) \ - ) - -/* - * Allow for protected VMs: - * - Common not Private translations - * - User Access Override - * - IESB bit in the SCTLR_ELx registers - * - Unaligned single-copy atomicity and atomic functions - * - ESR_ELx.EC value on an exception by read access to feature ID space - * - TTL field in address operations. 
- * - Break-before-make sequences when changing translation block size - * - E0PDx mechanism - */ -#define PVM_ID_AA64MMFR2_ALLOW (\ - ARM64_FEATURE_MASK(ID_AA64MMFR2_CNP) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR2_UAO) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR2_IESB) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR2_AT) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR2_IDS) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR2_TTL) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR2_BBM) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR2_E0PD) \ - ) - -/* - * No support for Scalable Vectors for protected VMs: - * Requires additional support from KVM, e.g., context-switching and - * trapping at EL2 - */ -#define PVM_ID_AA64ZFR0_ALLOW (0ULL) - -/* - * No support for debug, including breakpoints, and watchpoints for protected - * VMs: - * The Arm architecture mandates support for at least the Armv8 debug - * architecture, which would include at least 2 hardware breakpoints and - * watchpoints. Providing that support to protected guests adds - * considerable state and complexity. Therefore, the reserved value of 0 is - * used for debug-related fields. - */ -#define PVM_ID_AA64DFR0_ALLOW (0ULL) -#define PVM_ID_AA64DFR1_ALLOW (0ULL) - -/* - * No support for implementation defined features. - */ -#define PVM_ID_AA64AFR0_ALLOW (0ULL) -#define PVM_ID_AA64AFR1_ALLOW (0ULL) - -/* - * No restrictions on instructions implemented in AArch64. - */ -#define PVM_ID_AA64ISAR0_ALLOW (\ - ARM64_FEATURE_MASK(ID_AA64ISAR0_AES) | \ - ARM64_FEATURE_MASK(ID_AA64ISAR0_SHA1) | \ - ARM64_FEATURE_MASK(ID_AA64ISAR0_SHA2) | \ - ARM64_FEATURE_MASK(ID_AA64ISAR0_CRC32) | \ - ARM64_FEATURE_MASK(ID_AA64ISAR0_ATOMICS) | \ - ARM64_FEATURE_MASK(ID_AA64ISAR0_RDM) | \ - ARM64_FEATURE_MASK(ID_AA64ISAR0_SHA3) | \ - ARM64_FEATURE_MASK(ID_AA64ISAR0_SM3) | \ - ARM64_FEATURE_MASK(ID_AA64ISAR0_SM4) | \ - ARM64_FEATURE_MASK(ID_AA64ISAR0_DP) | \ - ARM64_FEATURE_MASK(ID_AA64ISAR0_FHM) | \ - ARM64_FEATURE_MASK(ID_AA64ISAR0_TS) | \ - ARM64_FEATURE_MASK(ID_AA64ISAR0_TLB) | \ - ARM64_FEATURE_MASK(ID_AA64ISAR0_RNDR) \ - ) - -#define PVM_ID_AA64ISAR1_ALLOW (\ - ARM64_FEATURE_MASK(ID_AA64ISAR1_DPB) | \ - ARM64_FEATURE_MASK(ID_AA64ISAR1_APA) | \ - ARM64_FEATURE_MASK(ID_AA64ISAR1_API) | \ - ARM64_FEATURE_MASK(ID_AA64ISAR1_JSCVT) | \ - ARM64_FEATURE_MASK(ID_AA64ISAR1_FCMA) | \ - ARM64_FEATURE_MASK(ID_AA64ISAR1_LRCPC) | \ - ARM64_FEATURE_MASK(ID_AA64ISAR1_GPA) | \ - ARM64_FEATURE_MASK(ID_AA64ISAR1_GPI) | \ - ARM64_FEATURE_MASK(ID_AA64ISAR1_FRINTTS) | \ - ARM64_FEATURE_MASK(ID_AA64ISAR1_SB) | \ - ARM64_FEATURE_MASK(ID_AA64ISAR1_SPECRES) | \ - ARM64_FEATURE_MASK(ID_AA64ISAR1_BF16) | \ - ARM64_FEATURE_MASK(ID_AA64ISAR1_DGH) | \ - ARM64_FEATURE_MASK(ID_AA64ISAR1_I8MM) \ - ) - -#endif /* __ARM64_KVM_FIXED_CONFIG_H__ */ diff --git a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h new file mode 100644 index 000000000000..747fc79ae784 --- /dev/null +++ b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h @@ -0,0 +1,200 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2021 Google LLC + * Author: Fuad Tabba + */ + +#ifndef __ARM64_KVM_FIXED_CONFIG_H__ +#define __ARM64_KVM_FIXED_CONFIG_H__ + +#include + +/* + * This file contains definitions for features to be allowed or restricted for + * guest virtual machines, depending on the mode KVM is running in and on the + * type of guest that is running. + * + * The ALLOW masks represent a bitmask of feature fields that are allowed + * without any restrictions as long as they are supported by the system. 
+ * + * The RESTRICT_UNSIGNED masks, if present, represent unsigned fields for + * features that are restricted to support at most the specified feature. + * + * If a feature field is not present in either, than it is not supported. + * + * The approach taken for protected VMs is to allow features that are: + * - Needed by common Linux distributions (e.g., floating point) + * - Trivial to support, e.g., supporting the feature does not introduce or + * require tracking of additional state in KVM + * - Cannot be trapped or prevent the guest from using anyway + */ + +/* + * Allow for protected VMs: + * - Floating-point and Advanced SIMD + * - Data Independent Timing + */ +#define PVM_ID_AA64PFR0_ALLOW (\ + ARM64_FEATURE_MASK(ID_AA64PFR0_FP) | \ + ARM64_FEATURE_MASK(ID_AA64PFR0_ASIMD) | \ + ARM64_FEATURE_MASK(ID_AA64PFR0_DIT) \ + ) + +/* + * Restrict to the following *unsigned* features for protected VMs: + * - AArch64 guests only (no support for AArch32 guests): + * AArch32 adds complexity in trap handling, emulation, condition codes, + * etc... + * - RAS (v1) + * Supported by KVM + */ +#define PVM_ID_AA64PFR0_RESTRICT_UNSIGNED (\ + FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL0), ID_AA64PFR0_ELx_64BIT_ONLY) | \ + FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1), ID_AA64PFR0_ELx_64BIT_ONLY) | \ + FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL2), ID_AA64PFR0_ELx_64BIT_ONLY) | \ + FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL3), ID_AA64PFR0_ELx_64BIT_ONLY) | \ + FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_RAS), ID_AA64PFR0_RAS_V1) \ + ) + +/* + * Allow for protected VMs: + * - Branch Target Identification + * - Speculative Store Bypassing + */ +#define PVM_ID_AA64PFR1_ALLOW (\ + ARM64_FEATURE_MASK(ID_AA64PFR1_BT) | \ + ARM64_FEATURE_MASK(ID_AA64PFR1_SSBS) \ + ) + +/* + * Allow for protected VMs: + * - Mixed-endian + * - Distinction between Secure and Non-secure Memory + * - Mixed-endian at EL0 only + * - Non-context synchronizing exception entry and exit + */ +#define PVM_ID_AA64MMFR0_ALLOW (\ + ARM64_FEATURE_MASK(ID_AA64MMFR0_BIGENDEL) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR0_SNSMEM) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR0_BIGENDEL0) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR0_EXS) \ + ) + +/* + * Restrict to the following *unsigned* features for protected VMs: + * - 40-bit IPA + * - 16-bit ASID + */ +#define PVM_ID_AA64MMFR0_RESTRICT_UNSIGNED (\ + FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64MMFR0_PARANGE), ID_AA64MMFR0_PARANGE_40) | \ + FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64MMFR0_ASID), ID_AA64MMFR0_ASID_16) \ + ) + +/* + * Allow for protected VMs: + * - Hardware translation table updates to Access flag and Dirty state + * - Number of VMID bits from CPU + * - Hierarchical Permission Disables + * - Privileged Access Never + * - SError interrupt exceptions from speculative reads + * - Enhanced Translation Synchronization + */ +#define PVM_ID_AA64MMFR1_ALLOW (\ + ARM64_FEATURE_MASK(ID_AA64MMFR1_HADBS) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR1_VMIDBITS) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR1_HPD) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR1_PAN) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR1_SPECSEI) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR1_ETS) \ + ) + +/* + * Allow for protected VMs: + * - Common not Private translations + * - User Access Override + * - IESB bit in the SCTLR_ELx registers + * - Unaligned single-copy atomicity and atomic functions + * - ESR_ELx.EC value on an exception by read access to feature ID space + * - TTL field in address operations. 
+ * - Break-before-make sequences when changing translation block size + * - E0PDx mechanism + */ +#define PVM_ID_AA64MMFR2_ALLOW (\ + ARM64_FEATURE_MASK(ID_AA64MMFR2_CNP) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR2_UAO) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR2_IESB) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR2_AT) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR2_IDS) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR2_TTL) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR2_BBM) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR2_E0PD) \ + ) + +/* + * No support for Scalable Vectors for protected VMs: + * Requires additional support from KVM, e.g., context-switching and + * trapping at EL2 + */ +#define PVM_ID_AA64ZFR0_ALLOW (0ULL) + +/* + * No support for debug, including breakpoints, and watchpoints for protected + * VMs: + * The Arm architecture mandates support for at least the Armv8 debug + * architecture, which would include at least 2 hardware breakpoints and + * watchpoints. Providing that support to protected guests adds + * considerable state and complexity. Therefore, the reserved value of 0 is + * used for debug-related fields. + */ +#define PVM_ID_AA64DFR0_ALLOW (0ULL) +#define PVM_ID_AA64DFR1_ALLOW (0ULL) + +/* + * No support for implementation defined features. + */ +#define PVM_ID_AA64AFR0_ALLOW (0ULL) +#define PVM_ID_AA64AFR1_ALLOW (0ULL) + +/* + * No restrictions on instructions implemented in AArch64. + */ +#define PVM_ID_AA64ISAR0_ALLOW (\ + ARM64_FEATURE_MASK(ID_AA64ISAR0_AES) | \ + ARM64_FEATURE_MASK(ID_AA64ISAR0_SHA1) | \ + ARM64_FEATURE_MASK(ID_AA64ISAR0_SHA2) | \ + ARM64_FEATURE_MASK(ID_AA64ISAR0_CRC32) | \ + ARM64_FEATURE_MASK(ID_AA64ISAR0_ATOMICS) | \ + ARM64_FEATURE_MASK(ID_AA64ISAR0_RDM) | \ + ARM64_FEATURE_MASK(ID_AA64ISAR0_SHA3) | \ + ARM64_FEATURE_MASK(ID_AA64ISAR0_SM3) | \ + ARM64_FEATURE_MASK(ID_AA64ISAR0_SM4) | \ + ARM64_FEATURE_MASK(ID_AA64ISAR0_DP) | \ + ARM64_FEATURE_MASK(ID_AA64ISAR0_FHM) | \ + ARM64_FEATURE_MASK(ID_AA64ISAR0_TS) | \ + ARM64_FEATURE_MASK(ID_AA64ISAR0_TLB) | \ + ARM64_FEATURE_MASK(ID_AA64ISAR0_RNDR) \ + ) + +#define PVM_ID_AA64ISAR1_ALLOW (\ + ARM64_FEATURE_MASK(ID_AA64ISAR1_DPB) | \ + ARM64_FEATURE_MASK(ID_AA64ISAR1_APA) | \ + ARM64_FEATURE_MASK(ID_AA64ISAR1_API) | \ + ARM64_FEATURE_MASK(ID_AA64ISAR1_JSCVT) | \ + ARM64_FEATURE_MASK(ID_AA64ISAR1_FCMA) | \ + ARM64_FEATURE_MASK(ID_AA64ISAR1_LRCPC) | \ + ARM64_FEATURE_MASK(ID_AA64ISAR1_GPA) | \ + ARM64_FEATURE_MASK(ID_AA64ISAR1_GPI) | \ + ARM64_FEATURE_MASK(ID_AA64ISAR1_FRINTTS) | \ + ARM64_FEATURE_MASK(ID_AA64ISAR1_SB) | \ + ARM64_FEATURE_MASK(ID_AA64ISAR1_SPECRES) | \ + ARM64_FEATURE_MASK(ID_AA64ISAR1_BF16) | \ + ARM64_FEATURE_MASK(ID_AA64ISAR1_DGH) | \ + ARM64_FEATURE_MASK(ID_AA64ISAR1_I8MM) \ + ) + +u64 pvm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id); +bool kvm_handle_pvm_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code); +int kvm_check_pvm_sysreg_table(void); +void inject_undef64(struct kvm_vcpu *vcpu); + +#endif /* __ARM64_KVM_FIXED_CONFIG_H__ */ diff --git a/arch/arm64/kvm/hyp/include/nvhe/sys_regs.h b/arch/arm64/kvm/hyp/include/nvhe/sys_regs.h deleted file mode 100644 index 8adc13227b1a..000000000000 --- a/arch/arm64/kvm/hyp/include/nvhe/sys_regs.h +++ /dev/null @@ -1,17 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) 2021 Google LLC - * Author: Fuad Tabba - */ - -#ifndef __ARM64_KVM_NVHE_SYS_REGS_H__ -#define __ARM64_KVM_NVHE_SYS_REGS_H__ - -#include - -u64 pvm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id); -bool kvm_handle_pvm_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code); -int kvm_check_pvm_sysreg_table(void); 
-void inject_undef64(struct kvm_vcpu *vcpu); - -#endif /* __ARM64_KVM_NVHE_SYS_REGS_H__ */ diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c index 62377fa8a4cb..99c8d8b73e70 100644 --- a/arch/arm64/kvm/hyp/nvhe/pkvm.c +++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c @@ -6,8 +6,7 @@ #include #include -#include -#include +#include #include /* diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c index c85ff64e63f2..862c7b514e20 100644 --- a/arch/arm64/kvm/hyp/nvhe/setup.c +++ b/arch/arm64/kvm/hyp/nvhe/setup.c @@ -10,11 +10,11 @@ #include #include +#include #include #include #include #include -#include #include struct hyp_pool hpool; diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c index 481c365ef144..317dba6a018d 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -20,7 +20,6 @@ #include #include #include -#include #include #include #include @@ -28,8 +27,8 @@ #include #include +#include #include -#include /* Non-VHE specific context */ DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data); diff --git a/arch/arm64/kvm/hyp/nvhe/sys_regs.c b/arch/arm64/kvm/hyp/nvhe/sys_regs.c index a341bd8ef252..052f885e65b2 100644 --- a/arch/arm64/kvm/hyp/nvhe/sys_regs.c +++ b/arch/arm64/kvm/hyp/nvhe/sys_regs.c @@ -7,12 +7,11 @@ #include #include -#include #include #include -#include +#include #include "../../sys_regs.h" -- cgit v1.2.3 From 746bdeadc53b0d58fddea6442591f5ec3eeabe7d Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 13 Oct 2021 13:03:44 +0100 Subject: KVM: arm64: pkvm: Move kvm_handle_pvm_restricted around Place kvm_handle_pvm_restricted() next to its little friends such as kvm_handle_pvm_sysreg(). This allows to make inject_undef64() static. Signed-off-by: Marc Zyngier Reviewed-by: Fuad Tabba Tested-by: Fuad Tabba Link: https://lore.kernel.org/r/20211013120346.2926621-10-maz@kernel.org --- arch/arm64/kvm/hyp/include/nvhe/fixed_config.h | 2 +- arch/arm64/kvm/hyp/nvhe/switch.c | 12 ------------ arch/arm64/kvm/hyp/nvhe/sys_regs.c | 14 +++++++++++++- 3 files changed, 14 insertions(+), 14 deletions(-) diff --git a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h index 747fc79ae784..eea1f6a53723 100644 --- a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h +++ b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h @@ -194,7 +194,7 @@ u64 pvm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id); bool kvm_handle_pvm_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code); +bool kvm_handle_pvm_restricted(struct kvm_vcpu *vcpu, u64 *exit_code); int kvm_check_pvm_sysreg_table(void); -void inject_undef64(struct kvm_vcpu *vcpu); #endif /* __ARM64_KVM_FIXED_CONFIG_H__ */ diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c index 317dba6a018d..be6889e33b2b 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -159,18 +159,6 @@ static void __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt) write_sysreg(pmu->events_host, pmcntenset_el0); } -/** - * Handler for protected VM restricted exceptions. - * - * Inject an undefined exception into the guest and return true to indicate that - * the hypervisor has handled the exit, and control should go back to the guest. - */ -static bool kvm_handle_pvm_restricted(struct kvm_vcpu *vcpu, u64 *exit_code) -{ - inject_undef64(vcpu); - return true; -} - /** * Handler for protected VM MSR, MRS or System instruction execution in AArch64. 
* diff --git a/arch/arm64/kvm/hyp/nvhe/sys_regs.c b/arch/arm64/kvm/hyp/nvhe/sys_regs.c index 052f885e65b2..3787ee6fb1a2 100644 --- a/arch/arm64/kvm/hyp/nvhe/sys_regs.c +++ b/arch/arm64/kvm/hyp/nvhe/sys_regs.c @@ -30,7 +30,7 @@ u64 id_aa64mmfr2_el1_sys_val; * Inject an unknown/undefined exception to an AArch64 guest while most of its * sysregs are live. */ -void inject_undef64(struct kvm_vcpu *vcpu) +static void inject_undef64(struct kvm_vcpu *vcpu) { u32 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT); @@ -473,3 +473,15 @@ bool kvm_handle_pvm_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code) return true; } + +/** + * Handler for protected VM restricted exceptions. + * + * Inject an undefined exception into the guest and return true to indicate that + * the hypervisor has handled the exit, and control should go back to the guest. + */ +bool kvm_handle_pvm_restricted(struct kvm_vcpu *vcpu, u64 *exit_code) +{ + inject_undef64(vcpu); + return true; +} -- cgit v1.2.3 From 0c7639cc838263b6e38b3af76755d574f15cdf41 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 13 Oct 2021 13:03:45 +0100 Subject: KVM: arm64: pkvm: Pass vpcu instead of kvm to kvm_get_exit_handler_array() Passing a VM pointer around is odd, and results in extra work on VHE. Follow the rest of the design that uses the vcpu instead, and let the nVHE code look into the struct kvm as required. Signed-off-by: Marc Zyngier Reviewed-by: Fuad Tabba Tested-by: Fuad Tabba Link: https://lore.kernel.org/r/20211013120346.2926621-11-maz@kernel.org --- arch/arm64/kvm/hyp/include/hyp/switch.h | 4 ++-- arch/arm64/kvm/hyp/nvhe/switch.c | 4 ++-- arch/arm64/kvm/hyp/vhe/switch.c | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index 4126926c3e06..c6e98c7e918b 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -397,7 +397,7 @@ static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code) typedef bool (*exit_handler_fn)(struct kvm_vcpu *, u64 *); -static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm *kvm); +static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu); /* * Allow the hypervisor to handle the exit with an exit handler if it has one. 
@@ -407,7 +407,7 @@ static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm *kvm); */ static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code) { - const exit_handler_fn *handlers = kvm_get_exit_handler_array(kern_hyp_va(vcpu->kvm)); + const exit_handler_fn *handlers = kvm_get_exit_handler_array(vcpu); exit_handler_fn fn; fn = handlers[kvm_vcpu_trap_get_class(vcpu)]; diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c index be6889e33b2b..50c7d48e0fa0 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -211,9 +211,9 @@ static const exit_handler_fn pvm_exit_handlers[] = { [ESR_ELx_EC_PAC] = kvm_hyp_handle_ptrauth, }; -static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm *kvm) +static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu) { - if (unlikely(kvm_vm_is_protected(kvm))) + if (unlikely(kvm_vm_is_protected(kern_hyp_va(vcpu->kvm)))) return pvm_exit_handlers; return hyp_exit_handlers; diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c index f6fb97accf65..5a2cb5d9bc4b 100644 --- a/arch/arm64/kvm/hyp/vhe/switch.c +++ b/arch/arm64/kvm/hyp/vhe/switch.c @@ -107,7 +107,7 @@ static const exit_handler_fn hyp_exit_handlers[] = { [ESR_ELx_EC_PAC] = kvm_hyp_handle_ptrauth, }; -static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm *kvm) +static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu) { return hyp_exit_handlers; } -- cgit v1.2.3 From 07305590114af81817148d181f1eb0af294e40d6 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 13 Oct 2021 13:03:46 +0100 Subject: KVM: arm64: pkvm: Give priority to standard traps over pvm handling Checking for pvm handling first means that we cannot handle ptrauth traps or apply any of the workarounds (GICv3 or TX2 #219). Flip the order around. Signed-off-by: Marc Zyngier Reviewed-by: Fuad Tabba Tested-by: Fuad Tabba Link: https://lore.kernel.org/r/20211013120346.2926621-12-maz@kernel.org --- arch/arm64/kvm/hyp/nvhe/switch.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c index 50c7d48e0fa0..c0e3fed26d93 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -167,10 +167,13 @@ static void __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt) */ static bool kvm_handle_pvm_sys64(struct kvm_vcpu *vcpu, u64 *exit_code) { - if (kvm_handle_pvm_sysreg(vcpu, exit_code)) - return true; - - return kvm_hyp_handle_sysreg(vcpu, exit_code); + /* + * Make sure we handle the exit for workarounds and ptrauth + * before the pKVM handling, as the latter could decide to + * UNDEF. 
+ */ + return (kvm_hyp_handle_sysreg(vcpu, exit_code) || + kvm_handle_pvm_sysreg(vcpu, exit_code)); } /** -- cgit v1.2.3 From 3d5e7a28b1ea2d603dea478e58e37ce75b9597ab Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 15 Oct 2021 04:50:01 -0400 Subject: KVM: x86: avoid warning with -Wbitwise-instead-of-logical This is a new warning in clang top-of-tree (will be clang 14): In file included from arch/x86/kvm/mmu/mmu.c:27: arch/x86/kvm/mmu/spte.h:318:9: error: use of bitwise '|' with boolean operands [-Werror,-Wbitwise-instead-of-logical] return __is_bad_mt_xwr(rsvd_check, spte) | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ || arch/x86/kvm/mmu/spte.h:318:9: note: cast one or both operands to int to silence this warning The code is fine, but change it anyway to shut up this clever clogs of a compiler. Reported-by: torvic9@mailbox.org Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/spte.h | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h index 7c0b09461349..fcda5abc7dc0 100644 --- a/arch/x86/kvm/mmu/spte.h +++ b/arch/x86/kvm/mmu/spte.h @@ -310,12 +310,7 @@ static inline bool __is_bad_mt_xwr(struct rsvd_bits_validate *rsvd_check, static __always_inline bool is_rsvd_spte(struct rsvd_bits_validate *rsvd_check, u64 spte, int level) { - /* - * Use a bitwise-OR instead of a logical-OR to aggregate the reserved - * bits and EPT's invalid memtype/XWR checks to avoid an extra Jcc - * (this is extremely unlikely to be short-circuited as true). - */ - return __is_bad_mt_xwr(rsvd_check, spte) | + return __is_bad_mt_xwr(rsvd_check, spte) || __is_rsvd_bits_set(rsvd_check, spte, level); } -- cgit v1.2.3 From c68dc1b577eabd5605c6c7c08f3e07ae18d30d5d Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Thu, 16 Sep 2021 18:15:35 +0000 Subject: KVM: x86: Report host tsc and realtime values in KVM_GET_CLOCK Handling the migration of TSCs correctly is difficult, in part because Linux does not provide userspace with the ability to retrieve a (TSC, realtime) clock pair for a single instant in time. In lieu of a more convenient facility, KVM can report similar information in the kvm_clock structure. Provide userspace with a host TSC & realtime pair iff the realtime clock is based on the TSC. If userspace provides KVM_SET_CLOCK with a valid realtime value, advance the KVM clock by the amount of elapsed time. Do not step the KVM clock backwards, though, as it is a monotonic oscillator. Suggested-by: Paolo Bonzini Signed-off-by: Oliver Upton Signed-off-by: Paolo Bonzini Message-Id: <20210916181538.968978-5-oupton@google.com> Signed-off-by: Paolo Bonzini --- Documentation/virt/kvm/api.rst | 48 +++++++++++++++++++++++++++++++++-------- arch/x86/include/asm/kvm_host.h | 3 +++ arch/x86/kvm/x86.c | 47 ++++++++++++++++++++++++++++------------ include/uapi/linux/kvm.h | 7 +++++- 4 files changed, 81 insertions(+), 24 deletions(-) diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index 0c0bf26426b3..3b093d6dbe22 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -1010,20 +1010,37 @@ such as migration. When KVM_CAP_ADJUST_CLOCK is passed to KVM_CHECK_EXTENSION, it returns the set of bits that KVM can return in struct kvm_clock_data's flag member. -The only flag defined now is KVM_CLOCK_TSC_STABLE. If set, the returned -value is the exact kvmclock value seen by all VCPUs at the instant -when KVM_GET_CLOCK was called. 
If clear, the returned value is simply -CLOCK_MONOTONIC plus a constant offset; the offset can be modified -with KVM_SET_CLOCK. KVM will try to make all VCPUs follow this clock, -but the exact value read by each VCPU could differ, because the host -TSC is not stable. +The following flags are defined: + +KVM_CLOCK_TSC_STABLE + If set, the returned value is the exact kvmclock + value seen by all VCPUs at the instant when KVM_GET_CLOCK was called. + If clear, the returned value is simply CLOCK_MONOTONIC plus a constant + offset; the offset can be modified with KVM_SET_CLOCK. KVM will try + to make all VCPUs follow this clock, but the exact value read by each + VCPU could differ, because the host TSC is not stable. + +KVM_CLOCK_REALTIME + If set, the `realtime` field in the kvm_clock_data + structure is populated with the value of the host's real time + clocksource at the instant when KVM_GET_CLOCK was called. If clear, + the `realtime` field does not contain a value. + +KVM_CLOCK_HOST_TSC + If set, the `host_tsc` field in the kvm_clock_data + structure is populated with the value of the host's timestamp counter (TSC) + at the instant when KVM_GET_CLOCK was called. If clear, the `host_tsc` field + does not contain a value. :: struct kvm_clock_data { __u64 clock; /* kvmclock current value */ __u32 flags; - __u32 pad[9]; + __u32 pad0; + __u64 realtime; + __u64 host_tsc; + __u32 pad[4]; }; @@ -1040,12 +1057,25 @@ Sets the current timestamp of kvmclock to the value specified in its parameter. In conjunction with KVM_GET_CLOCK, it is used to ensure monotonicity on scenarios such as migration. +The following flags can be passed: + +KVM_CLOCK_REALTIME + If set, KVM will compare the value of the `realtime` field + with the value of the host's real time clocksource at the instant when + KVM_SET_CLOCK was called. The difference in elapsed time is added to the final + kvmclock value that will be provided to guests. + +Other flags returned by ``KVM_GET_CLOCK`` are accepted but ignored. 
+ :: struct kvm_clock_data { __u64 clock; /* kvmclock current value */ __u32 flags; - __u32 pad[9]; + __u32 pad0; + __u64 realtime; + __u64 host_tsc; + __u32 pad[4]; }; diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 5271fce6cd65..8b16fa504cd4 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1942,4 +1942,7 @@ int kvm_cpu_dirty_log_size(void); int alloc_all_memslots_rmaps(struct kvm *kvm); +#define KVM_CLOCK_VALID_FLAGS \ + (KVM_CLOCK_TSC_STABLE | KVM_CLOCK_REALTIME | KVM_CLOCK_HOST_TSC) + #endif /* _ASM_X86_KVM_HOST_H */ diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 3ea4f6ef2474..d3631d149187 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2787,6 +2787,7 @@ static void get_kvmclock(struct kvm *kvm, struct kvm_clock_data *data) struct pvclock_vcpu_time_info hv_clock; unsigned long flags; + data->flags = 0; spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags); if (!ka->use_master_clock) { spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags); @@ -2803,10 +2804,20 @@ static void get_kvmclock(struct kvm *kvm, struct kvm_clock_data *data) get_cpu(); if (__this_cpu_read(cpu_tsc_khz)) { +#ifdef CONFIG_X86_64 + struct timespec64 ts; + + if (kvm_get_walltime_and_clockread(&ts, &data->host_tsc)) { + data->realtime = ts.tv_nsec + NSEC_PER_SEC * ts.tv_sec; + data->flags |= KVM_CLOCK_REALTIME | KVM_CLOCK_HOST_TSC; + } else +#endif + data->host_tsc = rdtsc(); + kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL, &hv_clock.tsc_shift, &hv_clock.tsc_to_system_mul); - data->clock = __pvclock_read_cycles(&hv_clock, rdtsc()); + data->clock = __pvclock_read_cycles(&hv_clock, data->host_tsc); } else { data->clock = get_kvmclock_base_ns() + ka->kvmclock_offset; } @@ -2818,12 +2829,6 @@ u64 get_kvmclock_ns(struct kvm *kvm) { struct kvm_clock_data data; - /* - * Zero flags as it's accessed RMW, leave everything else uninitialized - * as clock is always written and no other fields are consumed. - */ - data.flags = 0; - get_kvmclock(kvm, &data); return data.clock; } @@ -4050,7 +4055,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) r = KVM_SYNC_X86_VALID_FIELDS; break; case KVM_CAP_ADJUST_CLOCK: - r = KVM_CLOCK_TSC_STABLE; + r = KVM_CLOCK_VALID_FLAGS; break; case KVM_CAP_X86_DISABLE_EXITS: r |= KVM_X86_DISABLE_EXITS_HLT | KVM_X86_DISABLE_EXITS_PAUSE | @@ -5847,12 +5852,16 @@ static int kvm_vm_ioctl_set_clock(struct kvm *kvm, void __user *argp) { struct kvm_arch *ka = &kvm->arch; struct kvm_clock_data data; - u64 now_ns; + u64 now_raw_ns; if (copy_from_user(&data, argp, sizeof(data))) return -EFAULT; - if (data.flags) + /* + * Only KVM_CLOCK_REALTIME is used, but allow passing the + * result of KVM_GET_CLOCK back to KVM_SET_CLOCK. + */ + if (data.flags & ~KVM_CLOCK_VALID_FLAGS) return -EINVAL; kvm_hv_invalidate_tsc_page(kvm); @@ -5866,11 +5875,21 @@ static int kvm_vm_ioctl_set_clock(struct kvm *kvm, void __user *argp) * is slightly ahead) here we risk going negative on unsigned * 'system_time' when 'data.clock' is very small. */ - if (kvm->arch.use_master_clock) - now_ns = ka->master_kernel_ns; + if (data.flags & KVM_CLOCK_REALTIME) { + u64 now_real_ns = ktime_get_real_ns(); + + /* + * Avoid stepping the kvmclock backwards. 
+ */ + if (now_real_ns > data.realtime) + data.clock += now_real_ns - data.realtime; + } + + if (ka->use_master_clock) + now_raw_ns = ka->master_kernel_ns; else - now_ns = get_kvmclock_base_ns(); - ka->kvmclock_offset = data.clock - now_ns; + now_raw_ns = get_kvmclock_base_ns(); + ka->kvmclock_offset = data.clock - now_raw_ns; kvm_end_pvclock_update(kvm); return 0; } diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 322b4b588d75..5ca5ffe16cb4 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1231,11 +1231,16 @@ struct kvm_irqfd { /* Do not use 1, KVM_CHECK_EXTENSION returned it before we had flags. */ #define KVM_CLOCK_TSC_STABLE 2 +#define KVM_CLOCK_REALTIME (1 << 2) +#define KVM_CLOCK_HOST_TSC (1 << 3) struct kvm_clock_data { __u64 clock; __u32 flags; - __u32 pad[9]; + __u32 pad0; + __u64 realtime; + __u64 host_tsc; + __u32 pad[4]; }; /* For KVM_CAP_SW_TLB */ -- cgit v1.2.3 From 869b44211adc878be7149cc4ae57207f924f7390 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Thu, 16 Sep 2021 18:15:36 +0000 Subject: kvm: x86: protect masterclock with a seqcount Protect the reference point for kvmclock with a seqcount, so that kvmclock updates for all vCPUs can proceed in parallel. Xen runstate updates will also run in parallel and not bounce the kvmclock cacheline. Of the variables that were protected by pvclock_gtod_sync_lock, nr_vcpus_matched_tsc is different because it is updated outside pvclock_update_vm_gtod_copy and read inside it. Therefore, we need to keep it protected by a spinlock. In fact it must now be a raw spinlock, because pvclock_update_vm_gtod_copy, being the write-side of a seqcount, is non-preemptible. Since we already have tsc_write_lock which is a raw spinlock, we can just use tsc_write_lock as the lock that protects the write-side of the seqcount. Co-developed-by: Oliver Upton Message-Id: <20210916181538.968978-6-oupton@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 7 +++- arch/x86/kvm/x86.c | 82 ++++++++++++++++++++++------------------- 2 files changed, 51 insertions(+), 38 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 8b16fa504cd4..68ac06fef4fa 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1086,6 +1086,11 @@ struct kvm_arch { unsigned long irq_sources_bitmap; s64 kvmclock_offset; + + /* + * This also protects nr_vcpus_matched_tsc which is read from a + * preemption-disabled region, so it must be a raw spinlock. 
+ */ raw_spinlock_t tsc_write_lock; u64 last_tsc_nsec; u64 last_tsc_write; @@ -1096,7 +1101,7 @@ struct kvm_arch { u64 cur_tsc_generation; int nr_vcpus_matched_tsc; - spinlock_t pvclock_gtod_sync_lock; + seqcount_raw_spinlock_t pvclock_sc; bool use_master_clock; u64 master_kernel_ns; u64 master_cycle_now; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index d3631d149187..d7588f6c90c8 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2521,9 +2521,7 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data) vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write; kvm_vcpu_write_tsc_offset(vcpu, offset); - raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); - spin_lock_irqsave(&kvm->arch.pvclock_gtod_sync_lock, flags); if (!matched) { kvm->arch.nr_vcpus_matched_tsc = 0; } else if (!already_matched) { @@ -2531,7 +2529,7 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data) } kvm_track_tsc_matching(vcpu); - spin_unlock_irqrestore(&kvm->arch.pvclock_gtod_sync_lock, flags); + raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); } static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, @@ -2719,6 +2717,7 @@ static void pvclock_update_vm_gtod_copy(struct kvm *kvm) int vclock_mode; bool host_tsc_clocksource, vcpus_matched; + lockdep_assert_held(&kvm->arch.tsc_write_lock); vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 == atomic_read(&kvm->online_vcpus)); @@ -2748,14 +2747,18 @@ static void kvm_make_mclock_inprogress_request(struct kvm *kvm) kvm_make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS); } -static void kvm_start_pvclock_update(struct kvm *kvm) +static void __kvm_start_pvclock_update(struct kvm *kvm) { - struct kvm_arch *ka = &kvm->arch; + raw_spin_lock_irq(&kvm->arch.tsc_write_lock); + write_seqcount_begin(&kvm->arch.pvclock_sc); +} +static void kvm_start_pvclock_update(struct kvm *kvm) +{ kvm_make_mclock_inprogress_request(kvm); /* no guest entries from this point */ - spin_lock_irq(&ka->pvclock_gtod_sync_lock); + __kvm_start_pvclock_update(kvm); } static void kvm_end_pvclock_update(struct kvm *kvm) @@ -2764,7 +2767,8 @@ static void kvm_end_pvclock_update(struct kvm *kvm) struct kvm_vcpu *vcpu; int i; - spin_unlock_irq(&ka->pvclock_gtod_sync_lock); + write_seqcount_end(&ka->pvclock_sc); + raw_spin_unlock_irq(&ka->tsc_write_lock); kvm_for_each_vcpu(i, vcpu, kvm) kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); @@ -2781,29 +2785,17 @@ static void kvm_update_masterclock(struct kvm *kvm) kvm_end_pvclock_update(kvm); } -static void get_kvmclock(struct kvm *kvm, struct kvm_clock_data *data) +/* Called within read_seqcount_begin/retry for kvm->pvclock_sc. 
*/ +static void __get_kvmclock(struct kvm *kvm, struct kvm_clock_data *data) { struct kvm_arch *ka = &kvm->arch; struct pvclock_vcpu_time_info hv_clock; - unsigned long flags; - - data->flags = 0; - spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags); - if (!ka->use_master_clock) { - spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags); - data->clock = get_kvmclock_base_ns() + ka->kvmclock_offset; - return; - } - - data->flags |= KVM_CLOCK_TSC_STABLE; - hv_clock.tsc_timestamp = ka->master_cycle_now; - hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset; - spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags); /* both __this_cpu_read() and rdtsc() should be on the same cpu */ get_cpu(); - if (__this_cpu_read(cpu_tsc_khz)) { + data->flags = 0; + if (ka->use_master_clock && __this_cpu_read(cpu_tsc_khz)) { #ifdef CONFIG_X86_64 struct timespec64 ts; @@ -2814,6 +2806,9 @@ static void get_kvmclock(struct kvm *kvm, struct kvm_clock_data *data) #endif data->host_tsc = rdtsc(); + data->flags |= KVM_CLOCK_TSC_STABLE; + hv_clock.tsc_timestamp = ka->master_cycle_now; + hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset; kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL, &hv_clock.tsc_shift, &hv_clock.tsc_to_system_mul); @@ -2825,6 +2820,17 @@ static void get_kvmclock(struct kvm *kvm, struct kvm_clock_data *data) put_cpu(); } +static void get_kvmclock(struct kvm *kvm, struct kvm_clock_data *data) +{ + struct kvm_arch *ka = &kvm->arch; + unsigned seq; + + do { + seq = read_seqcount_begin(&ka->pvclock_sc); + __get_kvmclock(kvm, data); + } while (read_seqcount_retry(&ka->pvclock_sc, seq)); +} + u64 get_kvmclock_ns(struct kvm *kvm) { struct kvm_clock_data data; @@ -2895,6 +2901,7 @@ static void kvm_setup_pvclock_page(struct kvm_vcpu *v, static int kvm_guest_time_update(struct kvm_vcpu *v) { unsigned long flags, tgt_tsc_khz; + unsigned seq; struct kvm_vcpu_arch *vcpu = &v->arch; struct kvm_arch *ka = &v->kvm->arch; s64 kernel_ns; @@ -2909,13 +2916,14 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) * If the host uses TSC clock, then passthrough TSC as stable * to the guest. 
*/ - spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags); - use_master_clock = ka->use_master_clock; - if (use_master_clock) { - host_tsc = ka->master_cycle_now; - kernel_ns = ka->master_kernel_ns; - } - spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags); + do { + seq = read_seqcount_begin(&ka->pvclock_sc); + use_master_clock = ka->use_master_clock; + if (use_master_clock) { + host_tsc = ka->master_cycle_now; + kernel_ns = ka->master_kernel_ns; + } + } while (read_seqcount_retry(&ka->pvclock_sc, seq)); /* Keep irq disabled to prevent changes to the clock */ local_irq_save(flags); @@ -5838,9 +5846,8 @@ int kvm_arch_pm_notifier(struct kvm *kvm, unsigned long state) static int kvm_vm_ioctl_get_clock(struct kvm *kvm, void __user *argp) { - struct kvm_clock_data data; + struct kvm_clock_data data = { 0 }; - memset(&data, 0, sizeof(data)); get_kvmclock(kvm, &data); if (copy_to_user(argp, &data, sizeof(data))) return -EFAULT; @@ -8153,9 +8160,7 @@ static void kvm_hyperv_tsc_notifier(void) kvm_max_guest_tsc_khz = tsc_khz; list_for_each_entry(kvm, &vm_list, vm_list) { - struct kvm_arch *ka = &kvm->arch; - - spin_lock_irq(&ka->pvclock_gtod_sync_lock); + __kvm_start_pvclock_update(kvm); pvclock_update_vm_gtod_copy(kvm); kvm_end_pvclock_update(kvm); } @@ -11156,6 +11161,7 @@ void kvm_arch_free_vm(struct kvm *kvm) int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) { int ret; + unsigned long flags; if (type) return -EINVAL; @@ -11179,10 +11185,12 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) raw_spin_lock_init(&kvm->arch.tsc_write_lock); mutex_init(&kvm->arch.apic_map_lock); - spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock); - + seqcount_raw_spinlock_init(&kvm->arch.pvclock_sc, &kvm->arch.tsc_write_lock); kvm->arch.kvmclock_offset = -get_kvmclock_base_ns(); + + raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); pvclock_update_vm_gtod_copy(kvm); + raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); kvm->arch.guest_can_read_msr_platform_info = true; -- cgit v1.2.3 From 58d4277be9b66d8048054c8e57214a86b6b15da9 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Thu, 16 Sep 2021 18:15:37 +0000 Subject: KVM: x86: Refactor tsc synchronization code Refactor kvm_synchronize_tsc to make a new function that allows callers to specify TSC parameters (offset, value, nanoseconds, etc.) explicitly for the sake of participating in TSC synchronization. Signed-off-by: Oliver Upton Message-Id: <20210916181538.968978-7-oupton@google.com> [Make sure kvm->arch.cur_tsc_generation and vcpu->arch.this_tsc_generation are equal at the end of __kvm_synchronize_tsc, if matched is false. Reported by Maxim Levitsky. - Paolo] Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 95 ++++++++++++++++++++++++++++++------------------------ 1 file changed, 53 insertions(+), 42 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index d7588f6c90c8..c74a44f2a38c 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2435,13 +2435,63 @@ static inline bool kvm_check_tsc_unstable(void) return check_tsc_unstable(); } +/* + * Infers attempts to synchronize the guest's tsc from host writes. Sets the + * offset for the vcpu and tracks the TSC matching generation that the vcpu + * participates in. 
+ */ +static void __kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 offset, u64 tsc, + u64 ns, bool matched) +{ + struct kvm *kvm = vcpu->kvm; + + lockdep_assert_held(&kvm->arch.tsc_write_lock); + + /* + * We also track th most recent recorded KHZ, write and time to + * allow the matching interval to be extended at each write. + */ + kvm->arch.last_tsc_nsec = ns; + kvm->arch.last_tsc_write = tsc; + kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz; + + vcpu->arch.last_guest_tsc = tsc; + + kvm_vcpu_write_tsc_offset(vcpu, offset); + + if (!matched) { + /* + * We split periods of matched TSC writes into generations. + * For each generation, we track the original measured + * nanosecond time, offset, and write, so if TSCs are in + * sync, we can match exact offset, and if not, we can match + * exact software computation in compute_guest_tsc() + * + * These values are tracked in kvm->arch.cur_xxx variables. + */ + kvm->arch.cur_tsc_generation++; + kvm->arch.cur_tsc_nsec = ns; + kvm->arch.cur_tsc_write = tsc; + kvm->arch.cur_tsc_offset = offset; + kvm->arch.nr_vcpus_matched_tsc = 0; + } else if (vcpu->arch.this_tsc_generation != kvm->arch.cur_tsc_generation) { + kvm->arch.nr_vcpus_matched_tsc++; + } + + /* Keep track of which generation this VCPU has synchronized to */ + vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation; + vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec; + vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write; + + kvm_track_tsc_matching(vcpu); +} + static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data) { struct kvm *kvm = vcpu->kvm; u64 offset, ns, elapsed; unsigned long flags; - bool matched; - bool already_matched; + bool matched = false; bool synchronizing = false; raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); @@ -2487,48 +2537,9 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data) offset = kvm_compute_l1_tsc_offset(vcpu, data); } matched = true; - already_matched = (vcpu->arch.this_tsc_generation == kvm->arch.cur_tsc_generation); - } else { - /* - * We split periods of matched TSC writes into generations. - * For each generation, we track the original measured - * nanosecond time, offset, and write, so if TSCs are in - * sync, we can match exact offset, and if not, we can match - * exact software computation in compute_guest_tsc() - * - * These values are tracked in kvm->arch.cur_xxx variables. - */ - kvm->arch.cur_tsc_generation++; - kvm->arch.cur_tsc_nsec = ns; - kvm->arch.cur_tsc_write = data; - kvm->arch.cur_tsc_offset = offset; - matched = false; - } - - /* - * We also track th most recent recorded KHZ, write and time to - * allow the matching interval to be extended at each write. 
- */ - kvm->arch.last_tsc_nsec = ns; - kvm->arch.last_tsc_write = data; - kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz; - - vcpu->arch.last_guest_tsc = data; - - /* Keep track of which generation this VCPU has synchronized to */ - vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation; - vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec; - vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write; - - kvm_vcpu_write_tsc_offset(vcpu, offset); - - if (!matched) { - kvm->arch.nr_vcpus_matched_tsc = 0; - } else if (!already_matched) { - kvm->arch.nr_vcpus_matched_tsc++; } - kvm_track_tsc_matching(vcpu); + __kvm_synchronize_tsc(vcpu, offset, data, ns, matched); raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); } -- cgit v1.2.3 From 828ca89628bfcb1b8f27535025f69dd00eb55207 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Thu, 16 Sep 2021 18:15:38 +0000 Subject: KVM: x86: Expose TSC offset controls to userspace To date, VMM-directed TSC synchronization and migration has been a bit messy. KVM has some baked-in heuristics around TSC writes to infer if the VMM is attempting to synchronize. This is problematic, as it depends on host userspace writing to the guest's TSC within 1 second of the last write. A much cleaner approach to configuring the guest's views of the TSC is to simply migrate the TSC offset for every vCPU. Offsets are idempotent, and thus not subject to change depending on when the VMM actually reads/writes values from/to KVM. The VMM can then read the TSC once with KVM_GET_CLOCK to capture a (realtime, host_tsc) pair at the instant when the guest is paused. Cc: David Matlack Cc: Sean Christopherson Signed-off-by: Oliver Upton Signed-off-by: Paolo Bonzini Message-Id: <20210916181538.968978-8-oupton@google.com> Signed-off-by: Paolo Bonzini --- Documentation/virt/kvm/devices/vcpu.rst | 57 ++++++++++++++++ arch/x86/include/asm/kvm_host.h | 1 + arch/x86/include/uapi/asm/kvm.h | 4 ++ arch/x86/kvm/x86.c | 116 ++++++++++++++++++++++++++++++++ 4 files changed, 178 insertions(+) diff --git a/Documentation/virt/kvm/devices/vcpu.rst b/Documentation/virt/kvm/devices/vcpu.rst index 2acec3b9ef65..3b399d727c11 100644 --- a/Documentation/virt/kvm/devices/vcpu.rst +++ b/Documentation/virt/kvm/devices/vcpu.rst @@ -161,3 +161,60 @@ Specifies the base address of the stolen time structure for this VCPU. The base address must be 64 byte aligned and exist within a valid guest memory region. See Documentation/virt/kvm/arm/pvtime.rst for more information including the layout of the stolen time structure. + +4. GROUP: KVM_VCPU_TSC_CTRL +=========================== + +:Architectures: x86 + +4.1 ATTRIBUTE: KVM_VCPU_TSC_OFFSET + +:Parameters: 64-bit unsigned TSC offset + +Returns: + + ======= ====================================== + -EFAULT Error reading/writing the provided + parameter address. + -ENXIO Attribute not supported + ======= ====================================== + +Specifies the guest's TSC offset relative to the host's TSC. The guest's +TSC is then derived by the following equation: + + guest_tsc = host_tsc + KVM_VCPU_TSC_OFFSET + +This attribute is useful for the precise migration of a guest's TSC. The +following describes a possible algorithm to use for the migration of a +guest's TSC: + +From the source VMM process: + +1. Invoke the KVM_GET_CLOCK ioctl to record the host TSC (t_0), + kvmclock nanoseconds (k_0), and realtime nanoseconds (r_0). + +2. Read the KVM_VCPU_TSC_OFFSET attribute for every vCPU to record the + guest TSC offset (off_n). + +3. 
Invoke the KVM_GET_TSC_KHZ ioctl to record the frequency of the + guest's TSC (freq). + +From the destination VMM process: + +4. Invoke the KVM_SET_CLOCK ioctl, providing the kvmclock nanoseconds + (k_0) and realtime nanoseconds (r_0) in their respective fields. + Ensure that the KVM_CLOCK_REALTIME flag is set in the provided + structure. KVM will advance the VM's kvmclock to account for elapsed + time since recording the clock values. + +5. Invoke the KVM_GET_CLOCK ioctl to record the host TSC (t_1) and + kvmclock nanoseconds (k_1). + +6. Adjust the guest TSC offsets for every vCPU to account for (1) time + elapsed since recording state and (2) difference in TSCs between the + source and destination machine: + + new_off_n = t_0 + off_n + (k_1 - k_0) * freq - t_1 + +7. Write the KVM_VCPU_TSC_OFFSET attribute for every vCPU with the + respective value derived in the previous step. diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 68ac06fef4fa..88f0326c184a 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1095,6 +1095,7 @@ struct kvm_arch { u64 last_tsc_nsec; u64 last_tsc_write; u32 last_tsc_khz; + u64 last_tsc_offset; u64 cur_tsc_nsec; u64 cur_tsc_write; u64 cur_tsc_offset; diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h index 2ef1f6513c68..5a776a08f78c 100644 --- a/arch/x86/include/uapi/asm/kvm.h +++ b/arch/x86/include/uapi/asm/kvm.h @@ -504,4 +504,8 @@ struct kvm_pmu_event_filter { #define KVM_PMU_EVENT_ALLOW 0 #define KVM_PMU_EVENT_DENY 1 +/* for KVM_{GET,SET,HAS}_DEVICE_ATTR */ +#define KVM_VCPU_TSC_CTRL 0 /* control group for the timestamp counter (TSC) */ +#define KVM_VCPU_TSC_OFFSET 0 /* attribute for the TSC offset */ + #endif /* _ASM_X86_KVM_H */ diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index c74a44f2a38c..afdc5d186c50 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2454,6 +2454,7 @@ static void __kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 offset, u64 tsc, kvm->arch.last_tsc_nsec = ns; kvm->arch.last_tsc_write = tsc; kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz; + kvm->arch.last_tsc_offset = offset; vcpu->arch.last_guest_tsc = tsc; @@ -4054,6 +4055,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_VM_COPY_ENC_CONTEXT_FROM: case KVM_CAP_SREGS2: case KVM_CAP_EXIT_ON_EMULATION_FAILURE: + case KVM_CAP_VCPU_ATTRIBUTES: r = 1; break; case KVM_CAP_EXIT_HYPERCALL: @@ -4918,6 +4920,115 @@ static int kvm_set_guest_paused(struct kvm_vcpu *vcpu) return 0; } +static int kvm_arch_tsc_has_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + int r; + + switch (attr->attr) { + case KVM_VCPU_TSC_OFFSET: + r = 0; + break; + default: + r = -ENXIO; + } + + return r; +} + +static int kvm_arch_tsc_get_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + u64 __user *uaddr = (u64 __user *)(unsigned long)attr->addr; + int r; + + if ((u64)(unsigned long)uaddr != attr->addr) + return -EFAULT; + + switch (attr->attr) { + case KVM_VCPU_TSC_OFFSET: + r = -EFAULT; + if (put_user(vcpu->arch.l1_tsc_offset, uaddr)) + break; + r = 0; + break; + default: + r = -ENXIO; + } + + return r; +} + +static int kvm_arch_tsc_set_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + u64 __user *uaddr = (u64 __user *)(unsigned long)attr->addr; + struct kvm *kvm = vcpu->kvm; + int r; + + if ((u64)(unsigned long)uaddr != attr->addr) + return -EFAULT; + + switch (attr->attr) { + case KVM_VCPU_TSC_OFFSET: { + u64 offset, tsc, ns; + 
unsigned long flags; + bool matched; + + r = -EFAULT; + if (get_user(offset, uaddr)) + break; + + raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); + + matched = (vcpu->arch.virtual_tsc_khz && + kvm->arch.last_tsc_khz == vcpu->arch.virtual_tsc_khz && + kvm->arch.last_tsc_offset == offset); + + tsc = kvm_scale_tsc(vcpu, rdtsc(), vcpu->arch.l1_tsc_scaling_ratio) + offset; + ns = get_kvmclock_base_ns(); + + __kvm_synchronize_tsc(vcpu, offset, tsc, ns, matched); + raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); + + r = 0; + break; + } + default: + r = -ENXIO; + } + + return r; +} + +static int kvm_vcpu_ioctl_device_attr(struct kvm_vcpu *vcpu, + unsigned int ioctl, + void __user *argp) +{ + struct kvm_device_attr attr; + int r; + + if (copy_from_user(&attr, argp, sizeof(attr))) + return -EFAULT; + + if (attr.group != KVM_VCPU_TSC_CTRL) + return -ENXIO; + + switch (ioctl) { + case KVM_HAS_DEVICE_ATTR: + r = kvm_arch_tsc_has_attr(vcpu, &attr); + break; + case KVM_GET_DEVICE_ATTR: + r = kvm_arch_tsc_get_attr(vcpu, &attr); + break; + case KVM_SET_DEVICE_ATTR: + r = kvm_arch_tsc_set_attr(vcpu, &attr); + break; + } + + return r; +} + static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, struct kvm_enable_cap *cap) { @@ -5372,6 +5483,11 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = __set_sregs2(vcpu, u.sregs2); break; } + case KVM_HAS_DEVICE_ATTR: + case KVM_GET_DEVICE_ATTR: + case KVM_SET_DEVICE_ATTR: + r = kvm_vcpu_ioctl_device_attr(vcpu, ioctl, argp); + break; default: r = -EINVAL; } -- cgit v1.2.3 From 500065393400fc88d20c6d655717c00efc07dc52 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Thu, 16 Sep 2021 18:15:47 +0000 Subject: tools: arch: x86: pull in pvclock headers Copy over approximately clean versions of the pvclock headers into tools. Reconcile headers/symbols missing in tools that are unneeded. Signed-off-by: Oliver Upton Message-Id: <20210916181555.973085-2-oupton@google.com> Signed-off-by: Paolo Bonzini --- tools/arch/x86/include/asm/pvclock-abi.h | 48 ++++++++++++++ tools/arch/x86/include/asm/pvclock.h | 103 +++++++++++++++++++++++++++++++ 2 files changed, 151 insertions(+) create mode 100644 tools/arch/x86/include/asm/pvclock-abi.h create mode 100644 tools/arch/x86/include/asm/pvclock.h diff --git a/tools/arch/x86/include/asm/pvclock-abi.h b/tools/arch/x86/include/asm/pvclock-abi.h new file mode 100644 index 000000000000..1436226efe3e --- /dev/null +++ b/tools/arch/x86/include/asm/pvclock-abi.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_X86_PVCLOCK_ABI_H +#define _ASM_X86_PVCLOCK_ABI_H +#ifndef __ASSEMBLY__ + +/* + * These structs MUST NOT be changed. + * They are the ABI between hypervisor and guest OS. + * Both Xen and KVM are using this. + * + * pvclock_vcpu_time_info holds the system time and the tsc timestamp + * of the last update. So the guest can use the tsc delta to get a + * more precise system time. There is one per virtual cpu. + * + * pvclock_wall_clock references the point in time when the system + * time was zero (usually boot time), thus the guest calculates the + * current wall clock by adding the system time. + * + * Protocol for the "version" fields is: hypervisor raises it (making + * it uneven) before it starts updating the fields and raises it again + * (making it even) when it is done. Thus the guest can make sure the + * time values it got are consistent by checking the version before + * and after reading them. 
+ */ + +struct pvclock_vcpu_time_info { + u32 version; + u32 pad0; + u64 tsc_timestamp; + u64 system_time; + u32 tsc_to_system_mul; + s8 tsc_shift; + u8 flags; + u8 pad[2]; +} __attribute__((__packed__)); /* 32 bytes */ + +struct pvclock_wall_clock { + u32 version; + u32 sec; + u32 nsec; +} __attribute__((__packed__)); + +#define PVCLOCK_TSC_STABLE_BIT (1 << 0) +#define PVCLOCK_GUEST_STOPPED (1 << 1) +/* PVCLOCK_COUNTS_FROM_ZERO broke ABI and can't be used anymore. */ +#define PVCLOCK_COUNTS_FROM_ZERO (1 << 2) +#endif /* __ASSEMBLY__ */ +#endif /* _ASM_X86_PVCLOCK_ABI_H */ diff --git a/tools/arch/x86/include/asm/pvclock.h b/tools/arch/x86/include/asm/pvclock.h new file mode 100644 index 000000000000..2628f9a6330b --- /dev/null +++ b/tools/arch/x86/include/asm/pvclock.h @@ -0,0 +1,103 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_X86_PVCLOCK_H +#define _ASM_X86_PVCLOCK_H + +#include +#include + +/* some helper functions for xen and kvm pv clock sources */ +u64 pvclock_clocksource_read(struct pvclock_vcpu_time_info *src); +u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src); +void pvclock_set_flags(u8 flags); +unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src); +void pvclock_resume(void); + +void pvclock_touch_watchdogs(void); + +static __always_inline +unsigned pvclock_read_begin(const struct pvclock_vcpu_time_info *src) +{ + unsigned version = src->version & ~1; + /* Make sure that the version is read before the data. */ + rmb(); + return version; +} + +static __always_inline +bool pvclock_read_retry(const struct pvclock_vcpu_time_info *src, + unsigned version) +{ + /* Make sure that the version is re-read after the data. */ + rmb(); + return version != src->version; +} + +/* + * Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction, + * yielding a 64-bit result. + */ +static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift) +{ + u64 product; +#ifdef __i386__ + u32 tmp1, tmp2; +#else + unsigned long tmp; +#endif + + if (shift < 0) + delta >>= -shift; + else + delta <<= shift; + +#ifdef __i386__ + __asm__ ( + "mul %5 ; " + "mov %4,%%eax ; " + "mov %%edx,%4 ; " + "mul %5 ; " + "xor %5,%5 ; " + "add %4,%%eax ; " + "adc %5,%%edx ; " + : "=A" (product), "=r" (tmp1), "=r" (tmp2) + : "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) ); +#elif defined(__x86_64__) + __asm__ ( + "mulq %[mul_frac] ; shrd $32, %[hi], %[lo]" + : [lo]"=a"(product), + [hi]"=d"(tmp) + : "0"(delta), + [mul_frac]"rm"((u64)mul_frac)); +#else +#error implement me! 
+#endif + + return product; +} + +static __always_inline +u64 __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src, u64 tsc) +{ + u64 delta = tsc - src->tsc_timestamp; + u64 offset = pvclock_scale_delta(delta, src->tsc_to_system_mul, + src->tsc_shift); + return src->system_time + offset; +} + +struct pvclock_vsyscall_time_info { + struct pvclock_vcpu_time_info pvti; +} __attribute__((__aligned__(64))); + +#define PVTI_SIZE sizeof(struct pvclock_vsyscall_time_info) + +#ifdef CONFIG_PARAVIRT_CLOCK +void pvclock_set_pvti_cpu0_va(struct pvclock_vsyscall_time_info *pvti); +struct pvclock_vsyscall_time_info *pvclock_get_pvti_cpu0_va(void); +#else +static inline struct pvclock_vsyscall_time_info *pvclock_get_pvti_cpu0_va(void) +{ + return NULL; +} +#endif + +#endif /* _ASM_X86_PVCLOCK_H */ -- cgit v1.2.3 From 61fb1c54853dda9a95b785f36220fa8ff39bac20 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Thu, 16 Sep 2021 18:15:48 +0000 Subject: selftests: KVM: Add test for KVM_{GET,SET}_CLOCK Add a selftest for the new KVM clock UAPI that was introduced. Ensure that the KVM clock is consistent between userspace and the guest, and that the difference in realtime will only ever cause the KVM clock to advance forward. Cc: Andrew Jones Signed-off-by: Oliver Upton Message-Id: <20210916181555.973085-3-oupton@google.com> Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/.gitignore | 1 + tools/testing/selftests/kvm/Makefile | 1 + tools/testing/selftests/kvm/include/kvm_util.h | 2 + .../testing/selftests/kvm/x86_64/kvm_clock_test.c | 203 +++++++++++++++++++++ 4 files changed, 207 insertions(+) create mode 100644 tools/testing/selftests/kvm/x86_64/kvm_clock_test.c diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore index b8dbabe24ac2..4bcff9c1a524 100644 --- a/tools/testing/selftests/kvm/.gitignore +++ b/tools/testing/selftests/kvm/.gitignore @@ -12,6 +12,7 @@ /x86_64/emulator_error_test /x86_64/get_cpuid_test /x86_64/get_msr_index_features +/x86_64/kvm_clock_test /x86_64/kvm_pv_test /x86_64/hyperv_clock /x86_64/hyperv_cpuid diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index d1774f461393..3995940d0011 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -46,6 +46,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/get_cpuid_test TEST_GEN_PROGS_x86_64 += x86_64/hyperv_clock TEST_GEN_PROGS_x86_64 += x86_64/hyperv_cpuid TEST_GEN_PROGS_x86_64 += x86_64/hyperv_features +TEST_GEN_PROGS_x86_64 += x86_64/kvm_clock_test TEST_GEN_PROGS_x86_64 += x86_64/kvm_pv_test TEST_GEN_PROGS_x86_64 += x86_64/mmio_warning_test TEST_GEN_PROGS_x86_64 += x86_64/mmu_role_test diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h index 010b59b13917..a8ac5d52e17b 100644 --- a/tools/testing/selftests/kvm/include/kvm_util.h +++ b/tools/testing/selftests/kvm/include/kvm_util.h @@ -19,6 +19,8 @@ #define KVM_DEV_PATH "/dev/kvm" #define KVM_MAX_VCPUS 512 +#define NSEC_PER_SEC 1000000000L + /* * Callers of kvm_util only have an incomplete/opaque description of the * structure kvm_util is using to maintain the state of a VM. diff --git a/tools/testing/selftests/kvm/x86_64/kvm_clock_test.c b/tools/testing/selftests/kvm/x86_64/kvm_clock_test.c new file mode 100644 index 000000000000..97731454f3f3 --- /dev/null +++ b/tools/testing/selftests/kvm/x86_64/kvm_clock_test.c @@ -0,0 +1,203 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2021, Google LLC. 
+ * + * Tests for adjusting the KVM clock from userspace + */ +#include +#include +#include +#include +#include +#include +#include + +#include "test_util.h" +#include "kvm_util.h" +#include "processor.h" + +#define VCPU_ID 0 + +struct test_case { + uint64_t kvmclock_base; + int64_t realtime_offset; +}; + +static struct test_case test_cases[] = { + { .kvmclock_base = 0 }, + { .kvmclock_base = 180 * NSEC_PER_SEC }, + { .kvmclock_base = 0, .realtime_offset = -180 * NSEC_PER_SEC }, + { .kvmclock_base = 0, .realtime_offset = 180 * NSEC_PER_SEC }, +}; + +#define GUEST_SYNC_CLOCK(__stage, __val) \ + GUEST_SYNC_ARGS(__stage, __val, 0, 0, 0) + +static void guest_main(vm_paddr_t pvti_pa, struct pvclock_vcpu_time_info *pvti) +{ + int i; + + wrmsr(MSR_KVM_SYSTEM_TIME_NEW, pvti_pa | KVM_MSR_ENABLED); + for (i = 0; i < ARRAY_SIZE(test_cases); i++) + GUEST_SYNC_CLOCK(i, __pvclock_read_cycles(pvti, rdtsc())); +} + +#define EXPECTED_FLAGS (KVM_CLOCK_REALTIME | KVM_CLOCK_HOST_TSC) + +static inline void assert_flags(struct kvm_clock_data *data) +{ + TEST_ASSERT((data->flags & EXPECTED_FLAGS) == EXPECTED_FLAGS, + "unexpected clock data flags: %x (want set: %x)", + data->flags, EXPECTED_FLAGS); +} + +static void handle_sync(struct ucall *uc, struct kvm_clock_data *start, + struct kvm_clock_data *end) +{ + uint64_t obs, exp_lo, exp_hi; + + obs = uc->args[2]; + exp_lo = start->clock; + exp_hi = end->clock; + + assert_flags(start); + assert_flags(end); + + TEST_ASSERT(exp_lo <= obs && obs <= exp_hi, + "unexpected kvm-clock value: %"PRIu64" expected range: [%"PRIu64", %"PRIu64"]", + obs, exp_lo, exp_hi); + + pr_info("kvm-clock value: %"PRIu64" expected range [%"PRIu64", %"PRIu64"]\n", + obs, exp_lo, exp_hi); +} + +static void handle_abort(struct ucall *uc) +{ + TEST_FAIL("%s at %s:%ld", (const char *)uc->args[0], + __FILE__, uc->args[1]); +} + +static void setup_clock(struct kvm_vm *vm, struct test_case *test_case) +{ + struct kvm_clock_data data; + + memset(&data, 0, sizeof(data)); + + data.clock = test_case->kvmclock_base; + if (test_case->realtime_offset) { + struct timespec ts; + int r; + + data.flags |= KVM_CLOCK_REALTIME; + do { + r = clock_gettime(CLOCK_REALTIME, &ts); + if (!r) + break; + } while (errno == EINTR); + + TEST_ASSERT(!r, "clock_gettime() failed: %d\n", r); + + data.realtime = ts.tv_sec * NSEC_PER_SEC; + data.realtime += ts.tv_nsec; + data.realtime += test_case->realtime_offset; + } + + vm_ioctl(vm, KVM_SET_CLOCK, &data); +} + +static void enter_guest(struct kvm_vm *vm) +{ + struct kvm_clock_data start, end; + struct kvm_run *run; + struct ucall uc; + int i, r; + + run = vcpu_state(vm, VCPU_ID); + + for (i = 0; i < ARRAY_SIZE(test_cases); i++) { + setup_clock(vm, &test_cases[i]); + + vm_ioctl(vm, KVM_GET_CLOCK, &start); + + r = _vcpu_run(vm, VCPU_ID); + vm_ioctl(vm, KVM_GET_CLOCK, &end); + + TEST_ASSERT(!r, "vcpu_run failed: %d\n", r); + TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, + "unexpected exit reason: %u (%s)", + run->exit_reason, exit_reason_str(run->exit_reason)); + + switch (get_ucall(vm, VCPU_ID, &uc)) { + case UCALL_SYNC: + handle_sync(&uc, &start, &end); + break; + case UCALL_ABORT: + handle_abort(&uc); + return; + default: + TEST_ASSERT(0, "unhandled ucall: %ld\n", uc.cmd); + } + } +} + +#define CLOCKSOURCE_PATH "/sys/devices/system/clocksource/clocksource0/current_clocksource" + +static void check_clocksource(void) +{ + char *clk_name; + struct stat st; + FILE *fp; + + fp = fopen(CLOCKSOURCE_PATH, "r"); + if (!fp) { + pr_info("failed to open clocksource file: %d; assuming 
TSC.\n", + errno); + return; + } + + if (fstat(fileno(fp), &st)) { + pr_info("failed to stat clocksource file: %d; assuming TSC.\n", + errno); + goto out; + } + + clk_name = malloc(st.st_size); + TEST_ASSERT(clk_name, "failed to allocate buffer to read file\n"); + + if (!fgets(clk_name, st.st_size, fp)) { + pr_info("failed to read clocksource file: %d; assuming TSC.\n", + ferror(fp)); + goto out; + } + + TEST_ASSERT(!strncmp(clk_name, "tsc\n", st.st_size), + "clocksource not supported: %s", clk_name); +out: + fclose(fp); +} + +int main(void) +{ + vm_vaddr_t pvti_gva; + vm_paddr_t pvti_gpa; + struct kvm_vm *vm; + int flags; + + flags = kvm_check_cap(KVM_CAP_ADJUST_CLOCK); + if (!(flags & KVM_CLOCK_REALTIME)) { + print_skip("KVM_CLOCK_REALTIME not supported; flags: %x", + flags); + exit(KSFT_SKIP); + } + + check_clocksource(); + + vm = vm_create_default(VCPU_ID, 0, guest_main); + + pvti_gva = vm_vaddr_alloc(vm, getpagesize(), 0x10000); + pvti_gpa = addr_gva2gpa(vm, pvti_gva); + vcpu_args_set(vm, VCPU_ID, 2, pvti_gpa, pvti_gva); + + enter_guest(vm); + kvm_vm_free(vm); +} -- cgit v1.2.3 From c1901feef5bbe87713815ad0eff6331ae7225973 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Thu, 16 Sep 2021 18:15:49 +0000 Subject: selftests: KVM: Fix kvm device helper ioctl assertions The KVM_CREATE_DEVICE and KVM_{GET,SET}_DEVICE_ATTR ioctls are defined to return a value of zero on success. As such, tighten the assertions in the helper functions to only pass if the return code is zero. Suggested-by: Andrew Jones Reviewed-by: Andrew Jones Signed-off-by: Oliver Upton Message-Id: <20210916181555.973085-4-oupton@google.com> Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/lib/kvm_util.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index 10a8ed691c66..0ffc2d39c80d 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c @@ -1984,7 +1984,7 @@ int kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr) { int ret = _kvm_device_check_attr(dev_fd, group, attr); - TEST_ASSERT(ret >= 0, "KVM_HAS_DEVICE_ATTR failed, rc: %i errno: %i", ret, errno); + TEST_ASSERT(!ret, "KVM_HAS_DEVICE_ATTR failed, rc: %i errno: %i", ret, errno); return ret; } @@ -2008,7 +2008,7 @@ int kvm_create_device(struct kvm_vm *vm, uint64_t type, bool test) ret = _kvm_create_device(vm, type, test, &fd); if (!test) { - TEST_ASSERT(ret >= 0, + TEST_ASSERT(!ret, "KVM_CREATE_DEVICE IOCTL failed, rc: %i errno: %i", ret, errno); return fd; } @@ -2036,7 +2036,7 @@ int kvm_device_access(int dev_fd, uint32_t group, uint64_t attr, { int ret = _kvm_device_access(dev_fd, group, attr, val, write); - TEST_ASSERT(ret >= 0, "KVM_SET|GET_DEVICE_ATTR IOCTL failed, rc: %i errno: %i", ret, errno); + TEST_ASSERT(!ret, "KVM_SET|GET_DEVICE_ATTR IOCTL failed, rc: %i errno: %i", ret, errno); return ret; } -- cgit v1.2.3 From c895513453268c8911858e17713fb94d9e0eae50 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Thu, 16 Sep 2021 18:15:50 +0000 Subject: selftests: KVM: Add helpers for vCPU device attributes vCPU file descriptors are abstracted away from test code in KVM selftests, meaning that tests cannot directly access a vCPU's device attributes. Add helpers that tests can use to get at vCPU device attributes. 
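For illustration only (not part of the patch), a minimal sketch of how a test might use the new helpers to adjust a vCPU's TSC offset. KVM_VCPU_TSC_CTRL and KVM_VCPU_TSC_OFFSET are the vCPU attribute group/attribute added earlier in this series; the adjust_tsc_offset() wrapper itself is hypothetical, and the usual selftest headers are assumed.

#include "kvm_util.h"
#include "test_util.h"

static void adjust_tsc_offset(struct kvm_vm *vm, uint32_t vcpuid, int64_t delta)
{
        uint64_t offset;

        /* _vcpu_has_device_attr() returns 0 when the attribute exists. */
        if (_vcpu_has_device_attr(vm, vcpuid, KVM_VCPU_TSC_CTRL,
                                  KVM_VCPU_TSC_OFFSET)) {
                print_skip("KVM_VCPU_TSC_OFFSET not supported");
                exit(KSFT_SKIP);
        }

        /* Read the current offset, then write back the adjusted value. */
        vcpu_access_device_attr(vm, vcpuid, KVM_VCPU_TSC_CTRL,
                                KVM_VCPU_TSC_OFFSET, &offset, false);
        offset += delta;
        vcpu_access_device_attr(vm, vcpuid, KVM_VCPU_TSC_CTRL,
                                KVM_VCPU_TSC_OFFSET, &offset, true);
}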
Reviewed-by: Andrew Jones Signed-off-by: Oliver Upton Message-Id: <20210916181555.973085-5-oupton@google.com> Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/include/kvm_util.h | 9 ++++++ tools/testing/selftests/kvm/lib/kvm_util.c | 38 ++++++++++++++++++++++++++ 2 files changed, 47 insertions(+) diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h index a8ac5d52e17b..1b3ef5757819 100644 --- a/tools/testing/selftests/kvm/include/kvm_util.h +++ b/tools/testing/selftests/kvm/include/kvm_util.h @@ -240,6 +240,15 @@ int _kvm_device_access(int dev_fd, uint32_t group, uint64_t attr, int kvm_device_access(int dev_fd, uint32_t group, uint64_t attr, void *val, bool write); +int _vcpu_has_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group, + uint64_t attr); +int vcpu_has_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group, + uint64_t attr); +int _vcpu_access_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group, + uint64_t attr, void *val, bool write); +int vcpu_access_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group, + uint64_t attr, void *val, bool write); + const char *exit_reason_str(unsigned int exit_reason); void virt_pgd_alloc(struct kvm_vm *vm); diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index 0ffc2d39c80d..0fe66ca6139a 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c @@ -2040,6 +2040,44 @@ int kvm_device_access(int dev_fd, uint32_t group, uint64_t attr, return ret; } +int _vcpu_has_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group, + uint64_t attr) +{ + struct vcpu *vcpu = vcpu_find(vm, vcpuid); + + TEST_ASSERT(vcpu, "nonexistent vcpu id: %d", vcpuid); + + return _kvm_device_check_attr(vcpu->fd, group, attr); +} + +int vcpu_has_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group, + uint64_t attr) +{ + int ret = _vcpu_has_device_attr(vm, vcpuid, group, attr); + + TEST_ASSERT(!ret, "KVM_HAS_DEVICE_ATTR IOCTL failed, rc: %i errno: %i", ret, errno); + return ret; +} + +int _vcpu_access_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group, + uint64_t attr, void *val, bool write) +{ + struct vcpu *vcpu = vcpu_find(vm, vcpuid); + + TEST_ASSERT(vcpu, "nonexistent vcpu id: %d", vcpuid); + + return _kvm_device_access(vcpu->fd, group, attr, val, write); +} + +int vcpu_access_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group, + uint64_t attr, void *val, bool write) +{ + int ret = _vcpu_access_device_attr(vm, vcpuid, group, attr, val, write); + + TEST_ASSERT(!ret, "KVM_SET|GET_DEVICE_ATTR IOCTL failed, rc: %i errno: %i", ret, errno); + return ret; +} + /* * VM Dump * -- cgit v1.2.3 From 3f9808cac06c8dd4f800101e04f84fe3180198b0 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Thu, 16 Sep 2021 18:15:51 +0000 Subject: selftests: KVM: Introduce system counter offset test Introduce a KVM selftest to verify that userspace manipulation of the TSC (via the new vCPU attribute) results in the correct behavior within the guest. 
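Not part of the patch, but the core idea of the test condensed into one routine for readability (the full test follows in the diff): the host computes the guest's view of the TSC, i.e. host TSC plus the offset it just programmed, immediately before and after running the vCPU, and the value the guest reports back via ucall must fall inside that window. The helper name is hypothetical; everything it calls is an existing selftest primitive, and the usual selftest headers are assumed.

#include "kvm_util.h"
#include "test_util.h"
#include "processor.h"

static void check_guest_tsc(struct kvm_vm *vm, uint32_t vcpuid, uint64_t tsc_offset)
{
        uint64_t start, end, obs;
        struct ucall uc;

        start = rdtsc() + tsc_offset;   /* lower bound on the guest's reading */
        vcpu_run(vm, vcpuid);
        end = rdtsc() + tsc_offset;     /* upper bound */

        TEST_ASSERT(get_ucall(vm, vcpuid, &uc) == UCALL_SYNC,
                    "expected a sync ucall from the guest");
        obs = uc.args[2];       /* value passed via GUEST_SYNC_ARGS() */

        TEST_ASSERT(start <= obs && obs <= end,
                    "guest TSC %"PRIu64" outside [%"PRIu64", %"PRIu64"]",
                    obs, start, end);
}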
Reviewed-by: Andrew Jones Signed-off-by: Oliver Upton Message-Id: <20210916181555.973085-6-oupton@google.com> Signed-off-by: Paolo Bonzini --- Documentation/virt/kvm/devices/vcpu.rst | 41 ++++--- tools/testing/selftests/kvm/.gitignore | 1 + tools/testing/selftests/kvm/Makefile | 1 + .../selftests/kvm/system_counter_offset_test.c | 132 +++++++++++++++++++++ 4 files changed, 161 insertions(+), 14 deletions(-) create mode 100644 tools/testing/selftests/kvm/system_counter_offset_test.c diff --git a/Documentation/virt/kvm/devices/vcpu.rst b/Documentation/virt/kvm/devices/vcpu.rst index 3b399d727c11..60a29972d3f1 100644 --- a/Documentation/virt/kvm/devices/vcpu.rst +++ b/Documentation/virt/kvm/devices/vcpu.rst @@ -184,37 +184,50 @@ TSC is then derived by the following equation: guest_tsc = host_tsc + KVM_VCPU_TSC_OFFSET -This attribute is useful for the precise migration of a guest's TSC. The -following describes a possible algorithm to use for the migration of a -guest's TSC: +This attribute is useful to adjust the guest's TSC on live migration, +so that the TSC counts the time during which the VM was paused. The +following describes a possible algorithm to use for this purpose. From the source VMM process: -1. Invoke the KVM_GET_CLOCK ioctl to record the host TSC (t_0), - kvmclock nanoseconds (k_0), and realtime nanoseconds (r_0). +1. Invoke the KVM_GET_CLOCK ioctl to record the host TSC (tsc_src), + kvmclock nanoseconds (guest_src), and host CLOCK_REALTIME nanoseconds + (host_src). 2. Read the KVM_VCPU_TSC_OFFSET attribute for every vCPU to record the - guest TSC offset (off_n). + guest TSC offset (ofs_src[i]). 3. Invoke the KVM_GET_TSC_KHZ ioctl to record the frequency of the guest's TSC (freq). From the destination VMM process: -4. Invoke the KVM_SET_CLOCK ioctl, providing the kvmclock nanoseconds - (k_0) and realtime nanoseconds (r_0) in their respective fields. - Ensure that the KVM_CLOCK_REALTIME flag is set in the provided - structure. KVM will advance the VM's kvmclock to account for elapsed - time since recording the clock values. +4. Invoke the KVM_SET_CLOCK ioctl, providing the source nanoseconds from + kvmclock (guest_src) and CLOCK_REALTIME (host_src) in their respective + fields. Ensure that the KVM_CLOCK_REALTIME flag is set in the provided + structure. -5. Invoke the KVM_GET_CLOCK ioctl to record the host TSC (t_1) and - kvmclock nanoseconds (k_1). + KVM will advance the VM's kvmclock to account for elapsed time since + recording the clock values. Note that this will cause problems in + the guest (e.g., timeouts) unless CLOCK_REALTIME is synchronized + between the source and destination, and a reasonably short time passes + between the source pausing the VMs and the destination executing + steps 4-7. + +5. Invoke the KVM_GET_CLOCK ioctl to record the host TSC (tsc_dest) and + kvmclock nanoseconds (guest_dest). 6. Adjust the guest TSC offsets for every vCPU to account for (1) time elapsed since recording state and (2) difference in TSCs between the source and destination machine: - new_off_n = t_0 + off_n + (k_1 - k_0) * freq - t_1 + ofs_dst[i] = ofs_src[i] - + (guest_src - guest_dest) * freq + + (tsc_src - tsc_dest) + + ("ofs[i] + tsc - guest * freq" is the guest TSC value corresponding to + a time of 0 in kvmclock. The above formula ensures that it is the + same on the destination as it was on the source). 7. Write the KVM_VCPU_TSC_OFFSET attribute for every vCPU with the respective value derived in the previous step. 
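As a cross-check of the arithmetic in step 6, a back-of-the-envelope rendering in C (not part of the patch; the function and parameter names are hypothetical). freq_khz is the value returned by KVM_GET_TSC_KHZ, so the elapsed kvmclock nanoseconds must be scaled to TSC ticks; 128-bit math and overflow handling are omitted for brevity.

#include <stdint.h>

static uint64_t dest_tsc_offset(uint64_t ofs_src, uint64_t tsc_src,
                                uint64_t tsc_dest, uint64_t guest_src,
                                uint64_t guest_dest, uint64_t freq_khz)
{
        /* kvmclock nanoseconds that elapsed between the two snapshots. */
        int64_t elapsed_ns = guest_dest - guest_src;
        /* Convert nanoseconds to guest TSC ticks: ns * kHz / 10^6. */
        int64_t elapsed_ticks = elapsed_ns * (int64_t)freq_khz / 1000000;

        /*
         * Keep "offset + host TSC - kvmclock expressed in ticks", i.e. the
         * guest TSC value at kvmclock time zero, identical on the source
         * and the destination.
         */
        return ofs_src + elapsed_ticks + (tsc_src - tsc_dest);
}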
diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore index 4bcff9c1a524..fdd13fbdcc8f 100644 --- a/tools/testing/selftests/kvm/.gitignore +++ b/tools/testing/selftests/kvm/.gitignore @@ -54,3 +54,4 @@ /set_memory_region_test /steal_time /kvm_binary_stats_test +/system_counter_offset_test diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index 3995940d0011..fd20f271aac0 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -86,6 +86,7 @@ TEST_GEN_PROGS_x86_64 += rseq_test TEST_GEN_PROGS_x86_64 += set_memory_region_test TEST_GEN_PROGS_x86_64 += steal_time TEST_GEN_PROGS_x86_64 += kvm_binary_stats_test +TEST_GEN_PROGS_x86_64 += system_counter_offset_test TEST_GEN_PROGS_aarch64 += aarch64/debug-exceptions TEST_GEN_PROGS_aarch64 += aarch64/get-reg-list diff --git a/tools/testing/selftests/kvm/system_counter_offset_test.c b/tools/testing/selftests/kvm/system_counter_offset_test.c new file mode 100644 index 000000000000..b337bbbfa41f --- /dev/null +++ b/tools/testing/selftests/kvm/system_counter_offset_test.c @@ -0,0 +1,132 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2021, Google LLC. + * + * Tests for adjusting the system counter from userspace + */ +#include +#include +#include +#include +#include + +#include "test_util.h" +#include "kvm_util.h" +#include "processor.h" + +#define VCPU_ID 0 + +#ifdef __x86_64__ + +struct test_case { + uint64_t tsc_offset; +}; + +static struct test_case test_cases[] = { + { 0 }, + { 180 * NSEC_PER_SEC }, + { -180 * NSEC_PER_SEC }, +}; + +static void check_preconditions(struct kvm_vm *vm) +{ + if (!_vcpu_has_device_attr(vm, VCPU_ID, KVM_VCPU_TSC_CTRL, KVM_VCPU_TSC_OFFSET)) + return; + + print_skip("KVM_VCPU_TSC_OFFSET not supported; skipping test"); + exit(KSFT_SKIP); +} + +static void setup_system_counter(struct kvm_vm *vm, struct test_case *test) +{ + vcpu_access_device_attr(vm, VCPU_ID, KVM_VCPU_TSC_CTRL, + KVM_VCPU_TSC_OFFSET, &test->tsc_offset, true); +} + +static uint64_t guest_read_system_counter(struct test_case *test) +{ + return rdtsc(); +} + +static uint64_t host_read_guest_system_counter(struct test_case *test) +{ + return rdtsc() + test->tsc_offset; +} + +#else /* __x86_64__ */ + +#error test not implemented for this architecture! 
+ +#endif + +#define GUEST_SYNC_CLOCK(__stage, __val) \ + GUEST_SYNC_ARGS(__stage, __val, 0, 0, 0) + +static void guest_main(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(test_cases); i++) { + struct test_case *test = &test_cases[i]; + + GUEST_SYNC_CLOCK(i, guest_read_system_counter(test)); + } +} + +static void handle_sync(struct ucall *uc, uint64_t start, uint64_t end) +{ + uint64_t obs = uc->args[2]; + + TEST_ASSERT(start <= obs && obs <= end, + "unexpected system counter value: %"PRIu64" expected range: [%"PRIu64", %"PRIu64"]", + obs, start, end); + + pr_info("system counter value: %"PRIu64" expected range [%"PRIu64", %"PRIu64"]\n", + obs, start, end); +} + +static void handle_abort(struct ucall *uc) +{ + TEST_FAIL("%s at %s:%ld", (const char *)uc->args[0], + __FILE__, uc->args[1]); +} + +static void enter_guest(struct kvm_vm *vm) +{ + uint64_t start, end; + struct ucall uc; + int i; + + for (i = 0; i < ARRAY_SIZE(test_cases); i++) { + struct test_case *test = &test_cases[i]; + + setup_system_counter(vm, test); + start = host_read_guest_system_counter(test); + vcpu_run(vm, VCPU_ID); + end = host_read_guest_system_counter(test); + + switch (get_ucall(vm, VCPU_ID, &uc)) { + case UCALL_SYNC: + handle_sync(&uc, start, end); + break; + case UCALL_ABORT: + handle_abort(&uc); + return; + default: + TEST_ASSERT(0, "unhandled ucall %ld\n", + get_ucall(vm, VCPU_ID, &uc)); + } + } +} + +int main(void) +{ + struct kvm_vm *vm; + + vm = vm_create_default(VCPU_ID, 0, guest_main); + check_preconditions(vm); + ucall_init(vm, NULL); + + enter_guest(vm); + kvm_vm_free(vm); +} -- cgit v1.2.3 From ffb4ce3c49366f02f1c064fbe2e66a96ab5f98b8 Mon Sep 17 00:00:00 2001 From: Ricardo Koller Date: Tue, 7 Sep 2021 11:09:56 -0700 Subject: KVM: selftests: Make memslot_perf_test arch independent memslot_perf_test uses ucalls for synchronization between guest and host. Ucalls API is architecture independent: tests do not need to know details like what kind of exit they generate on a specific arch. More specifically, there is no need to check whether an exit is KVM_EXIT_IO in x86 for the host to know that the exit is ucall related, as get_ucall() already makes that check. Change memslot_perf_test to not require specifying what exit does a ucall generate. Also add a missing ucall_init. 
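The refactored vcpu_worker() in the diff below is specific to memslot_perf_test; purely as an illustration of the pattern this commit describes (let get_ucall() classify the exit and consult run->exit_reason only for UCALL_NONE), a generic worker loop might look like the sketch below. The function name is hypothetical, the test's MMIO handling is omitted, and everything called here already exists in the selftest library.

#include "kvm_util.h"
#include "test_util.h"

static void run_vcpu_until_done(struct kvm_vm *vm, uint32_t vcpuid)
{
        struct kvm_run *run = vcpu_state(vm, vcpuid);
        struct ucall uc;

        for (;;) {
                vcpu_run(vm, vcpuid);

                switch (get_ucall(vm, vcpuid, &uc)) {
                case UCALL_SYNC:
                        /* Guest checkpoint; nothing arch-specific to do. */
                        continue;
                case UCALL_DONE:
                        return;
                case UCALL_ABORT:
                        TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
                                  __FILE__, uc.args[1]);
                        break;
                case UCALL_NONE:
                        /* Not a ucall: only now inspect the raw exit reason. */
                        TEST_FAIL("unexpected exit reason: %u (%s)",
                                  run->exit_reason,
                                  exit_reason_str(run->exit_reason));
                        break;
                default:
                        TEST_FAIL("unknown ucall %lu", uc.cmd);
                }
        }
}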
Signed-off-by: Ricardo Koller Reviewed-by: Andrew Jones Reviewed-by: Oliver Upton Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20210907180957.609966-2-ricarkol@google.com --- tools/testing/selftests/kvm/memslot_perf_test.c | 56 +++++++++++++++---------- 1 file changed, 34 insertions(+), 22 deletions(-) diff --git a/tools/testing/selftests/kvm/memslot_perf_test.c b/tools/testing/selftests/kvm/memslot_perf_test.c index d6e381e01db7..1727f75e0c2c 100644 --- a/tools/testing/selftests/kvm/memslot_perf_test.c +++ b/tools/testing/selftests/kvm/memslot_perf_test.c @@ -127,43 +127,54 @@ static bool verbose; pr_info(__VA_ARGS__); \ } while (0) +static void check_mmio_access(struct vm_data *vm, struct kvm_run *run) +{ + TEST_ASSERT(vm->mmio_ok, "Unexpected mmio exit"); + TEST_ASSERT(run->mmio.is_write, "Unexpected mmio read"); + TEST_ASSERT(run->mmio.len == 8, + "Unexpected exit mmio size = %u", run->mmio.len); + TEST_ASSERT(run->mmio.phys_addr >= vm->mmio_gpa_min && + run->mmio.phys_addr <= vm->mmio_gpa_max, + "Unexpected exit mmio address = 0x%llx", + run->mmio.phys_addr); +} + static void *vcpu_worker(void *data) { struct vm_data *vm = data; struct kvm_run *run; struct ucall uc; - uint64_t cmd; run = vcpu_state(vm->vm, VCPU_ID); while (1) { vcpu_run(vm->vm, VCPU_ID); - if (run->exit_reason == KVM_EXIT_IO) { - cmd = get_ucall(vm->vm, VCPU_ID, &uc); - if (cmd != UCALL_SYNC) - break; - + switch (get_ucall(vm->vm, VCPU_ID, &uc)) { + case UCALL_SYNC: + TEST_ASSERT(uc.args[1] == 0, + "Unexpected sync ucall, got %lx", + (ulong)uc.args[1]); sem_post(&vcpu_ready); continue; - } - - if (run->exit_reason != KVM_EXIT_MMIO) + case UCALL_NONE: + if (run->exit_reason == KVM_EXIT_MMIO) + check_mmio_access(vm, run); + else + goto done; break; - - TEST_ASSERT(vm->mmio_ok, "Unexpected mmio exit"); - TEST_ASSERT(run->mmio.is_write, "Unexpected mmio read"); - TEST_ASSERT(run->mmio.len == 8, - "Unexpected exit mmio size = %u", run->mmio.len); - TEST_ASSERT(run->mmio.phys_addr >= vm->mmio_gpa_min && - run->mmio.phys_addr <= vm->mmio_gpa_max, - "Unexpected exit mmio address = 0x%llx", - run->mmio.phys_addr); + case UCALL_ABORT: + TEST_FAIL("%s at %s:%ld, val = %lu", + (const char *)uc.args[0], + __FILE__, uc.args[1], uc.args[2]); + break; + case UCALL_DONE: + goto done; + default: + TEST_FAIL("Unknown ucall %lu", uc.cmd); + } } - if (run->exit_reason == KVM_EXIT_IO && cmd == UCALL_ABORT) - TEST_FAIL("%s at %s:%ld, val = %lu", (const char *)uc.args[0], - __FILE__, uc.args[1], uc.args[2]); - +done: return NULL; } @@ -268,6 +279,7 @@ static bool prepare_vm(struct vm_data *data, int nslots, uint64_t *maxslots, TEST_ASSERT(data->hva_slots, "malloc() fail"); data->vm = vm_create_default(VCPU_ID, mempages, guest_code); + ucall_init(data->vm, NULL); pr_info_v("Adding slots 1..%i, each slot with %"PRIu64" pages + %"PRIu64" extra pages last\n", max_mem_slots - 1, data->pages_per_slot, rempages); -- cgit v1.2.3 From 358928fd5264f069b9758f8b29297c7bff2a06de Mon Sep 17 00:00:00 2001 From: Ricardo Koller Date: Tue, 7 Sep 2021 11:09:57 -0700 Subject: KVM: selftests: Build the memslot tests for arm64 Add memslot_perf_test and memslot_modification_stress_test to the list of aarch64 selftests. 
Signed-off-by: Ricardo Koller Reviewed-by: Andrew Jones Reviewed-by: Oliver Upton Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20210907180957.609966-3-ricarkol@google.com --- tools/testing/selftests/kvm/Makefile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index d1774f461393..e6fba18d0b2c 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -95,6 +95,8 @@ TEST_GEN_PROGS_aarch64 += dirty_log_test TEST_GEN_PROGS_aarch64 += dirty_log_perf_test TEST_GEN_PROGS_aarch64 += kvm_create_max_vcpus TEST_GEN_PROGS_aarch64 += kvm_page_table_test +TEST_GEN_PROGS_aarch64 += memslot_modification_stress_test +TEST_GEN_PROGS_aarch64 += memslot_perf_test TEST_GEN_PROGS_aarch64 += rseq_test TEST_GEN_PROGS_aarch64 += set_memory_region_test TEST_GEN_PROGS_aarch64 += steal_time -- cgit v1.2.3 From 1e76a3ce0d3cdfc6b506e21047a26471bc1cc92e Mon Sep 17 00:00:00 2001 From: David Stevens Date: Fri, 15 Oct 2021 12:30:21 -0400 Subject: KVM: cleanup allocation of rmaps and page tracking data Unify the flags for rmaps and page tracking data, using a single flag in struct kvm_arch and a single loop to go over all the address spaces and memslots. This avoids code duplication between alloc_all_memslots_rmaps and kvm_page_track_enable_mmu_write_tracking. Signed-off-by: David Stevens [This patch is the delta between David's v2 and v3, with conflicts fixed and my own commit message. - Paolo] Co-developed-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 17 +++----- arch/x86/include/asm/kvm_page_track.h | 3 +- arch/x86/kvm/mmu.h | 22 +++++++--- arch/x86/kvm/mmu/mmu.c | 78 ++++++++++++++++++++++++++++------- arch/x86/kvm/mmu/page_track.c | 57 +++++++------------------ arch/x86/kvm/mmu/tdp_mmu.h | 2 - arch/x86/kvm/x86.c | 47 +-------------------- 7 files changed, 103 insertions(+), 123 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 88f0326c184a..b4fece3bb061 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1212,18 +1212,11 @@ struct kvm_arch { #endif /* CONFIG_X86_64 */ /* - * If set, rmaps have been allocated for all memslots and should be - * allocated for any newly created or modified memslots. + * If set, at least one shadow root has been allocated. This flag + * is used as one input when determining whether certain memslot + * related allocations are necessary. */ - bool memslots_have_rmaps; - - /* - * Set when the KVM mmu needs guest write access page tracking. If - * set, the necessary gfn_track arrays have been allocated for - * all memslots and should be allocated for any newly created or - * modified memslots. 
- */ - bool memslots_mmu_write_tracking; + bool shadow_root_allocated; #if IS_ENABLED(CONFIG_HYPERV) hpa_t hv_root_tdp; @@ -1946,7 +1939,7 @@ static inline int kvm_cpu_get_apicid(int mps_cpu) int kvm_cpu_dirty_log_size(void); -int alloc_all_memslots_rmaps(struct kvm *kvm); +int memslot_rmap_alloc(struct kvm_memory_slot *slot, unsigned long npages); #define KVM_CLOCK_VALID_FLAGS \ (KVM_CLOCK_TSC_STABLE | KVM_CLOCK_REALTIME | KVM_CLOCK_HOST_TSC) diff --git a/arch/x86/include/asm/kvm_page_track.h b/arch/x86/include/asm/kvm_page_track.h index 79d84a94f8eb..9d4a3b1b25b9 100644 --- a/arch/x86/include/asm/kvm_page_track.h +++ b/arch/x86/include/asm/kvm_page_track.h @@ -49,7 +49,8 @@ struct kvm_page_track_notifier_node { int kvm_page_track_init(struct kvm *kvm); void kvm_page_track_cleanup(struct kvm *kvm); -int kvm_page_track_enable_mmu_write_tracking(struct kvm *kvm); +bool kvm_page_track_write_tracking_enabled(struct kvm *kvm); +int kvm_page_track_write_tracking_alloc(struct kvm_memory_slot *slot); void kvm_page_track_free_memslot(struct kvm_memory_slot *slot); int kvm_page_track_create_memslot(struct kvm *kvm, diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index 75367af1a6d3..2df48d60c949 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -304,14 +304,26 @@ int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu); int kvm_mmu_post_init_vm(struct kvm *kvm); void kvm_mmu_pre_destroy_vm(struct kvm *kvm); -static inline bool kvm_memslots_have_rmaps(struct kvm *kvm) +static inline bool kvm_shadow_root_allocated(struct kvm *kvm) { /* - * Read memslot_have_rmaps before rmap pointers. Hence, threads reading - * memslots_have_rmaps in any lock context are guaranteed to see the - * pointers. Pairs with smp_store_release in alloc_all_memslots_rmaps. + * Read shadow_root_allocated before related pointers. Hence, threads + * reading shadow_root_allocated in any lock context are guaranteed to + * see the pointers. Pairs with smp_store_release in + * mmu_first_shadow_root_alloc. */ - return smp_load_acquire(&kvm->arch.memslots_have_rmaps); + return smp_load_acquire(&kvm->arch.shadow_root_allocated); +} + +#ifdef CONFIG_X86_64 +static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return kvm->arch.tdp_mmu_enabled; } +#else +static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return false; } +#endif + +static inline bool kvm_memslots_have_rmaps(struct kvm *kvm) +{ + return !is_tdp_mmu_enabled(kvm) || kvm_shadow_root_allocated(kvm); } static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 29e7a4bb26e9..701db0794581 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -3397,6 +3397,67 @@ out_unlock: return r; } +static int mmu_first_shadow_root_alloc(struct kvm *kvm) +{ + struct kvm_memslots *slots; + struct kvm_memory_slot *slot; + int r = 0, i; + + /* + * Check if this is the first shadow root being allocated before + * taking the lock. + */ + if (kvm_shadow_root_allocated(kvm)) + return 0; + + mutex_lock(&kvm->slots_arch_lock); + + /* Recheck, under the lock, whether this is the first shadow root. */ + if (kvm_shadow_root_allocated(kvm)) + goto out_unlock; + + /* + * Check if anything actually needs to be allocated, e.g. all metadata + * will be allocated upfront if TDP is disabled. 
+ */ + if (kvm_memslots_have_rmaps(kvm) && + kvm_page_track_write_tracking_enabled(kvm)) + goto out_success; + + for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { + slots = __kvm_memslots(kvm, i); + kvm_for_each_memslot(slot, slots) { + /* + * Both of these functions are no-ops if the target is + * already allocated, so unconditionally calling both + * is safe. Intentionally do NOT free allocations on + * failure to avoid having to track which allocations + * were made now versus when the memslot was created. + * The metadata is guaranteed to be freed when the slot + * is freed, and will be kept/used if userspace retries + * KVM_RUN instead of killing the VM. + */ + r = memslot_rmap_alloc(slot, slot->npages); + if (r) + goto out_unlock; + r = kvm_page_track_write_tracking_alloc(slot); + if (r) + goto out_unlock; + } + } + + /* + * Ensure that shadow_root_allocated becomes true strictly after + * all the related pointers are set. + */ +out_success: + smp_store_release(&kvm->arch.shadow_root_allocated, true); + +out_unlock: + mutex_unlock(&kvm->slots_arch_lock); + return r; +} + static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) { struct kvm_mmu *mmu = vcpu->arch.mmu; @@ -3427,11 +3488,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) } } - r = alloc_all_memslots_rmaps(vcpu->kvm); - if (r) - return r; - - r = kvm_page_track_enable_mmu_write_tracking(vcpu->kvm); + r = mmu_first_shadow_root_alloc(vcpu->kvm); if (r) return r; @@ -5604,16 +5661,7 @@ void kvm_mmu_init_vm(struct kvm *kvm) spin_lock_init(&kvm->arch.mmu_unsync_pages_lock); - if (!kvm_mmu_init_tdp_mmu(kvm)) - /* - * No smp_load/store wrappers needed here as we are in - * VM init and there cannot be any memslots / other threads - * accessing this struct kvm yet. - */ - kvm->arch.memslots_have_rmaps = true; - - if (!tdp_enabled) - kvm->arch.memslots_mmu_write_tracking = true; + kvm_mmu_init_tdp_mmu(kvm); node->track_write = kvm_mmu_pte_write; node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot; diff --git a/arch/x86/kvm/mmu/page_track.c b/arch/x86/kvm/mmu/page_track.c index bb5d60bd4dbf..cc4eb5b7fb76 100644 --- a/arch/x86/kvm/mmu/page_track.c +++ b/arch/x86/kvm/mmu/page_track.c @@ -19,14 +19,10 @@ #include "mmu.h" #include "mmu_internal.h" -static bool write_tracking_enabled(struct kvm *kvm) +bool kvm_page_track_write_tracking_enabled(struct kvm *kvm) { - /* - * Read memslots_mmu_write_tracking before gfn_track pointers. Pairs - * with smp_store_release in kvm_page_track_enable_mmu_write_tracking. 
- */ return IS_ENABLED(CONFIG_KVM_EXTERNAL_WRITE_TRACKING) || - smp_load_acquire(&kvm->arch.memslots_mmu_write_tracking); + !tdp_enabled || kvm_shadow_root_allocated(kvm); } void kvm_page_track_free_memslot(struct kvm_memory_slot *slot) @@ -46,7 +42,8 @@ int kvm_page_track_create_memslot(struct kvm *kvm, int i; for (i = 0; i < KVM_PAGE_TRACK_MAX; i++) { - if (i == KVM_PAGE_TRACK_WRITE && !write_tracking_enabled(kvm)) + if (i == KVM_PAGE_TRACK_WRITE && + !kvm_page_track_write_tracking_enabled(kvm)) continue; slot->arch.gfn_track[i] = @@ -71,43 +68,18 @@ static inline bool page_track_mode_is_valid(enum kvm_page_track_mode mode) return true; } -int kvm_page_track_enable_mmu_write_tracking(struct kvm *kvm) +int kvm_page_track_write_tracking_alloc(struct kvm_memory_slot *slot) { - struct kvm_memslots *slots; - struct kvm_memory_slot *slot; - unsigned short **gfn_track; - int i; + unsigned short *gfn_track; - if (write_tracking_enabled(kvm)) + if (slot->arch.gfn_track[KVM_PAGE_TRACK_WRITE]) return 0; - mutex_lock(&kvm->slots_arch_lock); - - if (write_tracking_enabled(kvm)) { - mutex_unlock(&kvm->slots_arch_lock); - return 0; - } - - for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { - slots = __kvm_memslots(kvm, i); - kvm_for_each_memslot(slot, slots) { - gfn_track = slot->arch.gfn_track + KVM_PAGE_TRACK_WRITE; - *gfn_track = kvcalloc(slot->npages, sizeof(*gfn_track), - GFP_KERNEL_ACCOUNT); - if (*gfn_track == NULL) { - mutex_unlock(&kvm->slots_arch_lock); - return -ENOMEM; - } - } - } - - /* - * Ensure that memslots_mmu_write_tracking becomes true strictly - * after all the pointers are set. - */ - smp_store_release(&kvm->arch.memslots_mmu_write_tracking, true); - mutex_unlock(&kvm->slots_arch_lock); + gfn_track = kvcalloc(slot->npages, sizeof(*gfn_track), GFP_KERNEL_ACCOUNT); + if (gfn_track == NULL) + return -ENOMEM; + slot->arch.gfn_track[KVM_PAGE_TRACK_WRITE] = gfn_track; return 0; } @@ -147,7 +119,7 @@ void kvm_slot_page_track_add_page(struct kvm *kvm, return; if (WARN_ON(mode == KVM_PAGE_TRACK_WRITE && - !write_tracking_enabled(kvm))) + !kvm_page_track_write_tracking_enabled(kvm))) return; update_gfn_track(slot, gfn, mode, 1); @@ -185,7 +157,7 @@ void kvm_slot_page_track_remove_page(struct kvm *kvm, return; if (WARN_ON(mode == KVM_PAGE_TRACK_WRITE && - !write_tracking_enabled(kvm))) + !kvm_page_track_write_tracking_enabled(kvm))) return; update_gfn_track(slot, gfn, mode, -1); @@ -213,7 +185,8 @@ bool kvm_slot_page_track_is_active(struct kvm_vcpu *vcpu, if (!slot) return false; - if (mode == KVM_PAGE_TRACK_WRITE && !write_tracking_enabled(vcpu->kvm)) + if (mode == KVM_PAGE_TRACK_WRITE && + !kvm_page_track_write_tracking_enabled(vcpu->kvm)) return false; index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K); diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h index ceaf7ff3ca7c..476b133544dd 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.h +++ b/arch/x86/kvm/mmu/tdp_mmu.h @@ -90,7 +90,6 @@ u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr, #ifdef CONFIG_X86_64 bool kvm_mmu_init_tdp_mmu(struct kvm *kvm); void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm); -static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return kvm->arch.tdp_mmu_enabled; } static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return sp->tdp_mmu_page; } static inline bool is_tdp_mmu(struct kvm_mmu *mmu) @@ -112,7 +111,6 @@ static inline bool is_tdp_mmu(struct kvm_mmu *mmu) #else static inline bool kvm_mmu_init_tdp_mmu(struct kvm *kvm) { return false; } static inline void 
kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) {} -static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return false; } static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; } static inline bool is_tdp_mmu(struct kvm_mmu *mmu) { return false; } #endif diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index afdc5d186c50..ac6d31ec909f 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -11514,8 +11514,7 @@ void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) kvm_page_track_free_memslot(slot); } -static int memslot_rmap_alloc(struct kvm_memory_slot *slot, - unsigned long npages) +int memslot_rmap_alloc(struct kvm_memory_slot *slot, unsigned long npages) { const int sz = sizeof(*slot->arch.rmap[0]); int i; @@ -11537,50 +11536,6 @@ static int memslot_rmap_alloc(struct kvm_memory_slot *slot, return 0; } -int alloc_all_memslots_rmaps(struct kvm *kvm) -{ - struct kvm_memslots *slots; - struct kvm_memory_slot *slot; - int r, i; - - /* - * Check if memslots alreday have rmaps early before acquiring - * the slots_arch_lock below. - */ - if (kvm_memslots_have_rmaps(kvm)) - return 0; - - mutex_lock(&kvm->slots_arch_lock); - - /* - * Read memslots_have_rmaps again, under the slots arch lock, - * before allocating the rmaps - */ - if (kvm_memslots_have_rmaps(kvm)) { - mutex_unlock(&kvm->slots_arch_lock); - return 0; - } - - for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { - slots = __kvm_memslots(kvm, i); - kvm_for_each_memslot(slot, slots) { - r = memslot_rmap_alloc(slot, slot->npages); - if (r) { - mutex_unlock(&kvm->slots_arch_lock); - return r; - } - } - } - - /* - * Ensure that memslots_have_rmaps becomes true strictly after - * all the rmap pointers are set. - */ - smp_store_release(&kvm->arch.memslots_have_rmaps, true); - mutex_unlock(&kvm->slots_arch_lock); - return 0; -} - static int kvm_alloc_memslot_metadata(struct kvm *kvm, struct kvm_memory_slot *slot, unsigned long npages) -- cgit v1.2.3 From 2839180ce5bb27ad5e1f092fdafede284a925e5c Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Wed, 29 Sep 2021 09:19:32 -0400 Subject: KVM: x86/mmu: clean up prefetch/prefault/speculative naming "prefetch", "prefault" and "speculative" are used throughout KVM to mean the same thing. Use a single name, standardizing on "prefetch" which is already used by various functions such as direct_pte_prefetch, FNAME(prefetch_gpte), FNAME(pte_prefetch), etc. Suggested-by: David Matlack Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu.h | 6 +++--- arch/x86/kvm/mmu/mmu.c | 10 +++++----- arch/x86/kvm/mmu/mmu_internal.h | 2 +- arch/x86/kvm/mmu/paging_tmpl.h | 2 +- arch/x86/kvm/mmu/spte.c | 8 ++++---- arch/x86/kvm/mmu/spte.h | 2 +- arch/x86/kvm/mmu/tdp_mmu.c | 2 +- 7 files changed, 16 insertions(+), 16 deletions(-) diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index 2df48d60c949..c921619f7dc9 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -118,7 +118,7 @@ struct kvm_page_fault { /* arguments to kvm_mmu_do_page_fault. */ const gpa_t addr; const u32 error_code; - const bool prefault; + const bool prefetch; /* Derived from error_code. 
*/ const bool exec; @@ -176,7 +176,7 @@ static inline bool is_nx_huge_page_enabled(void) } static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, - u32 err, bool prefault) + u32 err, bool prefetch) { struct kvm_page_fault fault = { .addr = cr2_or_gpa, @@ -186,7 +186,7 @@ static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, .present = err & PFERR_PRESENT_MASK, .rsvd = err & PFERR_RSVD_MASK, .user = err & PFERR_USER_MASK, - .prefault = prefault, + .prefetch = prefetch, .is_tdp = likely(vcpu->arch.mmu->page_fault == kvm_tdp_page_fault), .nx_huge_page_workaround_enabled = is_nx_huge_page_enabled(), diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 701db0794581..43ee10181459 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -2573,7 +2573,7 @@ static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) * be write-protected. */ int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot, - gfn_t gfn, bool can_unsync, bool speculative) + gfn_t gfn, bool can_unsync, bool prefetch) { struct kvm_mmu_page *sp; bool locked = false; @@ -2599,7 +2599,7 @@ int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot, if (sp->unsync) continue; - if (speculative) + if (prefetch) return -EEXIST; /* @@ -2687,7 +2687,7 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot, /* Prefetching always gets a writable pfn. */ bool host_writable = !fault || fault->map_writable; - bool speculative = !fault || fault->prefault; + bool prefetch = !fault || fault->prefetch; bool write_fault = fault && fault->write; pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__, @@ -2719,7 +2719,7 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot, was_rmapped = 1; } - wrprot = make_spte(vcpu, sp, slot, pte_access, gfn, pfn, *sptep, speculative, + wrprot = make_spte(vcpu, sp, slot, pte_access, gfn, pfn, *sptep, prefetch, true, host_writable, &spte); if (*sptep == spte) { @@ -3923,7 +3923,7 @@ static bool kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, if (!async) return false; /* *pfn has correct page already */ - if (!fault->prefault && kvm_can_do_async_pf(vcpu)) { + if (!fault->prefetch && kvm_can_do_async_pf(vcpu)) { trace_kvm_try_async_get_page(fault->addr, fault->gfn); if (kvm_find_async_pf_gfn(vcpu, fault->gfn)) { trace_kvm_async_pf_doublefault(fault->addr, fault->gfn); diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h index 585146a712d2..52c6527b1a06 100644 --- a/arch/x86/kvm/mmu/mmu_internal.h +++ b/arch/x86/kvm/mmu/mmu_internal.h @@ -119,7 +119,7 @@ static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu) } int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot, - gfn_t gfn, bool can_unsync, bool speculative); + gfn_t gfn, bool can_unsync, bool prefetch); void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn); void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn); diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index d8889e02c4b7..f87d36898c44 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -853,7 +853,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault */ if (!r) { pgprintk("%s: guest page fault\n", __func__); - if (!fault->prefault) + if (!fault->prefetch) 
kvm_inject_emulated_page_fault(vcpu, &walker.fault); return RET_PF_RETRY; diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c index 871f6114b0fa..0c76c45fdb68 100644 --- a/arch/x86/kvm/mmu/spte.c +++ b/arch/x86/kvm/mmu/spte.c @@ -92,7 +92,7 @@ static bool kvm_is_mmio_pfn(kvm_pfn_t pfn) bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, struct kvm_memory_slot *slot, unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn, - u64 old_spte, bool speculative, bool can_unsync, + u64 old_spte, bool prefetch, bool can_unsync, bool host_writable, u64 *new_spte) { int level = sp->role.level; @@ -111,7 +111,7 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, * read access. See FNAME(gpte_access) in paging_tmpl.h. */ spte |= shadow_present_mask; - if (!speculative) + if (!prefetch) spte |= spte_shadow_accessed_mask(spte); if (level > PG_LEVEL_4K && (pte_access & ACC_EXEC_MASK) && @@ -161,7 +161,7 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, * e.g. it's write-tracked (upper-level SPs) or has one or more * shadow pages and unsync'ing pages is not allowed. */ - if (mmu_try_to_unsync_pages(vcpu, slot, gfn, can_unsync, speculative)) { + if (mmu_try_to_unsync_pages(vcpu, slot, gfn, can_unsync, prefetch)) { pgprintk("%s: found shadow page for %llx, marking ro\n", __func__, gfn); wrprot = true; @@ -174,7 +174,7 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, spte |= spte_shadow_dirty_mask(spte); out: - if (speculative) + if (prefetch) spte = mark_spte_for_access_track(spte); WARN_ONCE(is_rsvd_spte(&vcpu->arch.mmu->shadow_zero_check, spte, level), diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h index fcda5abc7dc0..cc432f9a966b 100644 --- a/arch/x86/kvm/mmu/spte.h +++ b/arch/x86/kvm/mmu/spte.h @@ -332,7 +332,7 @@ static inline u64 get_mmio_spte_generation(u64 spte) bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, struct kvm_memory_slot *slot, unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn, - u64 old_spte, bool speculative, bool can_unsync, + u64 old_spte, bool prefetch, bool can_unsync, bool host_writable, u64 *new_spte); u64 make_nonleaf_spte(u64 *child_pt, bool ad_disabled); u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access); diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index 953f24ded6bc..7c5dd83e52de 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -907,7 +907,7 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL); else wrprot = make_spte(vcpu, sp, fault->slot, ACC_ALL, iter->gfn, - fault->pfn, iter->old_spte, fault->prefault, true, + fault->pfn, iter->old_spte, fault->prefetch, true, fault->map_writable, &new_spte); if (new_spte == iter->old_spte) -- cgit v1.2.3 From 2e6e0d683b77b1e08e5e03146ca3b8e033492666 Mon Sep 17 00:00:00 2001 From: Xiaoyao Li Date: Fri, 27 Aug 2021 15:02:43 +0800 Subject: KVM: VMX: Restore host's MSR_IA32_RTIT_CTL when it's not zero A minor optimization to WRMSR MSR_IA32_RTIT_CTL when necessary. Opportunistically refine the comment to call out that KVM requires VM_EXIT_CLEAR_IA32_RTIT_CTL to expose PT to the guest. 
Reviewed-by: Sean Christopherson Signed-off-by: Xiaoyao Li Message-Id: <20210827070249.924633-2-xiaoyao.li@intel.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/vmx.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 1c8b2b6e7ed9..9a8cd7f8481e 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -1074,8 +1074,12 @@ static void pt_guest_exit(struct vcpu_vmx *vmx) pt_load_msr(&vmx->pt_desc.host, vmx->pt_desc.addr_range); } - /* Reload host state (IA32_RTIT_CTL will be cleared on VM exit). */ - wrmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl); + /* + * KVM requires VM_EXIT_CLEAR_IA32_RTIT_CTL to expose PT to the guest, + * i.e. RTIT_CTL is always cleared on VM-Exit. Restore it if necessary. + */ + if (vmx->pt_desc.host.ctl) + wrmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl); } void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel, -- cgit v1.2.3 From ba51d627230fdac53a0613a1b8d23241fdf45c26 Mon Sep 17 00:00:00 2001 From: Xiaoyao Li Date: Fri, 27 Aug 2021 15:02:44 +0800 Subject: KVM: VMX: Use precomputed vmx->pt_desc.addr_range The number of valid PT ADDR MSRs for the guest is precomputed in vmx->pt_desc.addr_range. Use it instead of calculating again. Signed-off-by: Xiaoyao Li Message-Id: <20210827070249.924633-3-xiaoyao.li@intel.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/vmx.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 9a8cd7f8481e..f8539f911fb7 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -1890,8 +1890,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B: index = msr_info->index - MSR_IA32_RTIT_ADDR0_A; if (!vmx_pt_mode_is_host_guest() || - (index >= 2 * intel_pt_validate_cap(vmx->pt_desc.caps, - PT_CAP_num_address_ranges))) + (index >= 2 * vmx->pt_desc.addr_range)) return 1; if (index % 2) msr_info->data = vmx->pt_desc.guest.addr_b[index / 2]; @@ -2206,8 +2205,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) if (!pt_can_write_msr(vmx)) return 1; index = msr_info->index - MSR_IA32_RTIT_ADDR0_A; - if (index >= 2 * intel_pt_validate_cap(vmx->pt_desc.caps, - PT_CAP_num_address_ranges)) + if (index >= 2 * vmx->pt_desc.addr_range) return 1; if (is_noncanonical_address(data, vcpu)) return 1; -- cgit v1.2.3 From f4d3a902a558a2c058c605e589ce683d699f5333 Mon Sep 17 00:00:00 2001 From: Xiaoyao Li Date: Fri, 27 Aug 2021 15:02:45 +0800 Subject: KVM: VMX: Rename pt_desc.addr_range to pt_desc.num_address_ranges To better self explain the meaning of this field and match the PT_CAP_num_address_ranges constatn. 
Suggested-by: Sean Christopherson Signed-off-by: Xiaoyao Li Message-Id: <20210827070249.924633-4-xiaoyao.li@intel.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/vmx.c | 26 +++++++++++++------------- arch/x86/kvm/vmx/vmx.h | 2 +- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index f8539f911fb7..0337083a57f1 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -1059,8 +1059,8 @@ static void pt_guest_enter(struct vcpu_vmx *vmx) rdmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl); if (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) { wrmsrl(MSR_IA32_RTIT_CTL, 0); - pt_save_msr(&vmx->pt_desc.host, vmx->pt_desc.addr_range); - pt_load_msr(&vmx->pt_desc.guest, vmx->pt_desc.addr_range); + pt_save_msr(&vmx->pt_desc.host, vmx->pt_desc.num_address_ranges); + pt_load_msr(&vmx->pt_desc.guest, vmx->pt_desc.num_address_ranges); } } @@ -1070,8 +1070,8 @@ static void pt_guest_exit(struct vcpu_vmx *vmx) return; if (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) { - pt_save_msr(&vmx->pt_desc.guest, vmx->pt_desc.addr_range); - pt_load_msr(&vmx->pt_desc.host, vmx->pt_desc.addr_range); + pt_save_msr(&vmx->pt_desc.guest, vmx->pt_desc.num_address_ranges); + pt_load_msr(&vmx->pt_desc.host, vmx->pt_desc.num_address_ranges); } /* @@ -1460,16 +1460,16 @@ static int vmx_rtit_ctl_check(struct kvm_vcpu *vcpu, u64 data) * cause a #GP fault. */ value = (data & RTIT_CTL_ADDR0) >> RTIT_CTL_ADDR0_OFFSET; - if ((value && (vmx->pt_desc.addr_range < 1)) || (value > 2)) + if ((value && (vmx->pt_desc.num_address_ranges < 1)) || (value > 2)) return 1; value = (data & RTIT_CTL_ADDR1) >> RTIT_CTL_ADDR1_OFFSET; - if ((value && (vmx->pt_desc.addr_range < 2)) || (value > 2)) + if ((value && (vmx->pt_desc.num_address_ranges < 2)) || (value > 2)) return 1; value = (data & RTIT_CTL_ADDR2) >> RTIT_CTL_ADDR2_OFFSET; - if ((value && (vmx->pt_desc.addr_range < 3)) || (value > 2)) + if ((value && (vmx->pt_desc.num_address_ranges < 3)) || (value > 2)) return 1; value = (data & RTIT_CTL_ADDR3) >> RTIT_CTL_ADDR3_OFFSET; - if ((value && (vmx->pt_desc.addr_range < 4)) || (value > 2)) + if ((value && (vmx->pt_desc.num_address_ranges < 4)) || (value > 2)) return 1; return 0; @@ -1890,7 +1890,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_IA32_RTIT_ADDR0_A ... 
MSR_IA32_RTIT_ADDR3_B: index = msr_info->index - MSR_IA32_RTIT_ADDR0_A; if (!vmx_pt_mode_is_host_guest() || - (index >= 2 * vmx->pt_desc.addr_range)) + (index >= 2 * vmx->pt_desc.num_address_ranges)) return 1; if (index % 2) msr_info->data = vmx->pt_desc.guest.addr_b[index / 2]; @@ -2205,7 +2205,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) if (!pt_can_write_msr(vmx)) return 1; index = msr_info->index - MSR_IA32_RTIT_ADDR0_A; - if (index >= 2 * vmx->pt_desc.addr_range) + if (index >= 2 * vmx->pt_desc.num_address_ranges) return 1; if (is_noncanonical_address(data, vcpu)) return 1; @@ -3881,7 +3881,7 @@ void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu) vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_OUTPUT_BASE, MSR_TYPE_RW, flag); vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_OUTPUT_MASK, MSR_TYPE_RW, flag); vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_CR3_MATCH, MSR_TYPE_RW, flag); - for (i = 0; i < vmx->pt_desc.addr_range; i++) { + for (i = 0; i < vmx->pt_desc.num_address_ranges; i++) { vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_ADDR0_A + i * 2, MSR_TYPE_RW, flag); vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_ADDR0_B + i * 2, MSR_TYPE_RW, flag); } @@ -7131,7 +7131,7 @@ static void update_intel_pt_cfg(struct kvm_vcpu *vcpu) } /* Get the number of configurable Address Ranges for filtering */ - vmx->pt_desc.addr_range = intel_pt_validate_cap(vmx->pt_desc.caps, + vmx->pt_desc.num_address_ranges = intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_num_address_ranges); /* Initialize and clear the no dependency bits */ @@ -7179,7 +7179,7 @@ static void update_intel_pt_cfg(struct kvm_vcpu *vcpu) vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_FABRIC_EN; /* unmask address range configure area */ - for (i = 0; i < vmx->pt_desc.addr_range; i++) + for (i = 0; i < vmx->pt_desc.num_address_ranges; i++) vmx->pt_desc.ctl_bitmask &= ~(0xfULL << (32 + i * 4)); } diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index 592217fd7d92..e7db42e3b0ce 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -62,7 +62,7 @@ struct pt_ctx { struct pt_desc { u64 ctl_bitmask; - u32 addr_range; + u32 num_address_ranges; u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES]; struct pt_ctx host; struct pt_ctx guest; -- cgit v1.2.3 From e099f3eb0e9193752ba71b85f0631b7ad7d3f726 Mon Sep 17 00:00:00 2001 From: Xiaoyao Li Date: Fri, 27 Aug 2021 15:02:46 +0800 Subject: KVM: VMX: RTIT_CTL_BRANCH_EN has no dependency on other CPUID bit Per Intel SDM, RTIT_CTL_BRANCH_EN bit has no dependency on any CPUID leaf 0x14. 
Signed-off-by: Xiaoyao Li Message-Id: <20210827070249.924633-5-xiaoyao.li@intel.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/vmx.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 0337083a57f1..79d6af09dbf4 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -7136,7 +7136,8 @@ static void update_intel_pt_cfg(struct kvm_vcpu *vcpu) /* Initialize and clear the no dependency bits */ vmx->pt_desc.ctl_bitmask = ~(RTIT_CTL_TRACEEN | RTIT_CTL_OS | - RTIT_CTL_USR | RTIT_CTL_TSC_EN | RTIT_CTL_DISRETC); + RTIT_CTL_USR | RTIT_CTL_TSC_EN | RTIT_CTL_DISRETC | + RTIT_CTL_BRANCH_EN); /* * If CPUID.(EAX=14H,ECX=0):EBX[0]=1 CR3Filter can be set otherwise @@ -7154,12 +7155,11 @@ static void update_intel_pt_cfg(struct kvm_vcpu *vcpu) RTIT_CTL_CYC_THRESH | RTIT_CTL_PSB_FREQ); /* - * If CPUID.(EAX=14H,ECX=0):EBX[3]=1 MTCEn BranchEn and - * MTCFreq can be set + * If CPUID.(EAX=14H,ECX=0):EBX[3]=1 MTCEn and MTCFreq can be set */ if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc)) vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_MTC_EN | - RTIT_CTL_BRANCH_EN | RTIT_CTL_MTC_RANGE); + RTIT_CTL_MTC_RANGE); /* If CPUID.(EAX=14H,ECX=0):EBX[4]=1 FUPonPTW and PTWEn can be set */ if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_ptwrite)) -- cgit v1.2.3 From 610265ea3da117db435868bd109f1861534a5634 Mon Sep 17 00:00:00 2001 From: David Matlack Date: Tue, 19 Oct 2021 16:22:23 +0000 Subject: KVM: x86/mmu: Rename slot_handle_leaf to slot_handle_level_4k slot_handle_leaf is a misnomer because it only operates on 4K SPTEs whereas "leaf" is used to describe any valid terminal SPTE (4K or large page). Rename slot_handle_leaf to slot_handle_level_4k to avoid confusion. Making this change makes it more obvious there is a benign discrepency between the legacy MMU and the TDP MMU when it comes to dirty logging. The legacy MMU only iterates through 4K SPTEs when zapping for collapsing and when clearing D-bits. The TDP MMU, on the other hand, iterates through SPTEs on all levels. The TDP MMU behavior of zapping SPTEs at all levels is technically overkill for its current dirty logging implementation, which always demotes to 4k SPTES, but both the TDP MMU and legacy MMU zap if and only if the SPTE can be replaced by a larger page, i.e. will not spuriously zap 2m (or larger) SPTEs. Opportunistically add comments to explain this discrepency in the code. 
Signed-off-by: David Matlack Message-Id: <20211019162223.3935109-1-dmatlack@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 43ee10181459..0460301d0285 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -5440,8 +5440,8 @@ slot_handle_level(struct kvm *kvm, const struct kvm_memory_slot *memslot, } static __always_inline bool -slot_handle_leaf(struct kvm *kvm, const struct kvm_memory_slot *memslot, - slot_level_handler fn, bool flush_on_yield) +slot_handle_level_4k(struct kvm *kvm, const struct kvm_memory_slot *memslot, + slot_level_handler fn, bool flush_on_yield) { return slot_handle_level(kvm, memslot, fn, PG_LEVEL_4K, PG_LEVEL_4K, flush_on_yield); @@ -5821,7 +5821,12 @@ void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm, if (kvm_memslots_have_rmaps(kvm)) { write_lock(&kvm->mmu_lock); - flush = slot_handle_leaf(kvm, slot, kvm_mmu_zap_collapsible_spte, true); + /* + * Zap only 4k SPTEs since the legacy MMU only supports dirty + * logging at a 4k granularity and never creates collapsible + * 2m SPTEs during dirty logging. + */ + flush = slot_handle_level_4k(kvm, slot, kvm_mmu_zap_collapsible_spte, true); if (flush) kvm_arch_flush_remote_tlbs_memslot(kvm, slot); write_unlock(&kvm->mmu_lock); @@ -5858,8 +5863,11 @@ void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm, if (kvm_memslots_have_rmaps(kvm)) { write_lock(&kvm->mmu_lock); - flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty, - false); + /* + * Clear dirty bits only on 4k SPTEs since the legacy MMU only + * support dirty logging at a 4k granularity. + */ + flush = slot_handle_level_4k(kvm, memslot, __rmap_clear_dirty, false); write_unlock(&kvm->mmu_lock); } -- cgit v1.2.3 From 540c7abe61cc5e81a3d17fe02bce94f6cbf9dce0 Mon Sep 17 00:00:00 2001 From: Wanpeng Li Date: Tue, 19 Oct 2021 01:12:39 -0700 Subject: KVM: vPMU: Fill get_msr MSR_CORE_PERF_GLOBAL_OVF_CTRL w/ 0 SDM section 18.2.3 mentioned that: "IA32_PERF_GLOBAL_OVF_CTL MSR allows software to clear overflow indicator(s) of any general-purpose or fixed-function counters via a single WRMSR." It is R/W mentioned by SDM, we read this msr on bare-metal during perf testing, the value is always 0 for ICX/SKX boxes on hands. Let's fill get_msr MSR_CORE_PERF_GLOBAL_OVF_CTRL w/ 0 as hardware behavior and drop global_ovf_ctrl variable. 
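Because the semantics being emulated are easy to misread (the register is write-1-to-clear but reads back as zero), a small user-space model of the behavior the patch implements may help; the helper names are invented for illustration and the initial status value is arbitrary, this is not KVM code.

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t global_status = 0x5;   /* pretend GP counters 0 and 2 overflowed */

    static void write_global_ovf_ctrl(uint64_t data)
    {
        global_status &= ~data;            /* write-1-to-clear the overflow bits */
    }

    static uint64_t read_global_ovf_ctrl(void)
    {
        return 0;                          /* matches the observed bare-metal reads */
    }

    int main(void)
    {
        write_global_ovf_ctrl(0x1);        /* clear counter 0's overflow indicator */
        printf("GLOBAL_STATUS=%#llx, GLOBAL_OVF_CTRL reads %#llx\n",
               (unsigned long long)global_status,
               (unsigned long long)read_global_ovf_ctrl());
        return 0;
    }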
Tested-by: Like Xu Signed-off-by: Wanpeng Li Message-Id: <1634631160-67276-2-git-send-email-wanpengli@tencent.com> Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 1 - arch/x86/kvm/vmx/pmu_intel.c | 6 ++---- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index b4fece3bb061..1c523b5c99d1 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -499,7 +499,6 @@ struct kvm_pmu { u64 fixed_ctr_ctrl; u64 global_ctrl; u64 global_status; - u64 global_ovf_ctrl; u64 counter_bitmask[2]; u64 global_ctrl_mask; u64 global_ovf_ctrl_mask; diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c index 10cc4f65c4ef..b8e0d21b7c8a 100644 --- a/arch/x86/kvm/vmx/pmu_intel.c +++ b/arch/x86/kvm/vmx/pmu_intel.c @@ -365,7 +365,7 @@ static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) msr_info->data = pmu->global_ctrl; return 0; case MSR_CORE_PERF_GLOBAL_OVF_CTRL: - msr_info->data = pmu->global_ovf_ctrl; + msr_info->data = 0; return 0; default: if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) || @@ -423,7 +423,6 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) if (!(data & pmu->global_ovf_ctrl_mask)) { if (!msr_info->host_initiated) pmu->global_status &= ~data; - pmu->global_ovf_ctrl = data; return 0; } break; @@ -588,8 +587,7 @@ static void intel_pmu_reset(struct kvm_vcpu *vcpu) pmc->counter = 0; } - pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status = - pmu->global_ovf_ctrl = 0; + pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status = 0; intel_pmu_release_guest_lbr_event(vcpu); } -- cgit v1.2.3 From 4dfe4f40d845cdca655b27bf207959855f3e2d62 Mon Sep 17 00:00:00 2001 From: Junaid Shahid Date: Tue, 19 Oct 2021 18:06:27 -0700 Subject: kvm: x86: mmu: Make NX huge page recovery period configurable Currently, the NX huge page recovery thread wakes up every minute and zaps 1/nx_huge_pages_recovery_ratio of the total number of split NX huge pages at a time. This is intended to ensure that only a relatively small number of pages get zapped at a time. But for very large VMs (or more specifically, VMs with a large number of executable pages), a period of 1 minute could still result in this number being too high (unless the ratio is changed significantly, but that can result in split pages lingering on for too long). This change makes the period configurable instead of fixing it at 1 minute. Users of large VMs can then adjust the period and/or the ratio to reduce the number of pages zapped at one time while still maintaining the same overall duration for cycling through the entire list. By default, KVM derives a period from the ratio such that a page will remain on the list for 1 hour on average. Signed-off-by: Junaid Shahid Message-Id: <20211020010627.305925-1-junaids@google.com> Signed-off-by: Paolo Bonzini --- Documentation/admin-guide/kernel-parameters.txt | 9 +++++- arch/x86/kvm/mmu/mmu.c | 41 ++++++++++++++++++------- 2 files changed, 38 insertions(+), 12 deletions(-) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 91ba391f9b32..f780844c8390 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -2353,7 +2353,14 @@ [KVM] Controls how many 4KiB pages are periodically zapped back to huge pages. 
0 disables the recovery, otherwise if the value is N KVM will zap 1/Nth of the 4KiB pages every - minute. The default is 60. + period (see below). The default is 60. + + kvm.nx_huge_pages_recovery_period_ms= + [KVM] Controls the time period at which KVM zaps 4KiB pages + back to huge pages. If the value is a non-zero N, KVM will + zap a portion (see ratio above) of the pages every N msecs. + If the value is 0 (the default), KVM will pick a period based + on the ratio, such that a page is zapped after 1 hour on average. kvm-amd.nested= [KVM,AMD] Allow nested virtualization in KVM/SVM. Default is 1 (enabled) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 0460301d0285..f9f228963088 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -58,6 +58,7 @@ extern bool itlb_multihit_kvm_mitigation; int __read_mostly nx_huge_pages = -1; +static uint __read_mostly nx_huge_pages_recovery_period_ms; #ifdef CONFIG_PREEMPT_RT /* Recovery can cause latency spikes, disable it for PREEMPT_RT. */ static uint __read_mostly nx_huge_pages_recovery_ratio = 0; @@ -66,23 +67,26 @@ static uint __read_mostly nx_huge_pages_recovery_ratio = 60; #endif static int set_nx_huge_pages(const char *val, const struct kernel_param *kp); -static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp); +static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel_param *kp); static const struct kernel_param_ops nx_huge_pages_ops = { .set = set_nx_huge_pages, .get = param_get_bool, }; -static const struct kernel_param_ops nx_huge_pages_recovery_ratio_ops = { - .set = set_nx_huge_pages_recovery_ratio, +static const struct kernel_param_ops nx_huge_pages_recovery_param_ops = { + .set = set_nx_huge_pages_recovery_param, .get = param_get_uint, }; module_param_cb(nx_huge_pages, &nx_huge_pages_ops, &nx_huge_pages, 0644); __MODULE_PARM_TYPE(nx_huge_pages, "bool"); -module_param_cb(nx_huge_pages_recovery_ratio, &nx_huge_pages_recovery_ratio_ops, +module_param_cb(nx_huge_pages_recovery_ratio, &nx_huge_pages_recovery_param_ops, &nx_huge_pages_recovery_ratio, 0644); __MODULE_PARM_TYPE(nx_huge_pages_recovery_ratio, "uint"); +module_param_cb(nx_huge_pages_recovery_period_ms, &nx_huge_pages_recovery_param_ops, + &nx_huge_pages_recovery_period_ms, 0644); +__MODULE_PARM_TYPE(nx_huge_pages_recovery_period_ms, "uint"); static bool __read_mostly force_flush_and_sync_on_reuse; module_param_named(flush_on_reuse, force_flush_and_sync_on_reuse, bool, 0644); @@ -6145,18 +6149,24 @@ void kvm_mmu_module_exit(void) mmu_audit_disable(); } -static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp) +static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel_param *kp) { - unsigned int old_val; + bool was_recovery_enabled, is_recovery_enabled; + uint old_period, new_period; int err; - old_val = nx_huge_pages_recovery_ratio; + was_recovery_enabled = nx_huge_pages_recovery_ratio; + old_period = nx_huge_pages_recovery_period_ms; + err = param_set_uint(val, kp); if (err) return err; - if (READ_ONCE(nx_huge_pages) && - !old_val && nx_huge_pages_recovery_ratio) { + is_recovery_enabled = nx_huge_pages_recovery_ratio; + new_period = nx_huge_pages_recovery_period_ms; + + if (READ_ONCE(nx_huge_pages) && is_recovery_enabled && + (!was_recovery_enabled || old_period > new_period)) { struct kvm *kvm; mutex_lock(&kvm_lock); @@ -6219,8 +6229,17 @@ static void kvm_recover_nx_lpages(struct kvm *kvm) static long 
get_nx_lpage_recovery_timeout(u64 start_time) { - return READ_ONCE(nx_huge_pages) && READ_ONCE(nx_huge_pages_recovery_ratio) - ? start_time + 60 * HZ - get_jiffies_64() + uint ratio = READ_ONCE(nx_huge_pages_recovery_ratio); + uint period = READ_ONCE(nx_huge_pages_recovery_period_ms); + + if (!period && ratio) { + /* Make sure the period is not less than one second. */ + ratio = min(ratio, 3600u); + period = 60 * 60 * 1000 / ratio; + } + + return READ_ONCE(nx_huge_pages) && ratio + ? start_time + msecs_to_jiffies(period) - get_jiffies_64() : MAX_SCHEDULE_TIMEOUT; } -- cgit v1.2.3 From 9dadfc4a6145e163cd18fb4bdea5ca4f44f3c1ad Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Mon, 18 Oct 2021 11:39:28 -0700 Subject: KVM: x86: Add vendor name to kvm_x86_ops, use it for error messages Paul pointed out the error messages when KVM fails to load are unhelpful in understanding exactly what went wrong if userspace probes the "wrong" module. Add a mandatory kvm_x86_ops field to track vendor module names, kvm_intel and kvm_amd, and use the name for relevant error message when KVM fails to load so that the user knows which module failed to load. Opportunistically tweak the "disabled by bios" error message to clarify that _support_ was disabled, not that the module itself was magically disabled by BIOS. Suggested-by: Paul Menzel Signed-off-by: Sean Christopherson Message-Id: <20211018183929.897461-2-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 2 ++ arch/x86/kvm/svm/svm.c | 2 ++ arch/x86/kvm/vmx/vmx.c | 2 ++ arch/x86/kvm/x86.c | 8 +++++--- 4 files changed, 11 insertions(+), 3 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 1c523b5c99d1..d41699e89d1f 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1301,6 +1301,8 @@ static inline u16 kvm_lapic_irq_dest_mode(bool dest_mode_logical) } struct kvm_x86_ops { + const char *name; + int (*hardware_enable)(void); void (*hardware_disable)(void); void (*hardware_unsetup)(void); diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 89077160d463..cee4915d2ce3 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -4580,6 +4580,8 @@ static int svm_vm_init(struct kvm *kvm) } static struct kvm_x86_ops svm_x86_ops __initdata = { + .name = "kvm_amd", + .hardware_unsetup = svm_hardware_teardown, .hardware_enable = svm_hardware_enable, .hardware_disable = svm_hardware_disable, diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 79d6af09dbf4..2f677e72d864 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -7570,6 +7570,8 @@ static bool vmx_check_apicv_inhibit_reasons(ulong bit) } static struct kvm_x86_ops vmx_x86_ops __initdata = { + .name = "kvm_intel", + .hardware_unsetup = hardware_unsetup, .hardware_enable = hardware_enable, diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index ac6d31ec909f..ac386c085dd0 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -8532,18 +8532,20 @@ int kvm_arch_init(void *opaque) int r; if (kvm_x86_ops.hardware_enable) { - printk(KERN_ERR "kvm: already loaded the other module\n"); + pr_err("kvm: already loaded vendor module '%s'\n", kvm_x86_ops.name); r = -EEXIST; goto out; } if (!ops->cpu_has_kvm_support()) { - pr_err_ratelimited("kvm: no hardware support\n"); + pr_err_ratelimited("kvm: no hardware support for '%s'\n", + ops->runtime_ops->name); r = -EOPNOTSUPP; goto out; } if (ops->disabled_by_bios()) { - pr_err_ratelimited("kvm: disabled 
by bios\n"); + pr_err_ratelimited("kvm: support for '%s' disabled by bios\n", + ops->runtime_ops->name); r = -EOPNOTSUPP; goto out; } -- cgit v1.2.3 From 9ae7f6c9b51e297a7f5c2eb8ff5de42f8402eb71 Mon Sep 17 00:00:00 2001 From: Wanpeng Li Date: Wed, 20 Oct 2021 03:13:56 -0700 Subject: KVM: emulate: Comment on difference between RDPMC implementation and manual SDM mentioned that, RDPMC: IF (((CR4.PCE = 1) or (CPL = 0) or (CR0.PE = 0)) and (ECX indicates a supported counter)) THEN EAX := counter[31:0]; EDX := ZeroExtend(counter[MSCB:32]); ELSE (* ECX is not valid or CR4.PCE is 0 and CPL is 1, 2, or 3 and CR0.PE is 1 *) #GP(0); FI; Let's add a comment why CR0.PE isn't tested since it's impossible for CPL to be >0 if CR0.PE=0. Signed-off-by: Wanpeng Li Message-Id: <1634724836-73721-1-git-send-email-wanpengli@tencent.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/emulate.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index c589ac832265..532791ffcbb9 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -4223,6 +4223,11 @@ static int check_rdpmc(struct x86_emulate_ctxt *ctxt) if (enable_vmware_backdoor && is_vmware_backdoor_pmc(rcx)) return X86EMUL_CONTINUE; + /* + * If CR4.PCE is set, the SDM requires CPL=0 or CR0.PE=0. The CR0.PE + * check however is unnecessary because CPL is always 0 outside + * protected mode. + */ if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) || ctxt->ops->check_pmc(ctxt, rcx)) return emulate_gp(ctxt, 0); -- cgit v1.2.3 From 413eaa4ecd0f115ab2178d924ae75d3409b4ff5c Mon Sep 17 00:00:00 2001 From: Michael Roth Date: Wed, 6 Oct 2021 15:36:17 -0500 Subject: KVM: selftests: set CPUID before setting sregs in vcpu creation Recent kernels have checks to ensure the GPA values in special-purpose registers like CR3 are within the maximum physical address range and don't overlap with anything in the upper/reserved range. In the case of SEV kselftest guests booting directly into 64-bit mode, CR3 needs to be initialized to the GPA of the page table root, with the encryption bit set. The kernel accounts for this encryption bit by removing it from reserved bit range when the guest advertises the bit position via KVM_SET_CPUID*, but kselftests currently call KVM_SET_SREGS as part of vm_vcpu_add_default(), before KVM_SET_CPUID*. As a result, KVM_SET_SREGS will return an error in these cases. Address this by moving vcpu_set_cpuid() (which calls KVM_SET_CPUID*) ahead of vcpu_setup() (which calls KVM_SET_SREGS). While there, address a typo in the assertion that triggers when KVM_SET_SREGS fails. 
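To see why the ordering matters, consider a rough model of the reserved-GPA check described above. The function name, the 48-bit MAXPHYADDR and the C-bit at position 51 are illustrative assumptions, not values taken from KVM or the patch.

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * Before KVM_SET_CPUID* advertises the encryption-bit position, that bit
     * still looks like a reserved physical-address bit, so KVM_SET_SREGS
     * rejects a CR3 value that has it set.
     */
    static bool cr3_is_legal(uint64_t cr3, unsigned int maxphyaddr, int c_bit)
    {
        uint64_t reserved = ~((1ULL << maxphyaddr) - 1);

        if (c_bit >= 0)
            reserved &= ~(1ULL << c_bit);   /* advertised C-bit is not reserved */

        return !(cr3 & reserved);
    }

    /*
     * e.g. with a low page-table root GPA "root":
     *   cr3_is_legal(root | (1ULL << 51), 48, -1) -> false (CPUID not set yet)
     *   cr3_is_legal(root | (1ULL << 51), 48, 51) -> true  (after KVM_SET_CPUID*)
     */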
Suggested-by: Sean Christopherson Signed-off-by: Michael Roth Message-Id: <20211006203617.13045-1-michael.roth@amd.com> Signed-off-by: Paolo Bonzini Reviewed-by: Nathan Tempelman --- tools/testing/selftests/kvm/lib/kvm_util.c | 2 +- tools/testing/selftests/kvm/lib/x86_64/processor.c | 4 +--- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index 0fe66ca6139a..041004c0fda7 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c @@ -1792,7 +1792,7 @@ void vcpu_sregs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs) void vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs) { int ret = _vcpu_sregs_set(vm, vcpuid, sregs); - TEST_ASSERT(ret == 0, "KVM_RUN IOCTL failed, " + TEST_ASSERT(ret == 0, "KVM_SET_SREGS IOCTL failed, " "rc: %i errno: %i", ret, errno); } diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c index 28cb881f440d..82c39db91369 100644 --- a/tools/testing/selftests/kvm/lib/x86_64/processor.c +++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c @@ -660,6 +660,7 @@ void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code) /* Create VCPU */ vm_vcpu_add(vm, vcpuid); + vcpu_set_cpuid(vm, vcpuid, kvm_get_supported_cpuid()); vcpu_setup(vm, vcpuid); /* Setup guest general purpose registers */ @@ -672,9 +673,6 @@ void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code) /* Setup the MP state */ mp_state.mp_state = 0; vcpu_set_mp_state(vm, vcpuid, &mp_state); - - /* Setup supported CPUIDs */ - vcpu_set_cpuid(vm, vcpuid, kvm_get_supported_cpuid()); } /* -- cgit v1.2.3 From 552617382c197949ff965a3559da8952bf3c1fa5 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Sun, 19 Sep 2021 10:42:45 +0800 Subject: KVM: X86: Don't reset mmu context when X86_CR4_PCIDE 1->0 X86_CR4_PCIDE doesn't participate in kvm_mmu_role, so the mmu context doesn't need to be reset. It is only required to flush all the guest tlb. Signed-off-by: Lai Jiangshan Reviewed-by: Sean Christopherson Message-Id: <20210919024246.89230-2-jiangshanlai@gmail.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index ac386c085dd0..c37694ccc015 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1022,9 +1022,10 @@ EXPORT_SYMBOL_GPL(kvm_is_valid_cr4); void kvm_post_set_cr4(struct kvm_vcpu *vcpu, unsigned long old_cr4, unsigned long cr4) { - if (((cr4 ^ old_cr4) & KVM_MMU_CR4_ROLE_BITS) || - (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE))) + if ((cr4 ^ old_cr4) & KVM_MMU_CR4_ROLE_BITS) kvm_mmu_reset_context(vcpu); + else if (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)) + kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu); } EXPORT_SYMBOL_GPL(kvm_post_set_cr4); -- cgit v1.2.3 From a91a7c7096005113d8e749fd8dfdd3e1eecee263 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Sun, 19 Sep 2021 10:42:46 +0800 Subject: KVM: X86: Don't reset mmu context when toggling X86_CR4_PGE X86_CR4_PGE doesn't participate in kvm_mmu_role, so the mmu context doesn't need to be reset. It is only required to flush all the guest tlb. It is also inconsistent that X86_CR4_PGE is in KVM_MMU_CR4_ROLE_BITS while kvm_mmu_role doesn't use X86_CR4_PGE. So X86_CR4_PGE is also removed from KVM_MMU_CR4_ROLE_BITS. 
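Taken together, the two patches above leave CR4 handling with a simple decision: role-bit changes reset the MMU, while PGE toggles and PCIDE 1 -> 0 transitions only flush the guest TLB. A stand-alone restatement of that decision follows; it mirrors, rather than replaces, the kvm_post_set_cr4() hunk in the patch body, and the bit positions are per the SDM.

    #include <stdbool.h>

    #define X86_CR4_PSE    (1UL << 4)
    #define X86_CR4_PAE    (1UL << 5)
    #define X86_CR4_PGE    (1UL << 7)
    #define X86_CR4_LA57   (1UL << 12)
    #define X86_CR4_PCIDE  (1UL << 17)
    #define X86_CR4_SMEP   (1UL << 20)
    #define X86_CR4_SMAP   (1UL << 21)
    #define X86_CR4_PKE    (1UL << 22)

    /* role bits after this patch; note that PGE is intentionally absent */
    #define MMU_CR4_ROLE_BITS (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_LA57 | \
                               X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE)

    static bool cr4_needs_mmu_reset(unsigned long old_cr4, unsigned long cr4)
    {
        return (cr4 ^ old_cr4) & MMU_CR4_ROLE_BITS;
    }

    static bool cr4_needs_guest_tlb_flush(unsigned long old_cr4, unsigned long cr4)
    {
        return ((cr4 ^ old_cr4) & X86_CR4_PGE) ||
               (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE));
    }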
Signed-off-by: Lai Jiangshan Reviewed-by: Sean Christopherson Message-Id: <20210919024246.89230-3-jiangshanlai@gmail.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu.h | 5 ++--- arch/x86/kvm/x86.c | 3 ++- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index c921619f7dc9..3456f4d0eaeb 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -44,9 +44,8 @@ #define PT32_ROOT_LEVEL 2 #define PT32E_ROOT_LEVEL 3 -#define KVM_MMU_CR4_ROLE_BITS (X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE | \ - X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE | \ - X86_CR4_LA57) +#define KVM_MMU_CR4_ROLE_BITS (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_LA57 | \ + X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE) #define KVM_MMU_CR0_ROLE_BITS (X86_CR0_PG | X86_CR0_WP) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index c37694ccc015..f55654158836 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1024,7 +1024,8 @@ void kvm_post_set_cr4(struct kvm_vcpu *vcpu, unsigned long old_cr4, unsigned lon { if ((cr4 ^ old_cr4) & KVM_MMU_CR4_ROLE_BITS) kvm_mmu_reset_context(vcpu); - else if (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)) + else if (((cr4 ^ old_cr4) & X86_CR4_PGE) || + (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE))) kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu); } EXPORT_SYMBOL_GPL(kvm_post_set_cr4); -- cgit v1.2.3 From e45e9e3998f0001079b09555db5bb3b4257f6746 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Tue, 19 Oct 2021 19:01:51 +0800 Subject: KVM: X86: Fix tlb flush for tdp in kvm_invalidate_pcid() The KVM doesn't know whether any TLB for a specific pcid is cached in the CPU when tdp is enabled. So it is better to flush all the guest TLB when invalidating any single PCID context. The case is very rare or even impossible since KVM generally doesn't intercept CR3 write or INVPCID instructions when tdp is enabled, so the fix is mostly for the sake of overall robustness. Signed-off-by: Lai Jiangshan Message-Id: <20211019110154.4091-2-jiangshanlai@gmail.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index f55654158836..f7806e3f3019 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1073,6 +1073,18 @@ static void kvm_invalidate_pcid(struct kvm_vcpu *vcpu, unsigned long pcid) unsigned long roots_to_free = 0; int i; + /* + * MOV CR3 and INVPCID are usually not intercepted when using TDP, but + * this is reachable when running EPT=1 and unrestricted_guest=0, and + * also via the emulator. KVM's TDP page tables are not in the scope of + * the invalidation, but the guest's TLB entries need to be flushed as + * the CPU may have cached entries in its TLB for the target PCID. + */ + if (unlikely(tdp_enabled)) { + kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu); + return; + } + /* * If neither the current CR3 nor any of the prev_roots use the given * PCID, then nothing needs to be done here because a resync will -- cgit v1.2.3 From 509bfe3d979672cd69c318d520420cf95b474fd9 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Tue, 19 Oct 2021 19:01:52 +0800 Subject: KVM: X86: Cache CR3 in prev_roots when PCID is disabled The commit 21823fbda5522 ("KVM: x86: Invalidate all PGDs for the current PCID on MOV CR3 w/ flush") invalidates all PGDs for the specific PCID and in the case of PCID is disabled, it includes all PGDs in the prev_roots and the commit made prev_roots totally unused in this case. 
Not using prev_roots fixes a problem when CR4.PCIDE is changed 0 -> 1 before the said commit: (CR4.PCIDE=0, CR4.PGE=1; CR3=cr3_a; the page for the guest RIP is global; cr3_b is cached in prev_roots) modify page tables under cr3_b the shadow root of cr3_b is unsync in kvm INVPCID single context the guest expects the TLB is clean for PCID=0 change CR4.PCIDE 0 -> 1 switch to cr3_b with PCID=0,NOFLUSH=1 No sync in kvm, cr3_b is still unsync in kvm jump to the page that was modified in step 1 shadow page tables point to the wrong page It is a very unlikely case, but it shows that stale prev_roots can be a problem after CR4.PCIDE changes from 0 to 1. However, to fix this case, the commit disabled caching CR3 in prev_roots altogether when PCID is disabled. Not all CPUs have PCID; especially the PCID support for AMD CPUs is kind of recent. To restore the prev_roots optimization for CR4.PCIDE=0, flush the whole MMU (including all prev_roots) when CR4.PCIDE changes. Signed-off-by: Lai Jiangshan Message-Id: <20211019110154.4091-3-jiangshanlai@gmail.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 29 +++++++++++++++++++++++++++-- 1 file changed, 27 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index f7806e3f3019..9d25ef7d4d53 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1022,10 +1022,27 @@ EXPORT_SYMBOL_GPL(kvm_is_valid_cr4); void kvm_post_set_cr4(struct kvm_vcpu *vcpu, unsigned long old_cr4, unsigned long cr4) { + /* + * If any role bit is changed, the MMU needs to be reset. + * + * If CR4.PCIDE is changed 1 -> 0, the guest TLB must be flushed. + * If CR4.PCIDE is changed 0 -> 1, there is no need to flush the TLB + * according to the SDM; however, stale prev_roots could be reused + * incorrectly in the future after a MOV to CR3 with NOFLUSH=1, so we + * free them all. KVM_REQ_MMU_RELOAD is fit for the both cases; it + * is slow, but changing CR4.PCIDE is a rare case. + * + * If CR4.PGE is changed, the guest TLB must be flushed. + * + * Note: resetting MMU is a superset of KVM_REQ_MMU_RELOAD and + * KVM_REQ_MMU_RELOAD is a superset of KVM_REQ_TLB_FLUSH_GUEST, hence + * the usage of "else if". + */ if ((cr4 ^ old_cr4) & KVM_MMU_CR4_ROLE_BITS) kvm_mmu_reset_context(vcpu); - else if (((cr4 ^ old_cr4) & X86_CR4_PGE) || - (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE))) + else if ((cr4 ^ old_cr4) & X86_CR4_PCIDE) + kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu); + else if ((cr4 ^ old_cr4) & X86_CR4_PGE) kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu); } EXPORT_SYMBOL_GPL(kvm_post_set_cr4); @@ -1095,6 +1112,14 @@ static void kvm_invalidate_pcid(struct kvm_vcpu *vcpu, unsigned long pcid) kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); } + /* + * If PCID is disabled, there is no need to free prev_roots even if the + * PCIDs for them are also 0, because MOV to CR3 always flushes the TLB + * with PCIDE=0. 
+ */ + if (!kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)) + return; + for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) if (kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd) == pcid) roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i); -- cgit v1.2.3 From 264d3dc1d3dca13b7eaf0c4fa7a4b2c91a5e056a Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Tue, 19 Oct 2021 19:01:53 +0800 Subject: KVM: X86: pair smp_wmb() of mmu_try_to_unsync_pages() with smp_rmb() The commit 578e1c4db2213 ("kvm: x86: Avoid taking MMU lock in kvm_mmu_sync_roots if no sync is needed") added smp_wmb() in mmu_try_to_unsync_pages(), but the corresponding smp_load_acquire() isn't used on the load of SPTE.W. smp_load_acquire() orders _subsequent_ loads after sp->is_unsync; it does not order _earlier_ loads before the load of sp->is_unsync. This has no functional change; smp_rmb() is a NOP on x86, and no compiler barrier is required because there is a VMEXIT between the load of SPTE.W and kvm_mmu_snc_roots. Cc: Junaid Shahid Signed-off-by: Lai Jiangshan Message-Id: <20211019110154.4091-4-jiangshanlai@gmail.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 41 +++++++++++++++++++++++++++-------------- 1 file changed, 27 insertions(+), 14 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index f9f228963088..cb7622e93419 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -2669,8 +2669,8 @@ int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot, * (sp->unsync = true) * * The write barrier below ensures that 1.1 happens before 1.2 and thus - * the situation in 2.4 does not arise. The implicit barrier in 2.2 - * pairs with this write barrier. + * the situation in 2.4 does not arise. It pairs with the read barrier + * in is_unsync_root(), placed between 2.1's load of SPTE.W and 2.3. */ smp_wmb(); @@ -3643,6 +3643,30 @@ err_pml4: #endif } +static bool is_unsync_root(hpa_t root) +{ + struct kvm_mmu_page *sp; + + /* + * The read barrier orders the CPU's read of SPTE.W during the page table + * walk before the reads of sp->unsync/sp->unsync_children here. + * + * Even if another CPU was marking the SP as unsync-ed simultaneously, + * any guest page table changes are not guaranteed to be visible anyway + * until this VCPU issues a TLB flush strictly after those changes are + * made. We only need to ensure that the other CPU sets these flags + * before any actual changes to the page tables are made. The comments + * in mmu_try_to_unsync_pages() describe what could go wrong if this + * requirement isn't satisfied. + */ + smp_rmb(); + sp = to_shadow_page(root); + if (sp->unsync || sp->unsync_children) + return true; + + return false; +} + void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu) { int i; @@ -3660,18 +3684,7 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu) hpa_t root = vcpu->arch.mmu->root_hpa; sp = to_shadow_page(root); - /* - * Even if another CPU was marking the SP as unsync-ed - * simultaneously, any guest page table changes are not - * guaranteed to be visible anyway until this VCPU issues a TLB - * flush strictly after those changes are made. We only need to - * ensure that the other CPU sets these flags before any actual - * changes to the page tables are made. The comments in - * mmu_try_to_unsync_pages() describe what could go wrong if - * this requirement isn't satisfied. 
- */ - if (!smp_load_acquire(&sp->unsync) && - !smp_load_acquire(&sp->unsync_children)) + if (!is_unsync_root(root)) return; write_lock(&vcpu->kvm->mmu_lock); -- cgit v1.2.3 From 61b05a9fd4aec2dff0bbdf9d16ee000b24b33f41 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Tue, 19 Oct 2021 19:01:54 +0800 Subject: KVM: X86: Don't unload MMU in kvm_vcpu_flush_tlb_guest() kvm_mmu_unload() destroys all the PGD caches. Use the lighter kvm_mmu_sync_roots() and kvm_mmu_sync_prev_roots() instead. Signed-off-by: Lai Jiangshan Message-Id: <20211019110154.4091-5-jiangshanlai@gmail.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu.h | 1 + arch/x86/kvm/mmu/mmu.c | 16 ++++++++++++++++ arch/x86/kvm/x86.c | 11 +++++------ 3 files changed, 22 insertions(+), 6 deletions(-) diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index 3456f4d0eaeb..9ae6168d381e 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -79,6 +79,7 @@ int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code, int kvm_mmu_load(struct kvm_vcpu *vcpu); void kvm_mmu_unload(struct kvm_vcpu *vcpu); void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu); +void kvm_mmu_sync_prev_roots(struct kvm_vcpu *vcpu); static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu) { diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index cb7622e93419..28d06180079d 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -3647,6 +3647,9 @@ static bool is_unsync_root(hpa_t root) { struct kvm_mmu_page *sp; + if (!VALID_PAGE(root)) + return false; + /* * The read barrier orders the CPU's read of SPTE.W during the page table * walk before the reads of sp->unsync/sp->unsync_children here. @@ -3714,6 +3717,19 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu) write_unlock(&vcpu->kvm->mmu_lock); } +void kvm_mmu_sync_prev_roots(struct kvm_vcpu *vcpu) +{ + unsigned long roots_to_free = 0; + int i; + + for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) + if (is_unsync_root(vcpu->arch.mmu->prev_roots[i].hpa)) + roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i); + + /* sync prev_roots by simply freeing them */ + kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, roots_to_free); +} + static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gpa_t vaddr, u32 access, struct x86_exception *exception) { diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 9d25ef7d4d53..3a74540caca2 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3245,15 +3245,14 @@ static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu) ++vcpu->stat.tlb_flush; if (!tdp_enabled) { - /* + /* * A TLB flush on behalf of the guest is equivalent to * INVPCID(all), toggling CR4.PGE, etc., which requires - * a forced sync of the shadow page tables. Unload the - * entire MMU here and the subsequent load will sync the - * shadow page tables, and also flush the TLB. + * a forced sync of the shadow page tables. Ensure all the + * roots are synced and the guest TLB in hardware is clean. */ - kvm_mmu_unload(vcpu); - return; + kvm_mmu_sync_roots(vcpu); + kvm_mmu_sync_prev_roots(vcpu); } static_call(kvm_x86_tlb_flush_guest)(vcpu); -- cgit v1.2.3 From bc3b3c1002ea684e618ff6d8c387b1b8b319f140 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 21 Oct 2021 18:00:03 -0700 Subject: KVM: x86/mmu: Drop a redundant, broken remote TLB flush A recent commit to fix the calls to kvm_flush_remote_tlbs_with_address() in kvm_zap_gfn_range() inadvertantly added yet another flush instead of fixing the existing flush. Drop the redundant flush, and fix the params for the existing flush. 
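The "fix the params" part is easy to misread: kvm_flush_remote_tlbs_with_address() takes a starting GFN and a page count, not an end GFN. A tiny user-space model of the corrected call shape, with a stand-in function and made-up values for illustration only:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t gfn_t;

    /* stand-in for kvm_flush_remote_tlbs_with_address(kvm, start_gfn, pages) */
    static void flush_with_address(gfn_t start_gfn, gfn_t pages)
    {
        printf("flush %llu pages starting at GFN %#llx\n",
               (unsigned long long)pages, (unsigned long long)start_gfn);
    }

    int main(void)
    {
        gfn_t gfn_start = 0x100, gfn_end = 0x180;    /* half-open range [start, end) */

        /* pass a count, i.e. gfn_end - gfn_start, never gfn_end itself */
        flush_with_address(gfn_start, gfn_end - gfn_start);
        return 0;
    }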
Cc: stable@vger.kernel.org Fixes: 2822da446640 ("KVM: x86/mmu: fix parameters to kvm_flush_remote_tlbs_with_address") Cc: Maxim Levitsky Cc: Maciej S. Szmigiero Signed-off-by: Sean Christopherson Message-Id: <20211022010005.1454978-2-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 28d06180079d..d9f1142b4892 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -5752,13 +5752,11 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end) for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, gfn_start, gfn_end, flush); - if (flush) - kvm_flush_remote_tlbs_with_address(kvm, gfn_start, - gfn_end - gfn_start); } if (flush) - kvm_flush_remote_tlbs_with_address(kvm, gfn_start, gfn_end); + kvm_flush_remote_tlbs_with_address(kvm, gfn_start, + gfn_end - gfn_start); kvm_dec_notifier_count(kvm, gfn_start, gfn_end); -- cgit v1.2.3 From e8be2a5ba86c7d6553a22d76fd21ac2cf665b165 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 21 Oct 2021 18:00:04 -0700 Subject: KVM: x86/mmu: Drop a redundant remote TLB flush in kvm_zap_gfn_range() Remove an unnecessary remote TLB flush in kvm_zap_gfn_range() now that said function holds mmu_lock for write for its entire duration. The flush was added by the now-reverted commit to allow TDP MMU to flush while holding mmu_lock for read, as the transition from write=>read required dropping the lock and thus a pending flush needed to be serviced. Fixes: 5a324c24b638 ("Revert "KVM: x86/mmu: Allow zap gfn range to operate under the mmu read lock"") Cc: Maxim Levitsky Cc: Maciej S. Szmigiero Cc: Ben Gardon Signed-off-by: Sean Christopherson Message-Id: <20211022010005.1454978-3-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index d9f1142b4892..71165a5d1163 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -5743,9 +5743,6 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end) end - 1, true, flush); } } - if (flush) - kvm_flush_remote_tlbs_with_address(kvm, gfn_start, - gfn_end - gfn_start); } if (is_tdp_mmu_enabled(kvm)) { -- cgit v1.2.3 From 21fa324654e4c733aa6538001194eac13d8bec07 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 21 Oct 2021 18:00:05 -0700 Subject: KVM: x86/mmu: Extract zapping of rmaps for gfn range to separate helper Extract the zapping of rmaps, a.k.a. legacy MMU, for a gfn range to a separate helper to clean up the unholy mess that kvm_zap_gfn_range() has become. In addition to deep nesting, the rmaps zapping spreads out the declaration of several variables and is generally a mess. Clean up the mess now so that future work to improve the memslots implementation doesn't need to deal with it. Cc: Maciej S. 
Szmigiero Signed-off-by: Sean Christopherson Message-Id: <20211022010005.1454978-4-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 52 +++++++++++++++++++++++++++++--------------------- 1 file changed, 30 insertions(+), 22 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 71165a5d1163..354d2ca92df4 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -5710,40 +5710,48 @@ void kvm_mmu_uninit_vm(struct kvm *kvm) kvm_mmu_uninit_tdp_mmu(kvm); } +static bool __kvm_zap_rmaps(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end) +{ + const struct kvm_memory_slot *memslot; + struct kvm_memslots *slots; + bool flush = false; + gfn_t start, end; + int i; + + if (!kvm_memslots_have_rmaps(kvm)) + return flush; + + for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { + slots = __kvm_memslots(kvm, i); + kvm_for_each_memslot(memslot, slots) { + start = max(gfn_start, memslot->base_gfn); + end = min(gfn_end, memslot->base_gfn + memslot->npages); + if (start >= end) + continue; + + flush = slot_handle_level_range(kvm, memslot, kvm_zap_rmapp, + PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL, + start, end - 1, true, flush); + } + } + + return flush; +} + /* * Invalidate (zap) SPTEs that cover GFNs from gfn_start and up to gfn_end * (not including it) */ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end) { - struct kvm_memslots *slots; - struct kvm_memory_slot *memslot; + bool flush; int i; - bool flush = false; write_lock(&kvm->mmu_lock); kvm_inc_notifier_count(kvm, gfn_start, gfn_end); - if (kvm_memslots_have_rmaps(kvm)) { - for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { - slots = __kvm_memslots(kvm, i); - kvm_for_each_memslot(memslot, slots) { - gfn_t start, end; - - start = max(gfn_start, memslot->base_gfn); - end = min(gfn_end, memslot->base_gfn + memslot->npages); - if (start >= end) - continue; - - flush = slot_handle_level_range(kvm, - (const struct kvm_memory_slot *) memslot, - kvm_zap_rmapp, PG_LEVEL_4K, - KVM_MAX_HUGEPAGE_LEVEL, start, - end - 1, true, flush); - } - } - } + flush = __kvm_zap_rmaps(kvm, gfn_start, gfn_end); if (is_tdp_mmu_enabled(kvm)) { for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) -- cgit v1.2.3 From 9b4eb77099f6ba37a8da7d7c5d409db309eb995e Mon Sep 17 00:00:00 2001 From: Lukas Bulwahn Date: Fri, 22 Oct 2021 08:15:14 +0200 Subject: riscv: do not select non-existing config ANON_INODES Commit 99cdc6c18c2d ("RISC-V: Add initial skeletal KVM support") selects the config ANON_INODES in config KVM, but the config ANON_INODES is removed since commit 5dd50aaeb185 ("Make anon_inodes unconditional") in 2018. Hence, ./scripts/checkkconfigsymbols.py warns on non-existing symbols: ANON_INODES Referencing files: arch/riscv/kvm/Kconfig Remove selecting the non-existing config ANON_INODES. 
Signed-off-by: Lukas Bulwahn Message-Id: <20211022061514.25946-1-lukas.bulwahn@gmail.com> Signed-off-by: Paolo Bonzini --- arch/riscv/kvm/Kconfig | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/riscv/kvm/Kconfig b/arch/riscv/kvm/Kconfig index a712bb910cda..f5a342fa1b1d 100644 --- a/arch/riscv/kvm/Kconfig +++ b/arch/riscv/kvm/Kconfig @@ -22,7 +22,6 @@ config KVM depends on RISCV_SBI && MMU select MMU_NOTIFIER select PREEMPT_NOTIFIERS - select ANON_INODES select KVM_MMIO select KVM_GENERIC_DIRTYLOG_READ_PROTECT select HAVE_KVM_VCPU_ASYNC_IOCTL -- cgit v1.2.3 From ee49a89329711f84601bcb65ac8e8ef54fdac771 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 21 Oct 2021 17:49:25 -0700 Subject: KVM: x86: Move SVM's APICv sanity check to common x86 Move SVM's assertion that vCPU's APICv state is consistent with its VM's state out of svm_vcpu_run() and into x86's common inner run loop. The assertion and underlying logic is not unique to SVM, it's just that SVM has more inhibiting conditions and thus is more likely to run headfirst into any KVM bugs. Add relevant comments to document exactly why the update path has unusual ordering between the update the kick, why said ordering is safe, and also the basic rules behind the assertion in the run loop. Cc: Maxim Levitsky Signed-off-by: Sean Christopherson Message-Id: <20211022004927.1448382-3-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/svm.c | 2 -- arch/x86/kvm/x86.c | 20 ++++++++++++++++++++ 2 files changed, 20 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index cee4915d2ce3..a2a4e9b42750 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -3864,8 +3864,6 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu) pre_svm_run(vcpu); - WARN_ON_ONCE(kvm_apicv_activated(vcpu->kvm) != kvm_vcpu_apicv_active(vcpu)); - sync_lapic_to_cr8(vcpu); if (unlikely(svm->asid != svm->vmcb->control.asid)) { diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 3a74540caca2..a09365ecdf01 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -9481,6 +9481,18 @@ void __kvm_request_apicv_update(struct kvm *kvm, bool activate, ulong bit) if (!!old != !!new) { trace_kvm_apicv_update_request(activate, bit); + /* + * Kick all vCPUs before setting apicv_inhibit_reasons to avoid + * false positives in the sanity check WARN in svm_vcpu_run(). + * This task will wait for all vCPUs to ack the kick IRQ before + * updating apicv_inhibit_reasons, and all other vCPUs will + * block on acquiring apicv_update_lock so that vCPUs can't + * redo svm_vcpu_run() without seeing the new inhibit state. + * + * Note, holding apicv_update_lock and taking it in the read + * side (handling the request) also prevents other vCPUs from + * servicing the request with a stale apicv_inhibit_reasons. + */ kvm_make_all_cpus_request(kvm, KVM_REQ_APICV_UPDATE); kvm->arch.apicv_inhibit_reasons = new; if (new) { @@ -9815,6 +9827,14 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) } for (;;) { + /* + * Assert that vCPU vs. VM APICv state is consistent. An APICv + * update must kick and wait for all vCPUs before toggling the + * per-VM state, and responsing vCPUs must wait for the update + * to complete before servicing KVM_REQ_APICV_UPDATE. 
+ */ + WARN_ON_ONCE(kvm_apicv_activated(vcpu->kvm) != kvm_vcpu_apicv_active(vcpu)); + exit_fastpath = static_call(kvm_x86_run)(vcpu); if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST)) break; -- cgit v1.2.3 From 187c8833def8a191c7f01d7932eac1bd2ab84af1 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 21 Oct 2021 17:49:27 -0700 Subject: KVM: x86: Use rw_semaphore for APICv lock to allow vCPU parallelism Use a rw_semaphore instead of a mutex to coordinate APICv updates so that vCPUs responding to requests can take the lock for read and run in parallel. Using a mutex forces serialization of vCPUs even though kvm_vcpu_update_apicv() only touches data local to that vCPU or is protected by a different lock, e.g. SVM's ir_list_lock. Signed-off-by: Sean Christopherson Message-Id: <20211022004927.1448382-5-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 2 +- arch/x86/kvm/hyperv.c | 4 ++-- arch/x86/kvm/x86.c | 12 +++++++----- 3 files changed, 10 insertions(+), 8 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index d41699e89d1f..c8530ea136aa 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1071,7 +1071,7 @@ struct kvm_arch { atomic_t apic_map_dirty; /* Protects apic_access_memslot_enabled and apicv_inhibit_reasons */ - struct mutex apicv_update_lock; + struct rw_semaphore apicv_update_lock; bool apic_access_memslot_enabled; unsigned long apicv_inhibit_reasons; diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index 6f11cda2bfa4..4f15c0165c05 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -112,7 +112,7 @@ static void synic_update_vector(struct kvm_vcpu_hv_synic *synic, if (!!auto_eoi_old == !!auto_eoi_new) return; - mutex_lock(&vcpu->kvm->arch.apicv_update_lock); + down_write(&vcpu->kvm->arch.apicv_update_lock); if (auto_eoi_new) hv->synic_auto_eoi_used++; @@ -123,7 +123,7 @@ static void synic_update_vector(struct kvm_vcpu_hv_synic *synic, !hv->synic_auto_eoi_used, APICV_INHIBIT_REASON_HYPERV); - mutex_unlock(&vcpu->kvm->arch.apicv_update_lock); + up_write(&vcpu->kvm->arch.apicv_update_lock); } static int synic_set_sint(struct kvm_vcpu_hv_synic *synic, int sint, diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index a09365ecdf01..0377e61b8fc0 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -8778,7 +8778,7 @@ EXPORT_SYMBOL_GPL(kvm_apicv_activated); static void kvm_apicv_init(struct kvm *kvm) { - mutex_init(&kvm->arch.apicv_update_lock); + init_rwsem(&kvm->arch.apicv_update_lock); if (enable_apicv) clear_bit(APICV_INHIBIT_REASON_DISABLE, @@ -9440,7 +9440,7 @@ void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu) if (!lapic_in_kernel(vcpu)) return; - mutex_lock(&vcpu->kvm->arch.apicv_update_lock); + down_read(&vcpu->kvm->arch.apicv_update_lock); activate = kvm_apicv_activated(vcpu->kvm); if (vcpu->arch.apicv_active == activate) @@ -9460,7 +9460,7 @@ void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu) kvm_make_request(KVM_REQ_EVENT, vcpu); out: - mutex_unlock(&vcpu->kvm->arch.apicv_update_lock); + up_read(&vcpu->kvm->arch.apicv_update_lock); } EXPORT_SYMBOL_GPL(kvm_vcpu_update_apicv); @@ -9468,6 +9468,8 @@ void __kvm_request_apicv_update(struct kvm *kvm, bool activate, ulong bit) { unsigned long old, new; + lockdep_assert_held_write(&kvm->arch.apicv_update_lock); + if (!kvm_x86_ops.check_apicv_inhibit_reasons || !static_call(kvm_x86_check_apicv_inhibit_reasons)(bit)) return; @@ -9506,9 +9508,9 @@ 
EXPORT_SYMBOL_GPL(__kvm_request_apicv_update); void kvm_request_apicv_update(struct kvm *kvm, bool activate, ulong bit) { - mutex_lock(&kvm->arch.apicv_update_lock); + down_write(&kvm->arch.apicv_update_lock); __kvm_request_apicv_update(kvm, activate, bit); - mutex_unlock(&kvm->arch.apicv_update_lock); + up_write(&kvm->arch.apicv_update_lock); } EXPORT_SYMBOL_GPL(kvm_request_apicv_update); -- cgit v1.2.3 From 6ff53f6a438f72998f56e82e76694a1df9d1ea2c Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 8 Oct 2021 17:11:04 -0700 Subject: x86/irq: Ensure PI wakeup handler is unregistered before module unload Add a synchronize_rcu() after clearing the posted interrupt wakeup handler to ensure all readers, i.e. in-flight IRQ handlers, see the new handler before returning to the caller. If the caller is an exiting module and is unregistering its handler, failure to wait could result in the IRQ handler jumping into an unloaded module. The registration path doesn't require synchronization, as it's the caller's responsibility to not generate interrupts it cares about until after its handler is registered. Fixes: f6b3c72c2366 ("x86/irq: Define a global vector for VT-d Posted-Interrupts") Cc: stable@vger.kernel.org Signed-off-by: Sean Christopherson Message-Id: <20211009001107.3936588-2-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kernel/irq.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index e28f6a5d14f1..766ffe3ba313 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -291,8 +291,10 @@ void kvm_set_posted_intr_wakeup_handler(void (*handler)(void)) { if (handler) kvm_posted_intr_wakeup_handler = handler; - else + else { kvm_posted_intr_wakeup_handler = dummy_handler; + synchronize_rcu(); + } } EXPORT_SYMBOL_GPL(kvm_set_posted_intr_wakeup_handler); -- cgit v1.2.3 From ec5a4919fa7b7d8c7a2af1c7e799b1fe4be84343 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 8 Oct 2021 17:11:05 -0700 Subject: KVM: VMX: Unregister posted interrupt wakeup handler on hardware unsetup Unregister KVM's posted interrupt wakeup handler during unsetup so that a spurious interrupt that arrives after kvm_intel.ko is unloaded doesn't call into freed memory. 
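Both posted-interrupt fixes above rely on the same teardown pattern: publish a safe handler, then wait for any in-flight interrupt-context callers before the module text can go away. A hedged sketch of that pattern follows (an illustrative pseudo-driver, not the actual irq.c or vmx.c code); it assumes a modern kernel where IRQ-disabled regions count as read-side critical sections for synchronize_rcu().

    #include <linux/compiler.h>
    #include <linux/rcupdate.h>

    static void dummy_handler(void) { }
    static void (*wakeup_handler)(void) = dummy_handler;

    /* interrupt path (reader): runs with IRQs disabled */
    static void handle_wakeup_ipi(void)
    {
        READ_ONCE(wakeup_handler)();
    }

    /* module unload path (writer) */
    static void unregister_wakeup_handler(void)
    {
        WRITE_ONCE(wakeup_handler, dummy_handler);
        synchronize_rcu();   /* no CPU can still be running the old handler */
    }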
Fixes: bf9f6ac8d749 ("KVM: Update Posted-Interrupts Descriptor when vCPU is blocked") Cc: stable@vger.kernel.org Signed-off-by: Sean Christopherson Message-Id: <20211009001107.3936588-3-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/vmx.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 2f677e72d864..c11688f64e80 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -7555,6 +7555,8 @@ static void vmx_migrate_timers(struct kvm_vcpu *vcpu) static void hardware_unsetup(void) { + kvm_set_posted_intr_wakeup_handler(NULL); + if (nested) nested_vmx_hardware_unsetup(); @@ -7885,8 +7887,6 @@ static __init int hardware_setup(void) vmx_x86_ops.request_immediate_exit = __kvm_request_immediate_exit; } - kvm_set_posted_intr_wakeup_handler(pi_wakeup_handler); - kvm_mce_cap_supported |= MCG_LMCE_P; if (pt_mode != PT_MODE_SYSTEM && pt_mode != PT_MODE_HOST_GUEST) @@ -7910,6 +7910,9 @@ static __init int hardware_setup(void) r = alloc_kvm_area(); if (r) nested_vmx_hardware_unsetup(); + + kvm_set_posted_intr_wakeup_handler(pi_wakeup_handler); + return r; } -- cgit v1.2.3 From dfd3c713a9c87ade13c7bf618455a57f4d01e97b Mon Sep 17 00:00:00 2001 From: Jim Mattson Date: Thu, 21 Oct 2021 11:54:49 -0700 Subject: kvm: x86: Remove stale declaration of kvm_no_apic_vcpu This variable was renamed to kvm_has_noapic_vcpu in commit 6e4e3b4df4e3 ("KVM: Stop using deprecated jump label APIs"). Signed-off-by: Jim Mattson Message-Id: <20211021185449.3471763-1-jmattson@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index 7d66d63dc55a..ea264c4502e4 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -343,8 +343,6 @@ extern bool enable_vmware_backdoor; extern int pi_inject_timer; -extern struct static_key kvm_no_apic_vcpu; - extern bool report_ignored_msrs; static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec) -- cgit v1.2.3 From ed290e1c20da19fa100a3e0f421aa31b65984960 Mon Sep 17 00:00:00 2001 From: Jim Mattson Date: Wed, 29 Sep 2021 17:36:49 -0700 Subject: KVM: selftests: Fix nested SVM tests when built with clang Though gcc conveniently compiles a simple memset to "rep stos," clang prefers to call the libc version of memset. If a test is dynamically linked, the libc memset isn't available in L1 (nor is the PLT or the GOT, for that matter). Even if the test is statically linked, the libc memset may choose to use some CPU features, like AVX, which may not be enabled in L1. Note that __builtin_memset doesn't solve the problem, because (a) the compiler is free to call memset anyway, and (b) __builtin_memset may also choose to use features like AVX, which may not be available in L1. To avoid a myriad of problems, use an explicit "rep stos" to clear the VMCB in generic_svm_setup(), which is called both from L0 and L1. 
Reported-by: Ricardo Koller Signed-off-by: Jim Mattson Fixes: 20ba262f8631a ("selftests: KVM: AMD Nested test infrastructure") Message-Id: <20210930003649.4026553-1-jmattson@google.com> Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/lib/x86_64/svm.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/kvm/lib/x86_64/svm.c b/tools/testing/selftests/kvm/lib/x86_64/svm.c index 2ac98d70d02b..161eba7cd128 100644 --- a/tools/testing/selftests/kvm/lib/x86_64/svm.c +++ b/tools/testing/selftests/kvm/lib/x86_64/svm.c @@ -54,6 +54,18 @@ static void vmcb_set_seg(struct vmcb_seg *seg, u16 selector, seg->base = base; } +/* + * Avoid using memset to clear the vmcb, since libc may not be + * available in L1 (and, even if it is, features that libc memset may + * want to use, like AVX, may not be enabled). + */ +static void clear_vmcb(struct vmcb *vmcb) +{ + int n = sizeof(*vmcb) / sizeof(u32); + + asm volatile ("rep stosl" : "+c"(n), "+D"(vmcb) : "a"(0) : "memory"); +} + void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_rsp) { struct vmcb *vmcb = svm->vmcb; @@ -70,7 +82,7 @@ void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_r wrmsr(MSR_EFER, efer | EFER_SVME); wrmsr(MSR_VM_HSAVE_PA, svm->save_area_gpa); - memset(vmcb, 0, sizeof(*vmcb)); + clear_vmcb(vmcb); asm volatile ("vmsave %0\n\t" : : "a" (vmcb_gpa) : "memory"); vmcb_set_seg(&save->es, get_es(), 0, -1U, data_seg_attr); vmcb_set_seg(&save->cs, get_cs(), 0, -1U, code_seg_attr); -- cgit v1.2.3 From 2d8fb8f3914b40e3cc12f8cbb74daefd5245349d Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Thu, 9 Sep 2021 18:22:40 +0200 Subject: s390/gmap: validate VMA in __gmap_zap() We should not walk/touch page tables outside of VMA boundaries when holding only the mmap sem in read mode. Evil user space can modify the VMA layout just before this function runs and e.g., trigger races with page table removal code since commit dd2283f2605e ("mm: mmap: zap pages with read mmap_sem in munmap"). The mere presence of an entry in our guest_to_host radix tree does not imply that there is a VMA. Further, we should not allocate page tables (via get_locked_pte()) outside of VMA boundaries: if evil user space decides to map hugetlbfs to these ranges, bad things will happen because we suddenly have PTE or PMD page tables where we shouldn't have them. Similarly, we have to check if we suddenly find a hugetlbfs VMA, before calling get_locked_pte(). Note that gmap_discard() is different: zap_page_range()->unmap_single_vma() makes sure to stay within VMA boundaries. 
Fixes: b31288fa83b2 ("s390/kvm: support collaborative memory management") Signed-off-by: David Hildenbrand Reviewed-by: Claudio Imbrenda Acked-by: Heiko Carstens Link: https://lore.kernel.org/r/20210909162248.14969-2-david@redhat.com Signed-off-by: Christian Borntraeger --- arch/s390/mm/gmap.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c index 4d3b33ce81c6..e0735c343775 100644 --- a/arch/s390/mm/gmap.c +++ b/arch/s390/mm/gmap.c @@ -672,6 +672,7 @@ EXPORT_SYMBOL_GPL(gmap_fault); */ void __gmap_zap(struct gmap *gmap, unsigned long gaddr) { + struct vm_area_struct *vma; unsigned long vmaddr; spinlock_t *ptl; pte_t *ptep; @@ -681,6 +682,11 @@ void __gmap_zap(struct gmap *gmap, unsigned long gaddr) gaddr >> PMD_SHIFT); if (vmaddr) { vmaddr |= gaddr & ~PMD_MASK; + + vma = vma_lookup(gmap->mm, vmaddr); + if (!vma || is_vm_hugetlb_page(vma)) + return; + /* Get pointer to the page table entry */ ptep = get_locked_pte(gmap->mm, vmaddr, &ptl); if (likely(ptep)) -- cgit v1.2.3 From b159f94c86b43cf7e73e654bc527255b1f4eafc4 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Thu, 9 Sep 2021 18:22:41 +0200 Subject: s390/gmap: don't unconditionally call pte_unmap_unlock() in __gmap_zap() ... otherwise we will try unlocking a spinlock that was never locked via a garbage pointer. At the time we reach this code path, we usually successfully looked up a PGSTE already; however, evil user space could have manipulated the VMA layout in the meantime and triggered removal of the page table. Fixes: 1e133ab296f3 ("s390/mm: split arch/s390/mm/pgtable.c") Signed-off-by: David Hildenbrand Reviewed-by: Claudio Imbrenda Acked-by: Heiko Carstens Link: https://lore.kernel.org/r/20210909162248.14969-3-david@redhat.com Signed-off-by: Christian Borntraeger --- arch/s390/mm/gmap.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c index e0735c343775..d63c0ccc5ccd 100644 --- a/arch/s390/mm/gmap.c +++ b/arch/s390/mm/gmap.c @@ -689,9 +689,10 @@ void __gmap_zap(struct gmap *gmap, unsigned long gaddr) /* Get pointer to the page table entry */ ptep = get_locked_pte(gmap->mm, vmaddr, &ptl); - if (likely(ptep)) + if (likely(ptep)) { ptep_zap_unused(gmap->mm, vmaddr, ptep, 0); - pte_unmap_unlock(ptep, ptl); + pte_unmap_unlock(ptep, ptl); + } } } EXPORT_SYMBOL_GPL(__gmap_zap); -- cgit v1.2.3 From fe3d10024073f06f04c74b9674bd71ccc1d787cf Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Thu, 9 Sep 2021 18:22:42 +0200 Subject: s390/mm: validate VMA in PGSTE manipulation functions We should not walk/touch page tables outside of VMA boundaries when holding only the mmap sem in read mode. Evil user space can modify the VMA layout just before this function runs and e.g., trigger races with page table removal code since commit dd2283f2605e ("mm: mmap: zap pages with read mmap_sem in munmap"). gfn_to_hva() will only translate using KVM memory regions, but won't validate the VMA. Further, we should not allocate page tables outside of VMA boundaries: if evil user space decides to map hugetlbfs to these ranges, bad things will happen because we suddenly have PTE or PMD page tables where we shouldn't have them. Similarly, we have to check if we suddenly find a hugetlbfs VMA, before calling get_locked_pte(). 
Fixes: 2d42f9477320 ("s390/kvm: Add PGSTE manipulation functions") Signed-off-by: David Hildenbrand Reviewed-by: Claudio Imbrenda Acked-by: Heiko Carstens Link: https://lore.kernel.org/r/20210909162248.14969-4-david@redhat.com Signed-off-by: Christian Borntraeger --- arch/s390/mm/pgtable.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 034721a68d8f..2717a406edeb 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -988,6 +988,7 @@ EXPORT_SYMBOL(get_guest_storage_key); int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc, unsigned long *oldpte, unsigned long *oldpgste) { + struct vm_area_struct *vma; unsigned long pgstev; spinlock_t *ptl; pgste_t pgste; @@ -997,6 +998,10 @@ int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc, WARN_ON_ONCE(orc > ESSA_MAX); if (unlikely(orc > ESSA_MAX)) return -EINVAL; + + vma = vma_lookup(mm, hva); + if (!vma || is_vm_hugetlb_page(vma)) + return -EFAULT; ptep = get_locked_pte(mm, hva, &ptl); if (unlikely(!ptep)) return -EFAULT; @@ -1089,10 +1094,14 @@ EXPORT_SYMBOL(pgste_perform_essa); int set_pgste_bits(struct mm_struct *mm, unsigned long hva, unsigned long bits, unsigned long value) { + struct vm_area_struct *vma; spinlock_t *ptl; pgste_t new; pte_t *ptep; + vma = vma_lookup(mm, hva); + if (!vma || is_vm_hugetlb_page(vma)) + return -EFAULT; ptep = get_locked_pte(mm, hva, &ptl); if (unlikely(!ptep)) return -EFAULT; @@ -1117,9 +1126,13 @@ EXPORT_SYMBOL(set_pgste_bits); */ int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep) { + struct vm_area_struct *vma; spinlock_t *ptl; pte_t *ptep; + vma = vma_lookup(mm, hva); + if (!vma || is_vm_hugetlb_page(vma)) + return -EFAULT; ptep = get_locked_pte(mm, hva, &ptl); if (unlikely(!ptep)) return -EFAULT; -- cgit v1.2.3 From 949f5c1244ee6c36d2e81c588d1200eaa83a3df6 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Thu, 9 Sep 2021 18:22:43 +0200 Subject: s390/mm: fix VMA and page table handling code in storage key handling functions There are multiple things broken about our storage key handling functions: 1. We should not walk/touch page tables outside of VMA boundaries when holding only the mmap sem in read mode. Evil user space can modify the VMA layout just before this function runs and e.g., trigger races with page table removal code since commit dd2283f2605e ("mm: mmap: zap pages with read mmap_sem in munmap"). gfn_to_hva() will only translate using KVM memory regions, but won't validate the VMA. 2. We should not allocate page tables outside of VMA boundaries: if evil user space decides to map hugetlbfs to these ranges, bad things will happen because we suddenly have PTE or PMD page tables where we shouldn't have them. 3. We don't handle large PUDs that might suddenly appeared inside our page table hierarchy. Don't manually allocate page tables, properly validate that we have VMA and bail out on pud_large(). All callers of page table handling functions, except get_guest_storage_key(), call fixup_user_fault() in case they receive an -EFAULT and retry; this will allocate the necessary page tables if required. To keep get_guest_storage_key() working as expected and not requiring kvm_s390_get_skeys() to call fixup_user_fault() distinguish between "there is simply no page table or huge page yet and the key is assumed to be 0" and "this is a fault to be reported". 
Although commit 637ff9efe5ea ("s390/mm: Add huge pmd storage key handling") introduced most of the affected code, it was actually already broken before when using get_locked_pte() without any VMA checks. Note: Ever since commit 637ff9efe5ea ("s390/mm: Add huge pmd storage key handling") we can no longer set a guest storage key (for example from QEMU during VM live migration) without actually resolving a fault. Although we would have created most page tables, we would choke on the !pmd_present(), requiring a call to fixup_user_fault(). I would have thought that this is problematic in combination with postcopy life migration ... but nobody noticed and this patch doesn't change the situation. So maybe it's just fine. Fixes: 9fcf93b5de06 ("KVM: S390: Create helper function get_guest_storage_key") Fixes: 24d5dd0208ed ("s390/kvm: Provide function for setting the guest storage key") Fixes: a7e19ab55ffd ("KVM: s390: handle missing storage-key facility") Signed-off-by: David Hildenbrand Reviewed-by: Claudio Imbrenda Acked-by: Heiko Carstens Link: https://lore.kernel.org/r/20210909162248.14969-5-david@redhat.com Signed-off-by: Christian Borntraeger --- arch/s390/mm/pgtable.c | 57 ++++++++++++++++++++++++++++++++++---------------- 1 file changed, 39 insertions(+), 18 deletions(-) diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 2717a406edeb..6ad634a27d5b 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -429,22 +429,36 @@ static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm, } #ifdef CONFIG_PGSTE -static pmd_t *pmd_alloc_map(struct mm_struct *mm, unsigned long addr) +static int pmd_lookup(struct mm_struct *mm, unsigned long addr, pmd_t **pmdp) { + struct vm_area_struct *vma; pgd_t *pgd; p4d_t *p4d; pud_t *pud; - pmd_t *pmd; + + /* We need a valid VMA, otherwise this is clearly a fault. */ + vma = vma_lookup(mm, addr); + if (!vma) + return -EFAULT; pgd = pgd_offset(mm, addr); - p4d = p4d_alloc(mm, pgd, addr); - if (!p4d) - return NULL; - pud = pud_alloc(mm, p4d, addr); - if (!pud) - return NULL; - pmd = pmd_alloc(mm, pud, addr); - return pmd; + if (!pgd_present(*pgd)) + return -ENOENT; + + p4d = p4d_offset(pgd, addr); + if (!p4d_present(*p4d)) + return -ENOENT; + + pud = pud_offset(p4d, addr); + if (!pud_present(*pud)) + return -ENOENT; + + /* Large PUDs are not supported yet. */ + if (pud_large(*pud)) + return -EFAULT; + + *pmdp = pmd_offset(pud, addr); + return 0; } #endif @@ -778,8 +792,7 @@ int set_guest_storage_key(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp; pte_t *ptep; - pmdp = pmd_alloc_map(mm, addr); - if (unlikely(!pmdp)) + if (pmd_lookup(mm, addr, &pmdp)) return -EFAULT; ptl = pmd_lock(mm, pmdp); @@ -881,8 +894,7 @@ int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr) pte_t *ptep; int cc = 0; - pmdp = pmd_alloc_map(mm, addr); - if (unlikely(!pmdp)) + if (pmd_lookup(mm, addr, &pmdp)) return -EFAULT; ptl = pmd_lock(mm, pmdp); @@ -935,15 +947,24 @@ int get_guest_storage_key(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp; pte_t *ptep; - pmdp = pmd_alloc_map(mm, addr); - if (unlikely(!pmdp)) + /* + * If we don't have a PTE table and if there is no huge page mapped, + * the storage key is 0. 
+ */ + *key = 0; + + switch (pmd_lookup(mm, addr, &pmdp)) { + case -ENOENT: + return 0; + case 0: + break; + default: return -EFAULT; + } ptl = pmd_lock(mm, pmdp); if (!pmd_present(*pmdp)) { - /* Not yet mapped memory has a zero key */ spin_unlock(ptl); - *key = 0; return 0; } -- cgit v1.2.3 From 46c22ffd2772201662350bc7b94b9ea9d3ee5ac2 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Thu, 9 Sep 2021 18:22:44 +0200 Subject: s390/uv: fully validate the VMA before calling follow_page() We should not walk/touch page tables outside of VMA boundaries when holding only the mmap sem in read mode. Evil user space can modify the VMA layout just before this function runs and e.g., trigger races with page table removal code since commit dd2283f2605e ("mm: mmap: zap pages with read mmap_sem in munmap"). find_vma() does not check if the address is >= the VMA start address; use vma_lookup() instead. Fixes: 214d9bbcd3a6 ("s390/mm: provide memory management functions for protected KVM guests") Signed-off-by: David Hildenbrand Reviewed-by: Claudio Imbrenda Acked-by: Heiko Carstens Reviewed-by: Liam R. Howlett Link: https://lore.kernel.org/r/20210909162248.14969-6-david@redhat.com Signed-off-by: Christian Borntraeger --- arch/s390/kernel/uv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c index 5a656c7b7a67..f95ccbd39692 100644 --- a/arch/s390/kernel/uv.c +++ b/arch/s390/kernel/uv.c @@ -212,7 +212,7 @@ again: uaddr = __gmap_translate(gmap, gaddr); if (IS_ERR_VALUE(uaddr)) goto out; - vma = find_vma(gmap->mm, uaddr); + vma = vma_lookup(gmap->mm, uaddr); if (!vma) goto out; /* -- cgit v1.2.3 From 8318c404cf8c2b5141746af739fdb62c030508ca Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Thu, 9 Sep 2021 18:22:46 +0200 Subject: s390/mm: no need for pte_alloc_map_lock() if we know the pmd is present pte_map_lock() is sufficient. 
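Put differently, once a present, non-large pmd has been observed under pmd_lock(), a PTE table is known to exist, so the non-allocating pte_offset_map_lock() is enough. A hedged sketch of the pattern (the wrapper is hypothetical; pmd_lock(), pmd_present(), pmd_large() and pte_offset_map_lock() are the real primitives):

static pte_t *map_existing_pte(struct mm_struct *mm, pmd_t *pmdp,
                               unsigned long addr, spinlock_t **ptlp)
{
        spinlock_t *pmd_ptl = pmd_lock(mm, pmdp);
        bool have_pte_table = pmd_present(*pmdp) && !pmd_large(*pmdp);

        spin_unlock(pmd_ptl);
        if (!have_pte_table)
                return NULL;
        /* No pte_alloc_map_lock(): the PTE table already exists. */
        return pte_offset_map_lock(mm, pmdp, addr, ptlp);
}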
Signed-off-by: David Hildenbrand Acked-by: Heiko Carstens Link: https://lore.kernel.org/r/20210909162248.14969-8-david@redhat.com Signed-off-by: Christian Borntraeger --- arch/s390/mm/pgtable.c | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 6ad634a27d5b..e74cc59dcd67 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -814,10 +814,7 @@ int set_guest_storage_key(struct mm_struct *mm, unsigned long addr, } spin_unlock(ptl); - ptep = pte_alloc_map_lock(mm, pmdp, addr, &ptl); - if (unlikely(!ptep)) - return -EFAULT; - + ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl); new = old = pgste_get_lock(ptep); pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT | PGSTE_ACC_BITS | PGSTE_FP_BIT); @@ -912,10 +909,7 @@ int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr) } spin_unlock(ptl); - ptep = pte_alloc_map_lock(mm, pmdp, addr, &ptl); - if (unlikely(!ptep)) - return -EFAULT; - + ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl); new = old = pgste_get_lock(ptep); /* Reset guest reference bit only */ pgste_val(new) &= ~PGSTE_GR_BIT; @@ -977,10 +971,7 @@ int get_guest_storage_key(struct mm_struct *mm, unsigned long addr, } spin_unlock(ptl); - ptep = pte_alloc_map_lock(mm, pmdp, addr, &ptl); - if (unlikely(!ptep)) - return -EFAULT; - + ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl); pgste = pgste_get_lock(ptep); *key = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56; paddr = pte_val(*ptep) & PAGE_MASK; -- cgit v1.2.3 From 7cb70266b0e3996daf4bbef9758b6e27e9ec904d Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Thu, 9 Sep 2021 18:22:47 +0200 Subject: s390/mm: optimize set_guest_storage_key() We already optimize get_guest_storage_key() to assume that if we don't have a PTE table and don't have a huge page mapped that the storage key is 0. Similarly, optimize set_guest_storage_key() to simply do nothing in case the key to set is 0. Signed-off-by: David Hildenbrand Reviewed-by: Claudio Imbrenda Acked-by: Heiko Carstens Link: https://lore.kernel.org/r/20210909162248.14969-9-david@redhat.com Signed-off-by: Christian Borntraeger --- arch/s390/mm/pgtable.c | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index e74cc59dcd67..1c9aeb361f8d 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -792,13 +792,23 @@ int set_guest_storage_key(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp; pte_t *ptep; - if (pmd_lookup(mm, addr, &pmdp)) + /* + * If we don't have a PTE table and if there is no huge page mapped, + * we can ignore attempts to set the key to 0, because it already is 0. + */ + switch (pmd_lookup(mm, addr, &pmdp)) { + case -ENOENT: + return key ? -EFAULT : 0; + case 0: + break; + default: return -EFAULT; + } ptl = pmd_lock(mm, pmdp); if (!pmd_present(*pmdp)) { spin_unlock(ptl); - return -EFAULT; + return key ? -EFAULT : 0; } if (pmd_large(*pmdp)) { -- cgit v1.2.3 From 14ea40e22c4193ed71cd93ec79c0c05216c3600a Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Thu, 9 Sep 2021 18:22:48 +0200 Subject: s390/mm: optimize reset_guest_reference_bit() We already optimize get_guest_storage_key() to assume that if we don't have a PTE table and don't have a huge page mapped that the storage key is 0. Similarly, optimize reset_guest_reference_bit() to simply do nothing if there is no PTE table and no huge page mapped. 
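The caller-visible effect can be sketched as follows (a hedged, hypothetical condensation of the handle_rrbe() path; reset_guest_reference_bit(), kvm_s390_set_psw_cc() and kvm_s390_inject_program_int() are the real interfaces): an unmapped guest page now simply reports condition code 0 instead of forcing a fault fixup just to learn that the reference bit was already 0.

static int rrbe_one_page(struct kvm_vcpu *vcpu, unsigned long vmaddr)
{
        int cc;

        mmap_read_lock(current->mm);
        cc = reset_guest_reference_bit(current->mm, vmaddr);    /* 0 if unmapped */
        mmap_read_unlock(current->mm);
        if (cc < 0)     /* the real handler first retries via fixup_user_fault() */
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        kvm_s390_set_psw_cc(vcpu, cc);
        return 0;
}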
Signed-off-by: David Hildenbrand Reviewed-by: Claudio Imbrenda Acked-by: Heiko Carstens Link: https://lore.kernel.org/r/20210909162248.14969-10-david@redhat.com Signed-off-by: Christian Borntraeger --- arch/s390/mm/pgtable.c | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 1c9aeb361f8d..c16232cd0ec5 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -901,13 +901,23 @@ int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr) pte_t *ptep; int cc = 0; - if (pmd_lookup(mm, addr, &pmdp)) + /* + * If we don't have a PTE table and if there is no huge page mapped, + * the storage key is 0 and there is nothing for us to do. + */ + switch (pmd_lookup(mm, addr, &pmdp)) { + case -ENOENT: + return 0; + case 0: + break; + default: return -EFAULT; + } ptl = pmd_lock(mm, pmdp); if (!pmd_present(*pmdp)) { spin_unlock(ptl); - return -EFAULT; + return 0; } if (pmd_large(*pmdp)) { -- cgit v1.2.3 From 57c5df13eca4017ed28f9375dc1d246ec0f54217 Mon Sep 17 00:00:00 2001 From: Claudio Imbrenda Date: Mon, 20 Sep 2021 15:24:49 +0200 Subject: KVM: s390: pv: add macros for UVC CC values Add macros to describe the 4 possible CC values returned by the UVC instruction. Signed-off-by: Claudio Imbrenda Reviewed-by: Christian Borntraeger Reviewed-by: Janosch Frank Message-Id: <20210920132502.36111-2-imbrenda@linux.ibm.com> Signed-off-by: Janosch Frank Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/uv.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/arch/s390/include/asm/uv.h b/arch/s390/include/asm/uv.h index fe92a4caf5ec..9ab1914c5b95 100644 --- a/arch/s390/include/asm/uv.h +++ b/arch/s390/include/asm/uv.h @@ -18,6 +18,11 @@ #include #include +#define UVC_CC_OK 0 +#define UVC_CC_ERROR 1 +#define UVC_CC_BUSY 2 +#define UVC_CC_PARTIAL 3 + #define UVC_RC_EXECUTED 0x0001 #define UVC_RC_INV_CMD 0x0002 #define UVC_RC_INV_STATE 0x0003 -- cgit v1.2.3 From d4074324b07a94a1fca476d452dfbb3a4e7bf656 Mon Sep 17 00:00:00 2001 From: Claudio Imbrenda Date: Mon, 20 Sep 2021 15:24:50 +0200 Subject: KVM: s390: pv: avoid double free of sida page If kvm_s390_pv_destroy_cpu is called more than once, we risk calling free_page on a random page, since the sidad field is aliased with the gbea, which is not guaranteed to be zero. This can happen, for example, if userspace calls the KVM_PV_DISABLE IOCTL, and it fails, and then userspace calls the same IOCTL again. This scenario is only possible if KVM has some serious bug or if the hardware is broken. The solution is to simply return successfully immediately if the vCPU was already non secure. 
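For example, a VMM can now retry the teardown without risk (a hedged user-space sketch; the helper and its minimal error handling are hypothetical, while KVM_S390_PV_COMMAND, KVM_PV_DISABLE and struct kvm_pv_cmd are the existing uapi):

#include <sys/ioctl.h>
#include <linux/kvm.h>

static int pv_disable(int vm_fd)
{
        struct kvm_pv_cmd cmd = { .cmd = KVM_PV_DISABLE };

        if (!ioctl(vm_fd, KVM_S390_PV_COMMAND, &cmd))
                return 0;
        /* A failed attempt may simply be repeated: destroying a vCPU that
         * is already non-secure is now a harmless no-op. */
        return ioctl(vm_fd, KVM_S390_PV_COMMAND, &cmd);
}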
Signed-off-by: Claudio Imbrenda Fixes: 19e1227768863a1469797c13ef8fea1af7beac2c ("KVM: S390: protvirt: Introduce instruction data area bounce buffer") Reviewed-by: Janosch Frank Reviewed-by: Christian Borntraeger Message-Id: <20210920132502.36111-3-imbrenda@linux.ibm.com> Signed-off-by: Janosch Frank Signed-off-by: Christian Borntraeger --- arch/s390/kvm/pv.c | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/arch/s390/kvm/pv.c b/arch/s390/kvm/pv.c index c8841f476e91..0a854115100b 100644 --- a/arch/s390/kvm/pv.c +++ b/arch/s390/kvm/pv.c @@ -16,18 +16,17 @@ int kvm_s390_pv_destroy_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc) { - int cc = 0; + int cc; - if (kvm_s390_pv_cpu_get_handle(vcpu)) { - cc = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu), - UVC_CMD_DESTROY_SEC_CPU, rc, rrc); + if (!kvm_s390_pv_cpu_get_handle(vcpu)) + return 0; + + cc = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu), UVC_CMD_DESTROY_SEC_CPU, rc, rrc); + + KVM_UV_EVENT(vcpu->kvm, 3, "PROTVIRT DESTROY VCPU %d: rc %x rrc %x", + vcpu->vcpu_id, *rc, *rrc); + WARN_ONCE(cc, "protvirt destroy cpu failed rc %x rrc %x", *rc, *rrc); - KVM_UV_EVENT(vcpu->kvm, 3, - "PROTVIRT DESTROY VCPU %d: rc %x rrc %x", - vcpu->vcpu_id, *rc, *rrc); - WARN_ONCE(cc, "protvirt destroy cpu failed rc %x rrc %x", - *rc, *rrc); - } /* Intended memory leak for something that should never happen. */ if (!cc) free_pages(vcpu->arch.pv.stor_base, -- cgit v1.2.3 From 1e2aa46de526a5adafe580bca4c25856bb06f09e Mon Sep 17 00:00:00 2001 From: Claudio Imbrenda Date: Mon, 20 Sep 2021 15:24:51 +0200 Subject: KVM: s390: pv: avoid stalls for kvm_s390_pv_init_vm When the system is heavily overcommitted, kvm_s390_pv_init_vm might generate stall notifications. Fix this by using uv_call_sched instead of just uv_call. This is ok because we are not holding spinlocks. Signed-off-by: Claudio Imbrenda Fixes: 214d9bbcd3a672 ("s390/mm: provide memory management functions for protected KVM guests") Reviewed-by: Christian Borntraeger Reviewed-by: Janosch Frank Message-Id: <20210920132502.36111-4-imbrenda@linux.ibm.com> Signed-off-by: Janosch Frank Signed-off-by: Christian Borntraeger --- arch/s390/kvm/pv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/s390/kvm/pv.c b/arch/s390/kvm/pv.c index 0a854115100b..00d272d134c2 100644 --- a/arch/s390/kvm/pv.c +++ b/arch/s390/kvm/pv.c @@ -195,7 +195,7 @@ int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc) uvcb.conf_base_stor_origin = (u64)kvm->arch.pv.stor_base; uvcb.conf_virt_stor_origin = (u64)kvm->arch.pv.stor_var; - cc = uv_call(0, (u64)&uvcb); + cc = uv_call_sched(0, (u64)&uvcb); *rc = uvcb.header.rc; *rrc = uvcb.header.rrc; KVM_UV_EVENT(kvm, 3, "PROTVIRT CREATE VM: handle %llx len %llx rc %x rrc %x", -- cgit v1.2.3 From f0a1a0615a6ff6d38af2c65a522698fb4bb85df6 Mon Sep 17 00:00:00 2001 From: Claudio Imbrenda Date: Mon, 20 Sep 2021 15:24:52 +0200 Subject: KVM: s390: pv: avoid stalls when making pages secure Improve make_secure_pte to avoid stalls when the system is heavily overcommitted. This was especially problematic in kvm_s390_pv_unpack, because of the loop over all pages that needed unpacking. Due to the locks being held, it was not possible to simply replace uv_call with uv_call_sched. A more complex approach was needed, in which uv_call is replaced with __uv_call, which does not loop. When the UVC needs to be executed again, -EAGAIN is returned, and the caller (or its caller) will try again. 
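The single-shot pattern looks roughly like this (a hedged sketch with a hypothetical wrapper; __uv_call(), struct uv_cb_header and the UVC_CC_* macros are the real interfaces used by this series):

static int uvc_try_once(struct uv_cb_header *uvcb)
{
        int cc = __uv_call(0, (u64)uvcb);       /* execute the UVC exactly once */

        if (cc == UVC_CC_OK)
                return 0;
        if (cc == UVC_CC_BUSY || cc == UVC_CC_PARTIAL)
                return -EAGAIN;                 /* let the caller back off and retry */
        /* cc == UVC_CC_ERROR: inspect uvcb->rc for the actual reason */
        return -EINVAL;
}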
When -EAGAIN is returned, the path is the same as when the page is in writeback (and the writeback check is also performed, which is harmless). Fixes: 214d9bbcd3a672 ("s390/mm: provide memory management functions for protected KVM guests") Signed-off-by: Claudio Imbrenda Reviewed-by: Janosch Frank Reviewed-by: Christian Borntraeger Link: https://lore.kernel.org/r/20210920132502.36111-5-imbrenda@linux.ibm.com Signed-off-by: Christian Borntraeger --- arch/s390/kernel/uv.c | 29 +++++++++++++++++++++++------ arch/s390/kvm/intercept.c | 5 +++++ 2 files changed, 28 insertions(+), 6 deletions(-) diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c index f95ccbd39692..09b80d371409 100644 --- a/arch/s390/kernel/uv.c +++ b/arch/s390/kernel/uv.c @@ -165,7 +165,7 @@ static int make_secure_pte(pte_t *ptep, unsigned long addr, { pte_t entry = READ_ONCE(*ptep); struct page *page; - int expected, rc = 0; + int expected, cc = 0; if (!pte_present(entry)) return -ENXIO; @@ -181,12 +181,25 @@ static int make_secure_pte(pte_t *ptep, unsigned long addr, if (!page_ref_freeze(page, expected)) return -EBUSY; set_bit(PG_arch_1, &page->flags); - rc = uv_call(0, (u64)uvcb); + /* + * If the UVC does not succeed or fail immediately, we don't want to + * loop for long, or we might get stall notifications. + * On the other hand, this is a complex scenario and we are holding a lot of + * locks, so we can't easily sleep and reschedule. We try only once, + * and if the UVC returned busy or partial completion, we return + * -EAGAIN and we let the callers deal with it. + */ + cc = __uv_call(0, (u64)uvcb); page_ref_unfreeze(page, expected); - /* Return -ENXIO if the page was not mapped, -EINVAL otherwise */ - if (rc) - rc = uvcb->rc == 0x10a ? -ENXIO : -EINVAL; - return rc; + /* + * Return -ENXIO if the page was not mapped, -EINVAL for other errors. + * If busy or partially completed, return -EAGAIN. + */ + if (cc == UVC_CC_OK) + return 0; + else if (cc == UVC_CC_BUSY || cc == UVC_CC_PARTIAL) + return -EAGAIN; + return uvcb->rc == 0x10a ? -ENXIO : -EINVAL; } /* @@ -239,6 +252,10 @@ out: mmap_read_unlock(gmap->mm); if (rc == -EAGAIN) { + /* + * If we are here because the UVC returned busy or partial + * completion, this is just a useless check, but it is safe. + */ wait_on_page_writeback(page); } else if (rc == -EBUSY) { /* diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index 2bd8f854f1b4..d07ff646d844 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c @@ -518,6 +518,11 @@ static int handle_pv_uvc(struct kvm_vcpu *vcpu) */ if (rc == -EINVAL) return 0; + /* + * If we got -EAGAIN here, we simply return it. It will eventually + * get propagated all the way to userspace, which should then try + * again. + */ return rc; } -- cgit v1.2.3 From 8eeba194a32e0f50329354a696baaa2e3d9accc5 Mon Sep 17 00:00:00 2001 From: Eric Farman Date: Fri, 8 Oct 2021 22:31:07 +0200 Subject: KVM: s390: Simplify SIGP Set Arch handling The Principles of Operations describe the various reasons that each individual SIGP orders might be rejected, and the status bit that are set for each condition. For example, for the Set Architecture order, it states: "If it is not true that all other CPUs in the configu- ration are in the stopped or check-stop state, ... bit 54 (incorrect state) ... is set to one." However, it also states: "... if the CZAM facility is installed, ... bit 55 (invalid parameter) ... is set to one." 
Since the Configuration-z/Architecture-Architectural Mode (CZAM) facility is unconditionally presented, there is no need to examine each VCPU to determine if it is started/stopped. It can simply be rejected outright with the Invalid Parameter bit. Fixes: b697e435aeee ("KVM: s390: Support Configuration z/Architecture Mode") Signed-off-by: Eric Farman Reviewed-by: Thomas Huth Reviewed-by: Claudio Imbrenda Reviewed-by: David Hildenbrand Reviewed-by: Christian Borntraeger Link: https://lore.kernel.org/r/20211008203112.1979843-2-farman@linux.ibm.com Signed-off-by: Christian Borntraeger --- arch/s390/kvm/sigp.c | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c index 683036c1c92a..cf4de80bd541 100644 --- a/arch/s390/kvm/sigp.c +++ b/arch/s390/kvm/sigp.c @@ -151,22 +151,10 @@ static int __sigp_stop_and_store_status(struct kvm_vcpu *vcpu, static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter, u64 *status_reg) { - unsigned int i; - struct kvm_vcpu *v; - bool all_stopped = true; - - kvm_for_each_vcpu(i, v, vcpu->kvm) { - if (v == vcpu) - continue; - if (!is_vcpu_stopped(v)) - all_stopped = false; - } - *status_reg &= 0xffffffff00000000UL; /* Reject set arch order, with czam we're always in z/Arch mode. */ - *status_reg |= (all_stopped ? SIGP_STATUS_INVALID_PARAMETER : - SIGP_STATUS_INCORRECT_STATE); + *status_reg |= SIGP_STATUS_INVALID_PARAMETER; return SIGP_CC_STATUS_STORED; } -- cgit v1.2.3 From 67cf68b6a5ccac8bc7dfef0a220b59af4c83fd2c Mon Sep 17 00:00:00 2001 From: Eric Farman Date: Fri, 8 Oct 2021 22:31:12 +0200 Subject: KVM: s390: Add a routine for setting userspace CPU state This capability exists, but we don't record anything when userspace enables it. Let's refactor that code so that a note can be made in the debug logs that it was enabled. 
Signed-off-by: Eric Farman Reviewed-by: Thomas Huth Reviewed-by: Claudio Imbrenda Reviewed-by: David Hildenbrand Link: https://lore.kernel.org/r/20211008203112.1979843-7-farman@linux.ibm.com Signed-off-by: Christian Borntraeger --- arch/s390/kvm/kvm-s390.c | 6 +++--- arch/s390/kvm/kvm-s390.h | 9 +++++++++ 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 1c97493d21e1..6482ea9139bb 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -2487,8 +2487,8 @@ long kvm_arch_vm_ioctl(struct file *filp, case KVM_S390_PV_COMMAND: { struct kvm_pv_cmd args; - /* protvirt means user sigp */ - kvm->arch.user_cpu_state_ctrl = 1; + /* protvirt means user cpu state */ + kvm_s390_set_user_cpu_state_ctrl(kvm); r = 0; if (!is_prot_virt_host()) { r = -EINVAL; @@ -3802,7 +3802,7 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, vcpu_load(vcpu); /* user space knows about this interface - let it control the state */ - vcpu->kvm->arch.user_cpu_state_ctrl = 1; + kvm_s390_set_user_cpu_state_ctrl(vcpu->kvm); switch (mp_state->mp_state) { case KVM_MP_STATE_STOPPED: diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index 52bc8fbaa60a..c07a050d757d 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -208,6 +208,15 @@ static inline int kvm_s390_user_cpu_state_ctrl(struct kvm *kvm) return kvm->arch.user_cpu_state_ctrl != 0; } +static inline void kvm_s390_set_user_cpu_state_ctrl(struct kvm *kvm) +{ + if (kvm->arch.user_cpu_state_ctrl) + return; + + VM_EVENT(kvm, 3, "%s", "ENABLE: Userspace CPU state control"); + kvm->arch.user_cpu_state_ctrl = 1; +} + /* implemented in pv.c */ int kvm_s390_pv_destroy_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc); int kvm_s390_pv_create_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc); -- cgit v1.2.3 From a9d496d8e08ca1eb43d14cb734839b24ab0e8083 Mon Sep 17 00:00:00 2001 From: David Edmondson Date: Mon, 20 Sep 2021 11:37:34 +0100 Subject: KVM: x86: Clarify the kvm_run.emulation_failure structure layout Until more flags for kvm_run.emulation_failure flags are defined, it is undetermined whether new payload elements corresponding to those flags will be additive or alternative. As a hint to userspace that an alternative is possible, wrap the current payload elements in a union. Suggested-by: Sean Christopherson Signed-off-by: David Edmondson Reviewed-by: Sean Christopherson Message-Id: <20210920103737.2696756-2-david.edmondson@oracle.com> Signed-off-by: Paolo Bonzini --- include/uapi/linux/kvm.h | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 5ca5ffe16cb4..2c8aa8d4dac1 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -403,8 +403,12 @@ struct kvm_run { __u32 suberror; __u32 ndata; __u64 flags; - __u8 insn_size; - __u8 insn_bytes[15]; + union { + struct { + __u8 insn_size; + __u8 insn_bytes[15]; + }; + }; } emulation_failure; /* KVM_EXIT_OSI */ struct { -- cgit v1.2.3 From 0a62a0319abb92c89a4f91c2dbfcaee4e47f37ca Mon Sep 17 00:00:00 2001 From: David Edmondson Date: Mon, 20 Sep 2021 11:37:35 +0100 Subject: KVM: x86: Get exit_reason as part of kvm_x86_ops.get_exit_info Extend the get_exit_info static call to provide the reason for the VM exit. Modify relevant trace points to use this rather than extracting the reason in the caller. 
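A consumer of the extended hook then looks roughly like this (a hedged sketch; the debugging helper is hypothetical, while static_call(kvm_x86_get_exit_info) is the real mechanism, exactly as used by the tracepoints below):

static void dump_exit_info(struct kvm_vcpu *vcpu)
{
        u32 reason, intr_info, error_code;
        u64 info1, info2;

        static_call(kvm_x86_get_exit_info)(vcpu, &reason, &info1, &info2,
                                           &intr_info, &error_code);
        pr_debug("exit reason %u info1 0x%llx info2 0x%llx intr 0x%x err 0x%x\n",
                 reason, info1, info2, intr_info, error_code);
}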
Signed-off-by: David Edmondson Reviewed-by: Sean Christopherson Message-Id: <20210920103737.2696756-3-david.edmondson@oracle.com> Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 7 ++++--- arch/x86/kvm/svm/svm.c | 8 +++++--- arch/x86/kvm/trace.h | 9 +++++---- arch/x86/kvm/vmx/nested.c | 2 +- arch/x86/kvm/vmx/vmx.c | 6 ++++-- 5 files changed, 19 insertions(+), 13 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index c8530ea136aa..0b8a9ea7a47b 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1412,10 +1412,11 @@ struct kvm_x86_ops { void (*write_tsc_multiplier)(struct kvm_vcpu *vcpu, u64 multiplier); /* - * Retrieve somewhat arbitrary exit information. Intended to be used - * only from within tracepoints to avoid VMREADs when tracing is off. + * Retrieve somewhat arbitrary exit information. Intended to + * be used only from within tracepoints or error paths. */ - void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2, + void (*get_exit_info)(struct kvm_vcpu *vcpu, u32 *reason, + u64 *info1, u64 *info2, u32 *exit_int_info, u32 *exit_int_info_err_code); int (*check_intercept)(struct kvm_vcpu *vcpu, diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index a2a4e9b42750..21bb81710e0f 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -3362,11 +3362,13 @@ int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code) return svm_exit_handlers[exit_code](vcpu); } -static void svm_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2, +static void svm_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason, + u64 *info1, u64 *info2, u32 *intr_info, u32 *error_code) { struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control; + *reason = control->exit_code; *info1 = control->exit_info_1; *info2 = control->exit_info_2; *intr_info = control->exit_int_info; @@ -3383,7 +3385,7 @@ static int handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) struct kvm_run *kvm_run = vcpu->run; u32 exit_code = svm->vmcb->control.exit_code; - trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM); + trace_kvm_exit(vcpu, KVM_ISA_SVM); /* SEV-ES guests must use the CR write traps to track CR registers. 
*/ if (!sev_es_guest(vcpu->kvm)) { @@ -3396,7 +3398,7 @@ static int handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) if (is_guest_mode(vcpu)) { int vmexit; - trace_kvm_nested_vmexit(exit_code, vcpu, KVM_ISA_SVM); + trace_kvm_nested_vmexit(vcpu, KVM_ISA_SVM); vmexit = nested_svm_exit_special(svm); diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h index 03ebe368333e..953b0fcb21ee 100644 --- a/arch/x86/kvm/trace.h +++ b/arch/x86/kvm/trace.h @@ -288,8 +288,8 @@ TRACE_EVENT(kvm_apic, #define TRACE_EVENT_KVM_EXIT(name) \ TRACE_EVENT(name, \ - TP_PROTO(unsigned int exit_reason, struct kvm_vcpu *vcpu, u32 isa), \ - TP_ARGS(exit_reason, vcpu, isa), \ + TP_PROTO(struct kvm_vcpu *vcpu, u32 isa), \ + TP_ARGS(vcpu, isa), \ \ TP_STRUCT__entry( \ __field( unsigned int, exit_reason ) \ @@ -303,11 +303,12 @@ TRACE_EVENT(name, \ ), \ \ TP_fast_assign( \ - __entry->exit_reason = exit_reason; \ __entry->guest_rip = kvm_rip_read(vcpu); \ __entry->isa = isa; \ __entry->vcpu_id = vcpu->vcpu_id; \ - static_call(kvm_x86_get_exit_info)(vcpu, &__entry->info1, \ + static_call(kvm_x86_get_exit_info)(vcpu, \ + &__entry->exit_reason, \ + &__entry->info1, \ &__entry->info2, \ &__entry->intr_info, \ &__entry->error_code); \ diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index af1bbb73430a..b4ee5e9f9e20 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -6066,7 +6066,7 @@ bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu) goto reflect_vmexit; } - trace_kvm_nested_vmexit(exit_reason.full, vcpu, KVM_ISA_VMX); + trace_kvm_nested_vmexit(vcpu, KVM_ISA_VMX); /* If L0 (KVM) wants the exit, it trumps L1's desires. */ if (nested_vmx_l0_wants_exit(vcpu, exit_reason)) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index c11688f64e80..85ce11dac8fd 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -5658,11 +5658,13 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = { static const int kvm_vmx_max_exit_handlers = ARRAY_SIZE(kvm_vmx_exit_handlers); -static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2, +static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason, + u64 *info1, u64 *info2, u32 *intr_info, u32 *error_code) { struct vcpu_vmx *vmx = to_vmx(vcpu); + *reason = vmx->exit_reason.full; *info1 = vmx_get_exit_qual(vcpu); if (!(vmx->exit_reason.failed_vmentry)) { *info2 = vmx->idt_vectoring_info; @@ -6814,7 +6816,7 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu) if (likely(!vmx->exit_reason.failed_vmentry)) vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD); - trace_kvm_exit(vmx->exit_reason.full, vcpu, KVM_ISA_VMX); + trace_kvm_exit(vcpu, KVM_ISA_VMX); if (unlikely(vmx->exit_reason.failed_vmentry)) return EXIT_FASTPATH_NONE; -- cgit v1.2.3 From e615e355894e619785af81479ad6f5a05a8a2e3f Mon Sep 17 00:00:00 2001 From: David Edmondson Date: Mon, 20 Sep 2021 11:37:36 +0100 Subject: KVM: x86: On emulation failure, convey the exit reason, etc. to userspace Should instruction emulation fail, include the VM exit reason, etc. in the emulation_failure data passed to userspace, in order that the VMM can report it as a debugging aid when describing the failure. 
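On the VMM side, the payload can be decoded along these lines (a hedged user-space sketch; the reporting helper is hypothetical, while the kvm_run fields and KVM_INTERNAL_ERROR_EMULATION_FLAG_INSTRUCTION_BYTES are the existing uapi):

#include <stdio.h>
#include <linux/kvm.h>

static void report_emulation_failure(const struct kvm_run *run)
{
        __u32 i;

        fprintf(stderr, "emulation failure: suberror %u ndata %u flags 0x%llx\n",
                run->emulation_failure.suberror,
                run->emulation_failure.ndata,
                (unsigned long long)run->emulation_failure.flags);

        if (run->emulation_failure.flags &
            KVM_INTERNAL_ERROR_EMULATION_FLAG_INSTRUCTION_BYTES) {
                fprintf(stderr, "insn bytes:");
                for (i = 0; i < run->emulation_failure.insn_size; i++)
                        fprintf(stderr, " %02x",
                                run->emulation_failure.insn_bytes[i]);
                fprintf(stderr, "\n");
        }
        /* Entries beyond the flag-described fields are unstructured debug
         * data: print them if useful, but do not parse them as ABI. */
}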
Suggested-by: Joao Martins Signed-off-by: David Edmondson Reviewed-by: Sean Christopherson Message-Id: <20210920103737.2696756-4-david.edmondson@oracle.com> Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 3 ++ arch/x86/kvm/vmx/vmx.c | 5 +-- arch/x86/kvm/x86.c | 73 +++++++++++++++++++++++++++++++++-------- include/uapi/linux/kvm.h | 6 ++++ 4 files changed, 69 insertions(+), 18 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 0b8a9ea7a47b..88fce6ab4bbd 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1667,6 +1667,9 @@ extern u64 kvm_mce_cap_supported; int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type); int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu, void *insn, int insn_len); +void __kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu, + u64 *data, u8 ndata); +void kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu); void kvm_enable_efer_bits(u64); bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer); diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 85ce11dac8fd..71f54d85f104 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -5408,10 +5408,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu) if (vmx->emulation_required && !vmx->rmode.vm86_active && vcpu->arch.exception.pending) { - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - vcpu->run->internal.suberror = - KVM_INTERNAL_ERROR_EMULATION; - vcpu->run->internal.ndata = 0; + kvm_prepare_emulation_failure_exit(vcpu); return 0; } diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 0377e61b8fc0..ac83d873d65b 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -7664,29 +7664,78 @@ void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip) } EXPORT_SYMBOL_GPL(kvm_inject_realmode_interrupt); -static void prepare_emulation_failure_exit(struct kvm_vcpu *vcpu) +static void prepare_emulation_failure_exit(struct kvm_vcpu *vcpu, u64 *data, + u8 ndata, u8 *insn_bytes, u8 insn_size) { - struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; - u32 insn_size = ctxt->fetch.end - ctxt->fetch.data; struct kvm_run *run = vcpu->run; + u64 info[5]; + u8 info_start; + + /* + * Zero the whole array used to retrieve the exit info, as casting to + * u32 for select entries will leave some chunks uninitialized. + */ + memset(&info, 0, sizeof(info)); + + static_call(kvm_x86_get_exit_info)(vcpu, (u32 *)&info[0], &info[1], + &info[2], (u32 *)&info[3], + (u32 *)&info[4]); run->exit_reason = KVM_EXIT_INTERNAL_ERROR; run->emulation_failure.suberror = KVM_INTERNAL_ERROR_EMULATION; - run->emulation_failure.ndata = 0; + + /* + * There's currently space for 13 entries, but 5 are used for the exit + * reason and info. Restrict to 4 to reduce the maintenance burden + * when expanding kvm_run.emulation_failure in the future. + */ + if (WARN_ON_ONCE(ndata > 4)) + ndata = 4; + + /* Always include the flags as a 'data' entry. 
*/ + info_start = 1; run->emulation_failure.flags = 0; if (insn_size) { - run->emulation_failure.ndata = 3; + BUILD_BUG_ON((sizeof(run->emulation_failure.insn_size) + + sizeof(run->emulation_failure.insn_bytes) != 16)); + info_start += 2; run->emulation_failure.flags |= KVM_INTERNAL_ERROR_EMULATION_FLAG_INSTRUCTION_BYTES; run->emulation_failure.insn_size = insn_size; memset(run->emulation_failure.insn_bytes, 0x90, sizeof(run->emulation_failure.insn_bytes)); - memcpy(run->emulation_failure.insn_bytes, - ctxt->fetch.data, insn_size); + memcpy(run->emulation_failure.insn_bytes, insn_bytes, insn_size); } + + memcpy(&run->internal.data[info_start], info, sizeof(info)); + memcpy(&run->internal.data[info_start + ARRAY_SIZE(info)], data, + ndata * sizeof(data[0])); + + run->emulation_failure.ndata = info_start + ARRAY_SIZE(info) + ndata; } +static void prepare_emulation_ctxt_failure_exit(struct kvm_vcpu *vcpu) +{ + struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; + + prepare_emulation_failure_exit(vcpu, NULL, 0, ctxt->fetch.data, + ctxt->fetch.end - ctxt->fetch.data); +} + +void __kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu, u64 *data, + u8 ndata) +{ + prepare_emulation_failure_exit(vcpu, data, ndata, NULL, 0); +} +EXPORT_SYMBOL_GPL(__kvm_prepare_emulation_failure_exit); + +void kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu) +{ + __kvm_prepare_emulation_failure_exit(vcpu, NULL, 0); +} +EXPORT_SYMBOL_GPL(kvm_prepare_emulation_failure_exit); + static int handle_emulation_failure(struct kvm_vcpu *vcpu, int emulation_type) { struct kvm *kvm = vcpu->kvm; @@ -7701,16 +7750,14 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu, int emulation_type) if (kvm->arch.exit_on_emulation_error || (emulation_type & EMULTYPE_SKIP)) { - prepare_emulation_failure_exit(vcpu); + prepare_emulation_ctxt_failure_exit(vcpu); return 0; } kvm_queue_exception(vcpu, UD_VECTOR); if (!is_guest_mode(vcpu) && static_call(kvm_x86_get_cpl)(vcpu) == 0) { - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; - vcpu->run->internal.ndata = 0; + prepare_emulation_ctxt_failure_exit(vcpu); return 0; } @@ -12336,9 +12383,7 @@ int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r, * doesn't seem to be a real use-case behind such requests, just return * KVM_EXIT_INTERNAL_ERROR for now. */ - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; - vcpu->run->internal.ndata = 0; + kvm_prepare_emulation_failure_exit(vcpu); return 0; } diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 2c8aa8d4dac1..78f0719cc2a3 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -398,6 +398,11 @@ struct kvm_run { * "ndata" is correct, that new fields are enumerated in "flags", * and that each flag enumerates fields that are 64-bit aligned * and sized (so that ndata+internal.data[] is valid/accurate). + * + * Space beyond the defined fields may be used to store arbitrary + * debug information relating to the emulation failure. It is + * accounted for in "ndata" but the format is unspecified and is + * not represented in "flags". Any such information is *not* ABI! */ struct { __u32 suberror; @@ -409,6 +414,7 @@ struct kvm_run { __u8 insn_bytes[15]; }; }; + /* Arbitrary debug data may follow. 
*/ } emulation_failure; /* KVM_EXIT_OSI */ struct { -- cgit v1.2.3 From 0d7d84498fb43c42fa730c3a46b3048dc62165a5 Mon Sep 17 00:00:00 2001 From: David Edmondson Date: Mon, 20 Sep 2021 11:37:37 +0100 Subject: KVM: x86: SGX must obey the KVM_INTERNAL_ERROR_EMULATION protocol When passing the failing address and size out to user space, SGX must ensure not to trample on the earlier fields of the emulation_failure sub-union of struct kvm_run. Signed-off-by: David Edmondson Reviewed-by: Sean Christopherson Message-Id: <20210920103737.2696756-5-david.edmondson@oracle.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/sgx.c | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/arch/x86/kvm/vmx/sgx.c b/arch/x86/kvm/vmx/sgx.c index 6693ebdc0770..35e7ec91ae86 100644 --- a/arch/x86/kvm/vmx/sgx.c +++ b/arch/x86/kvm/vmx/sgx.c @@ -53,11 +53,9 @@ static int sgx_get_encls_gva(struct kvm_vcpu *vcpu, unsigned long offset, static void sgx_handle_emulation_failure(struct kvm_vcpu *vcpu, u64 addr, unsigned int size) { - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; - vcpu->run->internal.ndata = 2; - vcpu->run->internal.data[0] = addr; - vcpu->run->internal.data[1] = size; + uint64_t data[2] = { addr, size }; + + __kvm_prepare_emulation_failure_exit(vcpu, data, ARRAY_SIZE(data)); } static int sgx_read_hva(struct kvm_vcpu *vcpu, unsigned long hva, void *data, @@ -112,9 +110,7 @@ static int sgx_inject_fault(struct kvm_vcpu *vcpu, gva_t gva, int trapnr) * but the error code isn't (yet) plumbed through the ENCLS helpers. */ if (trapnr == PF_VECTOR && !boot_cpu_has(X86_FEATURE_SGX2)) { - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; - vcpu->run->internal.ndata = 0; + kvm_prepare_emulation_failure_exit(vcpu); return 0; } @@ -155,9 +151,7 @@ static int __handle_encls_ecreate(struct kvm_vcpu *vcpu, sgx_12_0 = kvm_find_cpuid_entry(vcpu, 0x12, 0); sgx_12_1 = kvm_find_cpuid_entry(vcpu, 0x12, 1); if (!sgx_12_0 || !sgx_12_1) { - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; - vcpu->run->internal.ndata = 0; + kvm_prepare_emulation_failure_exit(vcpu); return 0; } -- cgit v1.2.3 From 85f517b29418158d3e6e90c3f0fc01b306d2f1a1 Mon Sep 17 00:00:00 2001 From: Janis Schoetterl-Glausch Date: Fri, 22 Oct 2021 17:26:48 +0200 Subject: KVM: s390: Fix handle_sske page fault handling If handle_sske cannot set the storage key, because there is no page table entry or no present large page entry, it calls fixup_user_fault. However, currently, if the call succeeds, handle_sske returns -EAGAIN, without having set the storage key. Instead, retry by continue'ing the loop without incrementing the address. The same issue in handle_pfmf was fixed by a11bdb1a6b78 ("KVM: s390: Fix pfmf and conditional skey emulation"). 
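Condensed, the corrected per-page flow looks like this (a hedged sketch; the helper is hypothetical and uses set_guest_storage_key() instead of the SSKE-specific cond_set_guest_storage_key() to keep it short, while fixup_user_fault() is the real retry mechanism):

static int sske_one_page(unsigned long vmaddr, unsigned char key)
{
        bool unlocked = false;
        int rc;

        do {
                mmap_read_lock(current->mm);
                rc = set_guest_storage_key(current->mm, vmaddr, key, 0);
                if (rc < 0)
                        /* Build the missing page table, then retry the same
                         * address instead of bailing out with -EAGAIN. */
                        rc = fixup_user_fault(current->mm, vmaddr,
                                              FAULT_FLAG_WRITE, &unlocked) ?
                                -EFAULT : -EAGAIN;
                mmap_read_unlock(current->mm);
        } while (rc == -EAGAIN);

        return rc;      /* 0, or -EFAULT to be turned into PGM_ADDRESSING */
}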
Fixes: bd096f644319 ("KVM: s390: Add skey emulation fault handling") Signed-off-by: Janis Schoetterl-Glausch Reviewed-by: Christian Borntraeger Reviewed-by: Claudio Imbrenda Link: https://lore.kernel.org/r/20211022152648.26536-1-scgl@linux.ibm.com Signed-off-by: Christian Borntraeger --- arch/s390/kvm/priv.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 53da4ceb16a3..417154b314a6 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -397,6 +397,8 @@ static int handle_sske(struct kvm_vcpu *vcpu) mmap_read_unlock(current->mm); if (rc == -EFAULT) return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); + if (rc == -EAGAIN) + continue; if (rc < 0) return rc; start += PAGE_SIZE; -- cgit v1.2.3 From 380d97bd02fca7b9b41aec2d1c767874d602bc78 Mon Sep 17 00:00:00 2001 From: Claudio Imbrenda Date: Mon, 20 Sep 2021 15:24:54 +0200 Subject: KVM: s390: pv: properly handle page flags for protected guests Introduce variants of the convert and destroy page functions that also clear the PG_arch_1 bit used to mark them as secure pages. The PG_arch_1 flag is always allowed to overindicate; using the new functions introduced here allows to reduce the extent of overindication and thus improve performance. These new functions can only be called on pages for which a reference is already being held. Signed-off-by: Claudio Imbrenda Reviewed-by: Janosch Frank Reviewed-by: Christian Borntraeger Link: https://lore.kernel.org/r/20210920132502.36111-7-imbrenda@linux.ibm.com Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/pgtable.h | 9 ++++++--- arch/s390/include/asm/uv.h | 10 ++++++++-- arch/s390/kernel/uv.c | 34 +++++++++++++++++++++++++++++++++- arch/s390/mm/gmap.c | 4 +++- 4 files changed, 50 insertions(+), 7 deletions(-) diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index b61426c9ef17..e43416950245 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -1074,8 +1074,9 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, pte_t res; res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID)); + /* At this point the reference through the mapping is still present */ if (mm_is_protected(mm) && pte_present(res)) - uv_convert_from_secure(pte_val(res) & PAGE_MASK); + uv_convert_owned_from_secure(pte_val(res) & PAGE_MASK); return res; } @@ -1091,8 +1092,9 @@ static inline pte_t ptep_clear_flush(struct vm_area_struct *vma, pte_t res; res = ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID)); + /* At this point the reference through the mapping is still present */ if (mm_is_protected(vma->vm_mm) && pte_present(res)) - uv_convert_from_secure(pte_val(res) & PAGE_MASK); + uv_convert_owned_from_secure(pte_val(res) & PAGE_MASK); return res; } @@ -1116,8 +1118,9 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, } else { res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID)); } + /* At this point the reference through the mapping is still present */ if (mm_is_protected(mm) && pte_present(res)) - uv_convert_from_secure(pte_val(res) & PAGE_MASK); + uv_convert_owned_from_secure(pte_val(res) & PAGE_MASK); return res; } diff --git a/arch/s390/include/asm/uv.h b/arch/s390/include/asm/uv.h index 9ab1914c5b95..72d3e49c2860 100644 --- a/arch/s390/include/asm/uv.h +++ b/arch/s390/include/asm/uv.h @@ -356,8 +356,9 @@ static inline int is_prot_virt_host(void) } int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb); -int 
uv_destroy_page(unsigned long paddr); +int uv_destroy_owned_page(unsigned long paddr); int uv_convert_from_secure(unsigned long paddr); +int uv_convert_owned_from_secure(unsigned long paddr); int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr); void setup_uv(void); @@ -365,7 +366,7 @@ void setup_uv(void); #define is_prot_virt_host() 0 static inline void setup_uv(void) {} -static inline int uv_destroy_page(unsigned long paddr) +static inline int uv_destroy_owned_page(unsigned long paddr) { return 0; } @@ -374,6 +375,11 @@ static inline int uv_convert_from_secure(unsigned long paddr) { return 0; } + +static inline int uv_convert_owned_from_secure(unsigned long paddr) +{ + return 0; +} #endif #endif /* _ASM_S390_UV_H */ diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c index 09b80d371409..8b0e62507d62 100644 --- a/arch/s390/kernel/uv.c +++ b/arch/s390/kernel/uv.c @@ -100,7 +100,7 @@ static int uv_pin_shared(unsigned long paddr) * * @paddr: Absolute host address of page to be destroyed */ -int uv_destroy_page(unsigned long paddr) +static int uv_destroy_page(unsigned long paddr) { struct uv_cb_cfs uvcb = { .header.cmd = UVC_CMD_DESTR_SEC_STOR, @@ -120,6 +120,22 @@ int uv_destroy_page(unsigned long paddr) return 0; } +/* + * The caller must already hold a reference to the page + */ +int uv_destroy_owned_page(unsigned long paddr) +{ + struct page *page = phys_to_page(paddr); + int rc; + + get_page(page); + rc = uv_destroy_page(paddr); + if (!rc) + clear_bit(PG_arch_1, &page->flags); + put_page(page); + return rc; +} + /* * Requests the Ultravisor to encrypt a guest page and make it * accessible to the host for paging (export). @@ -139,6 +155,22 @@ int uv_convert_from_secure(unsigned long paddr) return 0; } +/* + * The caller must already hold a reference to the page + */ +int uv_convert_owned_from_secure(unsigned long paddr) +{ + struct page *page = phys_to_page(paddr); + int rc; + + get_page(page); + rc = uv_convert_from_secure(paddr); + if (!rc) + clear_bit(PG_arch_1, &page->flags); + put_page(page); + return rc; +} + /* * Calculate the expected ref_count for a page that would otherwise have no * further pins. This was cribbed from similar functions in other places in diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c index d63c0ccc5ccd..dfee0ebb2fac 100644 --- a/arch/s390/mm/gmap.c +++ b/arch/s390/mm/gmap.c @@ -2684,8 +2684,10 @@ static int __s390_reset_acc(pte_t *ptep, unsigned long addr, { pte_t pte = READ_ONCE(*ptep); + /* There is a reference through the mapping */ if (pte_present(pte)) - WARN_ON_ONCE(uv_destroy_page(pte_val(pte) & PAGE_MASK)); + WARN_ON_ONCE(uv_destroy_owned_page(pte_val(pte) & PAGE_MASK)); + return 0; } -- cgit v1.2.3 From 3fd8417f2c728d810a3b26d7e2008012ffb7fd01 Mon Sep 17 00:00:00 2001 From: Collin Walling Date: Tue, 26 Oct 2021 22:54:51 -0400 Subject: KVM: s390: add debug statement for diag 318 CPNC data The diag 318 data contains values that denote information regarding the guest's environment. Currently, it is unecessarily difficult to observe this value (either manually-inserted debug statements, gdb stepping, mem dumping etc). It's useful to observe this information to obtain an at-a-glance view of the guest's environment, so lets add a simple VCPU event that prints the CPNC to the s390dbf logs. 
Signed-off-by: Collin Walling Acked-by: Christian Borntraeger Link: https://lore.kernel.org/r/20211027025451.290124-1-walling@linux.ibm.com [borntraeger@de.ibm.com]: change debug level to 3 Signed-off-by: Christian Borntraeger --- arch/s390/kvm/kvm-s390.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 6482ea9139bb..c6257f625929 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -4255,6 +4255,7 @@ static void sync_regs_fmt2(struct kvm_vcpu *vcpu) if (kvm_run->kvm_dirty_regs & KVM_SYNC_DIAG318) { vcpu->arch.diag318_info.val = kvm_run->s.regs.diag318; vcpu->arch.sie_block->cpnc = vcpu->arch.diag318_info.cpnc; + VCPU_EVENT(vcpu, 3, "setting cpnc to %d", vcpu->arch.diag318_info.cpnc); } /* * If userspace sets the riccb (e.g. after migration) to a valid state, -- cgit v1.2.3 From 0a86512dc113e4de6550d49f276142009231c846 Mon Sep 17 00:00:00 2001 From: Anup Patel Date: Tue, 26 Oct 2021 22:31:35 +0530 Subject: RISC-V: KVM: Factor-out FP virtualization into separate sources The timer and SBI virtualization is already in separate sources. In future, we will have vector and AIA virtualization also added as separate sources. To align with above described modularity, we factor-out FP virtualization into separate sources. Signed-off-by: Anup Patel Message-Id: <20211026170136.2147619-3-anup.patel@wdc.com> Signed-off-by: Paolo Bonzini --- arch/riscv/include/asm/kvm_host.h | 5 +- arch/riscv/include/asm/kvm_vcpu_fp.h | 59 ++++++++++++ arch/riscv/kvm/Makefile | 1 + arch/riscv/kvm/vcpu.c | 172 ----------------------------------- arch/riscv/kvm/vcpu_fp.c | 167 ++++++++++++++++++++++++++++++++++ 5 files changed, 228 insertions(+), 176 deletions(-) create mode 100644 arch/riscv/include/asm/kvm_vcpu_fp.h create mode 100644 arch/riscv/kvm/vcpu_fp.c diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h index d7e1696cd2ec..d27878d6adf9 100644 --- a/arch/riscv/include/asm/kvm_host.h +++ b/arch/riscv/include/asm/kvm_host.h @@ -12,6 +12,7 @@ #include #include #include +#include #include #ifdef CONFIG_64BIT @@ -247,10 +248,6 @@ int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, struct kvm_cpu_trap *trap); void __kvm_riscv_switch_to(struct kvm_vcpu_arch *vcpu_arch); -void __kvm_riscv_fp_f_save(struct kvm_cpu_context *context); -void __kvm_riscv_fp_f_restore(struct kvm_cpu_context *context); -void __kvm_riscv_fp_d_save(struct kvm_cpu_context *context); -void __kvm_riscv_fp_d_restore(struct kvm_cpu_context *context); int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq); int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq); diff --git a/arch/riscv/include/asm/kvm_vcpu_fp.h b/arch/riscv/include/asm/kvm_vcpu_fp.h new file mode 100644 index 000000000000..4da9b8e0f050 --- /dev/null +++ b/arch/riscv/include/asm/kvm_vcpu_fp.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2021 Western Digital Corporation or its affiliates. 
+ * + * Authors: + * Atish Patra + * Anup Patel + */ + +#ifndef __KVM_VCPU_RISCV_FP_H +#define __KVM_VCPU_RISCV_FP_H + +#include + +struct kvm_cpu_context; + +#ifdef CONFIG_FPU +void __kvm_riscv_fp_f_save(struct kvm_cpu_context *context); +void __kvm_riscv_fp_f_restore(struct kvm_cpu_context *context); +void __kvm_riscv_fp_d_save(struct kvm_cpu_context *context); +void __kvm_riscv_fp_d_restore(struct kvm_cpu_context *context); + +void kvm_riscv_vcpu_fp_reset(struct kvm_vcpu *vcpu); +void kvm_riscv_vcpu_guest_fp_save(struct kvm_cpu_context *cntx, + unsigned long isa); +void kvm_riscv_vcpu_guest_fp_restore(struct kvm_cpu_context *cntx, + unsigned long isa); +void kvm_riscv_vcpu_host_fp_save(struct kvm_cpu_context *cntx); +void kvm_riscv_vcpu_host_fp_restore(struct kvm_cpu_context *cntx); +#else +static inline void kvm_riscv_vcpu_fp_reset(struct kvm_vcpu *vcpu) +{ +} +static inline void kvm_riscv_vcpu_guest_fp_save(struct kvm_cpu_context *cntx, + unsigned long isa) +{ +} +static inline void kvm_riscv_vcpu_guest_fp_restore( + struct kvm_cpu_context *cntx, + unsigned long isa) +{ +} +static inline void kvm_riscv_vcpu_host_fp_save(struct kvm_cpu_context *cntx) +{ +} +static inline void kvm_riscv_vcpu_host_fp_restore( + struct kvm_cpu_context *cntx) +{ +} +#endif + +int kvm_riscv_vcpu_get_reg_fp(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg, + unsigned long rtype); +int kvm_riscv_vcpu_set_reg_fp(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg, + unsigned long rtype); + +#endif diff --git a/arch/riscv/kvm/Makefile b/arch/riscv/kvm/Makefile index 3226696b8340..30cdd1df0098 100644 --- a/arch/riscv/kvm/Makefile +++ b/arch/riscv/kvm/Makefile @@ -20,6 +20,7 @@ kvm-y += tlb.o kvm-y += mmu.o kvm-y += vcpu.o kvm-y += vcpu_exit.o +kvm-y += vcpu_fp.o kvm-y += vcpu_switch.o kvm-y += vcpu_sbi.o kvm-y += vcpu_timer.o diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c index c44cabce7dd8..e92ba3e5db8c 100644 --- a/arch/riscv/kvm/vcpu.c +++ b/arch/riscv/kvm/vcpu.c @@ -38,86 +38,6 @@ const struct kvm_stats_header kvm_vcpu_stats_header = { sizeof(kvm_vcpu_stats_desc), }; -#ifdef CONFIG_FPU -static void kvm_riscv_vcpu_fp_reset(struct kvm_vcpu *vcpu) -{ - unsigned long isa = vcpu->arch.isa; - struct kvm_cpu_context *cntx = &vcpu->arch.guest_context; - - cntx->sstatus &= ~SR_FS; - if (riscv_isa_extension_available(&isa, f) || - riscv_isa_extension_available(&isa, d)) - cntx->sstatus |= SR_FS_INITIAL; - else - cntx->sstatus |= SR_FS_OFF; -} - -static void kvm_riscv_vcpu_fp_clean(struct kvm_cpu_context *cntx) -{ - cntx->sstatus &= ~SR_FS; - cntx->sstatus |= SR_FS_CLEAN; -} - -static void kvm_riscv_vcpu_guest_fp_save(struct kvm_cpu_context *cntx, - unsigned long isa) -{ - if ((cntx->sstatus & SR_FS) == SR_FS_DIRTY) { - if (riscv_isa_extension_available(&isa, d)) - __kvm_riscv_fp_d_save(cntx); - else if (riscv_isa_extension_available(&isa, f)) - __kvm_riscv_fp_f_save(cntx); - kvm_riscv_vcpu_fp_clean(cntx); - } -} - -static void kvm_riscv_vcpu_guest_fp_restore(struct kvm_cpu_context *cntx, - unsigned long isa) -{ - if ((cntx->sstatus & SR_FS) != SR_FS_OFF) { - if (riscv_isa_extension_available(&isa, d)) - __kvm_riscv_fp_d_restore(cntx); - else if (riscv_isa_extension_available(&isa, f)) - __kvm_riscv_fp_f_restore(cntx); - kvm_riscv_vcpu_fp_clean(cntx); - } -} - -static void kvm_riscv_vcpu_host_fp_save(struct kvm_cpu_context *cntx) -{ - /* No need to check host sstatus as it can be modified outside */ - if (riscv_isa_extension_available(NULL, d)) - __kvm_riscv_fp_d_save(cntx); - else if 
(riscv_isa_extension_available(NULL, f)) - __kvm_riscv_fp_f_save(cntx); -} - -static void kvm_riscv_vcpu_host_fp_restore(struct kvm_cpu_context *cntx) -{ - if (riscv_isa_extension_available(NULL, d)) - __kvm_riscv_fp_d_restore(cntx); - else if (riscv_isa_extension_available(NULL, f)) - __kvm_riscv_fp_f_restore(cntx); -} -#else -static void kvm_riscv_vcpu_fp_reset(struct kvm_vcpu *vcpu) -{ -} -static void kvm_riscv_vcpu_guest_fp_save(struct kvm_cpu_context *cntx, - unsigned long isa) -{ -} -static void kvm_riscv_vcpu_guest_fp_restore(struct kvm_cpu_context *cntx, - unsigned long isa) -{ -} -static void kvm_riscv_vcpu_host_fp_save(struct kvm_cpu_context *cntx) -{ -} -static void kvm_riscv_vcpu_host_fp_restore(struct kvm_cpu_context *cntx) -{ -} -#endif - #define KVM_RISCV_ISA_ALLOWED (riscv_isa_extension_mask(a) | \ riscv_isa_extension_mask(c) | \ riscv_isa_extension_mask(d) | \ @@ -414,98 +334,6 @@ static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu, return 0; } -static int kvm_riscv_vcpu_get_reg_fp(struct kvm_vcpu *vcpu, - const struct kvm_one_reg *reg, - unsigned long rtype) -{ - struct kvm_cpu_context *cntx = &vcpu->arch.guest_context; - unsigned long isa = vcpu->arch.isa; - unsigned long __user *uaddr = - (unsigned long __user *)(unsigned long)reg->addr; - unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK | - KVM_REG_SIZE_MASK | - rtype); - void *reg_val; - - if ((rtype == KVM_REG_RISCV_FP_F) && - riscv_isa_extension_available(&isa, f)) { - if (KVM_REG_SIZE(reg->id) != sizeof(u32)) - return -EINVAL; - if (reg_num == KVM_REG_RISCV_FP_F_REG(fcsr)) - reg_val = &cntx->fp.f.fcsr; - else if ((KVM_REG_RISCV_FP_F_REG(f[0]) <= reg_num) && - reg_num <= KVM_REG_RISCV_FP_F_REG(f[31])) - reg_val = &cntx->fp.f.f[reg_num]; - else - return -EINVAL; - } else if ((rtype == KVM_REG_RISCV_FP_D) && - riscv_isa_extension_available(&isa, d)) { - if (reg_num == KVM_REG_RISCV_FP_D_REG(fcsr)) { - if (KVM_REG_SIZE(reg->id) != sizeof(u32)) - return -EINVAL; - reg_val = &cntx->fp.d.fcsr; - } else if ((KVM_REG_RISCV_FP_D_REG(f[0]) <= reg_num) && - reg_num <= KVM_REG_RISCV_FP_D_REG(f[31])) { - if (KVM_REG_SIZE(reg->id) != sizeof(u64)) - return -EINVAL; - reg_val = &cntx->fp.d.f[reg_num]; - } else - return -EINVAL; - } else - return -EINVAL; - - if (copy_to_user(uaddr, reg_val, KVM_REG_SIZE(reg->id))) - return -EFAULT; - - return 0; -} - -static int kvm_riscv_vcpu_set_reg_fp(struct kvm_vcpu *vcpu, - const struct kvm_one_reg *reg, - unsigned long rtype) -{ - struct kvm_cpu_context *cntx = &vcpu->arch.guest_context; - unsigned long isa = vcpu->arch.isa; - unsigned long __user *uaddr = - (unsigned long __user *)(unsigned long)reg->addr; - unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK | - KVM_REG_SIZE_MASK | - rtype); - void *reg_val; - - if ((rtype == KVM_REG_RISCV_FP_F) && - riscv_isa_extension_available(&isa, f)) { - if (KVM_REG_SIZE(reg->id) != sizeof(u32)) - return -EINVAL; - if (reg_num == KVM_REG_RISCV_FP_F_REG(fcsr)) - reg_val = &cntx->fp.f.fcsr; - else if ((KVM_REG_RISCV_FP_F_REG(f[0]) <= reg_num) && - reg_num <= KVM_REG_RISCV_FP_F_REG(f[31])) - reg_val = &cntx->fp.f.f[reg_num]; - else - return -EINVAL; - } else if ((rtype == KVM_REG_RISCV_FP_D) && - riscv_isa_extension_available(&isa, d)) { - if (reg_num == KVM_REG_RISCV_FP_D_REG(fcsr)) { - if (KVM_REG_SIZE(reg->id) != sizeof(u32)) - return -EINVAL; - reg_val = &cntx->fp.d.fcsr; - } else if ((KVM_REG_RISCV_FP_D_REG(f[0]) <= reg_num) && - reg_num <= KVM_REG_RISCV_FP_D_REG(f[31])) { - if (KVM_REG_SIZE(reg->id) != sizeof(u64)) - return -EINVAL; 
- reg_val = &cntx->fp.d.f[reg_num]; - } else - return -EINVAL; - } else - return -EINVAL; - - if (copy_from_user(reg_val, uaddr, KVM_REG_SIZE(reg->id))) - return -EFAULT; - - return 0; -} - static int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) { diff --git a/arch/riscv/kvm/vcpu_fp.c b/arch/riscv/kvm/vcpu_fp.c new file mode 100644 index 000000000000..1b070152578f --- /dev/null +++ b/arch/riscv/kvm/vcpu_fp.c @@ -0,0 +1,167 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Western Digital Corporation or its affiliates. + * + * Authors: + * Atish Patra + * Anup Patel + */ + +#include +#include +#include +#include + +#ifdef CONFIG_FPU +void kvm_riscv_vcpu_fp_reset(struct kvm_vcpu *vcpu) +{ + unsigned long isa = vcpu->arch.isa; + struct kvm_cpu_context *cntx = &vcpu->arch.guest_context; + + cntx->sstatus &= ~SR_FS; + if (riscv_isa_extension_available(&isa, f) || + riscv_isa_extension_available(&isa, d)) + cntx->sstatus |= SR_FS_INITIAL; + else + cntx->sstatus |= SR_FS_OFF; +} + +void kvm_riscv_vcpu_fp_clean(struct kvm_cpu_context *cntx) +{ + cntx->sstatus &= ~SR_FS; + cntx->sstatus |= SR_FS_CLEAN; +} + +void kvm_riscv_vcpu_guest_fp_save(struct kvm_cpu_context *cntx, + unsigned long isa) +{ + if ((cntx->sstatus & SR_FS) == SR_FS_DIRTY) { + if (riscv_isa_extension_available(&isa, d)) + __kvm_riscv_fp_d_save(cntx); + else if (riscv_isa_extension_available(&isa, f)) + __kvm_riscv_fp_f_save(cntx); + kvm_riscv_vcpu_fp_clean(cntx); + } +} + +void kvm_riscv_vcpu_guest_fp_restore(struct kvm_cpu_context *cntx, + unsigned long isa) +{ + if ((cntx->sstatus & SR_FS) != SR_FS_OFF) { + if (riscv_isa_extension_available(&isa, d)) + __kvm_riscv_fp_d_restore(cntx); + else if (riscv_isa_extension_available(&isa, f)) + __kvm_riscv_fp_f_restore(cntx); + kvm_riscv_vcpu_fp_clean(cntx); + } +} + +void kvm_riscv_vcpu_host_fp_save(struct kvm_cpu_context *cntx) +{ + /* No need to check host sstatus as it can be modified outside */ + if (riscv_isa_extension_available(NULL, d)) + __kvm_riscv_fp_d_save(cntx); + else if (riscv_isa_extension_available(NULL, f)) + __kvm_riscv_fp_f_save(cntx); +} + +void kvm_riscv_vcpu_host_fp_restore(struct kvm_cpu_context *cntx) +{ + if (riscv_isa_extension_available(NULL, d)) + __kvm_riscv_fp_d_restore(cntx); + else if (riscv_isa_extension_available(NULL, f)) + __kvm_riscv_fp_f_restore(cntx); +} +#endif + +int kvm_riscv_vcpu_get_reg_fp(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg, + unsigned long rtype) +{ + struct kvm_cpu_context *cntx = &vcpu->arch.guest_context; + unsigned long isa = vcpu->arch.isa; + unsigned long __user *uaddr = + (unsigned long __user *)(unsigned long)reg->addr; + unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK | + KVM_REG_SIZE_MASK | + rtype); + void *reg_val; + + if ((rtype == KVM_REG_RISCV_FP_F) && + riscv_isa_extension_available(&isa, f)) { + if (KVM_REG_SIZE(reg->id) != sizeof(u32)) + return -EINVAL; + if (reg_num == KVM_REG_RISCV_FP_F_REG(fcsr)) + reg_val = &cntx->fp.f.fcsr; + else if ((KVM_REG_RISCV_FP_F_REG(f[0]) <= reg_num) && + reg_num <= KVM_REG_RISCV_FP_F_REG(f[31])) + reg_val = &cntx->fp.f.f[reg_num]; + else + return -EINVAL; + } else if ((rtype == KVM_REG_RISCV_FP_D) && + riscv_isa_extension_available(&isa, d)) { + if (reg_num == KVM_REG_RISCV_FP_D_REG(fcsr)) { + if (KVM_REG_SIZE(reg->id) != sizeof(u32)) + return -EINVAL; + reg_val = &cntx->fp.d.fcsr; + } else if ((KVM_REG_RISCV_FP_D_REG(f[0]) <= reg_num) && + reg_num <= KVM_REG_RISCV_FP_D_REG(f[31])) { + if (KVM_REG_SIZE(reg->id) != 
sizeof(u64))
+			return -EINVAL;
+		reg_val = &cntx->fp.d.f[reg_num];
+	} else
+		return -EINVAL;
+	} else
+		return -EINVAL;
+
+	if (copy_to_user(uaddr, reg_val, KVM_REG_SIZE(reg->id)))
+		return -EFAULT;
+
+	return 0;
+}
+
+int kvm_riscv_vcpu_set_reg_fp(struct kvm_vcpu *vcpu,
+			      const struct kvm_one_reg *reg,
+			      unsigned long rtype)
+{
+	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
+	unsigned long isa = vcpu->arch.isa;
+	unsigned long __user *uaddr =
+			(unsigned long __user *)(unsigned long)reg->addr;
+	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
+					    KVM_REG_SIZE_MASK |
+					    rtype);
+	void *reg_val;
+
+	if ((rtype == KVM_REG_RISCV_FP_F) &&
+	    riscv_isa_extension_available(&isa, f)) {
+		if (KVM_REG_SIZE(reg->id) != sizeof(u32))
+			return -EINVAL;
+		if (reg_num == KVM_REG_RISCV_FP_F_REG(fcsr))
+			reg_val = &cntx->fp.f.fcsr;
+		else if ((KVM_REG_RISCV_FP_F_REG(f[0]) <= reg_num) &&
+			  reg_num <= KVM_REG_RISCV_FP_F_REG(f[31]))
+			reg_val = &cntx->fp.f.f[reg_num];
+		else
+			return -EINVAL;
+	} else if ((rtype == KVM_REG_RISCV_FP_D) &&
+		   riscv_isa_extension_available(&isa, d)) {
+		if (reg_num == KVM_REG_RISCV_FP_D_REG(fcsr)) {
+			if (KVM_REG_SIZE(reg->id) != sizeof(u32))
+				return -EINVAL;
+			reg_val = &cntx->fp.d.fcsr;
+		} else if ((KVM_REG_RISCV_FP_D_REG(f[0]) <= reg_num) &&
+			   reg_num <= KVM_REG_RISCV_FP_D_REG(f[31])) {
+			if (KVM_REG_SIZE(reg->id) != sizeof(u64))
+				return -EINVAL;
+			reg_val = &cntx->fp.d.f[reg_num];
+		} else
+			return -EINVAL;
+	} else
+		return -EINVAL;
+
+	if (copy_from_user(reg_val, uaddr, KVM_REG_SIZE(reg->id)))
+		return -EFAULT;
+
+	return 0;
+}
-- cgit v1.2.3

From 7c8de080d476e3433d9aec0d6111758c3e4ea917 Mon Sep 17 00:00:00 2001
From: Anup Patel
Date: Tue, 26 Oct 2021 22:31:36 +0530
Subject: RISC-V: KVM: Fix GPA passed to __kvm_riscv_hfence_gvma_xyz() functions

The parameter passed to the HFENCE.GVMA instruction in the rs1 register
is the guest physical address right shifted by 2 (i.e. divided by 4).

Unfortunately, we overlooked the semantics of the rs1 register for the
HFENCE.GVMA instruction and never right shifted the guest physical
address by 2. This issue has not manifested for hypervisors so far
because:

1) Currently, only __kvm_riscv_hfence_gvma_all() and SBI HFENCE calls
   are used to invalidate the TLB.
2) All H-extension implementations (such as QEMU, Spike, Rocket Core
   FPGA, etc.) that we have tried so far conservatively flush everything
   upon any HFENCE.GVMA instruction.

This patch fixes the GPA passed to the __kvm_riscv_hfence_gvma_vmid_gpa()
and __kvm_riscv_hfence_gvma_gpa() functions.
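For illustration only (not part of the patch): a minimal, self-contained sketch of the calling convention described above, where the caller, not the assembly helper, performs the right shift by 2 before the value reaches rs1. The wrapper name flush_guest_gpa() and the printing stub are hypothetical; only the __kvm_riscv_hfence_gvma_vmid_gpa() prototype comes from the patch.

#include <stdio.h>

typedef unsigned long gpa_t;	/* simplified stand-in for the kernel's gpa_t */

/* Stub standing in for the assembly helper; it only shows what rs1 receives. */
static void __kvm_riscv_hfence_gvma_vmid_gpa(unsigned long gpa_divby_4,
					     unsigned long vmid)
{
	printf("HFENCE.GVMA rs1=0x%lx (GPA >> 2), rs2=%lu\n", gpa_divby_4, vmid);
}

/* Hypothetical wrapper: the caller is responsible for the >> 2 conversion. */
static void flush_guest_gpa(gpa_t gpa, unsigned long vmid)
{
	__kvm_riscv_hfence_gvma_vmid_gpa(gpa >> 2, vmid);
}

int main(void)
{
	flush_guest_gpa(0x80000000UL, 1);	/* rs1 becomes 0x20000000 */
	return 0;
}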
Fixes: fd7bb4a251df ("RISC-V: KVM: Implement VMID allocator")
Reported-by: Ian Huang
Signed-off-by: Anup Patel
Message-Id: <20211026170136.2147619-4-anup.patel@wdc.com>
Signed-off-by: Paolo Bonzini
---
 arch/riscv/include/asm/kvm_host.h | 5 +++--
 arch/riscv/kvm/tlb.S              | 4 ++--
 2 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
index d27878d6adf9..25ba21f98504 100644
--- a/arch/riscv/include/asm/kvm_host.h
+++ b/arch/riscv/include/asm/kvm_host.h
@@ -214,9 +214,10 @@ static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
 
-void __kvm_riscv_hfence_gvma_vmid_gpa(unsigned long gpa, unsigned long vmid);
+void __kvm_riscv_hfence_gvma_vmid_gpa(unsigned long gpa_divby_4,
+				      unsigned long vmid);
 void __kvm_riscv_hfence_gvma_vmid(unsigned long vmid);
-void __kvm_riscv_hfence_gvma_gpa(unsigned long gpa);
+void __kvm_riscv_hfence_gvma_gpa(unsigned long gpa_divby_4);
 void __kvm_riscv_hfence_gvma_all(void);
 
 int kvm_riscv_stage2_map(struct kvm_vcpu *vcpu,
diff --git a/arch/riscv/kvm/tlb.S b/arch/riscv/kvm/tlb.S
index c858570f0856..899f75d60bad 100644
--- a/arch/riscv/kvm/tlb.S
+++ b/arch/riscv/kvm/tlb.S
@@ -31,7 +31,7 @@ ENTRY(__kvm_riscv_hfence_gvma_vmid_gpa)
 	/*
-	 * rs1 = a0 (GPA)
+	 * rs1 = a0 (GPA >> 2)
 	 * rs2 = a1 (VMID)
 	 * HFENCE.GVMA a0, a1
 	 * 0110001 01011 01010 000 00000 1110011
@@ -53,7 +53,7 @@ ENDPROC(__kvm_riscv_hfence_gvma_vmid)
 ENTRY(__kvm_riscv_hfence_gvma_gpa)
 	/*
-	 * rs1 = a0 (GPA)
+	 * rs1 = a0 (GPA >> 2)
 	 * rs2 = zero
 	 * HFENCE.GVMA a0
 	 * 0110001 00000 01010 000 00000 1110011
-- cgit v1.2.3

From 7b161d9cab5d2c853b8861b2ad21bcd79d669fe3 Mon Sep 17 00:00:00 2001
From: ran jianping
Date: Thu, 21 Oct 2021 11:57:06 +0000
Subject: RISC-V: KVM: remove unneeded semicolon

Eliminate the following coccinelle check warnings:
 ./arch/riscv/kvm/vcpu_sbi.c:169:2-3: Unneeded semicolon
 ./arch/riscv/kvm/vcpu_exit.c:397:2-3: Unneeded semicolon
 ./arch/riscv/kvm/vcpu_exit.c:687:2-3: Unneeded semicolon
 ./arch/riscv/kvm/vcpu_exit.c:645:2-3: Unneeded semicolon
 ./arch/riscv/kvm/vcpu.c:247:2-3: Unneeded semicolon
 ./arch/riscv/kvm/vcpu.c:284:2-3: Unneeded semicolon
 ./arch/riscv/kvm/vcpu_timer.c:123:2-3: Unneeded semicolon
 ./arch/riscv/kvm/vcpu_timer.c:170:2-3: Unneeded semicolon

Reported-by: Zeal Robot
Signed-off-by: ran jianping
Signed-off-by: Anup Patel
---
 arch/riscv/kvm/vcpu.c       | 4 ++--
 arch/riscv/kvm/vcpu_exit.c  | 6 +++---
 arch/riscv/kvm/vcpu_sbi.c   | 2 +-
 arch/riscv/kvm/vcpu_timer.c | 4 ++--
 4 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index e92ba3e5db8c..e3d3aed46184 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -164,7 +164,7 @@ static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
 		break;
 	default:
 		return -EINVAL;
-	};
+	}
 
 	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
 		return -EFAULT;
@@ -201,7 +201,7 @@ static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
 		break;
 	default:
 		return -EINVAL;
-	};
+	}
 
 	return 0;
 }
diff --git a/arch/riscv/kvm/vcpu_exit.c b/arch/riscv/kvm/vcpu_exit.c
index 13bbc3f73713..7f2d742ae4c6 100644
--- a/arch/riscv/kvm/vcpu_exit.c
+++ b/arch/riscv/kvm/vcpu_exit.c
@@ -394,7 +394,7 @@ static int emulate_store(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		break;
 	default:
 		return -EOPNOTSUPP;
-	};
+	}
 
 	/* Update MMIO details in kvm_run struct */
 	run->mmio.is_write = true;
@@ -642,7 +642,7 @@ int kvm_riscv_vcpu_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		break;
 	default:
 		return -EOPNOTSUPP;
-	};
+	}
 
 done:
 	/* Move to next instruction */
@@ -684,7 +684,7 @@ int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		break;
 	default:
 		break;
-	};
+	}
 
 	/* Print details in-case of error */
 	if (ret < 0) {
diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c
index ebdcdbade9c6..eb3c045edf11 100644
--- a/arch/riscv/kvm/vcpu_sbi.c
+++ b/arch/riscv/kvm/vcpu_sbi.c
@@ -166,7 +166,7 @@ int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		/* Return error for unsupported SBI calls */
 		cp->a0 = SBI_ERR_NOT_SUPPORTED;
 		break;
-	};
+	}
 
 	if (next_sepc)
 		cp->sepc += 4;
diff --git a/arch/riscv/kvm/vcpu_timer.c b/arch/riscv/kvm/vcpu_timer.c
index ddd0ce727b83..5c4c37ff2d48 100644
--- a/arch/riscv/kvm/vcpu_timer.c
+++ b/arch/riscv/kvm/vcpu_timer.c
@@ -120,7 +120,7 @@ int kvm_riscv_vcpu_get_reg_timer(struct kvm_vcpu *vcpu,
 		break;
 	default:
 		return -EINVAL;
-	};
+	}
 
 	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
 		return -EFAULT;
@@ -167,7 +167,7 @@ int kvm_riscv_vcpu_set_reg_timer(struct kvm_vcpu *vcpu,
 	default:
 		ret = -EINVAL;
 		break;
-	};
+	}
 
 	return ret;
 }
-- cgit v1.2.3

From bbd5ba8db7662dbfcc15204eb105cd0c2971a47c Mon Sep 17 00:00:00 2001
From: Bixuan Cui
Date: Wed, 27 Oct 2021 15:20:53 +0800
Subject: RISC-V: KVM: fix boolreturn.cocci warnings

Fix boolreturn.cocci warnings:
 ./arch/riscv/kvm/mmu.c:603:9-10: WARNING: return of 0/1 in function 'kvm_age_gfn' with return type bool
 ./arch/riscv/kvm/mmu.c:582:9-10: WARNING: return of 0/1 in function 'kvm_set_spte_gfn' with return type bool
 ./arch/riscv/kvm/mmu.c:621:9-10: WARNING: return of 0/1 in function 'kvm_test_age_gfn' with return type bool
 ./arch/riscv/kvm/mmu.c:568:9-10: WARNING: return of 0/1 in function 'kvm_unmap_gfn_range' with return type bool

Signed-off-by: Bixuan Cui
Signed-off-by: Anup Patel
---
 arch/riscv/kvm/mmu.c | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c
index 3a00c2df7640..d81bae8eb55e 100644
--- a/arch/riscv/kvm/mmu.c
+++ b/arch/riscv/kvm/mmu.c
@@ -565,12 +565,12 @@ out:
 bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
 {
 	if (!kvm->arch.pgd)
-		return 0;
+		return false;
 
 	stage2_unmap_range(kvm, range->start << PAGE_SHIFT,
 			   (range->end - range->start) << PAGE_SHIFT,
 			   range->may_block);
 
-	return 0;
+	return false;
 }
 
 bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
@@ -579,7 +579,7 @@ bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 	kvm_pfn_t pfn = pte_pfn(range->pte);
 
 	if (!kvm->arch.pgd)
-		return 0;
+		return false;
 
 	WARN_ON(range->end - range->start != 1);
 
@@ -587,10 +587,10 @@ bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 			     __pfn_to_phys(pfn), PAGE_SIZE, true, true);
 	if (ret) {
 		kvm_debug("Failed to map stage2 page (error %d)\n", ret);
-		return 1;
+		return true;
 	}
 
-	return 0;
+	return false;
 }
 
 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
@@ -600,13 +600,13 @@ bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 	u64 size = (range->end - range->start) << PAGE_SHIFT;
 
 	if (!kvm->arch.pgd)
-		return 0;
+		return false;
 
 	WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PGDIR_SIZE);
 
 	if (!stage2_get_leaf_entry(kvm, range->start << PAGE_SHIFT,
 				   &ptep, &ptep_level))
-		return 0;
+		return false;
 
 	return ptep_test_and_clear_young(NULL, 0, ptep);
 }
@@ -618,13 +618,13 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 	u64 size = (range->end - range->start) << PAGE_SHIFT;
 
 	if (!kvm->arch.pgd)
-		return 0;
+		return false;
 
 	WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PGDIR_SIZE);
 
 	if (!stage2_get_leaf_entry(kvm, range->start << PAGE_SHIFT,
 				   &ptep, &ptep_level))
-		return 0;
+		return false;
 
 	return pte_young(*ptep);
 }
-- cgit v1.2.3
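Purely as an illustration of the convention enforced by the boolreturn.cocci fix above, and not code from the patch: C silently converts 0/1 to a bool return value, which is why coccinelle has to flag the mismatch rather than the compiler. A minimal, self-contained sketch with a hypothetical helper name:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the kvm->arch.pgd check, not from mmu.c. */
static bool table_is_present(const void *pgd)
{
	if (!pgd)
		return false;	/* was "return 0" in the pre-fix style */
	return true;
}

int main(void)
{
	printf("%d\n", table_is_present(NULL));	/* prints 0 */
	return 0;
}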