author		Sean Christopherson <seanjc@google.com>	2021-08-10 15:45:54 -0700
committer	Paolo Bonzini <pbonzini@redhat.com>	2021-08-20 16:06:35 -0400
commit		9653f2da7522c5e762e2edd2beb53170669d0a2b
tree		41327f7a51e7a15ecf4d55a528cdff7c2d7cbf09 /arch/x86
parent		71f51d2c3253645ccff69d6fa3a870f47005f0b3
KVM: x86/mmu: Drop 'shared' param from tdp_mmu_link_page()
Drop @shared from tdp_mmu_link_page() and hardcode it to work for
mmu_lock being held for read. The helper has exactly one caller and
in all likelihood will only ever have exactly one caller. Even if KVM
adds a path to install translations without an initiating page fault,
odds are very, very good that the path will just be a wrapper to the
"page fault" handler (both SNP and TDX RFCs propose patches to do
exactly that).
No functional change intended.
Cc: Ben Gardon <bgardon@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210810224554.2978735-3-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
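
To see the locking pattern described above in isolation, here is a minimal standalone C sketch, using user-space pthreads rather than kernel primitives; the names fault_handler and link_page are invented for illustration. Several "page fault" threads take an outer rwlock only for read, so they run concurrently, and the shared list they all update therefore needs its own inner lock, just as the tdp_mmu_pages list needs tdp_mmu_pages_lock once mmu_lock is held for read.

#include <pthread.h>
#include <stdio.h>

struct page { struct page *next; int id; };

static pthread_rwlock_t mmu_lock = PTHREAD_RWLOCK_INITIALIZER;  /* outer lock */
static pthread_mutex_t pages_lock = PTHREAD_MUTEX_INITIALIZER;  /* inner lock */
static struct page *pages_head;        /* list that every reader may modify */

/* Analogue of tdp_mmu_link_page() after the patch: take the inner lock
 * unconditionally, because every caller holds mmu_lock only for read. */
static void link_page(struct page *sp)
{
	pthread_mutex_lock(&pages_lock);
	sp->next = pages_head;
	pages_head = sp;
	pthread_mutex_unlock(&pages_lock);
}

static void *fault_handler(void *arg)
{
	pthread_rwlock_rdlock(&mmu_lock);  /* read mode: other faults run too */
	link_page(arg);                    /* list update still needs pages_lock */
	pthread_rwlock_unlock(&mmu_lock);
	return NULL;
}

int main(void)
{
	struct page p1 = { .id = 1 }, p2 = { .id = 2 };
	pthread_t t1, t2;

	pthread_create(&t1, NULL, fault_handler, &p1);
	pthread_create(&t2, NULL, fault_handler, &p2);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);

	for (struct page *p = pages_head; p; p = p->next)
		printf("linked page %d\n", p->id);
	return 0;
}

Build with cc -pthread. Dropping pages_lock here would be a data race for exactly the reason the patch keeps the spin_lock() unconditional: read mode on the outer lock does not serialize the list writers.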
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/kvm/mmu/tdp_mmu.c	17
1 file changed, 4 insertions(+), 13 deletions(-)
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index db636250972a..64ccfc1fa553 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -255,26 +255,17 @@ static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,
  *
  * @kvm: kvm instance
  * @sp: the new page
- * @shared: This operation may not be running under the exclusive use of
- *	    the MMU lock and the operation must synchronize with other
- *	    threads that might be adding or removing pages.
  * @account_nx: This page replaces a NX large page and should be marked for
  *		eventual reclaim.
  */
 static void tdp_mmu_link_page(struct kvm *kvm, struct kvm_mmu_page *sp,
-			      bool shared, bool account_nx)
+			      bool account_nx)
 {
-	if (shared)
-		spin_lock(&kvm->arch.tdp_mmu_pages_lock);
-	else
-		lockdep_assert_held_write(&kvm->mmu_lock);
-
+	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
 	list_add(&sp->link, &kvm->arch.tdp_mmu_pages);
 	if (account_nx)
 		account_huge_nx_page(kvm, sp);
-
-	if (shared)
-		spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
+	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
 }
 
 /**
@@ -1062,7 +1053,7 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
 					  !shadow_accessed_mask);
 
 		if (tdp_mmu_set_spte_atomic_no_dirty_log(vcpu->kvm, &iter,
 							 new_spte)) {
-			tdp_mmu_link_page(vcpu->kvm, sp, true,
+			tdp_mmu_link_page(vcpu->kvm, sp,
 					  huge_page_disallowed &&
 					  req_level >= iter.level);
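
For reference, here is the helper as it reads after this patch, reassembled from the hunks above. The kerneldoc summary line sits outside the diff context and is paraphrased, and the block comment inside the body is an annotation added here, not part of the committed code.

/*
 * tdp_mmu_link_page - add a page to the list of pages used by the TDP MMU
 *
 * @kvm: kvm instance
 * @sp: the new page
 * @account_nx: This page replaces a NX large page and should be marked for
 *		eventual reclaim.
 */
static void tdp_mmu_link_page(struct kvm *kvm, struct kvm_mmu_page *sp,
			      bool account_nx)
{
	/*
	 * The sole caller, kvm_tdp_mmu_map(), holds mmu_lock for read, so
	 * the list must be protected by tdp_mmu_pages_lock; take it
	 * unconditionally rather than branching on a @shared flag.
	 */
	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	list_add(&sp->link, &kvm->arch.tdp_mmu_pages);
	if (account_nx)
		account_huge_nx_page(kvm, sp);
	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
}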