author    Will Deacon <will@kernel.org>  2020-08-11 11:27:24 +0100
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2020-08-26 10:31:07 +0200
commit    a53dc16499fc9efd8db0b40e45f3344a0fb9c0a2 (patch)
tree      62fcefee901521dd4b324b438eec3d8577534dcf /virt
parent    903c6bd937ca84ee9e60eca6beb5c2593d0bfd4e (diff)
KVM: Pass MMU notifier range flags to kvm_unmap_hva_range()
commit fdfe7cbd58806522e799e2a50a15aee7f2cbb7b6 upstream.

The 'flags' field of 'struct mmu_notifier_range' is used to indicate
whether invalidate_range_{start,end}() are permitted to block. In the
case of kvm_mmu_notifier_invalidate_range_start(), this field is not
forwarded on to the architecture-specific implementation of
kvm_unmap_hva_range() and therefore the backend cannot sensibly decide
whether or not to block.

Add an extra 'flags' parameter to kvm_unmap_hva_range() so that
architectures are aware as to whether or not they are permitted to
block.

Cc: <stable@vger.kernel.org>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Suzuki K Poulose <suzuki.poulose@arm.com>
Cc: James Morse <james.morse@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
Message-Id: <20200811102725.7121-2-will@kernel.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
[will: Backport to 4.19; use 'blockable' instead of non-existent range flags]
Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
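As an aside, a minimal sketch of how an architecture backend could act on the
extra 'blockable' argument once it is plumbed through; this is not part of the
patch, and both helper functions named below are hypothetical, used purely for
illustration:

/*
 * Illustrative sketch only: with the notifier's blocking permission
 * forwarded as 'blockable', the arch backend can choose between a
 * sleeping and a non-sleeping teardown path. Neither helper called
 * below exists in the kernel; both names are hypothetical.
 */
int kvm_unmap_hva_range(struct kvm *kvm,
			unsigned long start, unsigned long end, bool blockable)
{
	if (!kvm->arch.pgd)
		return 0;

	if (!blockable)
		/* Caller may not sleep: use a non-blocking teardown. */
		return arch_unmap_range_nonblock(kvm, start, end);

	/* Caller allows sleeping: full walk, sleeping locks permitted. */
	return arch_unmap_range_block(kvm, start, end);
}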
Diffstat (limited to 'virt')
-rw-r--r--  virt/kvm/arm/mmu.c   2
-rw-r--r--  virt/kvm/kvm_main.c  2
2 files changed, 2 insertions, 2 deletions
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index a5bc10d30618..3957ff0ecda5 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -1825,7 +1825,7 @@ static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *dat
 }
 
 int kvm_unmap_hva_range(struct kvm *kvm,
-			unsigned long start, unsigned long end)
+			unsigned long start, unsigned long end, bool blockable)
 {
 	if (!kvm->arch.pgd)
 		return 0;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 1218ea663c6d..2155b52b17ec 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -410,7 +410,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 	 * count is also read inside the mmu_lock critical section.
 	 */
 	kvm->mmu_notifier_count++;
-	need_tlb_flush = kvm_unmap_hva_range(kvm, start, end);
+	need_tlb_flush = kvm_unmap_hva_range(kvm, start, end, blockable);
 	need_tlb_flush |= kvm->tlbs_dirty;
 	/* we've to flush the tlb before the pages can be freed */
 	if (need_tlb_flush)