author		Paolo Bonzini <pbonzini@redhat.com>	2019-04-17 15:28:44 +0200
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2019-05-25 18:22:29 +0200
commit		fb654d0763c83406347588f4544d6507c7ee51d7 (patch)
tree		47b65cda053b3ed96f48c5a3d53a1c03478a20c4 /tools
parent		497ce5c7f5387d7cc0a17c414b247b1e7ef7e288 (diff)
KVM: fix KVM_CLEAR_DIRTY_LOG for memory slots of unaligned size
[ Upstream commit 76d58e0f07ec203bbdfcaabd9a9fc10a5a3ed5ea ]

If a memory slot's size is not a multiple of 64 pages (256K), then the
KVM_CLEAR_DIRTY_LOG API is unusable: clearing the final 64 pages either
requires the requested page range to go beyond memslot->npages, or requires
log->num_pages to be unaligned, and kvm_clear_dirty_log_protect requires
log->num_pages to be both in range and aligned.

To allow this case, allow log->num_pages not to be a multiple of 64 if it
ends exactly on the last page of the slot.

Reported-by: Peter Xu <peterx@redhat.com>
Fixes: 98938aa8edd6 ("KVM: validate userspace input in kvm_clear_dirty_log_protect()", 2019-01-02)
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
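The relaxed check can be pictured with a short, hedged C sketch. The helper
name and parameters below (clear_dirty_log_range_ok, slot_npages) are
illustrative stand-ins for the logic described in the message above, not the
exact code in kvm_clear_dirty_log_protect:

#include <stdbool.h>
#include <stdint.h>

/*
 * Illustrative sketch of the relaxed range check (simplified stand-in, not
 * the kernel's actual code): num_pages may be unaligned to 64 only when the
 * requested range ends exactly on the last page of the memslot.
 */
static bool clear_dirty_log_range_ok(uint64_t slot_npages,
				     uint64_t first_page, uint64_t num_pages)
{
	/* The start of the range must stay 64-page aligned. */
	if (first_page & 63)
		return false;

	/* The range must fit inside the memslot. */
	if (first_page > slot_npages || num_pages > slot_npages - first_page)
		return false;

	/* An unaligned length is allowed only if it reaches the slot's end. */
	if ((num_pages & 63) && num_pages != slot_npages - first_page)
		return false;

	return true;
}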
Diffstat (limited to 'tools')
-rw-r--r--	tools/testing/selftests/kvm/dirty_log_test.c	| 9
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
index 4715cfba20dc..93f99c6b7d79 100644
--- a/tools/testing/selftests/kvm/dirty_log_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_test.c
@@ -288,8 +288,11 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
#endif
max_gfn = (1ul << (guest_pa_bits - guest_page_shift)) - 1;
guest_page_size = (1ul << guest_page_shift);
- /* 1G of guest page sized pages */
- guest_num_pages = (1ul << (30 - guest_page_shift));
+ /*
+ * A little more than 1G of guest page sized pages. Cover the
+ * case where the size is not aligned to 64 pages.
+ */
+ guest_num_pages = (1ul << (30 - guest_page_shift)) + 3;
host_page_size = getpagesize();
host_num_pages = (guest_num_pages * guest_page_size) / host_page_size +
!!((guest_num_pages * guest_page_size) % host_page_size);
@@ -359,7 +362,7 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
kvm_vm_get_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap);
#ifdef USE_CLEAR_DIRTY_LOG
kvm_vm_clear_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap, 0,
- DIV_ROUND_UP(host_num_pages, 64) * 64);
+ host_num_pages);
#endif
vm_dirty_log_verify(bmap);
iteration++;
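For context, here is a hedged userspace sketch of what the updated call
exercises: passing the exact, possibly 64-unaligned page count straight to
KVM_CLEAR_DIRTY_LOG. The wrapper, vm_fd and slot below are placeholders for
illustration, not the selftest's own helpers, and the ioctl is only usable
once the VM has the manual dirty-log-protect capability enabled.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Illustrative wrapper: clear dirty bits for an exact page count. With this
 * fix, num_pages no longer has to be rounded up to a multiple of 64 as long
 * as the range ends on the last page of the slot.
 */
static int clear_dirty_log(int vm_fd, uint32_t slot, void *bitmap,
			   uint64_t first_page, uint32_t num_pages)
{
	struct kvm_clear_dirty_log args = {
		.slot = slot,
		.num_pages = num_pages,	/* e.g. host_num_pages, no rounding */
		.first_page = first_page,
		.dirty_bitmap = bitmap,
	};

	return ioctl(vm_fd, KVM_CLEAR_DIRTY_LOG, &args);
}

In the selftest change above, that exact count is host_num_pages, which is
why the DIV_ROUND_UP(host_num_pages, 64) * 64 rounding could be dropped.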