author     Sean Christopherson <seanjc@google.com>  2021-11-11 00:03:02 +0000
committer  Paolo Bonzini <pbonzini@redhat.com>      2021-11-16 07:43:25 -0500
commit     69cdcfa6f321da2cc1dd2e62fa4a9ee256299b18 (patch)
tree       ef620336a28bf1ec75239f221ac267c85bca7ad1 /tools
parent     f4870ef3e15ab889a689f99a579fe0fe7c53a960 (diff)
KVM: selftests: Require GPA to be aligned when backed by hugepages
Assert that the GPA for a memslot backed by a hugepage is aligned to
the hugepage size and fix perf_test_util accordingly.  Lack of GPA
alignment prevents KVM from backing the guest with hugepages, e.g.
x86's write-protection of hugepages when dirty logging is activated is
otherwise not exercised.

Add a comment explaining that guest_page_size is for non-huge pages to
try and avoid confusion about what it actually tracks.

Cc: Ben Gardon <bgardon@google.com>
Cc: Yanan Wang <wangyanan55@huawei.com>
Cc: Andrew Jones <drjones@redhat.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Aaron Lewis <aaronlewis@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
[Used get_backing_src_pagesz() to determine alignment dynamically.]
Signed-off-by: David Matlack <dmatlack@google.com>
Message-Id: <20211111000310.1435032-5-dmatlack@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
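The assertion the patch adds relies on the identity that a value is aligned to a power-of-two size if and only if rounding it up to that size leaves it unchanged. A minimal standalone sketch of that check, not selftest code; the 2M backing page size, example GPA and local align_up() helper are assumed here for illustration (the selftests provide their own helper):

/*
 * Standalone sketch: a GPA is hugepage-aligned iff rounding it up to the
 * backing page size leaves it unchanged.
 */
#include <assert.h>
#include <stdint.h>

static uint64_t align_up(uint64_t x, uint64_t size)
{
	/* Valid only for power-of-two sizes, as in the selftests' helper. */
	return (x + size - 1) & ~(size - 1);
}

int main(void)
{
	uint64_t backing_src_pagesz = 2UL << 20;	/* 2M hugepage backing */
	uint64_t guest_paddr = 3UL << 30;		/* 3G, 2M-aligned: check passes */

	assert(guest_paddr == align_up(guest_paddr, backing_src_pagesz));
	return 0;
}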
Diffstat (limited to 'tools')
-rw-r--r--  tools/testing/selftests/kvm/lib/kvm_util.c        2
-rw-r--r--  tools/testing/selftests/kvm/lib/perf_test_util.c  7
2 files changed, 8 insertions(+), 1 deletion(-)
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 07f37456bba0..1f6a01c33dce 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -875,6 +875,8 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
 	if (src_type == VM_MEM_SRC_ANONYMOUS_THP)
 		alignment = max(backing_src_pagesz, alignment);
 
+	ASSERT_EQ(guest_paddr, align_up(guest_paddr, backing_src_pagesz));
+
 	/* Add enough memory to align up if necessary */
 	if (alignment > 1)
 		region->mmap_size += alignment;
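With the assertion in place, callers must hand vm_userspace_mem_region_add() a GPA that is already aligned to the backing source page size. A hedged caller-side fragment, not taken from the patch (requested_gpa, slot, npages and flags are assumed names; get_backing_src_pagesz() and align_down() are the existing selftest helpers):

	/* Hypothetical caller: round the base GPA down to the backing page size. */
	uint64_t backing_src_pagesz = get_backing_src_pagesz(src_type);
	uint64_t guest_paddr = align_down(requested_gpa, backing_src_pagesz);

	vm_userspace_mem_region_add(vm, src_type, guest_paddr, slot, npages, flags);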
diff --git a/tools/testing/selftests/kvm/lib/perf_test_util.c b/tools/testing/selftests/kvm/lib/perf_test_util.c
index 6b8d5020dc54..a015f267d945 100644
--- a/tools/testing/selftests/kvm/lib/perf_test_util.c
+++ b/tools/testing/selftests/kvm/lib/perf_test_util.c
@@ -55,11 +55,16 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
 {
 	struct kvm_vm *vm;
 	uint64_t guest_num_pages;
+	uint64_t backing_src_pagesz = get_backing_src_pagesz(backing_src);
 	int i;
 
 	pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));
 
 	perf_test_args.host_page_size = getpagesize();
+	/*
+	 * Snapshot the non-huge page size.  This is used by the guest code to
+	 * access/dirty pages at the logging granularity.
+	 */
 	perf_test_args.guest_page_size = vm_guest_mode_params[mode].page_size;
 
 	guest_num_pages = vm_adjust_num_guest_pages(mode,
@@ -92,7 +97,7 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
 
 	guest_test_phys_mem = (vm_get_max_gfn(vm) - guest_num_pages) *
 			      perf_test_args.guest_page_size;
-	guest_test_phys_mem = align_down(guest_test_phys_mem, perf_test_args.host_page_size);
+	guest_test_phys_mem = align_down(guest_test_phys_mem, backing_src_pagesz);
 #ifdef __s390x__
 	/* Align to 1M (segment size) */
 	guest_test_phys_mem = align_down(guest_test_phys_mem, 1 << 20);
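The hunk above is the functional fix: aligning guest_test_phys_mem down to the 4K host page size is not sufficient when the memslot is backed by 2M (or larger) hugepages. A small standalone illustration with assumed values, using a local align_down() helper equivalent to the selftests' one:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t align_down(uint64_t x, uint64_t size)
{
	/* Valid only for power-of-two sizes. */
	return x & ~(size - 1);
}

int main(void)
{
	uint64_t host_page_size = 4096;
	uint64_t backing_src_pagesz = 2UL << 20;	/* 2M hugepage backing */
	uint64_t gpa = 0xfffffff000;			/* 4K-aligned, not 2M-aligned */

	/* Unchanged by 4K alignment, so the base would stay hugepage-unaligned. */
	printf("4K-aligned base: 0x%" PRIx64 "\n", align_down(gpa, host_page_size));
	/* Rounded down to a 2M boundary, allowing hugepage-backed mappings. */
	printf("2M-aligned base: 0x%" PRIx64 "\n", align_down(gpa, backing_src_pagesz));
	return 0;
}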