Diffstat (limited to 'tools/testing/selftests/kvm')
-rw-r--r--  tools/testing/selftests/kvm/.gitignore                          |    1
-rw-r--r--  tools/testing/selftests/kvm/Makefile                            |    3
-rw-r--r--  tools/testing/selftests/kvm/aarch64/aarch32_id_regs.c           |    3
-rw-r--r--  tools/testing/selftests/kvm/aarch64/debug-exceptions.c          |  311
-rw-r--r--  tools/testing/selftests/kvm/aarch64/page_fault_test.c           | 1117
-rw-r--r--  tools/testing/selftests/kvm/access_tracking_perf_test.c         |    8
-rw-r--r--  tools/testing/selftests/kvm/demand_paging_test.c                |  228
-rw-r--r--  tools/testing/selftests/kvm/dirty_log_test.c                    |   53
-rw-r--r--  tools/testing/selftests/kvm/include/aarch64/processor.h         |   35
-rw-r--r--  tools/testing/selftests/kvm/include/kvm_util_base.h             |   31
-rw-r--r--  tools/testing/selftests/kvm/include/memstress.h                 |    3
-rw-r--r--  tools/testing/selftests/kvm/include/userfaultfd_util.h          |   45
-rw-r--r--  tools/testing/selftests/kvm/lib/aarch64/processor.c             |   55
-rw-r--r--  tools/testing/selftests/kvm/lib/elf.c                           |    3
-rw-r--r--  tools/testing/selftests/kvm/lib/kvm_util.c                      |   84
-rw-r--r--  tools/testing/selftests/kvm/lib/memstress.c                     |    3
-rw-r--r--  tools/testing/selftests/kvm/lib/riscv/processor.c               |   29
-rw-r--r--  tools/testing/selftests/kvm/lib/s390x/processor.c               |    8
-rw-r--r--  tools/testing/selftests/kvm/lib/userfaultfd_util.c              |  186
-rw-r--r--  tools/testing/selftests/kvm/lib/x86_64/processor.c              |   13
-rw-r--r--  tools/testing/selftests/kvm/memslot_modification_stress_test.c |    6
-rw-r--r--  tools/testing/selftests/kvm/memslot_perf_test.c                 |  313
22 files changed, 2053 insertions(+), 485 deletions(-)
diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore
index 082855d94c72..6ce8c488d62e 100644
--- a/tools/testing/selftests/kvm/.gitignore
+++ b/tools/testing/selftests/kvm/.gitignore
@@ -4,6 +4,7 @@
/aarch64/debug-exceptions
/aarch64/get-reg-list
/aarch64/hypercalls
+/aarch64/page_fault_test
/aarch64/psci_test
/aarch64/vcpu_width_config
/aarch64/vgic_init
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index 2275ba861e0e..947676983da1 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -48,6 +48,7 @@ LIBKVM += lib/rbtree.c
LIBKVM += lib/sparsebit.c
LIBKVM += lib/test_util.c
LIBKVM += lib/ucall_common.c
+LIBKVM += lib/userfaultfd_util.c
LIBKVM_STRING += lib/string_override.c
@@ -158,10 +159,12 @@ TEST_GEN_PROGS_aarch64 += aarch64/arch_timer
TEST_GEN_PROGS_aarch64 += aarch64/debug-exceptions
TEST_GEN_PROGS_aarch64 += aarch64/get-reg-list
TEST_GEN_PROGS_aarch64 += aarch64/hypercalls
+TEST_GEN_PROGS_aarch64 += aarch64/page_fault_test
TEST_GEN_PROGS_aarch64 += aarch64/psci_test
TEST_GEN_PROGS_aarch64 += aarch64/vcpu_width_config
TEST_GEN_PROGS_aarch64 += aarch64/vgic_init
TEST_GEN_PROGS_aarch64 += aarch64/vgic_irq
+TEST_GEN_PROGS_aarch64 += access_tracking_perf_test
TEST_GEN_PROGS_aarch64 += demand_paging_test
TEST_GEN_PROGS_aarch64 += dirty_log_test
TEST_GEN_PROGS_aarch64 += dirty_log_perf_test
diff --git a/tools/testing/selftests/kvm/aarch64/aarch32_id_regs.c b/tools/testing/selftests/kvm/aarch64/aarch32_id_regs.c
index b1d2158c0b6d..4951ac53d1f8 100644
--- a/tools/testing/selftests/kvm/aarch64/aarch32_id_regs.c
+++ b/tools/testing/selftests/kvm/aarch64/aarch32_id_regs.c
@@ -13,6 +13,7 @@
#include "kvm_util.h"
#include "processor.h"
#include "test_util.h"
+#include <linux/bitfield.h>
#define BAD_ID_REG_VAL 0x1badc0deul
@@ -145,7 +146,7 @@ static bool vcpu_aarch64_only(struct kvm_vcpu *vcpu)
vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1), &val);
- el0 = (val & ARM64_FEATURE_MASK(ID_AA64PFR0_EL0)) >> ID_AA64PFR0_EL0_SHIFT;
+ el0 = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL0), val);
return el0 == ID_AA64PFR0_ELx_64BIT_ONLY;
}
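FIELD_GET() (from <linux/bitfield.h>) derives the shift from the mask itself, which is why the explicit ID_AA64PFR0_EL0_SHIFT can be dropped. A minimal sketch of the equivalence (illustrative, not part of the patch):

    /* For a contiguous mask, these two forms extract the same field: */
    el0 = (val & ARM64_FEATURE_MASK(ID_AA64PFR0_EL0)) >> ID_AA64PFR0_EL0_SHIFT;
    el0 = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL0), val);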
diff --git a/tools/testing/selftests/kvm/aarch64/debug-exceptions.c b/tools/testing/selftests/kvm/aarch64/debug-exceptions.c
index d86c4e4d1c82..8a3fb212084a 100644
--- a/tools/testing/selftests/kvm/aarch64/debug-exceptions.c
+++ b/tools/testing/selftests/kvm/aarch64/debug-exceptions.c
@@ -2,6 +2,7 @@
#include <test_util.h>
#include <kvm_util.h>
#include <processor.h>
+#include <linux/bitfield.h>
#define MDSCR_KDE (1 << 13)
#define MDSCR_MDE (1 << 15)
@@ -11,17 +12,24 @@
#define DBGBCR_EXEC (0x0 << 3)
#define DBGBCR_EL1 (0x1 << 1)
#define DBGBCR_E (0x1 << 0)
+#define DBGBCR_LBN_SHIFT 16
+#define DBGBCR_BT_SHIFT 20
+#define DBGBCR_BT_ADDR_LINK_CTX (0x1 << DBGBCR_BT_SHIFT)
+#define DBGBCR_BT_CTX_LINK (0x3 << DBGBCR_BT_SHIFT)
#define DBGWCR_LEN8 (0xff << 5)
#define DBGWCR_RD (0x1 << 3)
#define DBGWCR_WR (0x2 << 3)
#define DBGWCR_EL1 (0x1 << 1)
#define DBGWCR_E (0x1 << 0)
+#define DBGWCR_LBN_SHIFT 16
+#define DBGWCR_WT_SHIFT 20
+#define DBGWCR_WT_LINK (0x1 << DBGWCR_WT_SHIFT)
#define SPSR_D (1 << 9)
#define SPSR_SS (1 << 21)
-extern unsigned char sw_bp, sw_bp2, hw_bp, hw_bp2, bp_svc, bp_brk, hw_wp, ss_start;
+extern unsigned char sw_bp, sw_bp2, hw_bp, hw_bp2, bp_svc, bp_brk, hw_wp, ss_start, hw_bp_ctx;
extern unsigned char iter_ss_begin, iter_ss_end;
static volatile uint64_t sw_bp_addr, hw_bp_addr;
static volatile uint64_t wp_addr, wp_data_addr;
@@ -29,8 +37,74 @@ static volatile uint64_t svc_addr;
static volatile uint64_t ss_addr[4], ss_idx;
#define PC(v) ((uint64_t)&(v))
+#define GEN_DEBUG_WRITE_REG(reg_name) \
+static void write_##reg_name(int num, uint64_t val) \
+{ \
+ switch (num) { \
+ case 0: \
+ write_sysreg(val, reg_name##0_el1); \
+ break; \
+ case 1: \
+ write_sysreg(val, reg_name##1_el1); \
+ break; \
+ case 2: \
+ write_sysreg(val, reg_name##2_el1); \
+ break; \
+ case 3: \
+ write_sysreg(val, reg_name##3_el1); \
+ break; \
+ case 4: \
+ write_sysreg(val, reg_name##4_el1); \
+ break; \
+ case 5: \
+ write_sysreg(val, reg_name##5_el1); \
+ break; \
+ case 6: \
+ write_sysreg(val, reg_name##6_el1); \
+ break; \
+ case 7: \
+ write_sysreg(val, reg_name##7_el1); \
+ break; \
+ case 8: \
+ write_sysreg(val, reg_name##8_el1); \
+ break; \
+ case 9: \
+ write_sysreg(val, reg_name##9_el1); \
+ break; \
+ case 10: \
+ write_sysreg(val, reg_name##10_el1); \
+ break; \
+ case 11: \
+ write_sysreg(val, reg_name##11_el1); \
+ break; \
+ case 12: \
+ write_sysreg(val, reg_name##12_el1); \
+ break; \
+ case 13: \
+ write_sysreg(val, reg_name##13_el1); \
+ break; \
+ case 14: \
+ write_sysreg(val, reg_name##14_el1); \
+ break; \
+ case 15: \
+ write_sysreg(val, reg_name##15_el1); \
+ break; \
+ default: \
+ GUEST_ASSERT(0); \
+ } \
+}
+
+/* Define write_dbgbcr()/write_dbgbvr()/write_dbgwcr()/write_dbgwvr() */
+GEN_DEBUG_WRITE_REG(dbgbcr)
+GEN_DEBUG_WRITE_REG(dbgbvr)
+GEN_DEBUG_WRITE_REG(dbgwcr)
+GEN_DEBUG_WRITE_REG(dbgwvr)
+
static void reset_debug_state(void)
{
+ uint8_t brps, wrps, i;
+ uint64_t dfr0;
+
asm volatile("msr daifset, #8");
write_sysreg(0, osdlr_el1);
@@ -38,11 +112,21 @@ static void reset_debug_state(void)
isb();
write_sysreg(0, mdscr_el1);
- /* This test only uses the first bp and wp slot. */
- write_sysreg(0, dbgbvr0_el1);
- write_sysreg(0, dbgbcr0_el1);
- write_sysreg(0, dbgwcr0_el1);
- write_sysreg(0, dbgwvr0_el1);
+ write_sysreg(0, contextidr_el1);
+
+ /* Reset all bcr/bvr/wcr/wvr registers */
+ dfr0 = read_sysreg(id_aa64dfr0_el1);
+ brps = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_BRPS), dfr0);
+ for (i = 0; i <= brps; i++) {
+ write_dbgbcr(i, 0);
+ write_dbgbvr(i, 0);
+ }
+ wrps = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_WRPS), dfr0);
+ for (i = 0; i <= wrps; i++) {
+ write_dbgwcr(i, 0);
+ write_dbgwvr(i, 0);
+ }
+
isb();
}
@@ -54,16 +138,10 @@ static void enable_os_lock(void)
GUEST_ASSERT(read_sysreg(oslsr_el1) & 2);
}
-static void install_wp(uint64_t addr)
+static void enable_monitor_debug_exceptions(void)
{
- uint32_t wcr;
uint32_t mdscr;
- wcr = DBGWCR_LEN8 | DBGWCR_RD | DBGWCR_WR | DBGWCR_EL1 | DBGWCR_E;
- write_sysreg(wcr, dbgwcr0_el1);
- write_sysreg(addr, dbgwvr0_el1);
- isb();
-
asm volatile("msr daifclr, #8");
mdscr = read_sysreg(mdscr_el1) | MDSCR_KDE | MDSCR_MDE;
@@ -71,21 +149,76 @@ static void install_wp(uint64_t addr)
isb();
}
-static void install_hw_bp(uint64_t addr)
+static void install_wp(uint8_t wpn, uint64_t addr)
+{
+ uint32_t wcr;
+
+ wcr = DBGWCR_LEN8 | DBGWCR_RD | DBGWCR_WR | DBGWCR_EL1 | DBGWCR_E;
+ write_dbgwcr(wpn, wcr);
+ write_dbgwvr(wpn, addr);
+
+ isb();
+
+ enable_monitor_debug_exceptions();
+}
+
+static void install_hw_bp(uint8_t bpn, uint64_t addr)
{
uint32_t bcr;
- uint32_t mdscr;
bcr = DBGBCR_LEN8 | DBGBCR_EXEC | DBGBCR_EL1 | DBGBCR_E;
- write_sysreg(bcr, dbgbcr0_el1);
- write_sysreg(addr, dbgbvr0_el1);
+ write_dbgbcr(bpn, bcr);
+ write_dbgbvr(bpn, addr);
isb();
- asm volatile("msr daifclr, #8");
+ enable_monitor_debug_exceptions();
+}
- mdscr = read_sysreg(mdscr_el1) | MDSCR_KDE | MDSCR_MDE;
- write_sysreg(mdscr, mdscr_el1);
+static void install_wp_ctx(uint8_t addr_wp, uint8_t ctx_bp, uint64_t addr,
+ uint64_t ctx)
+{
+ uint32_t wcr;
+ uint64_t ctx_bcr;
+
+ /* Setup a context-aware breakpoint for Linked Context ID Match */
+ ctx_bcr = DBGBCR_LEN8 | DBGBCR_EXEC | DBGBCR_EL1 | DBGBCR_E |
+ DBGBCR_BT_CTX_LINK;
+ write_dbgbcr(ctx_bp, ctx_bcr);
+ write_dbgbvr(ctx_bp, ctx);
+
+ /* Setup a linked watchpoint (linked to the context-aware breakpoint) */
+ wcr = DBGWCR_LEN8 | DBGWCR_RD | DBGWCR_WR | DBGWCR_EL1 | DBGWCR_E |
+ DBGWCR_WT_LINK | ((uint32_t)ctx_bp << DBGWCR_LBN_SHIFT);
+ write_dbgwcr(addr_wp, wcr);
+ write_dbgwvr(addr_wp, addr);
isb();
+
+ enable_monitor_debug_exceptions();
+}
+
+void install_hw_bp_ctx(uint8_t addr_bp, uint8_t ctx_bp, uint64_t addr,
+ uint64_t ctx)
+{
+ uint32_t addr_bcr, ctx_bcr;
+
+ /* Setup a context-aware breakpoint for Linked Context ID Match */
+ ctx_bcr = DBGBCR_LEN8 | DBGBCR_EXEC | DBGBCR_EL1 | DBGBCR_E |
+ DBGBCR_BT_CTX_LINK;
+ write_dbgbcr(ctx_bp, ctx_bcr);
+ write_dbgbvr(ctx_bp, ctx);
+
+ /*
+ * Setup a normal breakpoint for Linked Address Match, and link it
+ * to the context-aware breakpoint.
+ */
+ addr_bcr = DBGBCR_LEN8 | DBGBCR_EXEC | DBGBCR_EL1 | DBGBCR_E |
+ DBGBCR_BT_ADDR_LINK_CTX |
+ ((uint32_t)ctx_bp << DBGBCR_LBN_SHIFT);
+ write_dbgbcr(addr_bp, addr_bcr);
+ write_dbgbvr(addr_bp, addr);
+ isb();
+
+ enable_monitor_debug_exceptions();
}
static void install_ss(void)
@@ -101,52 +234,42 @@ static void install_ss(void)
static volatile char write_data;
-static void guest_code(void)
+static void guest_code(uint8_t bpn, uint8_t wpn, uint8_t ctx_bpn)
{
- GUEST_SYNC(0);
+ uint64_t ctx = 0xabcdef; /* a random context number */
/* Software-breakpoint */
reset_debug_state();
asm volatile("sw_bp: brk #0");
GUEST_ASSERT_EQ(sw_bp_addr, PC(sw_bp));
- GUEST_SYNC(1);
-
/* Hardware-breakpoint */
reset_debug_state();
- install_hw_bp(PC(hw_bp));
+ install_hw_bp(bpn, PC(hw_bp));
asm volatile("hw_bp: nop");
GUEST_ASSERT_EQ(hw_bp_addr, PC(hw_bp));
- GUEST_SYNC(2);
-
/* Hardware-breakpoint + svc */
reset_debug_state();
- install_hw_bp(PC(bp_svc));
+ install_hw_bp(bpn, PC(bp_svc));
asm volatile("bp_svc: svc #0");
GUEST_ASSERT_EQ(hw_bp_addr, PC(bp_svc));
GUEST_ASSERT_EQ(svc_addr, PC(bp_svc) + 4);
- GUEST_SYNC(3);
-
/* Hardware-breakpoint + software-breakpoint */
reset_debug_state();
- install_hw_bp(PC(bp_brk));
+ install_hw_bp(bpn, PC(bp_brk));
asm volatile("bp_brk: brk #0");
GUEST_ASSERT_EQ(sw_bp_addr, PC(bp_brk));
GUEST_ASSERT_EQ(hw_bp_addr, PC(bp_brk));
- GUEST_SYNC(4);
-
/* Watchpoint */
reset_debug_state();
- install_wp(PC(write_data));
+ install_wp(wpn, PC(write_data));
write_data = 'x';
GUEST_ASSERT_EQ(write_data, 'x');
GUEST_ASSERT_EQ(wp_data_addr, PC(write_data));
- GUEST_SYNC(5);
-
/* Single-step */
reset_debug_state();
install_ss();
@@ -160,8 +283,6 @@ static void guest_code(void)
GUEST_ASSERT_EQ(ss_addr[1], PC(ss_start) + 4);
GUEST_ASSERT_EQ(ss_addr[2], PC(ss_start) + 8);
- GUEST_SYNC(6);
-
/* OS Lock does not block software-breakpoint */
reset_debug_state();
enable_os_lock();
@@ -169,30 +290,24 @@ static void guest_code(void)
asm volatile("sw_bp2: brk #0");
GUEST_ASSERT_EQ(sw_bp_addr, PC(sw_bp2));
- GUEST_SYNC(7);
-
/* OS Lock blocking hardware-breakpoint */
reset_debug_state();
enable_os_lock();
- install_hw_bp(PC(hw_bp2));
+ install_hw_bp(bpn, PC(hw_bp2));
hw_bp_addr = 0;
asm volatile("hw_bp2: nop");
GUEST_ASSERT_EQ(hw_bp_addr, 0);
- GUEST_SYNC(8);
-
/* OS Lock blocking watchpoint */
reset_debug_state();
enable_os_lock();
write_data = '\0';
wp_data_addr = 0;
- install_wp(PC(write_data));
+ install_wp(wpn, PC(write_data));
write_data = 'x';
GUEST_ASSERT_EQ(write_data, 'x');
GUEST_ASSERT_EQ(wp_data_addr, 0);
- GUEST_SYNC(9);
-
/* OS Lock blocking single-step */
reset_debug_state();
enable_os_lock();
@@ -205,6 +320,27 @@ static void guest_code(void)
: : : "x0");
GUEST_ASSERT_EQ(ss_addr[0], 0);
+ /* Linked hardware-breakpoint */
+ hw_bp_addr = 0;
+ reset_debug_state();
+ install_hw_bp_ctx(bpn, ctx_bpn, PC(hw_bp_ctx), ctx);
+ /* Set context id */
+ write_sysreg(ctx, contextidr_el1);
+ isb();
+ asm volatile("hw_bp_ctx: nop");
+ write_sysreg(0, contextidr_el1);
+ GUEST_ASSERT_EQ(hw_bp_addr, PC(hw_bp_ctx));
+
+ /* Linked watchpoint */
+ reset_debug_state();
+ install_wp_ctx(wpn, ctx_bpn, PC(write_data), ctx);
+ /* Set context id */
+ write_sysreg(ctx, contextidr_el1);
+ isb();
+ write_data = 'x';
+ GUEST_ASSERT_EQ(write_data, 'x');
+ GUEST_ASSERT_EQ(wp_data_addr, PC(write_data));
+
GUEST_DONE();
}
@@ -276,20 +412,16 @@ static void guest_code_ss(int test_cnt)
GUEST_DONE();
}
-static int debug_version(struct kvm_vcpu *vcpu)
+static int debug_version(uint64_t id_aa64dfr0)
{
- uint64_t id_aa64dfr0;
-
- vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1), &id_aa64dfr0);
- return id_aa64dfr0 & 0xf;
+ return FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER), id_aa64dfr0);
}
-static void test_guest_debug_exceptions(void)
+static void test_guest_debug_exceptions(uint8_t bpn, uint8_t wpn, uint8_t ctx_bpn)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct ucall uc;
- int stage;
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
@@ -307,23 +439,19 @@ static void test_guest_debug_exceptions(void)
vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
ESR_EC_SVC64, guest_svc_handler);
- for (stage = 0; stage < 11; stage++) {
- vcpu_run(vcpu);
-
- switch (get_ucall(vcpu, &uc)) {
- case UCALL_SYNC:
- TEST_ASSERT(uc.args[1] == stage,
- "Stage %d: Unexpected sync ucall, got %lx",
- stage, (ulong)uc.args[1]);
- break;
- case UCALL_ABORT:
- REPORT_GUEST_ASSERT_2(uc, "values: %#lx, %#lx");
- break;
- case UCALL_DONE:
- goto done;
- default:
- TEST_FAIL("Unknown ucall %lu", uc.cmd);
- }
+ /* Specify bpn/wpn/ctx_bpn to be tested */
+ vcpu_args_set(vcpu, 3, bpn, wpn, ctx_bpn);
+ pr_debug("Use bpn#%d, wpn#%d and ctx_bpn#%d\n", bpn, wpn, ctx_bpn);
+
+ vcpu_run(vcpu);
+ switch (get_ucall(vcpu, &uc)) {
+ case UCALL_ABORT:
+ REPORT_GUEST_ASSERT_2(uc, "values: %#lx, %#lx");
+ break;
+ case UCALL_DONE:
+ goto done;
+ default:
+ TEST_FAIL("Unknown ucall %lu", uc.cmd);
}
done:
@@ -400,6 +528,43 @@ void test_single_step_from_userspace(int test_cnt)
kvm_vm_free(vm);
}
+/*
+ * Run debug testing using the various breakpoint#, watchpoint# and
+ * context-aware breakpoint# with the given ID_AA64DFR0_EL1 configuration.
+ */
+void test_guest_debug_exceptions_all(uint64_t aa64dfr0)
+{
+ uint8_t brp_num, wrp_num, ctx_brp_num, normal_brp_num, ctx_brp_base;
+ int b, w, c;
+
+ /* Number of breakpoints */
+ brp_num = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_BRPS), aa64dfr0) + 1;
+ __TEST_REQUIRE(brp_num >= 2, "At least two breakpoints are required");
+
+ /* Number of watchpoints */
+ wrp_num = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_WRPS), aa64dfr0) + 1;
+
+ /* Number of context aware breakpoints */
+ ctx_brp_num = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_CTX_CMPS), aa64dfr0) + 1;
+
+ pr_debug("%s brp_num:%d, wrp_num:%d, ctx_brp_num:%d\n", __func__,
+ brp_num, wrp_num, ctx_brp_num);
+
+ /* Number of normal (non-context aware) breakpoints */
+ normal_brp_num = brp_num - ctx_brp_num;
+
+ /* Lowest context aware breakpoint number */
+ ctx_brp_base = normal_brp_num;
+
+ /* Run tests with all supported breakpoints/watchpoints */
+ for (c = ctx_brp_base; c < ctx_brp_base + ctx_brp_num; c++) {
+ for (b = 0; b < normal_brp_num; b++) {
+ for (w = 0; w < wrp_num; w++)
+ test_guest_debug_exceptions(b, w, c);
+ }
+ }
+}
+
static void help(char *name)
{
puts("");
@@ -414,9 +579,11 @@ int main(int argc, char *argv[])
struct kvm_vm *vm;
int opt;
int ss_iteration = 10000;
+ uint64_t aa64dfr0;
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
- __TEST_REQUIRE(debug_version(vcpu) >= 6,
+ vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1), &aa64dfr0);
+ __TEST_REQUIRE(debug_version(aa64dfr0) >= 6,
"Armv8 debug architecture not supported.");
kvm_vm_free(vm);
@@ -432,7 +599,7 @@ int main(int argc, char *argv[])
}
}
- test_guest_debug_exceptions();
+ test_guest_debug_exceptions_all(aa64dfr0);
test_single_step_from_userspace(ss_iteration);
return 0;
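ID_AA64DFR0_EL1.BRPs, WRPs and CTX_CMPs each encode (count - 1), and the context-aware comparators occupy the highest-numbered breakpoint slots, which is why ctx_brp_base above equals the number of normal breakpoints. A worked example with hypothetical field values (not taken from the patch):

    /* BRPs = 5 -> 6 breakpoints; CTX_CMPs = 1 -> 2 context-aware. */
    brp_num      = 5 + 1;
    ctx_brp_num  = 1 + 1;
    ctx_brp_base = brp_num - ctx_brp_num;  /* 4: slots 4-5 are context-aware */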
diff --git a/tools/testing/selftests/kvm/aarch64/page_fault_test.c b/tools/testing/selftests/kvm/aarch64/page_fault_test.c
new file mode 100644
index 000000000000..95d22cfb7b41
--- /dev/null
+++ b/tools/testing/selftests/kvm/aarch64/page_fault_test.c
@@ -0,0 +1,1117 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * page_fault_test.c - Test stage 2 faults.
+ *
+ * This test tries different combinations of guest accesses (e.g., write,
+ * S1PTW), backing source type (e.g., anon) and types of faults (e.g., read on
+ * hugetlbfs with a hole). It checks that the expected handling method is
+ * called (e.g., uffd faults with the right address and write/read flag).
+ */
+
+#define _GNU_SOURCE
+#include <linux/bitmap.h>
+#include <fcntl.h>
+#include <test_util.h>
+#include <kvm_util.h>
+#include <processor.h>
+#include <asm/sysreg.h>
+#include <linux/bitfield.h>
+#include "guest_modes.h"
+#include "userfaultfd_util.h"
+
+/* Guest virtual addresses that point to the test page and its PTE. */
+#define TEST_GVA 0xc0000000
+#define TEST_EXEC_GVA (TEST_GVA + 0x8)
+#define TEST_PTE_GVA 0xb0000000
+#define TEST_DATA 0x0123456789ABCDEF
+
+static uint64_t *guest_test_memory = (uint64_t *)TEST_GVA;
+
+#define CMD_NONE (0)
+#define CMD_SKIP_TEST (1ULL << 1)
+#define CMD_HOLE_PT (1ULL << 2)
+#define CMD_HOLE_DATA (1ULL << 3)
+#define CMD_CHECK_WRITE_IN_DIRTY_LOG (1ULL << 4)
+#define CMD_CHECK_S1PTW_WR_IN_DIRTY_LOG (1ULL << 5)
+#define CMD_CHECK_NO_WRITE_IN_DIRTY_LOG (1ULL << 6)
+#define CMD_CHECK_NO_S1PTW_WR_IN_DIRTY_LOG (1ULL << 7)
+#define CMD_SET_PTE_AF (1ULL << 8)
+
+#define PREPARE_FN_NR 10
+#define CHECK_FN_NR 10
+
+static struct event_cnt {
+ int mmio_exits;
+ int fail_vcpu_runs;
+ int uffd_faults;
+ /* uffd_faults is incremented from multiple threads. */
+ pthread_mutex_t uffd_faults_mutex;
+} events;
+
+struct test_desc {
+ const char *name;
+ uint64_t mem_mark_cmd;
+ /* Skip the test if any prepare function returns false */
+ bool (*guest_prepare[PREPARE_FN_NR])(void);
+ void (*guest_test)(void);
+ void (*guest_test_check[CHECK_FN_NR])(void);
+ uffd_handler_t uffd_pt_handler;
+ uffd_handler_t uffd_data_handler;
+ void (*dabt_handler)(struct ex_regs *regs);
+ void (*iabt_handler)(struct ex_regs *regs);
+ void (*mmio_handler)(struct kvm_vm *vm, struct kvm_run *run);
+ void (*fail_vcpu_run_handler)(int ret);
+ uint32_t pt_memslot_flags;
+ uint32_t data_memslot_flags;
+ bool skip;
+ struct event_cnt expected_events;
+};
+
+struct test_params {
+ enum vm_mem_backing_src_type src_type;
+ struct test_desc *test_desc;
+};
+
+static inline void flush_tlb_page(uint64_t vaddr)
+{
+ uint64_t page = vaddr >> 12;
+
+ dsb(ishst);
+ asm volatile("tlbi vaae1is, %0" :: "r" (page));
+ dsb(ish);
+ isb();
+}
+
+static void guest_write64(void)
+{
+ uint64_t val;
+
+ WRITE_ONCE(*guest_test_memory, TEST_DATA);
+ val = READ_ONCE(*guest_test_memory);
+ GUEST_ASSERT_EQ(val, TEST_DATA);
+}
+
+/* Check the system for atomic instructions. */
+static bool guest_check_lse(void)
+{
+ uint64_t isar0 = read_sysreg(id_aa64isar0_el1);
+ uint64_t atomic;
+
+ atomic = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR0_ATOMICS), isar0);
+ return atomic >= 2;
+}
+
+static bool guest_check_dc_zva(void)
+{
+ uint64_t dczid = read_sysreg(dczid_el0);
+ uint64_t dzp = FIELD_GET(ARM64_FEATURE_MASK(DCZID_DZP), dczid);
+
+ return dzp == 0;
+}
+
+/* Compare and swap instruction. */
+static void guest_cas(void)
+{
+ uint64_t val;
+
+ GUEST_ASSERT(guest_check_lse());
+ asm volatile(".arch_extension lse\n"
+ "casal %0, %1, [%2]\n"
+ :: "r" (0), "r" (TEST_DATA), "r" (guest_test_memory));
+ val = READ_ONCE(*guest_test_memory);
+ GUEST_ASSERT_EQ(val, TEST_DATA);
+}
+
+static void guest_read64(void)
+{
+ uint64_t val;
+
+ val = READ_ONCE(*guest_test_memory);
+ GUEST_ASSERT_EQ(val, 0);
+}
+
+/* Address translation instruction */
+static void guest_at(void)
+{
+ uint64_t par;
+
+ asm volatile("at s1e1r, %0" :: "r" (guest_test_memory));
+ par = read_sysreg(par_el1);
+ isb();
+
+ /* PAR_EL1.F (bit 0) indicates whether the AT succeeded (0 = success) */
+ GUEST_ASSERT_EQ(par & 1, 0);
+}
+
+/*
+ * The size of the block written by "dc zva" is guaranteed to be between
+ * 4 bytes (BS = 0) and 2KB (BS = 9), which is safe in our case as we need
+ * the write to happen for at least a word, and not more than a page.
+ */
+static void guest_dc_zva(void)
+{
+ uint16_t val;
+
+ asm volatile("dc zva, %0" :: "r" (guest_test_memory));
+ dsb(ish);
+ val = READ_ONCE(*guest_test_memory);
+ GUEST_ASSERT_EQ(val, 0);
+}
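For reference, the guest could derive the exact block size from DCZID_EL0, whose BS field is the log2 of the block size in words; a brief sketch (the test itself relies only on the bounds above):

    uint64_t dczid = read_sysreg(dczid_el0);
    unsigned int bs = dczid & 0xf;      /* DCZID_EL0.BS: log2(block size in words) */
    size_t zva_bytes = 4UL << bs;       /* 4 bytes .. 2KB */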
+
+/*
+ * Pre-indexing loads and stores don't have a valid syndrome (ESR_EL2.ISV==0).
+ * And that's special because KVM must take special care with those: they
+ * should still count as accesses for dirty logging or user-faulting, but
+ * should be handled differently on mmio.
+ */
+static void guest_ld_preidx(void)
+{
+ uint64_t val;
+ uint64_t addr = TEST_GVA - 8;
+
+ /*
+ * This ends up accessing "(TEST_GVA - 8) + 8", where "TEST_GVA - 8" is
+ * in a gap between memslots not backed by anything.
+ */
+ asm volatile("ldr %0, [%1, #8]!"
+ : "=r" (val), "+r" (addr));
+ GUEST_ASSERT_EQ(val, 0);
+ GUEST_ASSERT_EQ(addr, TEST_GVA);
+}
+
+static void guest_st_preidx(void)
+{
+ uint64_t val = TEST_DATA;
+ uint64_t addr = TEST_GVA - 8;
+
+ asm volatile("str %0, [%1, #8]!"
+ : "+r" (val), "+r" (addr));
+
+ GUEST_ASSERT_EQ(addr, TEST_GVA);
+ val = READ_ONCE(*guest_test_memory);
+}
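For context, KVM can only emulate an access as MMIO when the fault syndrome is valid; a sketch of the check a hypervisor would make (esr here is a hypothetical local; ESR_ELx.ISV is bit 24):

    /* ISV clear: no register/width info in the syndrome, so the access
     * cannot be emulated as MMIO and the vcpu run fails instead. */
    bool valid_syndrome = esr & (1UL << 24);    /* ESR_ELx_ISV */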
+
+static bool guest_set_ha(void)
+{
+ uint64_t mmfr1 = read_sysreg(id_aa64mmfr1_el1);
+ uint64_t hadbs, tcr;
+
+ /* Skip if HA is not supported. */
+ hadbs = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR1_HADBS), mmfr1);
+ if (hadbs == 0)
+ return false;
+
+ tcr = read_sysreg(tcr_el1) | TCR_EL1_HA;
+ write_sysreg(tcr, tcr_el1);
+ isb();
+
+ return true;
+}
+
+static bool guest_clear_pte_af(void)
+{
+ *((uint64_t *)TEST_PTE_GVA) &= ~PTE_AF;
+ flush_tlb_page(TEST_GVA);
+
+ return true;
+}
+
+static void guest_check_pte_af(void)
+{
+ dsb(ish);
+ GUEST_ASSERT_EQ(*((uint64_t *)TEST_PTE_GVA) & PTE_AF, PTE_AF);
+}
+
+static void guest_check_write_in_dirty_log(void)
+{
+ GUEST_SYNC(CMD_CHECK_WRITE_IN_DIRTY_LOG);
+}
+
+static void guest_check_no_write_in_dirty_log(void)
+{
+ GUEST_SYNC(CMD_CHECK_NO_WRITE_IN_DIRTY_LOG);
+}
+
+static void guest_check_s1ptw_wr_in_dirty_log(void)
+{
+ GUEST_SYNC(CMD_CHECK_S1PTW_WR_IN_DIRTY_LOG);
+}
+
+static void guest_exec(void)
+{
+ int (*code)(void) = (int (*)(void))TEST_EXEC_GVA;
+ int ret;
+
+ ret = code();
+ GUEST_ASSERT_EQ(ret, 0x77);
+}
+
+static bool guest_prepare(struct test_desc *test)
+{
+ bool (*prepare_fn)(void);
+ int i;
+
+ for (i = 0; i < PREPARE_FN_NR; i++) {
+ prepare_fn = test->guest_prepare[i];
+ if (prepare_fn && !prepare_fn())
+ return false;
+ }
+
+ return true;
+}
+
+static void guest_test_check(struct test_desc *test)
+{
+ void (*check_fn)(void);
+ int i;
+
+ for (i = 0; i < CHECK_FN_NR; i++) {
+ check_fn = test->guest_test_check[i];
+ if (check_fn)
+ check_fn();
+ }
+}
+
+static void guest_code(struct test_desc *test)
+{
+ if (!guest_prepare(test))
+ GUEST_SYNC(CMD_SKIP_TEST);
+
+ GUEST_SYNC(test->mem_mark_cmd);
+
+ if (test->guest_test)
+ test->guest_test();
+
+ guest_test_check(test);
+ GUEST_DONE();
+}
+
+static void no_dabt_handler(struct ex_regs *regs)
+{
+ GUEST_ASSERT_1(false, read_sysreg(far_el1));
+}
+
+static void no_iabt_handler(struct ex_regs *regs)
+{
+ GUEST_ASSERT_1(false, regs->pc);
+}
+
+static struct uffd_args {
+ char *copy;
+ void *hva;
+ uint64_t paging_size;
+} pt_args, data_args;
+
+/* Returns 0 on success; a negative value tells the uffd thread to stop. */
+static int uffd_generic_handler(int uffd_mode, int uffd, struct uffd_msg *msg,
+ struct uffd_args *args, bool expect_write)
+{
+ uint64_t addr = msg->arg.pagefault.address;
+ uint64_t flags = msg->arg.pagefault.flags;
+ struct uffdio_copy copy;
+ int ret;
+
+ TEST_ASSERT(uffd_mode == UFFDIO_REGISTER_MODE_MISSING,
+ "The only expected UFFD mode is MISSING");
+ ASSERT_EQ(!!(flags & UFFD_PAGEFAULT_FLAG_WRITE), expect_write);
+ ASSERT_EQ(addr, (uint64_t)args->hva);
+
+ pr_debug("uffd fault: addr=%p write=%d\n",
+ (void *)addr, !!(flags & UFFD_PAGEFAULT_FLAG_WRITE));
+
+ copy.src = (uint64_t)args->copy;
+ copy.dst = addr;
+ copy.len = args->paging_size;
+ copy.mode = 0;
+
+ ret = ioctl(uffd, UFFDIO_COPY, &copy);
+ if (ret == -1) {
+ pr_info("Failed UFFDIO_COPY in 0x%lx with errno: %d\n",
+ addr, errno);
+ return ret;
+ }
+
+ pthread_mutex_lock(&events.uffd_faults_mutex);
+ events.uffd_faults += 1;
+ pthread_mutex_unlock(&events.uffd_faults_mutex);
+ return 0;
+}
+
+static int uffd_pt_write_handler(int mode, int uffd, struct uffd_msg *msg)
+{
+ return uffd_generic_handler(mode, uffd, msg, &pt_args, true);
+}
+
+static int uffd_data_write_handler(int mode, int uffd, struct uffd_msg *msg)
+{
+ return uffd_generic_handler(mode, uffd, msg, &data_args, true);
+}
+
+static int uffd_data_read_handler(int mode, int uffd, struct uffd_msg *msg)
+{
+ return uffd_generic_handler(mode, uffd, msg, &data_args, false);
+}
+
+static void setup_uffd_args(struct userspace_mem_region *region,
+ struct uffd_args *args)
+{
+ args->hva = (void *)region->region.userspace_addr;
+ args->paging_size = region->region.memory_size;
+
+ args->copy = malloc(args->paging_size);
+ TEST_ASSERT(args->copy, "Failed to allocate data copy.");
+ memcpy(args->copy, args->hva, args->paging_size);
+}
+
+static void setup_uffd(struct kvm_vm *vm, struct test_params *p,
+ struct uffd_desc **pt_uffd, struct uffd_desc **data_uffd)
+{
+ struct test_desc *test = p->test_desc;
+ int uffd_mode = UFFDIO_REGISTER_MODE_MISSING;
+
+ setup_uffd_args(vm_get_mem_region(vm, MEM_REGION_PT), &pt_args);
+ setup_uffd_args(vm_get_mem_region(vm, MEM_REGION_TEST_DATA), &data_args);
+
+ *pt_uffd = NULL;
+ if (test->uffd_pt_handler)
+ *pt_uffd = uffd_setup_demand_paging(uffd_mode, 0,
+ pt_args.hva,
+ pt_args.paging_size,
+ test->uffd_pt_handler);
+
+ *data_uffd = NULL;
+ if (test->uffd_data_handler)
+ *data_uffd = uffd_setup_demand_paging(uffd_mode, 0,
+ data_args.hva,
+ data_args.paging_size,
+ test->uffd_data_handler);
+}
+
+static void free_uffd(struct test_desc *test, struct uffd_desc *pt_uffd,
+ struct uffd_desc *data_uffd)
+{
+ if (test->uffd_pt_handler)
+ uffd_stop_demand_paging(pt_uffd);
+ if (test->uffd_data_handler)
+ uffd_stop_demand_paging(data_uffd);
+
+ free(pt_args.copy);
+ free(data_args.copy);
+}
+
+static int uffd_no_handler(int mode, int uffd, struct uffd_msg *msg)
+{
+ TEST_FAIL("There was no UFFD fault expected.");
+ return -1;
+}
+
+/* Returns false if the test should be skipped. */
+static bool punch_hole_in_backing_store(struct kvm_vm *vm,
+ struct userspace_mem_region *region)
+{
+ void *hva = (void *)region->region.userspace_addr;
+ uint64_t paging_size = region->region.memory_size;
+ int ret, fd = region->fd;
+
+ if (fd != -1) {
+ ret = fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
+ 0, paging_size);
+ TEST_ASSERT(ret == 0, "fallocate failed\n");
+ } else {
+ ret = madvise(hva, paging_size, MADV_DONTNEED);
+ TEST_ASSERT(ret == 0, "madvise failed\n");
+ }
+
+ return true;
+}
+
+static void mmio_on_test_gpa_handler(struct kvm_vm *vm, struct kvm_run *run)
+{
+ struct userspace_mem_region *region;
+ void *hva;
+
+ region = vm_get_mem_region(vm, MEM_REGION_TEST_DATA);
+ hva = (void *)region->region.userspace_addr;
+
+ ASSERT_EQ(run->mmio.phys_addr, region->region.guest_phys_addr);
+
+ memcpy(hva, run->mmio.data, run->mmio.len);
+ events.mmio_exits += 1;
+}
+
+static void mmio_no_handler(struct kvm_vm *vm, struct kvm_run *run)
+{
+ uint64_t data;
+
+ memcpy(&data, run->mmio.data, sizeof(data));
+ pr_debug("addr=%lld len=%d w=%d data=%lx\n",
+ run->mmio.phys_addr, run->mmio.len,
+ run->mmio.is_write, data);
+ TEST_FAIL("There was no MMIO exit expected.");
+}
+
+static bool check_write_in_dirty_log(struct kvm_vm *vm,
+ struct userspace_mem_region *region,
+ uint64_t host_pg_nr)
+{
+ unsigned long *bmap;
+ bool first_page_dirty;
+ uint64_t size = region->region.memory_size;
+
+ /* getpagesize() is not always equal to vm->page_size */
+ bmap = bitmap_zalloc(size / getpagesize());
+ kvm_vm_get_dirty_log(vm, region->region.slot, bmap);
+ first_page_dirty = test_bit(host_pg_nr, bmap);
+ free(bmap);
+ return first_page_dirty;
+}
+
+/* Returns true to continue the test, and false if it should be skipped. */
+static bool handle_cmd(struct kvm_vm *vm, int cmd)
+{
+ struct userspace_mem_region *data_region, *pt_region;
+ bool continue_test = true;
+
+ data_region = vm_get_mem_region(vm, MEM_REGION_TEST_DATA);
+ pt_region = vm_get_mem_region(vm, MEM_REGION_PT);
+
+ if (cmd == CMD_SKIP_TEST)
+ continue_test = false;
+
+ if (cmd & CMD_HOLE_PT)
+ continue_test = punch_hole_in_backing_store(vm, pt_region);
+ if (cmd & CMD_HOLE_DATA)
+ continue_test = punch_hole_in_backing_store(vm, data_region);
+ if (cmd & CMD_CHECK_WRITE_IN_DIRTY_LOG)
+ TEST_ASSERT(check_write_in_dirty_log(vm, data_region, 0),
+ "Missing write in dirty log");
+ if (cmd & CMD_CHECK_S1PTW_WR_IN_DIRTY_LOG)
+ TEST_ASSERT(check_write_in_dirty_log(vm, pt_region, 0),
+ "Missing s1ptw write in dirty log");
+ if (cmd & CMD_CHECK_NO_WRITE_IN_DIRTY_LOG)
+ TEST_ASSERT(!check_write_in_dirty_log(vm, data_region, 0),
+ "Unexpected write in dirty log");
+ if (cmd & CMD_CHECK_NO_S1PTW_WR_IN_DIRTY_LOG)
+ TEST_ASSERT(!check_write_in_dirty_log(vm, pt_region, 0),
+ "Unexpected s1ptw write in dirty log");
+
+ return continue_test;
+}
+
+void fail_vcpu_run_no_handler(int ret)
+{
+ TEST_FAIL("Unexpected vcpu run failure\n");
+}
+
+void fail_vcpu_run_mmio_no_syndrome_handler(int ret)
+{
+ TEST_ASSERT(errno == ENOSYS,
+ "The mmio handler should have returned not implemented.");
+ events.fail_vcpu_runs += 1;
+}
+
+typedef uint32_t aarch64_insn_t;
+extern aarch64_insn_t __exec_test[2];
+
+noinline void __return_0x77(void)
+{
+ asm volatile("__exec_test: mov x0, #0x77\n"
+ "ret\n");
+}
+
+/*
+ * Note that this function runs on the host before the test VM starts: there's
+ * no need to sync the D$ and I$ caches.
+ */
+static void load_exec_code_for_test(struct kvm_vm *vm)
+{
+ uint64_t *code;
+ struct userspace_mem_region *region;
+ void *hva;
+
+ region = vm_get_mem_region(vm, MEM_REGION_TEST_DATA);
+ hva = (void *)region->region.userspace_addr;
+
+ assert(TEST_EXEC_GVA > TEST_GVA);
+ code = hva + TEST_EXEC_GVA - TEST_GVA;
+ memcpy(code, __exec_test, sizeof(__exec_test));
+}
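Had the guest copied this code itself, the usual point-of-unification cache maintenance would be required before executing it; roughly (illustrative only, not needed here since the host writes the code before the VM starts):

    asm volatile("dc cvau, %0\n\t"
                 "dsb ish\n\t"
                 "ic ivau, %0\n\t"
                 "dsb ish\n\t"
                 "isb"
                 :: "r" (code) : "memory");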
+
+static void setup_abort_handlers(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
+ struct test_desc *test)
+{
+ vm_init_descriptor_tables(vm);
+ vcpu_init_descriptor_tables(vcpu);
+
+ vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
+ ESR_EC_DABT, no_dabt_handler);
+ vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
+ ESR_EC_IABT, no_iabt_handler);
+}
+
+static void setup_gva_maps(struct kvm_vm *vm)
+{
+ struct userspace_mem_region *region;
+ uint64_t pte_gpa;
+
+ region = vm_get_mem_region(vm, MEM_REGION_TEST_DATA);
+ /* Map TEST_GVA first. This will install a new PTE. */
+ virt_pg_map(vm, TEST_GVA, region->region.guest_phys_addr);
+ /* Then map TEST_PTE_GVA to the above PTE. */
+ pte_gpa = addr_hva2gpa(vm, virt_get_pte_hva(vm, TEST_GVA));
+ virt_pg_map(vm, TEST_PTE_GVA, pte_gpa);
+}
+
+enum pf_test_memslots {
+ CODE_AND_DATA_MEMSLOT,
+ PAGE_TABLE_MEMSLOT,
+ TEST_DATA_MEMSLOT,
+};
+
+/*
+ * Create a memslot for code and data at pfn=0, and test-data and PT ones
+ * at max_gfn.
+ */
+static void setup_memslots(struct kvm_vm *vm, struct test_params *p)
+{
+ uint64_t backing_src_pagesz = get_backing_src_pagesz(p->src_type);
+ uint64_t guest_page_size = vm->page_size;
+ uint64_t max_gfn = vm_compute_max_gfn(vm);
+ /* Enough for 2M of code when using 4K guest pages. */
+ uint64_t code_npages = 512;
+ uint64_t pt_size, data_size, data_gpa;
+
+ /*
+ * This test requires 1 pgd, 2 pud, 4 pmd, and 6 pte pages when using
+ * VM_MODE_P48V48_4K. Note that the .text takes ~1.6MB. That's 13
+ * pages. VM_MODE_P48V48_4K is the mode with the most PT pages; let's use
+ * twice that just in case.
+ */
+ pt_size = 26 * guest_page_size;
+
+ /* memslot sizes and gpa's must be aligned to the backing page size */
+ pt_size = align_up(pt_size, backing_src_pagesz);
+ data_size = align_up(guest_page_size, backing_src_pagesz);
+ data_gpa = (max_gfn * guest_page_size) - data_size;
+ data_gpa = align_down(data_gpa, backing_src_pagesz);
+
+ vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, 0,
+ CODE_AND_DATA_MEMSLOT, code_npages, 0);
+ vm->memslots[MEM_REGION_CODE] = CODE_AND_DATA_MEMSLOT;
+ vm->memslots[MEM_REGION_DATA] = CODE_AND_DATA_MEMSLOT;
+
+ vm_userspace_mem_region_add(vm, p->src_type, data_gpa - pt_size,
+ PAGE_TABLE_MEMSLOT, pt_size / guest_page_size,
+ p->test_desc->pt_memslot_flags);
+ vm->memslots[MEM_REGION_PT] = PAGE_TABLE_MEMSLOT;
+
+ vm_userspace_mem_region_add(vm, p->src_type, data_gpa, TEST_DATA_MEMSLOT,
+ data_size / guest_page_size,
+ p->test_desc->data_memslot_flags);
+ vm->memslots[MEM_REGION_TEST_DATA] = TEST_DATA_MEMSLOT;
+}
+
+static void setup_ucall(struct kvm_vm *vm)
+{
+ struct userspace_mem_region *region = vm_get_mem_region(vm, MEM_REGION_TEST_DATA);
+
+ ucall_init(vm, region->region.guest_phys_addr + region->region.memory_size);
+}
+
+static void setup_default_handlers(struct test_desc *test)
+{
+ if (!test->mmio_handler)
+ test->mmio_handler = mmio_no_handler;
+
+ if (!test->fail_vcpu_run_handler)
+ test->fail_vcpu_run_handler = fail_vcpu_run_no_handler;
+}
+
+static void check_event_counts(struct test_desc *test)
+{
+ ASSERT_EQ(test->expected_events.uffd_faults, events.uffd_faults);
+ ASSERT_EQ(test->expected_events.mmio_exits, events.mmio_exits);
+ ASSERT_EQ(test->expected_events.fail_vcpu_runs, events.fail_vcpu_runs);
+}
+
+static void print_test_banner(enum vm_guest_mode mode, struct test_params *p)
+{
+ struct test_desc *test = p->test_desc;
+
+ pr_debug("Test: %s\n", test->name);
+ pr_debug("Testing guest mode: %s\n", vm_guest_mode_string(mode));
+ pr_debug("Testing memory backing src type: %s\n",
+ vm_mem_backing_src_alias(p->src_type)->name);
+}
+
+static void reset_event_counts(void)
+{
+ memset(&events, 0, sizeof(events));
+}
+
+/*
+ * This function either succeeds, skips the test (after setting test->skip), or
+ * fails with a TEST_FAIL that aborts all tests.
+ */
+static void vcpu_run_loop(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
+ struct test_desc *test)
+{
+ struct kvm_run *run;
+ struct ucall uc;
+ int ret;
+
+ run = vcpu->run;
+
+ for (;;) {
+ ret = _vcpu_run(vcpu);
+ if (ret) {
+ test->fail_vcpu_run_handler(ret);
+ goto done;
+ }
+
+ switch (get_ucall(vcpu, &uc)) {
+ case UCALL_SYNC:
+ if (!handle_cmd(vm, uc.args[1])) {
+ test->skip = true;
+ goto done;
+ }
+ break;
+ case UCALL_ABORT:
+ REPORT_GUEST_ASSERT_2(uc, "values: %#lx, %#lx");
+ break;
+ case UCALL_DONE:
+ goto done;
+ case UCALL_NONE:
+ if (run->exit_reason == KVM_EXIT_MMIO)
+ test->mmio_handler(vm, run);
+ break;
+ default:
+ TEST_FAIL("Unknown ucall %lu", uc.cmd);
+ }
+ }
+
+done:
+ pr_debug(test->skip ? "Skipped.\n" : "Done.\n");
+}
+
+static void run_test(enum vm_guest_mode mode, void *arg)
+{
+ struct test_params *p = (struct test_params *)arg;
+ struct test_desc *test = p->test_desc;
+ struct kvm_vm *vm;
+ struct kvm_vcpu *vcpu;
+ struct uffd_desc *pt_uffd, *data_uffd;
+
+ print_test_banner(mode, p);
+
+ vm = ____vm_create(mode);
+ setup_memslots(vm, p);
+ kvm_vm_elf_load(vm, program_invocation_name);
+ setup_ucall(vm);
+ vcpu = vm_vcpu_add(vm, 0, guest_code);
+
+ setup_gva_maps(vm);
+
+ reset_event_counts();
+
+ /*
+ * Set some code in the data memslot for the guest to execute (only
+ * applicable to the EXEC tests). This has to be done before
+ * setup_uffd() as that function copies the memslot data for the uffd
+ * handler.
+ */
+ load_exec_code_for_test(vm);
+ setup_uffd(vm, p, &pt_uffd, &data_uffd);
+ setup_abort_handlers(vm, vcpu, test);
+ setup_default_handlers(test);
+ vcpu_args_set(vcpu, 1, test);
+
+ vcpu_run_loop(vm, vcpu, test);
+
+ kvm_vm_free(vm);
+ free_uffd(test, pt_uffd, data_uffd);
+
+ /*
+ * Make sure we check the events after the uffd threads have exited,
+ * which means they updated their respective event counters.
+ */
+ if (!test->skip)
+ check_event_counts(test);
+}
+
+static void help(char *name)
+{
+ puts("");
+ printf("usage: %s [-h] [-s mem-type]\n", name);
+ puts("");
+ guest_modes_help();
+ backing_src_help("-s");
+ puts("");
+}
+
+#define SNAME(s) #s
+#define SCAT2(a, b) SNAME(a ## _ ## b)
+#define SCAT3(a, b, c) SCAT2(a, SCAT2(b, c))
+#define SCAT4(a, b, c, d) SCAT2(a, SCAT3(b, c, d))
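These helpers build the test-name strings by token pasting and stringification; for example (illustrative):

    /* SCAT2(uffd, guest_read64) expands to the string "uffd_guest_read64". */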
+
+#define _CHECK(_test) _CHECK_##_test
+#define _PREPARE(_test) _PREPARE_##_test
+#define _PREPARE_guest_read64 NULL
+#define _PREPARE_guest_ld_preidx NULL
+#define _PREPARE_guest_write64 NULL
+#define _PREPARE_guest_st_preidx NULL
+#define _PREPARE_guest_exec NULL
+#define _PREPARE_guest_at NULL
+#define _PREPARE_guest_dc_zva guest_check_dc_zva
+#define _PREPARE_guest_cas guest_check_lse
+
+/* With or without access flag checks */
+#define _PREPARE_with_af guest_set_ha, guest_clear_pte_af
+#define _PREPARE_no_af NULL
+#define _CHECK_with_af guest_check_pte_af
+#define _CHECK_no_af NULL
+
+/* Performs an access and checks that no faults were triggered. */
+#define TEST_ACCESS(_access, _with_af, _mark_cmd) \
+{ \
+ .name = SCAT3(_access, _with_af, #_mark_cmd), \
+ .guest_prepare = { _PREPARE(_with_af), \
+ _PREPARE(_access) }, \
+ .mem_mark_cmd = _mark_cmd, \
+ .guest_test = _access, \
+ .guest_test_check = { _CHECK(_with_af) }, \
+ .expected_events = { 0 }, \
+}
+
+#define TEST_UFFD(_access, _with_af, _mark_cmd, \
+ _uffd_data_handler, _uffd_pt_handler, _uffd_faults) \
+{ \
+ .name = SCAT4(uffd, _access, _with_af, #_mark_cmd), \
+ .guest_prepare = { _PREPARE(_with_af), \
+ _PREPARE(_access) }, \
+ .guest_test = _access, \
+ .mem_mark_cmd = _mark_cmd, \
+ .guest_test_check = { _CHECK(_with_af) }, \
+ .uffd_data_handler = _uffd_data_handler, \
+ .uffd_pt_handler = _uffd_pt_handler, \
+ .expected_events = { .uffd_faults = _uffd_faults, }, \
+}
+
+#define TEST_DIRTY_LOG(_access, _with_af, _test_check) \
+{ \
+ .name = SCAT3(dirty_log, _access, _with_af), \
+ .data_memslot_flags = KVM_MEM_LOG_DIRTY_PAGES, \
+ .pt_memslot_flags = KVM_MEM_LOG_DIRTY_PAGES, \
+ .guest_prepare = { _PREPARE(_with_af), \
+ _PREPARE(_access) }, \
+ .guest_test = _access, \
+ .guest_test_check = { _CHECK(_with_af), _test_check, \
+ guest_check_s1ptw_wr_in_dirty_log}, \
+ .expected_events = { 0 }, \
+}
+
+#define TEST_UFFD_AND_DIRTY_LOG(_access, _with_af, _uffd_data_handler, \
+ _uffd_faults, _test_check) \
+{ \
+ .name = SCAT3(uffd_and_dirty_log, _access, _with_af), \
+ .data_memslot_flags = KVM_MEM_LOG_DIRTY_PAGES, \
+ .pt_memslot_flags = KVM_MEM_LOG_DIRTY_PAGES, \
+ .guest_prepare = { _PREPARE(_with_af), \
+ _PREPARE(_access) }, \
+ .guest_test = _access, \
+ .mem_mark_cmd = CMD_HOLE_DATA | CMD_HOLE_PT, \
+ .guest_test_check = { _CHECK(_with_af), _test_check }, \
+ .uffd_data_handler = _uffd_data_handler, \
+ .uffd_pt_handler = uffd_pt_write_handler, \
+ .expected_events = { .uffd_faults = _uffd_faults, }, \
+}
+
+#define TEST_RO_MEMSLOT(_access, _mmio_handler, _mmio_exits) \
+{ \
+ .name = SCAT2(ro_memslot, _access), \
+ .data_memslot_flags = KVM_MEM_READONLY, \
+ .guest_prepare = { _PREPARE(_access) }, \
+ .guest_test = _access, \
+ .mmio_handler = _mmio_handler, \
+ .expected_events = { .mmio_exits = _mmio_exits }, \
+}
+
+#define TEST_RO_MEMSLOT_NO_SYNDROME(_access) \
+{ \
+ .name = SCAT2(ro_memslot_no_syndrome, _access), \
+ .data_memslot_flags = KVM_MEM_READONLY, \
+ .guest_test = _access, \
+ .fail_vcpu_run_handler = fail_vcpu_run_mmio_no_syndrome_handler, \
+ .expected_events = { .fail_vcpu_runs = 1 }, \
+}
+
+#define TEST_RO_MEMSLOT_AND_DIRTY_LOG(_access, _mmio_handler, _mmio_exits, \
+ _test_check) \
+{ \
+ .name = SCAT2(ro_memslot_and_dlog, _access), \
+ .data_memslot_flags = KVM_MEM_READONLY | KVM_MEM_LOG_DIRTY_PAGES, \
+ .pt_memslot_flags = KVM_MEM_LOG_DIRTY_PAGES, \
+ .guest_prepare = { _PREPARE(_access) }, \
+ .guest_test = _access, \
+ .guest_test_check = { _test_check }, \
+ .mmio_handler = _mmio_handler, \
+ .expected_events = { .mmio_exits = _mmio_exits}, \
+}
+
+#define TEST_RO_MEMSLOT_NO_SYNDROME_AND_DIRTY_LOG(_access, _test_check) \
+{ \
+ .name = SCAT2(ro_memslot_no_syn_and_dlog, _access), \
+ .data_memslot_flags = KVM_MEM_READONLY | KVM_MEM_LOG_DIRTY_PAGES, \
+ .pt_memslot_flags = KVM_MEM_LOG_DIRTY_PAGES, \
+ .guest_test = _access, \
+ .guest_test_check = { _test_check }, \
+ .fail_vcpu_run_handler = fail_vcpu_run_mmio_no_syndrome_handler, \
+ .expected_events = { .fail_vcpu_runs = 1 }, \
+}
+
+#define TEST_RO_MEMSLOT_AND_UFFD(_access, _mmio_handler, _mmio_exits, \
+ _uffd_data_handler, _uffd_faults) \
+{ \
+ .name = SCAT2(ro_memslot_uffd, _access), \
+ .data_memslot_flags = KVM_MEM_READONLY, \
+ .mem_mark_cmd = CMD_HOLE_DATA | CMD_HOLE_PT, \
+ .guest_prepare = { _PREPARE(_access) }, \
+ .guest_test = _access, \
+ .uffd_data_handler = _uffd_data_handler, \
+ .uffd_pt_handler = uffd_pt_write_handler, \
+ .mmio_handler = _mmio_handler, \
+ .expected_events = { .mmio_exits = _mmio_exits, \
+ .uffd_faults = _uffd_faults }, \
+}
+
+#define TEST_RO_MEMSLOT_NO_SYNDROME_AND_UFFD(_access, _uffd_data_handler, \
+ _uffd_faults) \
+{ \
+ .name = SCAT2(ro_memslot_no_syndrome_uffd, _access), \
+ .data_memslot_flags = KVM_MEM_READONLY, \
+ .mem_mark_cmd = CMD_HOLE_DATA | CMD_HOLE_PT, \
+ .guest_test = _access, \
+ .uffd_data_handler = _uffd_data_handler, \
+ .uffd_pt_handler = uffd_pt_write_handler, \
+ .fail_vcpu_run_handler = fail_vcpu_run_mmio_no_syndrome_handler, \
+ .expected_events = { .fail_vcpu_runs = 1, \
+ .uffd_faults = _uffd_faults }, \
+}
+
+static struct test_desc tests[] = {
+
+ /* Check that HW is setting the Access Flag (AF) (sanity checks). */
+ TEST_ACCESS(guest_read64, with_af, CMD_NONE),
+ TEST_ACCESS(guest_ld_preidx, with_af, CMD_NONE),
+ TEST_ACCESS(guest_cas, with_af, CMD_NONE),
+ TEST_ACCESS(guest_write64, with_af, CMD_NONE),
+ TEST_ACCESS(guest_st_preidx, with_af, CMD_NONE),
+ TEST_ACCESS(guest_dc_zva, with_af, CMD_NONE),
+ TEST_ACCESS(guest_exec, with_af, CMD_NONE),
+
+ /*
+ * Punch a hole in the data backing store, and then try multiple
+ * accesses: reads should return zeroes, and writes should
+ * re-populate the page. Moreover, the test also checks that no
+ * exception was generated in the guest. Note that this
+ * reading/writing behavior is the same as reading/writing a
+ * punched page (with fallocate(FALLOC_FL_PUNCH_HOLE)) from
+ * userspace.
+ */
+ TEST_ACCESS(guest_read64, no_af, CMD_HOLE_DATA),
+ TEST_ACCESS(guest_cas, no_af, CMD_HOLE_DATA),
+ TEST_ACCESS(guest_ld_preidx, no_af, CMD_HOLE_DATA),
+ TEST_ACCESS(guest_write64, no_af, CMD_HOLE_DATA),
+ TEST_ACCESS(guest_st_preidx, no_af, CMD_HOLE_DATA),
+ TEST_ACCESS(guest_at, no_af, CMD_HOLE_DATA),
+ TEST_ACCESS(guest_dc_zva, no_af, CMD_HOLE_DATA),
+
+ /*
+ * Punch holes in the data and PT backing stores and mark them for
+ * userfaultfd handling. This should result in 2 faults: the access
+ * on the data backing store, and its respective S1 page table walk
+ * (S1PTW).
+ */
+ TEST_UFFD(guest_read64, with_af, CMD_HOLE_DATA | CMD_HOLE_PT,
+ uffd_data_read_handler, uffd_pt_write_handler, 2),
+ /* no_af should also lead to a PT write. */
+ TEST_UFFD(guest_read64, no_af, CMD_HOLE_DATA | CMD_HOLE_PT,
+ uffd_data_read_handler, uffd_pt_write_handler, 2),
+ /* Note that cas invokes the read handler. */
+ TEST_UFFD(guest_cas, with_af, CMD_HOLE_DATA | CMD_HOLE_PT,
+ uffd_data_read_handler, uffd_pt_write_handler, 2),
+ /*
+ * Can't test guest_at with_af as it's IMPDEF whether the AF is set.
+ * The S1PTW fault should still be marked as a write.
+ */
+ TEST_UFFD(guest_at, no_af, CMD_HOLE_DATA | CMD_HOLE_PT,
+ uffd_data_read_handler, uffd_pt_write_handler, 1),
+ TEST_UFFD(guest_ld_preidx, with_af, CMD_HOLE_DATA | CMD_HOLE_PT,
+ uffd_data_read_handler, uffd_pt_write_handler, 2),
+ TEST_UFFD(guest_write64, with_af, CMD_HOLE_DATA | CMD_HOLE_PT,
+ uffd_data_write_handler, uffd_pt_write_handler, 2),
+ TEST_UFFD(guest_dc_zva, with_af, CMD_HOLE_DATA | CMD_HOLE_PT,
+ uffd_data_write_handler, uffd_pt_write_handler, 2),
+ TEST_UFFD(guest_st_preidx, with_af, CMD_HOLE_DATA | CMD_HOLE_PT,
+ uffd_data_write_handler, uffd_pt_write_handler, 2),
+ TEST_UFFD(guest_exec, with_af, CMD_HOLE_DATA | CMD_HOLE_PT,
+ uffd_data_read_handler, uffd_pt_write_handler, 2),
+
+ /*
+ * Try accesses when the data and PT memory regions are both
+ * tracked for dirty logging.
+ */
+ TEST_DIRTY_LOG(guest_read64, with_af, guest_check_no_write_in_dirty_log),
+ /* no_af should also lead to a PT write. */
+ TEST_DIRTY_LOG(guest_read64, no_af, guest_check_no_write_in_dirty_log),
+ TEST_DIRTY_LOG(guest_ld_preidx, with_af, guest_check_no_write_in_dirty_log),
+ TEST_DIRTY_LOG(guest_at, no_af, guest_check_no_write_in_dirty_log),
+ TEST_DIRTY_LOG(guest_exec, with_af, guest_check_no_write_in_dirty_log),
+ TEST_DIRTY_LOG(guest_write64, with_af, guest_check_write_in_dirty_log),
+ TEST_DIRTY_LOG(guest_cas, with_af, guest_check_write_in_dirty_log),
+ TEST_DIRTY_LOG(guest_dc_zva, with_af, guest_check_write_in_dirty_log),
+ TEST_DIRTY_LOG(guest_st_preidx, with_af, guest_check_write_in_dirty_log),
+
+ /*
+ * Access when the data and PT memory regions are both marked for
+ * dirty logging and UFFD at the same time. The expected result is
+ * that writes should mark the dirty log and trigger a userfaultfd
+ * write fault. Reads/execs should result in a read userfaultfd
+ * fault, and nothing in the dirty log. Any S1PTW should result in
+ * a write in the dirty log and a userfaultfd write.
+ */
+ TEST_UFFD_AND_DIRTY_LOG(guest_read64, with_af, uffd_data_read_handler, 2,
+ guest_check_no_write_in_dirty_log),
+ /* no_af should also lead to a PT write. */
+ TEST_UFFD_AND_DIRTY_LOG(guest_read64, no_af, uffd_data_read_handler, 2,
+ guest_check_no_write_in_dirty_log),
+ TEST_UFFD_AND_DIRTY_LOG(guest_ld_preidx, with_af, uffd_data_read_handler,
+ 2, guest_check_no_write_in_dirty_log),
+ TEST_UFFD_AND_DIRTY_LOG(guest_at, with_af, 0, 1,
+ guest_check_no_write_in_dirty_log),
+ TEST_UFFD_AND_DIRTY_LOG(guest_exec, with_af, uffd_data_read_handler, 2,
+ guest_check_no_write_in_dirty_log),
+ TEST_UFFD_AND_DIRTY_LOG(guest_write64, with_af, uffd_data_write_handler,
+ 2, guest_check_write_in_dirty_log),
+ TEST_UFFD_AND_DIRTY_LOG(guest_cas, with_af, uffd_data_read_handler, 2,
+ guest_check_write_in_dirty_log),
+ TEST_UFFD_AND_DIRTY_LOG(guest_dc_zva, with_af, uffd_data_write_handler,
+ 2, guest_check_write_in_dirty_log),
+ TEST_UFFD_AND_DIRTY_LOG(guest_st_preidx, with_af,
+ uffd_data_write_handler, 2,
+ guest_check_write_in_dirty_log),
+
+ /*
+ * Try accesses when the data memory region is marked read-only
+ * (with KVM_MEM_READONLY). Writes with a syndrome result in an
+ * MMIO exit, writes with no syndrome (e.g., CAS) result in a
+ * failed vcpu run, and reads/execs with and without syndromes do
+ * not fault.
+ */
+ TEST_RO_MEMSLOT(guest_read64, 0, 0),
+ TEST_RO_MEMSLOT(guest_ld_preidx, 0, 0),
+ TEST_RO_MEMSLOT(guest_at, 0, 0),
+ TEST_RO_MEMSLOT(guest_exec, 0, 0),
+ TEST_RO_MEMSLOT(guest_write64, mmio_on_test_gpa_handler, 1),
+ TEST_RO_MEMSLOT_NO_SYNDROME(guest_dc_zva),
+ TEST_RO_MEMSLOT_NO_SYNDROME(guest_cas),
+ TEST_RO_MEMSLOT_NO_SYNDROME(guest_st_preidx),
+
+ /*
+ * Access when the data region is both read-only and marked
+ * for dirty logging at the same time. The expected result is that
+ * for writes there should be no write in the dirty log. The
+ * readonly handling is the same as if the memslot was not marked
+ * for dirty logging: writes with a syndrome result in an MMIO
+ * exit, and writes with no syndrome result in a failed vcpu run.
+ */
+ TEST_RO_MEMSLOT_AND_DIRTY_LOG(guest_read64, 0, 0,
+ guest_check_no_write_in_dirty_log),
+ TEST_RO_MEMSLOT_AND_DIRTY_LOG(guest_ld_preidx, 0, 0,
+ guest_check_no_write_in_dirty_log),
+ TEST_RO_MEMSLOT_AND_DIRTY_LOG(guest_at, 0, 0,
+ guest_check_no_write_in_dirty_log),
+ TEST_RO_MEMSLOT_AND_DIRTY_LOG(guest_exec, 0, 0,
+ guest_check_no_write_in_dirty_log),
+ TEST_RO_MEMSLOT_AND_DIRTY_LOG(guest_write64, mmio_on_test_gpa_handler,
+ 1, guest_check_no_write_in_dirty_log),
+ TEST_RO_MEMSLOT_NO_SYNDROME_AND_DIRTY_LOG(guest_dc_zva,
+ guest_check_no_write_in_dirty_log),
+ TEST_RO_MEMSLOT_NO_SYNDROME_AND_DIRTY_LOG(guest_cas,
+ guest_check_no_write_in_dirty_log),
+ TEST_RO_MEMSLOT_NO_SYNDROME_AND_DIRTY_LOG(guest_st_preidx,
+ guest_check_no_write_in_dirty_log),
+
+ /*
+ * Access when the data region is both read-only and punched with
+ * holes tracked with userfaultfd. The expected result is the
+ * union of both userfaultfd and read-only behaviors. For example,
+ * write accesses result in a userfaultfd write fault and an MMIO
+ * exit. Writes with no syndrome result in a failed vcpu run and
+ * no userfaultfd write fault. Reads result in userfaultfd getting
+ * triggered.
+ */
+ TEST_RO_MEMSLOT_AND_UFFD(guest_read64, 0, 0,
+ uffd_data_read_handler, 2),
+ TEST_RO_MEMSLOT_AND_UFFD(guest_ld_preidx, 0, 0,
+ uffd_data_read_handler, 2),
+ TEST_RO_MEMSLOT_AND_UFFD(guest_at, 0, 0,
+ uffd_no_handler, 1),
+ TEST_RO_MEMSLOT_AND_UFFD(guest_exec, 0, 0,
+ uffd_data_read_handler, 2),
+ TEST_RO_MEMSLOT_AND_UFFD(guest_write64, mmio_on_test_gpa_handler, 1,
+ uffd_data_write_handler, 2),
+ TEST_RO_MEMSLOT_NO_SYNDROME_AND_UFFD(guest_cas,
+ uffd_data_read_handler, 2),
+ TEST_RO_MEMSLOT_NO_SYNDROME_AND_UFFD(guest_dc_zva,
+ uffd_no_handler, 1),
+ TEST_RO_MEMSLOT_NO_SYNDROME_AND_UFFD(guest_st_preidx,
+ uffd_no_handler, 1),
+
+ { 0 }
+};
+
+static void for_each_test_and_guest_mode(enum vm_mem_backing_src_type src_type)
+{
+ struct test_desc *t;
+
+ for (t = &tests[0]; t->name; t++) {
+ if (t->skip)
+ continue;
+
+ struct test_params p = {
+ .src_type = src_type,
+ .test_desc = t,
+ };
+
+ for_each_guest_mode(run_test, &p);
+ }
+}
+
+int main(int argc, char *argv[])
+{
+ enum vm_mem_backing_src_type src_type;
+ int opt;
+
+ setbuf(stdout, NULL);
+
+ src_type = DEFAULT_VM_MEM_SRC;
+
+ while ((opt = getopt(argc, argv, "hm:s:")) != -1) {
+ switch (opt) {
+ case 'm':
+ guest_modes_cmdline(optarg);
+ break;
+ case 's':
+ src_type = parse_backing_src_type(optarg);
+ break;
+ case 'h':
+ default:
+ help(argv[0]);
+ exit(0);
+ }
+ }
+
+ for_each_test_and_guest_mode(src_type);
+ return 0;
+}
diff --git a/tools/testing/selftests/kvm/access_tracking_perf_test.c b/tools/testing/selftests/kvm/access_tracking_perf_test.c
index 02d3587cab0a..57a16371e9c2 100644
--- a/tools/testing/selftests/kvm/access_tracking_perf_test.c
+++ b/tools/testing/selftests/kvm/access_tracking_perf_test.c
@@ -58,9 +58,6 @@ static enum {
ITERATION_MARK_IDLE,
} iteration_work;
-/* Set to true when vCPU threads should exit. */
-static bool done;
-
/* The iteration that was last completed by each vCPU. */
static int vcpu_last_completed_iteration[KVM_MAX_VCPUS];
@@ -211,7 +208,7 @@ static bool spin_wait_for_next_iteration(int *current_iteration)
int last_iteration = *current_iteration;
do {
- if (READ_ONCE(done))
+ if (READ_ONCE(memstress_args.stop_vcpus))
return false;
*current_iteration = READ_ONCE(iteration);
@@ -321,9 +318,6 @@ static void run_test(enum vm_guest_mode mode, void *arg)
mark_memory_idle(vm, nr_vcpus);
access_memory(vm, nr_vcpus, ACCESS_READ, "Reading from idle memory");
- /* Set done to signal the vCPU threads to exit */
- done = true;
-
memstress_join_vcpu_threads(nr_vcpus);
memstress_destroy_vm(vm);
}
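The per-test done flag is replaced by the shared memstress_args.stop_vcpus, which memstress_join_vcpu_threads() is presumably responsible for setting before joining the workers; the worker-side pattern is roughly (illustrative):

    while (!READ_ONCE(memstress_args.stop_vcpus)) {
        /* run one unit of guest work */
    }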
diff --git a/tools/testing/selftests/kvm/demand_paging_test.c b/tools/testing/selftests/kvm/demand_paging_test.c
index 3a977ddf07b2..b0e1fc4de9e2 100644
--- a/tools/testing/selftests/kvm/demand_paging_test.c
+++ b/tools/testing/selftests/kvm/demand_paging_test.c
@@ -22,23 +22,13 @@
#include "test_util.h"
#include "memstress.h"
#include "guest_modes.h"
+#include "userfaultfd_util.h"
#ifdef __NR_userfaultfd
-#ifdef PRINT_PER_PAGE_UPDATES
-#define PER_PAGE_DEBUG(...) printf(__VA_ARGS__)
-#else
-#define PER_PAGE_DEBUG(...) _no_printf(__VA_ARGS__)
-#endif
-
-#ifdef PRINT_PER_VCPU_UPDATES
-#define PER_VCPU_DEBUG(...) printf(__VA_ARGS__)
-#else
-#define PER_VCPU_DEBUG(...) _no_printf(__VA_ARGS__)
-#endif
-
static int nr_vcpus = 1;
static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
+
static size_t demand_paging_size;
static char *guest_data_prototype;
@@ -67,9 +57,11 @@ static void vcpu_worker(struct memstress_vcpu_args *vcpu_args)
ts_diff.tv_sec, ts_diff.tv_nsec);
}
-static int handle_uffd_page_request(int uffd_mode, int uffd, uint64_t addr)
+static int handle_uffd_page_request(int uffd_mode, int uffd,
+ struct uffd_msg *msg)
{
pid_t tid = syscall(__NR_gettid);
+ uint64_t addr = msg->arg.pagefault.address;
struct timespec start;
struct timespec ts_diff;
int r;
@@ -116,174 +108,32 @@ static int handle_uffd_page_request(int uffd_mode, int uffd, uint64_t addr)
return 0;
}
-bool quit_uffd_thread;
-
-struct uffd_handler_args {
+struct test_params {
int uffd_mode;
- int uffd;
- int pipefd;
- useconds_t delay;
+ useconds_t uffd_delay;
+ enum vm_mem_backing_src_type src_type;
+ bool partition_vcpu_memory_access;
};
-static void *uffd_handler_thread_fn(void *arg)
-{
- struct uffd_handler_args *uffd_args = (struct uffd_handler_args *)arg;
- int uffd = uffd_args->uffd;
- int pipefd = uffd_args->pipefd;
- useconds_t delay = uffd_args->delay;
- int64_t pages = 0;
- struct timespec start;
- struct timespec ts_diff;
-
- clock_gettime(CLOCK_MONOTONIC, &start);
- while (!quit_uffd_thread) {
- struct uffd_msg msg;
- struct pollfd pollfd[2];
- char tmp_chr;
- int r;
- uint64_t addr;
-
- pollfd[0].fd = uffd;
- pollfd[0].events = POLLIN;
- pollfd[1].fd = pipefd;
- pollfd[1].events = POLLIN;
-
- r = poll(pollfd, 2, -1);
- switch (r) {
- case -1:
- pr_info("poll err");
- continue;
- case 0:
- continue;
- case 1:
- break;
- default:
- pr_info("Polling uffd returned %d", r);
- return NULL;
- }
-
- if (pollfd[0].revents & POLLERR) {
- pr_info("uffd revents has POLLERR");
- return NULL;
- }
-
- if (pollfd[1].revents & POLLIN) {
- r = read(pollfd[1].fd, &tmp_chr, 1);
- TEST_ASSERT(r == 1,
- "Error reading pipefd in UFFD thread\n");
- return NULL;
- }
-
- if (!(pollfd[0].revents & POLLIN))
- continue;
-
- r = read(uffd, &msg, sizeof(msg));
- if (r == -1) {
- if (errno == EAGAIN)
- continue;
- pr_info("Read of uffd got errno %d\n", errno);
- return NULL;
- }
-
- if (r != sizeof(msg)) {
- pr_info("Read on uffd returned unexpected size: %d bytes", r);
- return NULL;
- }
-
- if (!(msg.event & UFFD_EVENT_PAGEFAULT))
- continue;
-
- if (delay)
- usleep(delay);
- addr = msg.arg.pagefault.address;
- r = handle_uffd_page_request(uffd_args->uffd_mode, uffd, addr);
- if (r < 0)
- return NULL;
- pages++;
- }
-
- ts_diff = timespec_elapsed(start);
- PER_VCPU_DEBUG("userfaulted %ld pages over %ld.%.9lds. (%f/sec)\n",
- pages, ts_diff.tv_sec, ts_diff.tv_nsec,
- pages / ((double)ts_diff.tv_sec + (double)ts_diff.tv_nsec / 100000000.0));
-
- return NULL;
-}
-
-static void setup_demand_paging(struct kvm_vm *vm,
- pthread_t *uffd_handler_thread, int pipefd,
- int uffd_mode, useconds_t uffd_delay,
- struct uffd_handler_args *uffd_args,
- void *hva, void *alias, uint64_t len)
+static void prefault_mem(void *alias, uint64_t len)
{
- bool is_minor = (uffd_mode == UFFDIO_REGISTER_MODE_MINOR);
- int uffd;
- struct uffdio_api uffdio_api;
- struct uffdio_register uffdio_register;
- uint64_t expected_ioctls = ((uint64_t) 1) << _UFFDIO_COPY;
- int ret;
+ size_t p;
- PER_PAGE_DEBUG("Userfaultfd %s mode, faults resolved with %s\n",
- is_minor ? "MINOR" : "MISSING",
- is_minor ? "UFFDIO_CONINUE" : "UFFDIO_COPY");
-
- /* In order to get minor faults, prefault via the alias. */
- if (is_minor) {
- size_t p;
-
- expected_ioctls = ((uint64_t) 1) << _UFFDIO_CONTINUE;
-
- TEST_ASSERT(alias != NULL, "Alias required for minor faults");
- for (p = 0; p < (len / demand_paging_size); ++p) {
- memcpy(alias + (p * demand_paging_size),
- guest_data_prototype, demand_paging_size);
- }
+ TEST_ASSERT(alias != NULL, "Alias required for minor faults");
+ for (p = 0; p < (len / demand_paging_size); ++p) {
+ memcpy(alias + (p * demand_paging_size),
+ guest_data_prototype, demand_paging_size);
}
-
- uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
- TEST_ASSERT(uffd >= 0, __KVM_SYSCALL_ERROR("userfaultfd()", uffd));
-
- uffdio_api.api = UFFD_API;
- uffdio_api.features = 0;
- ret = ioctl(uffd, UFFDIO_API, &uffdio_api);
- TEST_ASSERT(ret != -1, __KVM_SYSCALL_ERROR("UFFDIO_API", ret));
-
- uffdio_register.range.start = (uint64_t)hva;
- uffdio_register.range.len = len;
- uffdio_register.mode = uffd_mode;
- ret = ioctl(uffd, UFFDIO_REGISTER, &uffdio_register);
- TEST_ASSERT(ret != -1, __KVM_SYSCALL_ERROR("UFFDIO_REGISTER", ret));
- TEST_ASSERT((uffdio_register.ioctls & expected_ioctls) ==
- expected_ioctls, "missing userfaultfd ioctls");
-
- uffd_args->uffd_mode = uffd_mode;
- uffd_args->uffd = uffd;
- uffd_args->pipefd = pipefd;
- uffd_args->delay = uffd_delay;
- pthread_create(uffd_handler_thread, NULL, uffd_handler_thread_fn,
- uffd_args);
-
- PER_VCPU_DEBUG("Created uffd thread for HVA range [%p, %p)\n",
- hva, hva + len);
}
-struct test_params {
- int uffd_mode;
- useconds_t uffd_delay;
- enum vm_mem_backing_src_type src_type;
- bool partition_vcpu_memory_access;
-};
-
static void run_test(enum vm_guest_mode mode, void *arg)
{
struct test_params *p = arg;
- pthread_t *uffd_handler_threads = NULL;
- struct uffd_handler_args *uffd_args = NULL;
+ struct uffd_desc **uffd_descs = NULL;
struct timespec start;
struct timespec ts_diff;
- int *pipefds = NULL;
struct kvm_vm *vm;
- int r, i;
+ int i;
vm = memstress_create_vm(mode, nr_vcpus, guest_percpu_mem_size, 1,
p->src_type, p->partition_vcpu_memory_access);
@@ -296,15 +146,8 @@ static void run_test(enum vm_guest_mode mode, void *arg)
memset(guest_data_prototype, 0xAB, demand_paging_size);
if (p->uffd_mode) {
- uffd_handler_threads =
- malloc(nr_vcpus * sizeof(*uffd_handler_threads));
- TEST_ASSERT(uffd_handler_threads, "Memory allocation failed");
-
- uffd_args = malloc(nr_vcpus * sizeof(*uffd_args));
- TEST_ASSERT(uffd_args, "Memory allocation failed");
-
- pipefds = malloc(sizeof(int) * nr_vcpus * 2);
- TEST_ASSERT(pipefds, "Unable to allocate memory for pipefd");
+ uffd_descs = malloc(nr_vcpus * sizeof(struct uffd_desc *));
+ TEST_ASSERT(uffd_descs, "Memory allocation failed");
for (i = 0; i < nr_vcpus; i++) {
struct memstress_vcpu_args *vcpu_args;
@@ -317,19 +160,17 @@ static void run_test(enum vm_guest_mode mode, void *arg)
vcpu_hva = addr_gpa2hva(vm, vcpu_args->gpa);
vcpu_alias = addr_gpa2alias(vm, vcpu_args->gpa);
+ prefault_mem(vcpu_alias,
+ vcpu_args->pages * memstress_args.guest_page_size);
+
/*
* Set up user fault fd to handle demand paging
* requests.
*/
- r = pipe2(&pipefds[i * 2],
- O_CLOEXEC | O_NONBLOCK);
- TEST_ASSERT(!r, "Failed to set up pipefd");
-
- setup_demand_paging(vm, &uffd_handler_threads[i],
- pipefds[i * 2], p->uffd_mode,
- p->uffd_delay, &uffd_args[i],
- vcpu_hva, vcpu_alias,
- vcpu_args->pages * memstress_args.guest_page_size);
+ uffd_descs[i] = uffd_setup_demand_paging(
+ p->uffd_mode, p->uffd_delay, vcpu_hva,
+ vcpu_args->pages * memstress_args.guest_page_size,
+ &handle_uffd_page_request);
}
}
@@ -344,15 +185,9 @@ static void run_test(enum vm_guest_mode mode, void *arg)
pr_info("All vCPU threads joined\n");
if (p->uffd_mode) {
- char c;
-
/* Tell the user fault fd handler threads to quit */
- for (i = 0; i < nr_vcpus; i++) {
- r = write(pipefds[i * 2 + 1], &c, 1);
- TEST_ASSERT(r == 1, "Unable to write to pipefd");
-
- pthread_join(uffd_handler_threads[i], NULL);
- }
+ for (i = 0; i < nr_vcpus; i++)
+ uffd_stop_demand_paging(uffd_descs[i]);
}
pr_info("Total guest execution time: %ld.%.9lds\n",
@@ -364,11 +199,8 @@ static void run_test(enum vm_guest_mode mode, void *arg)
memstress_destroy_vm(vm);
free(guest_data_prototype);
- if (p->uffd_mode) {
- free(uffd_handler_threads);
- free(uffd_args);
- free(pipefds);
- }
+ if (p->uffd_mode)
+ free(uffd_descs);
}
static void help(char *name)
diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
index a38c4369fb8e..9d4c50c4e72e 100644
--- a/tools/testing/selftests/kvm/dirty_log_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_test.c
@@ -24,6 +24,9 @@
#include "guest_modes.h"
#include "processor.h"
+#define DIRTY_MEM_BITS 30 /* 1G */
+#define PAGE_SHIFT_4K 12
+
/* The memory slot index to track dirty pages */
#define TEST_MEM_SLOT_INDEX 1
@@ -226,13 +229,15 @@ static void clear_log_create_vm_done(struct kvm_vm *vm)
}
static void dirty_log_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
- void *bitmap, uint32_t num_pages)
+ void *bitmap, uint32_t num_pages,
+ uint32_t *unused)
{
kvm_vm_get_dirty_log(vcpu->vm, slot, bitmap);
}
static void clear_log_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
- void *bitmap, uint32_t num_pages)
+ void *bitmap, uint32_t num_pages,
+ uint32_t *unused)
{
kvm_vm_get_dirty_log(vcpu->vm, slot, bitmap);
kvm_vm_clear_dirty_log(vcpu->vm, slot, bitmap, 0, num_pages);
@@ -271,6 +276,24 @@ static bool dirty_ring_supported(void)
static void dirty_ring_create_vm_done(struct kvm_vm *vm)
{
+ uint64_t pages;
+ uint32_t limit;
+
+ /*
+ * We rely on vcpu exit due to full dirty ring state. Adjust
+ * the ring buffer size to ensure we're able to reach the
+ * full dirty ring state.
+ */
+ pages = (1ul << (DIRTY_MEM_BITS - vm->page_shift)) + 3;
+ pages = vm_adjust_num_guest_pages(vm->mode, pages);
+ if (vm->page_size < getpagesize())
+ pages = vm_num_host_pages(vm->mode, pages);
+
+ limit = 1 << (31 - __builtin_clz(pages));
+ test_dirty_ring_count = 1 << (31 - __builtin_clz(test_dirty_ring_count));
+ test_dirty_ring_count = min(limit, test_dirty_ring_count);
+ pr_info("dirty ring count: 0x%x\n", test_dirty_ring_count);
+
/*
* Switch to dirty ring mode after VM creation but before any
* of the vcpu creation.
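The adjustment above rounds both the needed page count and the configured ring size down to powers of two before clamping: KVM rejects dirty rings whose size is not a power of two, and the test needs a ring small enough to fill before the guest finishes dirtying memory. A minimal sketch of the open-coded expression, with a hypothetical helper name:

	static inline uint32_t round_down_pow2(uint32_t x)
	{
		/* Keep only the highest set bit; x must be non-zero. */
		return 1u << (31 - __builtin_clz(x));
	}

	/* e.g. pages = 262147 (1G of 4K pages + 3) -> limit = 262144 */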
@@ -329,10 +352,9 @@ static void dirty_ring_continue_vcpu(void)
}
static void dirty_ring_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
- void *bitmap, uint32_t num_pages)
+ void *bitmap, uint32_t num_pages,
+ uint32_t *ring_buf_idx)
{
- /* We only have one vcpu */
- static uint32_t fetch_index = 0;
uint32_t count = 0, cleared;
bool continued_vcpu = false;
@@ -349,7 +371,8 @@ static void dirty_ring_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
/* Only have one vcpu */
count = dirty_ring_collect_one(vcpu_map_dirty_ring(vcpu),
- slot, bitmap, num_pages, &fetch_index);
+ slot, bitmap, num_pages,
+ ring_buf_idx);
cleared = kvm_vm_reset_dirty_ring(vcpu->vm);
@@ -406,7 +429,8 @@ struct log_mode {
void (*create_vm_done)(struct kvm_vm *vm);
/* Hook to collect the dirty pages into the bitmap provided */
void (*collect_dirty_pages) (struct kvm_vcpu *vcpu, int slot,
- void *bitmap, uint32_t num_pages);
+ void *bitmap, uint32_t num_pages,
+ uint32_t *ring_buf_idx);
/* Hook to call after each vcpu run */
void (*after_vcpu_run)(struct kvm_vcpu *vcpu, int ret, int err);
void (*before_vcpu_join) (void);
@@ -471,13 +495,14 @@ static void log_mode_create_vm_done(struct kvm_vm *vm)
}
static void log_mode_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
- void *bitmap, uint32_t num_pages)
+ void *bitmap, uint32_t num_pages,
+ uint32_t *ring_buf_idx)
{
struct log_mode *mode = &log_modes[host_log_mode];
TEST_ASSERT(mode->collect_dirty_pages != NULL,
"collect_dirty_pages() is required for any log mode!");
- mode->collect_dirty_pages(vcpu, slot, bitmap, num_pages);
+ mode->collect_dirty_pages(vcpu, slot, bitmap, num_pages, ring_buf_idx);
}
static void log_mode_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err)
@@ -681,9 +706,6 @@ static struct kvm_vm *create_vm(enum vm_guest_mode mode, struct kvm_vcpu **vcpu,
return vm;
}
-#define DIRTY_MEM_BITS 30 /* 1G */
-#define PAGE_SHIFT_4K 12
-
struct test_params {
unsigned long iterations;
unsigned long interval;
@@ -696,6 +718,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
unsigned long *bmap;
+ uint32_t ring_buf_idx = 0;
if (!log_mode_supported()) {
print_skip("Log mode '%s' not supported",
@@ -769,6 +792,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
host_dirty_count = 0;
host_clear_count = 0;
host_track_next_count = 0;
+ WRITE_ONCE(dirty_ring_vcpu_ring_full, false);
pthread_create(&vcpu_thread, NULL, vcpu_worker, vcpu);
@@ -776,7 +800,8 @@ static void run_test(enum vm_guest_mode mode, void *arg)
/* Give the vcpu thread some time to dirty some pages */
usleep(p->interval * 1000);
log_mode_collect_dirty_pages(vcpu, TEST_MEM_SLOT_INDEX,
- bmap, host_num_pages);
+ bmap, host_num_pages,
+ &ring_buf_idx);
/*
* See vcpu_sync_stop_requested definition for details on why
@@ -820,7 +845,7 @@ static void help(char *name)
printf("usage: %s [-h] [-i iterations] [-I interval] "
"[-p offset] [-m mode]\n", name);
puts("");
- printf(" -c: specify dirty ring size, in number of entries\n");
+ printf(" -c: hint to dirty ring size, in number of entries\n");
printf(" (only useful for dirty-ring test; default: %"PRIu32")\n",
TEST_DIRTY_RING_COUNT);
printf(" -i: specify iteration counts (default: %"PRIu64")\n",
diff --git a/tools/testing/selftests/kvm/include/aarch64/processor.h b/tools/testing/selftests/kvm/include/aarch64/processor.h
index a8124f9dd68a..5f977528e09c 100644
--- a/tools/testing/selftests/kvm/include/aarch64/processor.h
+++ b/tools/testing/selftests/kvm/include/aarch64/processor.h
@@ -38,12 +38,25 @@
* NORMAL 4 1111:1111
* NORMAL_WT 5 1011:1011
*/
-#define DEFAULT_MAIR_EL1 ((0x00ul << (0 * 8)) | \
- (0x04ul << (1 * 8)) | \
- (0x0cul << (2 * 8)) | \
- (0x44ul << (3 * 8)) | \
- (0xfful << (4 * 8)) | \
- (0xbbul << (5 * 8)))
+
+/* Linux doesn't use these memory types, so let's define them. */
+#define MAIR_ATTR_DEVICE_GRE UL(0x0c)
+#define MAIR_ATTR_NORMAL_WT UL(0xbb)
+
+#define MT_DEVICE_nGnRnE 0
+#define MT_DEVICE_nGnRE 1
+#define MT_DEVICE_GRE 2
+#define MT_NORMAL_NC 3
+#define MT_NORMAL 4
+#define MT_NORMAL_WT 5
+
+#define DEFAULT_MAIR_EL1 \
+ (MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) | \
+ MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRE, MT_DEVICE_nGnRE) | \
+ MAIR_ATTRIDX(MAIR_ATTR_DEVICE_GRE, MT_DEVICE_GRE) | \
+ MAIR_ATTRIDX(MAIR_ATTR_NORMAL_NC, MT_NORMAL_NC) | \
+ MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL) | \
+ MAIR_ATTRIDX(MAIR_ATTR_NORMAL_WT, MT_NORMAL_WT))
#define MPIDR_HWID_BITMASK (0xff00fffffful)
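The rewrite above trades byte-shift literals for named attribute/index pairs. A sketch of the packing, assuming MAIR_ATTRIDX() follows the usual arm64 definition of placing an 8-bit attribute into the byte selected by its index:

	#define MAIR_ATTRIDX(attr, idx)	((attr) << ((idx) * 8))

	/*
	 * e.g. MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL) == 0xfful << 32,
	 * reproducing the 0xff byte the old literal placed at index 4.
	 */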
@@ -92,11 +105,19 @@ enum {
#define ESR_EC_MASK (ESR_EC_NUM - 1)
#define ESR_EC_SVC64 0x15
+#define ESR_EC_IABT 0x21
+#define ESR_EC_DABT 0x25
#define ESR_EC_HW_BP_CURRENT 0x31
#define ESR_EC_SSTEP_CURRENT 0x33
#define ESR_EC_WP_CURRENT 0x35
#define ESR_EC_BRK_INS 0x3c
+/* Access flag */
+#define PTE_AF (1ULL << 10)
+
+/* Access flag update enable/disable */
+#define TCR_EL1_HA (1ULL << 39)
+
void aarch64_get_supported_page_sizes(uint32_t ipa,
bool *ps4k, bool *ps16k, bool *ps64k);
@@ -109,6 +130,8 @@ void vm_install_exception_handler(struct kvm_vm *vm,
void vm_install_sync_handler(struct kvm_vm *vm,
int vector, int ec, handler_fn handler);
+uint64_t *virt_get_pte_hva(struct kvm_vm *vm, vm_vaddr_t gva);
+
static inline void cpu_relax(void)
{
asm volatile("yield" ::: "memory");
diff --git a/tools/testing/selftests/kvm/include/kvm_util_base.h b/tools/testing/selftests/kvm/include/kvm_util_base.h
index c7685c7038ff..37500c92dd0a 100644
--- a/tools/testing/selftests/kvm/include/kvm_util_base.h
+++ b/tools/testing/selftests/kvm/include/kvm_util_base.h
@@ -35,6 +35,7 @@ struct userspace_mem_region {
struct sparsebit *unused_phy_pages;
int fd;
off_t offset;
+ enum vm_mem_backing_src_type backing_src_type;
void *host_mem;
void *host_alias;
void *mmap_start;
@@ -65,6 +66,14 @@ struct userspace_mem_regions {
DECLARE_HASHTABLE(slot_hash, 9);
};
+enum kvm_mem_region_type {
+ MEM_REGION_CODE,
+ MEM_REGION_DATA,
+ MEM_REGION_PT,
+ MEM_REGION_TEST_DATA,
+ NR_MEM_REGIONS,
+};
+
struct kvm_vm {
int mode;
unsigned long type;
@@ -94,6 +103,13 @@ struct kvm_vm {
int stats_fd;
struct kvm_stats_header stats_header;
struct kvm_stats_desc *stats_desc;
+
+ /*
+ * KVM region slots. These are the default memslots used by page
+ * allocators, e.g., lib/elf uses the memslots[MEM_REGION_CODE]
+ * memslot.
+ */
+ uint32_t memslots[NR_MEM_REGIONS];
};
@@ -106,6 +122,13 @@ struct kvm_vm {
struct userspace_mem_region *
memslot2region(struct kvm_vm *vm, uint32_t memslot);
+static inline struct userspace_mem_region *vm_get_mem_region(struct kvm_vm *vm,
+ enum kvm_mem_region_type type)
+{
+ assert(type < NR_MEM_REGIONS);
+ return memslot2region(vm, vm->memslots[type]);
+}
+
/* Minimum allocated guest virtual and physical addresses */
#define KVM_UTIL_MIN_VADDR 0x2000
#define KVM_GUEST_PAGE_TABLE_MIN_PADDR 0x180000
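With the per-type memslots[] array, library allocators select a backing slot by region type instead of hardcoding slot 0. A sketch of a caller, mirroring the vm_alloc_page_table() change later in this diff (the helper name here is hypothetical):

	static vm_paddr_t alloc_pt_page(struct kvm_vm *vm)
	{
		/* Slot 0 unless a test overrode MEM_REGION_PT. */
		return vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR,
					 vm->memslots[MEM_REGION_PT]);
	}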
@@ -387,7 +410,11 @@ void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot);
struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
+vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
+ enum kvm_mem_region_type type);
vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages);
+vm_vaddr_t __vm_vaddr_alloc_page(struct kvm_vm *vm,
+ enum kvm_mem_region_type type);
vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm);
void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
@@ -649,13 +676,13 @@ vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm);
* __vm_create() does NOT create vCPUs, @nr_runnable_vcpus is used purely to
* calculate the amount of memory needed for per-vCPU data, e.g. stacks.
*/
-struct kvm_vm *____vm_create(enum vm_guest_mode mode, uint64_t nr_pages);
+struct kvm_vm *____vm_create(enum vm_guest_mode mode);
struct kvm_vm *__vm_create(enum vm_guest_mode mode, uint32_t nr_runnable_vcpus,
uint64_t nr_extra_pages);
static inline struct kvm_vm *vm_create_barebones(void)
{
- return ____vm_create(VM_MODE_DEFAULT, 0);
+ return ____vm_create(VM_MODE_DEFAULT);
}
static inline struct kvm_vm *vm_create(uint32_t nr_runnable_vcpus)
diff --git a/tools/testing/selftests/kvm/include/memstress.h b/tools/testing/selftests/kvm/include/memstress.h
index bbd2a302df10..72e3e358ef7b 100644
--- a/tools/testing/selftests/kvm/include/memstress.h
+++ b/tools/testing/selftests/kvm/include/memstress.h
@@ -47,6 +47,9 @@ struct memstress_args {
/* The vCPU=>pCPU pinning map. Only valid if pin_vcpus is true. */
uint32_t vcpu_to_pcpu[KVM_MAX_VCPUS];
+ /* Test is done, stop running vCPUs. */
+ bool stop_vcpus;
+
struct memstress_vcpu_args vcpu_args[KVM_MAX_VCPUS];
};
diff --git a/tools/testing/selftests/kvm/include/userfaultfd_util.h b/tools/testing/selftests/kvm/include/userfaultfd_util.h
new file mode 100644
index 000000000000..877449c34592
--- /dev/null
+++ b/tools/testing/selftests/kvm/include/userfaultfd_util.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KVM userfaultfd util
+ *
+ * Copyright (C) 2018, Red Hat, Inc.
+ * Copyright (C) 2019-2022 Google LLC
+ */
+
+#define _GNU_SOURCE /* for pipe2 */
+
+#include <inttypes.h>
+#include <time.h>
+#include <pthread.h>
+#include <linux/userfaultfd.h>
+
+#include "test_util.h"
+
+typedef int (*uffd_handler_t)(int uffd_mode, int uffd, struct uffd_msg *msg);
+
+struct uffd_desc {
+ int uffd_mode;
+ int uffd;
+ int pipefds[2];
+ useconds_t delay;
+ uffd_handler_t handler;
+ pthread_t thread;
+};
+
+struct uffd_desc *uffd_setup_demand_paging(int uffd_mode, useconds_t delay,
+ void *hva, uint64_t len,
+ uffd_handler_t handler);
+
+void uffd_stop_demand_paging(struct uffd_desc *uffd);
+
+#ifdef PRINT_PER_PAGE_UPDATES
+#define PER_PAGE_DEBUG(...) printf(__VA_ARGS__)
+#else
+#define PER_PAGE_DEBUG(...) _no_printf(__VA_ARGS__)
+#endif
+
+#ifdef PRINT_PER_VCPU_UPDATES
+#define PER_VCPU_DEBUG(...) printf(__VA_ARGS__)
+#else
+#define PER_VCPU_DEBUG(...) _no_printf(__VA_ARGS__)
+#endif
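A hedged usage sketch of the new API: the caller supplies a handler with the uffd_handler_t signature and gets back an opaque descriptor to stop the thread later. demo_handler() and demo() are hypothetical names, not part of the header:

	static int demo_handler(int uffd_mode, int uffd, struct uffd_msg *msg)
	{
		/* Resolve msg->arg.pagefault.address via UFFDIO_COPY/CONTINUE. */
		return 0;	/* a negative return stops the handler thread */
	}

	static void demo(void *hva, uint64_t len)
	{
		struct uffd_desc *desc;

		desc = uffd_setup_demand_paging(UFFDIO_REGISTER_MODE_MISSING,
						0 /* no delay */, hva, len,
						demo_handler);
		/* ... let vCPUs fault on [hva, hva + len) ... */
		uffd_stop_demand_paging(desc);
	}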
diff --git a/tools/testing/selftests/kvm/lib/aarch64/processor.c b/tools/testing/selftests/kvm/lib/aarch64/processor.c
index 0de4aabc0c76..316de70db91d 100644
--- a/tools/testing/selftests/kvm/lib/aarch64/processor.c
+++ b/tools/testing/selftests/kvm/lib/aarch64/processor.c
@@ -11,6 +11,7 @@
#include "guest_modes.h"
#include "kvm_util.h"
#include "processor.h"
+#include <linux/bitfield.h>
#define DEFAULT_ARM64_GUEST_STACK_VADDR_MIN 0xac0000
@@ -76,13 +77,15 @@ static uint64_t __maybe_unused ptrs_per_pte(struct kvm_vm *vm)
void virt_arch_pgd_alloc(struct kvm_vm *vm)
{
- if (!vm->pgd_created) {
- vm_paddr_t paddr = vm_phy_pages_alloc(vm,
- page_align(vm, ptrs_per_pgd(vm) * 8) / vm->page_size,
- KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
- vm->pgd = paddr;
- vm->pgd_created = true;
- }
+ size_t nr_pages = page_align(vm, ptrs_per_pgd(vm) * 8) / vm->page_size;
+
+ if (vm->pgd_created)
+ return;
+
+ vm->pgd = vm_phy_pages_alloc(vm, nr_pages,
+ KVM_GUEST_PAGE_TABLE_MIN_PADDR,
+ vm->memslots[MEM_REGION_PT]);
+ vm->pgd_created = true;
}
static void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
@@ -133,12 +136,12 @@ static void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
{
- uint64_t attr_idx = 4; /* NORMAL (See DEFAULT_MAIR_EL1) */
+ uint64_t attr_idx = MT_NORMAL;
_virt_pg_map(vm, vaddr, paddr, attr_idx);
}
-vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
+uint64_t *virt_get_pte_hva(struct kvm_vm *vm, vm_vaddr_t gva)
{
uint64_t *ptep;
@@ -169,11 +172,18 @@ vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
TEST_FAIL("Page table levels must be 2, 3, or 4");
}
- return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1));
+ return ptep;
unmapped_gva:
TEST_FAIL("No mapping for vm virtual address, gva: 0x%lx", gva);
- exit(1);
+ exit(EXIT_FAILURE);
+}
+
+vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
+{
+ uint64_t *ptep = virt_get_pte_hva(vm, gva);
+
+ return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1));
}
static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, uint64_t page, int level)
@@ -318,13 +328,16 @@ void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
struct kvm_vcpu_init *init, void *guest_code)
{
- size_t stack_size = vm->page_size == 4096 ?
- DEFAULT_STACK_PGS * vm->page_size :
- vm->page_size;
- uint64_t stack_vaddr = vm_vaddr_alloc(vm, stack_size,
- DEFAULT_ARM64_GUEST_STACK_VADDR_MIN);
+ size_t stack_size;
+ uint64_t stack_vaddr;
struct kvm_vcpu *vcpu = __vm_vcpu_add(vm, vcpu_id);
+ stack_size = vm->page_size == 4096 ? DEFAULT_STACK_PGS * vm->page_size :
+ vm->page_size;
+ stack_vaddr = __vm_vaddr_alloc(vm, stack_size,
+ DEFAULT_ARM64_GUEST_STACK_VADDR_MIN,
+ MEM_REGION_DATA);
+
aarch64_vcpu_setup(vcpu, init);
vcpu_set_reg(vcpu, ARM64_CORE_REG(sp_el1), stack_vaddr + stack_size);
@@ -428,8 +441,8 @@ unexpected_exception:
void vm_init_descriptor_tables(struct kvm_vm *vm)
{
- vm->handlers = vm_vaddr_alloc(vm, sizeof(struct handlers),
- vm->page_size);
+ vm->handlers = __vm_vaddr_alloc(vm, sizeof(struct handlers),
+ vm->page_size, MEM_REGION_DATA);
*(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers;
}
@@ -486,9 +499,9 @@ void aarch64_get_supported_page_sizes(uint32_t ipa,
err = ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
TEST_ASSERT(err == 0, KVM_IOCTL_ERROR(KVM_GET_ONE_REG, vcpu_fd));
- *ps4k = ((val >> 28) & 0xf) != 0xf;
- *ps64k = ((val >> 24) & 0xf) == 0;
- *ps16k = ((val >> 20) & 0xf) != 0;
+ *ps4k = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_TGRAN4), val) != 0xf;
+ *ps64k = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_TGRAN64), val) == 0;
+ *ps16k = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_TGRAN16), val) != 0;
close(vcpu_fd);
close(vm_fd);
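The FIELD_GET() conversion is behavior-preserving, assuming ARM64_FEATURE_MASK() expands to the usual GENMASK() for each ID register field:

	/* TGRAN4 occupies ID_AA64MMFR0_EL1[31:28], so: */
	/* FIELD_GET(GENMASK(31, 28), val) == ((val >> 28) & 0xf) */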
diff --git a/tools/testing/selftests/kvm/lib/elf.c b/tools/testing/selftests/kvm/lib/elf.c
index d71a9a5974de..820ac2d08c98 100644
--- a/tools/testing/selftests/kvm/lib/elf.c
+++ b/tools/testing/selftests/kvm/lib/elf.c
@@ -161,7 +161,8 @@ void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename)
seg_vend |= vm->page_size - 1;
size_t seg_size = seg_vend - seg_vstart + 1;
- vm_vaddr_t vaddr = vm_vaddr_alloc(vm, seg_size, seg_vstart);
+ vm_vaddr_t vaddr = __vm_vaddr_alloc(vm, seg_size, seg_vstart,
+ MEM_REGION_CODE);
TEST_ASSERT(vaddr == seg_vstart, "Unable to allocate "
"virtual memory for segment at requested min addr,\n"
" segment idx: %u\n"
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 1d26a2160178..e9607eb089be 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -186,13 +186,10 @@ const struct vm_guest_mode_params vm_guest_mode_params[] = {
_Static_assert(sizeof(vm_guest_mode_params)/sizeof(struct vm_guest_mode_params) == NUM_VM_MODES,
"Missing new mode params?");
-struct kvm_vm *____vm_create(enum vm_guest_mode mode, uint64_t nr_pages)
+struct kvm_vm *____vm_create(enum vm_guest_mode mode)
{
struct kvm_vm *vm;
- pr_debug("%s: mode='%s' pages='%ld'\n", __func__,
- vm_guest_mode_string(mode), nr_pages);
-
vm = calloc(1, sizeof(*vm));
TEST_ASSERT(vm != NULL, "Insufficient Memory");
@@ -288,9 +285,6 @@ struct kvm_vm *____vm_create(enum vm_guest_mode mode, uint64_t nr_pages)
/* Allocate and setup memory for guest. */
vm->vpages_mapped = sparsebit_alloc();
- if (nr_pages != 0)
- vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
- 0, 0, nr_pages, 0);
return vm;
}
@@ -337,8 +331,16 @@ struct kvm_vm *__vm_create(enum vm_guest_mode mode, uint32_t nr_runnable_vcpus,
nr_extra_pages);
struct userspace_mem_region *slot0;
struct kvm_vm *vm;
+ int i;
+
+ pr_debug("%s: mode='%s' pages='%ld'\n", __func__,
+ vm_guest_mode_string(mode), nr_pages);
- vm = ____vm_create(mode, nr_pages);
+ vm = ____vm_create(mode);
+
+ vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, 0, 0, nr_pages, 0);
+ for (i = 0; i < NR_MEM_REGIONS; i++)
+ vm->memslots[i] = 0;
kvm_vm_elf_load(vm, program_invocation_name);
@@ -649,6 +651,12 @@ static void __vm_mem_region_delete(struct kvm_vm *vm,
sparsebit_free(&region->unused_phy_pages);
ret = munmap(region->mmap_start, region->mmap_size);
TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
+ if (region->fd >= 0) {
+ /* There's an extra map when using shared memory. */
+ ret = munmap(region->mmap_alias, region->mmap_size);
+ TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
+ close(region->fd);
+ }
free(region);
}
@@ -986,6 +994,7 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
vm_mem_backing_src_alias(src_type)->name);
}
+ region->backing_src_type = src_type;
region->unused_phy_pages = sparsebit_alloc();
sparsebit_set_num(region->unused_phy_pages,
guest_paddr >> vm->page_shift, npages);
@@ -1280,32 +1289,15 @@ va_found:
return pgidx_start * vm->page_size;
}
-/*
- * VM Virtual Address Allocate
- *
- * Input Args:
- * vm - Virtual Machine
- * sz - Size in bytes
- * vaddr_min - Minimum starting virtual address
- *
- * Output Args: None
- *
- * Return:
- * Starting guest virtual address
- *
- * Allocates at least sz bytes within the virtual address space of the vm
- * given by vm. The allocated bytes are mapped to a virtual address >=
- * the address given by vaddr_min. Note that each allocation uses a
- * a unique set of pages, with the minimum real allocation being at least
- * a page.
- */
-vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min)
+vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
+ enum kvm_mem_region_type type)
{
uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0);
virt_pgd_alloc(vm);
vm_paddr_t paddr = vm_phy_pages_alloc(vm, pages,
- KVM_UTIL_MIN_PFN * vm->page_size, 0);
+ KVM_UTIL_MIN_PFN * vm->page_size,
+ vm->memslots[type]);
/*
* Find an unused range of virtual page addresses of at least
@@ -1326,6 +1318,30 @@ vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min)
}
/*
+ * VM Virtual Address Allocate
+ *
+ * Input Args:
+ * vm - Virtual Machine
+ * sz - Size in bytes
+ * vaddr_min - Minimum starting virtual address
+ *
+ * Output Args: None
+ *
+ * Return:
+ * Starting guest virtual address
+ *
+ * Allocates at least sz bytes within the virtual address space of the vm
+ * given by vm. The allocated bytes are mapped to a virtual address >=
+ * the address given by vaddr_min. Note that each allocation uses a
+ * unique set of pages, with the minimum real allocation being at least
+ * a page. The allocated physical space comes from the TEST_DATA memory region.
+ */
+vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min)
+{
+ return __vm_vaddr_alloc(vm, sz, vaddr_min, MEM_REGION_TEST_DATA);
+}
+
+/*
* VM Virtual Address Allocate Pages
*
* Input Args:
@@ -1344,6 +1360,11 @@ vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages)
return vm_vaddr_alloc(vm, nr_pages * getpagesize(), KVM_UTIL_MIN_VADDR);
}
+vm_vaddr_t __vm_vaddr_alloc_page(struct kvm_vm *vm, enum kvm_mem_region_type type)
+{
+ return __vm_vaddr_alloc(vm, getpagesize(), KVM_UTIL_MIN_VADDR, type);
+}
+
/*
* VM Virtual Address Allocate Page
*
@@ -1570,7 +1591,7 @@ struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vcpu *vcpu)
void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu)
{
- uint32_t page_size = vcpu->vm->page_size;
+ uint32_t page_size = getpagesize();
uint32_t size = vcpu->vm->dirty_ring_size;
TEST_ASSERT(size > 0, "Should enable dirty ring first");
@@ -1911,7 +1932,8 @@ vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm)
{
- return vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
+ return vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR,
+ vm->memslots[MEM_REGION_PT]);
}
/*
diff --git a/tools/testing/selftests/kvm/lib/memstress.c b/tools/testing/selftests/kvm/lib/memstress.c
index 2de8a5d527b3..5f1d3173c238 100644
--- a/tools/testing/selftests/kvm/lib/memstress.c
+++ b/tools/testing/selftests/kvm/lib/memstress.c
@@ -292,6 +292,7 @@ void memstress_start_vcpu_threads(int nr_vcpus,
vcpu_thread_fn = vcpu_fn;
WRITE_ONCE(all_vcpu_threads_running, false);
+ WRITE_ONCE(memstress_args.stop_vcpus, false);
for (i = 0; i < nr_vcpus; i++) {
struct vcpu_thread *vcpu = &vcpu_threads[i];
@@ -314,6 +315,8 @@ void memstress_join_vcpu_threads(int nr_vcpus)
{
int i;
+ WRITE_ONCE(memstress_args.stop_vcpus, true);
+
for (i = 0; i < nr_vcpus; i++)
pthread_join(vcpu_threads[i].thread, NULL);
}
diff --git a/tools/testing/selftests/kvm/lib/riscv/processor.c b/tools/testing/selftests/kvm/lib/riscv/processor.c
index 604478151212..d146ca71e0c0 100644
--- a/tools/testing/selftests/kvm/lib/riscv/processor.c
+++ b/tools/testing/selftests/kvm/lib/riscv/processor.c
@@ -55,13 +55,15 @@ static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva, int level)
void virt_arch_pgd_alloc(struct kvm_vm *vm)
{
- if (!vm->pgd_created) {
- vm_paddr_t paddr = vm_phy_pages_alloc(vm,
- page_align(vm, ptrs_per_pte(vm) * 8) / vm->page_size,
- KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
- vm->pgd = paddr;
- vm->pgd_created = true;
- }
+ size_t nr_pages = page_align(vm, ptrs_per_pte(vm) * 8) / vm->page_size;
+
+ if (vm->pgd_created)
+ return;
+
+ vm->pgd = vm_phy_pages_alloc(vm, nr_pages,
+ KVM_GUEST_PAGE_TABLE_MIN_PADDR,
+ vm->memslots[MEM_REGION_PT]);
+ vm->pgd_created = true;
}
void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
@@ -279,15 +281,18 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
void *guest_code)
{
int r;
- size_t stack_size = vm->page_size == 4096 ?
- DEFAULT_STACK_PGS * vm->page_size :
- vm->page_size;
- unsigned long stack_vaddr = vm_vaddr_alloc(vm, stack_size,
- DEFAULT_RISCV_GUEST_STACK_VADDR_MIN);
+ size_t stack_size;
+ unsigned long stack_vaddr;
unsigned long current_gp = 0;
struct kvm_mp_state mps;
struct kvm_vcpu *vcpu;
+ stack_size = vm->page_size == 4096 ? DEFAULT_STACK_PGS * vm->page_size :
+ vm->page_size;
+ stack_vaddr = __vm_vaddr_alloc(vm, stack_size,
+ DEFAULT_RISCV_GUEST_STACK_VADDR_MIN,
+ MEM_REGION_DATA);
+
vcpu = __vm_vcpu_add(vm, vcpu_id);
riscv_vcpu_mmu_setup(vcpu);
diff --git a/tools/testing/selftests/kvm/lib/s390x/processor.c b/tools/testing/selftests/kvm/lib/s390x/processor.c
index 89d7340d9cbd..15945121daf1 100644
--- a/tools/testing/selftests/kvm/lib/s390x/processor.c
+++ b/tools/testing/selftests/kvm/lib/s390x/processor.c
@@ -21,7 +21,8 @@ void virt_arch_pgd_alloc(struct kvm_vm *vm)
return;
paddr = vm_phy_pages_alloc(vm, PAGES_PER_REGION,
- KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
+ KVM_GUEST_PAGE_TABLE_MIN_PADDR,
+ vm->memslots[MEM_REGION_PT]);
memset(addr_gpa2hva(vm, paddr), 0xff, PAGES_PER_REGION * vm->page_size);
vm->pgd = paddr;
@@ -167,8 +168,9 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
vm->page_size);
- stack_vaddr = vm_vaddr_alloc(vm, stack_size,
- DEFAULT_GUEST_STACK_VADDR_MIN);
+ stack_vaddr = __vm_vaddr_alloc(vm, stack_size,
+ DEFAULT_GUEST_STACK_VADDR_MIN,
+ MEM_REGION_DATA);
vcpu = __vm_vcpu_add(vm, vcpu_id);
diff --git a/tools/testing/selftests/kvm/lib/userfaultfd_util.c b/tools/testing/selftests/kvm/lib/userfaultfd_util.c
new file mode 100644
index 000000000000..92cef20902f1
--- /dev/null
+++ b/tools/testing/selftests/kvm/lib/userfaultfd_util.c
@@ -0,0 +1,186 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KVM userfaultfd util
+ * Adapted from demand_paging_test.c
+ *
+ * Copyright (C) 2018, Red Hat, Inc.
+ * Copyright (C) 2019-2022 Google LLC
+ */
+
+#define _GNU_SOURCE /* for pipe2 */
+
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <time.h>
+#include <poll.h>
+#include <pthread.h>
+#include <linux/userfaultfd.h>
+#include <sys/syscall.h>
+
+#include "kvm_util.h"
+#include "test_util.h"
+#include "memstress.h"
+#include "userfaultfd_util.h"
+
+#ifdef __NR_userfaultfd
+
+static void *uffd_handler_thread_fn(void *arg)
+{
+ struct uffd_desc *uffd_desc = (struct uffd_desc *)arg;
+ int uffd = uffd_desc->uffd;
+ int pipefd = uffd_desc->pipefds[0];
+ useconds_t delay = uffd_desc->delay;
+ int64_t pages = 0;
+ struct timespec start;
+ struct timespec ts_diff;
+
+ clock_gettime(CLOCK_MONOTONIC, &start);
+ while (1) {
+ struct uffd_msg msg;
+ struct pollfd pollfd[2];
+ char tmp_chr;
+ int r;
+
+ pollfd[0].fd = uffd;
+ pollfd[0].events = POLLIN;
+ pollfd[1].fd = pipefd;
+ pollfd[1].events = POLLIN;
+
+ r = poll(pollfd, 2, -1);
+ switch (r) {
+ case -1:
+ pr_info("poll err");
+ continue;
+ case 0:
+ continue;
+ case 1:
+ break;
+ default:
+ pr_info("Polling uffd returned %d", r);
+ return NULL;
+ }
+
+ if (pollfd[0].revents & POLLERR) {
+ pr_info("uffd revents has POLLERR");
+ return NULL;
+ }
+
+ if (pollfd[1].revents & POLLIN) {
+ r = read(pollfd[1].fd, &tmp_chr, 1);
+ TEST_ASSERT(r == 1,
+ "Error reading pipefd in UFFD thread\n");
+ return NULL;
+ }
+
+ if (!(pollfd[0].revents & POLLIN))
+ continue;
+
+ r = read(uffd, &msg, sizeof(msg));
+ if (r == -1) {
+ if (errno == EAGAIN)
+ continue;
+ pr_info("Read of uffd got errno %d\n", errno);
+ return NULL;
+ }
+
+ if (r != sizeof(msg)) {
+ pr_info("Read on uffd returned unexpected size: %d bytes", r);
+ return NULL;
+ }
+
+ if (!(msg.event & UFFD_EVENT_PAGEFAULT))
+ continue;
+
+ if (delay)
+ usleep(delay);
+ r = uffd_desc->handler(uffd_desc->uffd_mode, uffd, &msg);
+ if (r < 0)
+ return NULL;
+ pages++;
+ }
+
+ ts_diff = timespec_elapsed(start);
+ PER_VCPU_DEBUG("userfaulted %ld pages over %ld.%.9lds. (%f/sec)\n",
+ pages, ts_diff.tv_sec, ts_diff.tv_nsec,
+ pages / ((double)ts_diff.tv_sec + (double)ts_diff.tv_nsec / 1000000000.0));
+
+ return NULL;
+}
+
+struct uffd_desc *uffd_setup_demand_paging(int uffd_mode, useconds_t delay,
+ void *hva, uint64_t len,
+ uffd_handler_t handler)
+{
+ struct uffd_desc *uffd_desc;
+ bool is_minor = (uffd_mode == UFFDIO_REGISTER_MODE_MINOR);
+ int uffd;
+ struct uffdio_api uffdio_api;
+ struct uffdio_register uffdio_register;
+ uint64_t expected_ioctls = ((uint64_t) 1) << _UFFDIO_COPY;
+ int ret;
+
+ PER_PAGE_DEBUG("Userfaultfd %s mode, faults resolved with %s\n",
+ is_minor ? "MINOR" : "MISSING",
+ is_minor ? "UFFDIO_CONINUE" : "UFFDIO_COPY");
+
+ uffd_desc = malloc(sizeof(struct uffd_desc));
+ TEST_ASSERT(uffd_desc, "malloc failed");
+
+ /* In order to get minor faults, prefault via the alias. */
+ if (is_minor)
+ expected_ioctls = ((uint64_t) 1) << _UFFDIO_CONTINUE;
+
+ uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
+ TEST_ASSERT(uffd >= 0, "uffd creation failed, errno: %d", errno);
+
+ uffdio_api.api = UFFD_API;
+ uffdio_api.features = 0;
+ TEST_ASSERT(ioctl(uffd, UFFDIO_API, &uffdio_api) != -1,
+ "ioctl UFFDIO_API failed: %" PRIu64,
+ (uint64_t)uffdio_api.api);
+
+ uffdio_register.range.start = (uint64_t)hva;
+ uffdio_register.range.len = len;
+ uffdio_register.mode = uffd_mode;
+ TEST_ASSERT(ioctl(uffd, UFFDIO_REGISTER, &uffdio_register) != -1,
+ "ioctl UFFDIO_REGISTER failed");
+ TEST_ASSERT((uffdio_register.ioctls & expected_ioctls) ==
+ expected_ioctls, "missing userfaultfd ioctls");
+
+ ret = pipe2(uffd_desc->pipefds, O_CLOEXEC | O_NONBLOCK);
+ TEST_ASSERT(!ret, "Failed to set up pipefd");
+
+ uffd_desc->uffd_mode = uffd_mode;
+ uffd_desc->uffd = uffd;
+ uffd_desc->delay = delay;
+ uffd_desc->handler = handler;
+ pthread_create(&uffd_desc->thread, NULL, uffd_handler_thread_fn,
+ uffd_desc);
+
+ PER_VCPU_DEBUG("Created uffd thread for HVA range [%p, %p)\n",
+ hva, hva + len);
+
+ return uffd_desc;
+}
+
+void uffd_stop_demand_paging(struct uffd_desc *uffd)
+{
+ char c = 0;
+ int ret;
+
+ ret = write(uffd->pipefds[1], &c, 1);
+ TEST_ASSERT(ret == 1, "Unable to write to pipefd");
+
+ ret = pthread_join(uffd->thread, NULL);
+ TEST_ASSERT(ret == 0, "pthread_join() failed");
+
+ close(uffd->uffd);
+
+ close(uffd->pipefds[1]);
+ close(uffd->pipefds[0]);
+
+ free(uffd);
+}
+
+#endif /* __NR_userfaultfd */
diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
index d532c20c74fd..c5c46f5b767c 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
@@ -499,7 +499,7 @@ vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
static void kvm_setup_gdt(struct kvm_vm *vm, struct kvm_dtable *dt)
{
if (!vm->gdt)
- vm->gdt = vm_vaddr_alloc_page(vm);
+ vm->gdt = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA);
dt->base = vm->gdt;
dt->limit = getpagesize();
@@ -509,7 +509,7 @@ static void kvm_setup_tss_64bit(struct kvm_vm *vm, struct kvm_segment *segp,
int selector)
{
if (!vm->tss)
- vm->tss = vm_vaddr_alloc_page(vm);
+ vm->tss = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA);
memset(segp, 0, sizeof(*segp));
segp->base = vm->tss;
@@ -599,8 +599,9 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
vm_vaddr_t stack_vaddr;
struct kvm_vcpu *vcpu;
- stack_vaddr = vm_vaddr_alloc(vm, DEFAULT_STACK_PGS * getpagesize(),
- DEFAULT_GUEST_STACK_VADDR_MIN);
+ stack_vaddr = __vm_vaddr_alloc(vm, DEFAULT_STACK_PGS * getpagesize(),
+ DEFAULT_GUEST_STACK_VADDR_MIN,
+ MEM_REGION_DATA);
vcpu = __vm_vcpu_add(vm, vcpu_id);
vcpu_init_cpuid(vcpu, kvm_get_supported_cpuid());
@@ -1093,8 +1094,8 @@ void vm_init_descriptor_tables(struct kvm_vm *vm)
extern void *idt_handlers;
int i;
- vm->idt = vm_vaddr_alloc_page(vm);
- vm->handlers = vm_vaddr_alloc_page(vm);
+ vm->idt = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA);
+ vm->handlers = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA);
/* Handlers have the same address in both address spaces. */
for (i = 0; i < NUM_INTERRUPTS; i++)
set_idt_entry(vm, i, (unsigned long)(&idt_handlers)[i], 0,
diff --git a/tools/testing/selftests/kvm/memslot_modification_stress_test.c b/tools/testing/selftests/kvm/memslot_modification_stress_test.c
index d07e921bfcc5..9855c41ca811 100644
--- a/tools/testing/selftests/kvm/memslot_modification_stress_test.c
+++ b/tools/testing/selftests/kvm/memslot_modification_stress_test.c
@@ -34,8 +34,6 @@
static int nr_vcpus = 1;
static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
-static bool run_vcpus = true;
-
static void vcpu_worker(struct memstress_vcpu_args *vcpu_args)
{
struct kvm_vcpu *vcpu = vcpu_args->vcpu;
@@ -45,7 +43,7 @@ static void vcpu_worker(struct memstress_vcpu_args *vcpu_args)
run = vcpu->run;
/* Let the guest access its memory until a stop signal is received */
- while (READ_ONCE(run_vcpus)) {
+ while (!READ_ONCE(memstress_args.stop_vcpus)) {
ret = _vcpu_run(vcpu);
TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);
@@ -109,8 +107,6 @@ static void run_test(enum vm_guest_mode mode, void *arg)
add_remove_memslot(vm, p->delay, p->nr_iterations);
- run_vcpus = false;
-
memstress_join_vcpu_threads(nr_vcpus);
pr_info("All vCPU threads joined\n");
diff --git a/tools/testing/selftests/kvm/memslot_perf_test.c b/tools/testing/selftests/kvm/memslot_perf_test.c
index 36b20abfb948..e698306bf49d 100644
--- a/tools/testing/selftests/kvm/memslot_perf_test.c
+++ b/tools/testing/selftests/kvm/memslot_perf_test.c
@@ -20,20 +20,20 @@
#include <unistd.h>
#include <linux/compiler.h>
+#include <linux/sizes.h>
#include <test_util.h>
#include <kvm_util.h>
#include <processor.h>
-#define MEM_SIZE ((512U << 20) + 4096)
-#define MEM_SIZE_PAGES (MEM_SIZE / 4096)
-#define MEM_GPA 0x10000000UL
+#define MEM_EXTRA_SIZE SZ_64K
+
+#define MEM_SIZE (SZ_512M + MEM_EXTRA_SIZE)
+#define MEM_GPA SZ_256M
#define MEM_AUX_GPA MEM_GPA
#define MEM_SYNC_GPA MEM_AUX_GPA
-#define MEM_TEST_GPA (MEM_AUX_GPA + 4096)
-#define MEM_TEST_SIZE (MEM_SIZE - 4096)
-static_assert(MEM_SIZE % 4096 == 0, "invalid mem size");
-static_assert(MEM_TEST_SIZE % 4096 == 0, "invalid mem test size");
+#define MEM_TEST_GPA (MEM_AUX_GPA + MEM_EXTRA_SIZE)
+#define MEM_TEST_SIZE (MEM_SIZE - MEM_EXTRA_SIZE)
/*
* 32 MiB is max size that gets well over 100 iterations on 509 slots.
@@ -41,44 +41,38 @@ static_assert(MEM_TEST_SIZE % 4096 == 0, "invalid mem test size");
* 8194 slots in use can then be tested (although with slightly
* limited resolution).
*/
-#define MEM_SIZE_MAP ((32U << 20) + 4096)
-#define MEM_SIZE_MAP_PAGES (MEM_SIZE_MAP / 4096)
-#define MEM_TEST_MAP_SIZE (MEM_SIZE_MAP - 4096)
-#define MEM_TEST_MAP_SIZE_PAGES (MEM_TEST_MAP_SIZE / 4096)
-static_assert(MEM_SIZE_MAP % 4096 == 0, "invalid map test region size");
-static_assert(MEM_TEST_MAP_SIZE % 4096 == 0, "invalid map test region size");
-static_assert(MEM_TEST_MAP_SIZE_PAGES % 2 == 0, "invalid map test region size");
-static_assert(MEM_TEST_MAP_SIZE_PAGES > 2, "invalid map test region size");
+#define MEM_SIZE_MAP (SZ_32M + MEM_EXTRA_SIZE)
+#define MEM_TEST_MAP_SIZE (MEM_SIZE_MAP - MEM_EXTRA_SIZE)
/*
* 128 MiB is the minimum size that fills 32k slots with at least one page each
* while still getting 100+ iterations in such a test
+ *
+ * 2 MiB chunk size like a typical huge page
*/
-#define MEM_TEST_UNMAP_SIZE (128U << 20)
-#define MEM_TEST_UNMAP_SIZE_PAGES (MEM_TEST_UNMAP_SIZE / 4096)
-/* 2 MiB chunk size like a typical huge page */
-#define MEM_TEST_UNMAP_CHUNK_PAGES (2U << (20 - 12))
-static_assert(MEM_TEST_UNMAP_SIZE <= MEM_TEST_SIZE,
- "invalid unmap test region size");
-static_assert(MEM_TEST_UNMAP_SIZE % 4096 == 0,
- "invalid unmap test region size");
-static_assert(MEM_TEST_UNMAP_SIZE_PAGES %
- (2 * MEM_TEST_UNMAP_CHUNK_PAGES) == 0,
- "invalid unmap test region size");
+#define MEM_TEST_UNMAP_SIZE SZ_128M
+#define MEM_TEST_UNMAP_CHUNK_SIZE SZ_2M
/*
* For the move active test the middle of the test area is placed on
* a memslot boundary: half lies in the memslot being moved, half in
* other memslot(s).
*
- * When running this test with 32k memslots (32764, really) each memslot
- * contains 4 pages.
- * The last one additionally contains the remaining 21 pages of memory,
- * for the total size of 25 pages.
- * Hence, the maximum size here is 50 pages.
+ * The number of memory slots, excluding the reserved memory slot 0,
+ * varies across architectures and configurations. The memory size in
+ * this test is calculated by taking the maximal last memory slot's
+ * size and aligning it up to the largest supported page size (64KB).
+ * This keeps the selected memory size compatible with
+ * test_memslot_move_prepare().
+ *
+ * architecture slots memory-per-slot memory-on-last-slot
+ * --------------------------------------------------------------
+ * x86-4KB 32763 16KB 160KB
+ * arm64-4KB 32766 16KB 112KB
+ * arm64-16KB 32766 16KB 112KB
+ * arm64-64KB 8192 64KB 128KB
*/
-#define MEM_TEST_MOVE_SIZE_PAGES (50)
-#define MEM_TEST_MOVE_SIZE (MEM_TEST_MOVE_SIZE_PAGES * 4096)
+#define MEM_TEST_MOVE_SIZE (3 * SZ_64K)
#define MEM_TEST_MOVE_GPA_DEST (MEM_GPA + MEM_SIZE)
static_assert(MEM_TEST_MOVE_SIZE <= MEM_TEST_SIZE,
"invalid move test region size");
@@ -100,6 +94,7 @@ struct vm_data {
};
struct sync_area {
+ uint32_t guest_page_size;
atomic_bool start_flag;
atomic_bool exit_flag;
atomic_bool sync_flag;
@@ -192,14 +187,15 @@ static void *vm_gpa2hva(struct vm_data *data, uint64_t gpa, uint64_t *rempages)
uint64_t gpage, pgoffs;
uint32_t slot, slotoffs;
void *base;
+ uint32_t guest_page_size = data->vm->page_size;
TEST_ASSERT(gpa >= MEM_GPA, "Too low gpa to translate");
- TEST_ASSERT(gpa < MEM_GPA + data->npages * 4096,
+ TEST_ASSERT(gpa < MEM_GPA + data->npages * guest_page_size,
"Too high gpa to translate");
gpa -= MEM_GPA;
- gpage = gpa / 4096;
- pgoffs = gpa % 4096;
+ gpage = gpa / guest_page_size;
+ pgoffs = gpa % guest_page_size;
slot = min(gpage / data->pages_per_slot, (uint64_t)data->nslots - 1);
slotoffs = gpage - (slot * data->pages_per_slot);
@@ -217,14 +213,16 @@ static void *vm_gpa2hva(struct vm_data *data, uint64_t gpa, uint64_t *rempages)
}
base = data->hva_slots[slot];
- return (uint8_t *)base + slotoffs * 4096 + pgoffs;
+ return (uint8_t *)base + slotoffs * guest_page_size + pgoffs;
}
static uint64_t vm_slot2gpa(struct vm_data *data, uint32_t slot)
{
+ uint32_t guest_page_size = data->vm->page_size;
+
TEST_ASSERT(slot < data->nslots, "Too high slot number");
- return MEM_GPA + slot * data->pages_per_slot * 4096;
+ return MEM_GPA + slot * data->pages_per_slot * guest_page_size;
}
static struct vm_data *alloc_vm(void)
@@ -241,81 +239,111 @@ static struct vm_data *alloc_vm(void)
return data;
}
+static bool check_slot_pages(uint32_t host_page_size, uint32_t guest_page_size,
+ uint64_t pages_per_slot, uint64_t rempages)
+{
+ if (!pages_per_slot)
+ return false;
+
+ if ((pages_per_slot * guest_page_size) % host_page_size)
+ return false;
+
+ if ((rempages * guest_page_size) % host_page_size)
+ return false;
+
+ return true;
+}
+
+static uint64_t get_max_slots(struct vm_data *data, uint32_t host_page_size)
+{
+ uint32_t guest_page_size = data->vm->page_size;
+ uint64_t mempages, pages_per_slot, rempages;
+ uint64_t slots;
+
+ mempages = data->npages;
+ slots = data->nslots;
+ while (--slots > 1) {
+ pages_per_slot = mempages / slots;
+ rempages = mempages % pages_per_slot;
+ if (check_slot_pages(host_page_size, guest_page_size,
+ pages_per_slot, rempages))
+ return slots + 1; /* slot 0 is reserved */
+ }
+
+ return 0;
+}
+
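A worked pass through check_slot_pages(), under an assumed 4KB host running 64KB guest pages:

	/* mem_size = SZ_512M + SZ_64K, nslots = 8192 */
	mempages       = (SZ_512M + SZ_64K) / SZ_64K;	/* 8193 */
	pages_per_slot = 8193 / 8192;			/* 1 */
	rempages       = 8193 % 8192;			/* 1 */
	/* (1 * SZ_64K) % SZ_4K == 0 both times -> layout accepted */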
static bool prepare_vm(struct vm_data *data, int nslots, uint64_t *maxslots,
- void *guest_code, uint64_t mempages,
+ void *guest_code, uint64_t mem_size,
struct timespec *slot_runtime)
{
- uint32_t max_mem_slots;
- uint64_t rempages;
+ uint64_t mempages, rempages;
uint64_t guest_addr;
- uint32_t slot;
+ uint32_t slot, host_page_size, guest_page_size;
struct timespec tstart;
struct sync_area *sync;
- max_mem_slots = kvm_check_cap(KVM_CAP_NR_MEMSLOTS);
- TEST_ASSERT(max_mem_slots > 1,
- "KVM_CAP_NR_MEMSLOTS should be greater than 1");
- TEST_ASSERT(nslots > 1 || nslots == -1,
- "Slot count cap should be greater than 1");
- if (nslots != -1)
- max_mem_slots = min(max_mem_slots, (uint32_t)nslots);
- pr_info_v("Allowed number of memory slots: %"PRIu32"\n", max_mem_slots);
+ host_page_size = getpagesize();
+ guest_page_size = vm_guest_mode_params[VM_MODE_DEFAULT].page_size;
+ mempages = mem_size / guest_page_size;
- TEST_ASSERT(mempages > 1,
- "Can't test without any memory");
+ data->vm = __vm_create_with_one_vcpu(&data->vcpu, mempages, guest_code);
+ TEST_ASSERT(data->vm->page_size == guest_page_size, "Invalid VM page size");
data->npages = mempages;
- data->nslots = max_mem_slots - 1;
- data->pages_per_slot = mempages / data->nslots;
- if (!data->pages_per_slot) {
- *maxslots = mempages + 1;
+ TEST_ASSERT(data->npages > 1, "Can't test without any memory");
+ data->nslots = nslots;
+ data->pages_per_slot = data->npages / data->nslots;
+ rempages = data->npages % data->nslots;
+ if (!check_slot_pages(host_page_size, guest_page_size,
+ data->pages_per_slot, rempages)) {
+ *maxslots = get_max_slots(data, host_page_size);
return false;
}
- rempages = mempages % data->nslots;
data->hva_slots = malloc(sizeof(*data->hva_slots) * data->nslots);
TEST_ASSERT(data->hva_slots, "malloc() fail");
- data->vm = __vm_create_with_one_vcpu(&data->vcpu, mempages, guest_code);
pr_info_v("Adding slots 1..%i, each slot with %"PRIu64" pages + %"PRIu64" extra pages last\n",
- max_mem_slots - 1, data->pages_per_slot, rempages);
+ data->nslots, data->pages_per_slot, rempages);
clock_gettime(CLOCK_MONOTONIC, &tstart);
- for (slot = 1, guest_addr = MEM_GPA; slot < max_mem_slots; slot++) {
+ for (slot = 1, guest_addr = MEM_GPA; slot <= data->nslots; slot++) {
uint64_t npages;
npages = data->pages_per_slot;
- if (slot == max_mem_slots - 1)
+ if (slot == data->nslots)
npages += rempages;
vm_userspace_mem_region_add(data->vm, VM_MEM_SRC_ANONYMOUS,
guest_addr, slot, npages,
0);
- guest_addr += npages * 4096;
+ guest_addr += npages * guest_page_size;
}
*slot_runtime = timespec_elapsed(tstart);
- for (slot = 0, guest_addr = MEM_GPA; slot < max_mem_slots - 1; slot++) {
+ for (slot = 1, guest_addr = MEM_GPA; slot <= data->nslots; slot++) {
uint64_t npages;
uint64_t gpa;
npages = data->pages_per_slot;
- if (slot == max_mem_slots - 2)
+ if (slot == data->nslots)
npages += rempages;
- gpa = vm_phy_pages_alloc(data->vm, npages, guest_addr,
- slot + 1);
+ gpa = vm_phy_pages_alloc(data->vm, npages, guest_addr, slot);
TEST_ASSERT(gpa == guest_addr,
"vm_phy_pages_alloc() failed\n");
- data->hva_slots[slot] = addr_gpa2hva(data->vm, guest_addr);
- memset(data->hva_slots[slot], 0, npages * 4096);
+ data->hva_slots[slot - 1] = addr_gpa2hva(data->vm, guest_addr);
+ memset(data->hva_slots[slot - 1], 0, npages * guest_page_size);
- guest_addr += npages * 4096;
+ guest_addr += npages * guest_page_size;
}
- virt_map(data->vm, MEM_GPA, MEM_GPA, mempages);
+ virt_map(data->vm, MEM_GPA, MEM_GPA, data->npages);
sync = (typeof(sync))vm_gpa2hva(data, MEM_SYNC_GPA, NULL);
atomic_init(&sync->start_flag, false);
@@ -414,6 +442,7 @@ static bool guest_perform_sync(void)
static void guest_code_test_memslot_move(void)
{
struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;
+ uint32_t page_size = (typeof(page_size))READ_ONCE(sync->guest_page_size);
uintptr_t base = (typeof(base))READ_ONCE(sync->move_area_ptr);
GUEST_SYNC(0);
@@ -424,7 +453,7 @@ static void guest_code_test_memslot_move(void)
uintptr_t ptr;
for (ptr = base; ptr < base + MEM_TEST_MOVE_SIZE;
- ptr += 4096)
+ ptr += page_size)
*(uint64_t *)ptr = MEM_TEST_VAL_1;
/*
@@ -442,6 +471,7 @@ static void guest_code_test_memslot_move(void)
static void guest_code_test_memslot_map(void)
{
struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;
+ uint32_t page_size = (typeof(page_size))READ_ONCE(sync->guest_page_size);
GUEST_SYNC(0);
@@ -451,14 +481,16 @@ static void guest_code_test_memslot_map(void)
uintptr_t ptr;
for (ptr = MEM_TEST_GPA;
- ptr < MEM_TEST_GPA + MEM_TEST_MAP_SIZE / 2; ptr += 4096)
+ ptr < MEM_TEST_GPA + MEM_TEST_MAP_SIZE / 2;
+ ptr += page_size)
*(uint64_t *)ptr = MEM_TEST_VAL_1;
if (!guest_perform_sync())
break;
for (ptr = MEM_TEST_GPA + MEM_TEST_MAP_SIZE / 2;
- ptr < MEM_TEST_GPA + MEM_TEST_MAP_SIZE; ptr += 4096)
+ ptr < MEM_TEST_GPA + MEM_TEST_MAP_SIZE;
+ ptr += page_size)
*(uint64_t *)ptr = MEM_TEST_VAL_2;
if (!guest_perform_sync())
@@ -505,6 +537,9 @@ static void guest_code_test_memslot_unmap(void)
static void guest_code_test_memslot_rw(void)
{
+ struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;
+ uint32_t page_size = (typeof(page_size))READ_ONCE(sync->guest_page_size);
+
GUEST_SYNC(0);
guest_spin_until_start();
@@ -513,14 +548,14 @@ static void guest_code_test_memslot_rw(void)
uintptr_t ptr;
for (ptr = MEM_TEST_GPA;
- ptr < MEM_TEST_GPA + MEM_TEST_SIZE; ptr += 4096)
+ ptr < MEM_TEST_GPA + MEM_TEST_SIZE; ptr += page_size)
*(uint64_t *)ptr = MEM_TEST_VAL_1;
if (!guest_perform_sync())
break;
- for (ptr = MEM_TEST_GPA + 4096 / 2;
- ptr < MEM_TEST_GPA + MEM_TEST_SIZE; ptr += 4096) {
+ for (ptr = MEM_TEST_GPA + page_size / 2;
+ ptr < MEM_TEST_GPA + MEM_TEST_SIZE; ptr += page_size) {
uint64_t val = *(uint64_t *)ptr;
GUEST_ASSERT_1(val == MEM_TEST_VAL_2, val);
@@ -538,6 +573,7 @@ static bool test_memslot_move_prepare(struct vm_data *data,
struct sync_area *sync,
uint64_t *maxslots, bool isactive)
{
+ uint32_t guest_page_size = data->vm->page_size;
uint64_t movesrcgpa, movetestgpa;
movesrcgpa = vm_slot2gpa(data, data->nslots - 1);
@@ -546,7 +582,7 @@ static bool test_memslot_move_prepare(struct vm_data *data,
uint64_t lastpages;
vm_gpa2hva(data, movesrcgpa, &lastpages);
- if (lastpages < MEM_TEST_MOVE_SIZE_PAGES / 2) {
+ if (lastpages * guest_page_size < MEM_TEST_MOVE_SIZE / 2) {
*maxslots = 0;
return false;
}
@@ -592,8 +628,9 @@ static void test_memslot_do_unmap(struct vm_data *data,
uint64_t offsp, uint64_t count)
{
uint64_t gpa, ctr;
+ uint32_t guest_page_size = data->vm->page_size;
- for (gpa = MEM_TEST_GPA + offsp * 4096, ctr = 0; ctr < count; ) {
+ for (gpa = MEM_TEST_GPA + offsp * guest_page_size, ctr = 0; ctr < count; ) {
uint64_t npages;
void *hva;
int ret;
@@ -601,12 +638,12 @@ static void test_memslot_do_unmap(struct vm_data *data,
hva = vm_gpa2hva(data, gpa, &npages);
TEST_ASSERT(npages, "Empty memory slot at gptr 0x%"PRIx64, gpa);
npages = min(npages, count - ctr);
- ret = madvise(hva, npages * 4096, MADV_DONTNEED);
+ ret = madvise(hva, npages * guest_page_size, MADV_DONTNEED);
TEST_ASSERT(!ret,
"madvise(%p, MADV_DONTNEED) on VM memory should not fail for gptr 0x%"PRIx64,
hva, gpa);
ctr += npages;
- gpa += npages * 4096;
+ gpa += npages * guest_page_size;
}
TEST_ASSERT(ctr == count,
"madvise(MADV_DONTNEED) should exactly cover all of the requested area");
@@ -617,11 +654,12 @@ static void test_memslot_map_unmap_check(struct vm_data *data,
{
uint64_t gpa;
uint64_t *val;
+ uint32_t guest_page_size = data->vm->page_size;
if (!map_unmap_verify)
return;
- gpa = MEM_TEST_GPA + offsp * 4096;
+ gpa = MEM_TEST_GPA + offsp * guest_page_size;
val = (typeof(val))vm_gpa2hva(data, gpa, NULL);
TEST_ASSERT(*val == valexp,
"Guest written values should read back correctly before unmap (%"PRIu64" vs %"PRIu64" @ %"PRIx64")",
@@ -631,12 +669,14 @@ static void test_memslot_map_unmap_check(struct vm_data *data,
static void test_memslot_map_loop(struct vm_data *data, struct sync_area *sync)
{
+ uint32_t guest_page_size = data->vm->page_size;
+ uint64_t guest_pages = MEM_TEST_MAP_SIZE / guest_page_size;
+
/*
* Unmap the second half of the test area while guest writes to (maps)
* the first half.
*/
- test_memslot_do_unmap(data, MEM_TEST_MAP_SIZE_PAGES / 2,
- MEM_TEST_MAP_SIZE_PAGES / 2);
+ test_memslot_do_unmap(data, guest_pages / 2, guest_pages / 2);
/*
* Wait for the guest to finish writing the first half of the test
@@ -647,10 +687,8 @@ static void test_memslot_map_loop(struct vm_data *data, struct sync_area *sync)
*/
host_perform_sync(sync);
test_memslot_map_unmap_check(data, 0, MEM_TEST_VAL_1);
- test_memslot_map_unmap_check(data,
- MEM_TEST_MAP_SIZE_PAGES / 2 - 1,
- MEM_TEST_VAL_1);
- test_memslot_do_unmap(data, 0, MEM_TEST_MAP_SIZE_PAGES / 2);
+ test_memslot_map_unmap_check(data, guest_pages / 2 - 1, MEM_TEST_VAL_1);
+ test_memslot_do_unmap(data, 0, guest_pages / 2);
/*
@@ -663,16 +701,16 @@ static void test_memslot_map_loop(struct vm_data *data, struct sync_area *sync)
* the test area.
*/
host_perform_sync(sync);
- test_memslot_map_unmap_check(data, MEM_TEST_MAP_SIZE_PAGES / 2,
- MEM_TEST_VAL_2);
- test_memslot_map_unmap_check(data, MEM_TEST_MAP_SIZE_PAGES - 1,
- MEM_TEST_VAL_2);
+ test_memslot_map_unmap_check(data, guest_pages / 2, MEM_TEST_VAL_2);
+ test_memslot_map_unmap_check(data, guest_pages - 1, MEM_TEST_VAL_2);
}
static void test_memslot_unmap_loop_common(struct vm_data *data,
struct sync_area *sync,
uint64_t chunk)
{
+ uint32_t guest_page_size = data->vm->page_size;
+ uint64_t guest_pages = MEM_TEST_UNMAP_SIZE / guest_page_size;
uint64_t ctr;
/*
@@ -684,42 +722,49 @@ static void test_memslot_unmap_loop_common(struct vm_data *data,
*/
host_perform_sync(sync);
test_memslot_map_unmap_check(data, 0, MEM_TEST_VAL_1);
- for (ctr = 0; ctr < MEM_TEST_UNMAP_SIZE_PAGES / 2; ctr += chunk)
+ for (ctr = 0; ctr < guest_pages / 2; ctr += chunk)
test_memslot_do_unmap(data, ctr, chunk);
/* Likewise, but for the opposite host / guest areas */
host_perform_sync(sync);
- test_memslot_map_unmap_check(data, MEM_TEST_UNMAP_SIZE_PAGES / 2,
- MEM_TEST_VAL_2);
- for (ctr = MEM_TEST_UNMAP_SIZE_PAGES / 2;
- ctr < MEM_TEST_UNMAP_SIZE_PAGES; ctr += chunk)
+ test_memslot_map_unmap_check(data, guest_pages / 2, MEM_TEST_VAL_2);
+ for (ctr = guest_pages / 2; ctr < guest_pages; ctr += chunk)
test_memslot_do_unmap(data, ctr, chunk);
}
static void test_memslot_unmap_loop(struct vm_data *data,
struct sync_area *sync)
{
- test_memslot_unmap_loop_common(data, sync, 1);
+ uint32_t host_page_size = getpagesize();
+ uint32_t guest_page_size = data->vm->page_size;
+ uint64_t guest_chunk_pages = guest_page_size >= host_page_size ?
+ 1 : host_page_size / guest_page_size;
+
+ test_memslot_unmap_loop_common(data, sync, guest_chunk_pages);
}
static void test_memslot_unmap_loop_chunked(struct vm_data *data,
struct sync_area *sync)
{
- test_memslot_unmap_loop_common(data, sync, MEM_TEST_UNMAP_CHUNK_PAGES);
+ uint32_t guest_page_size = data->vm->page_size;
+ uint64_t guest_chunk_pages = MEM_TEST_UNMAP_CHUNK_SIZE / guest_page_size;
+
+ test_memslot_unmap_loop_common(data, sync, guest_chunk_pages);
}
static void test_memslot_rw_loop(struct vm_data *data, struct sync_area *sync)
{
uint64_t gptr;
+ uint32_t guest_page_size = data->vm->page_size;
- for (gptr = MEM_TEST_GPA + 4096 / 2;
- gptr < MEM_TEST_GPA + MEM_TEST_SIZE; gptr += 4096)
+ for (gptr = MEM_TEST_GPA + guest_page_size / 2;
+ gptr < MEM_TEST_GPA + MEM_TEST_SIZE; gptr += guest_page_size)
*(uint64_t *)vm_gpa2hva(data, gptr, NULL) = MEM_TEST_VAL_2;
host_perform_sync(sync);
for (gptr = MEM_TEST_GPA;
- gptr < MEM_TEST_GPA + MEM_TEST_SIZE; gptr += 4096) {
+ gptr < MEM_TEST_GPA + MEM_TEST_SIZE; gptr += guest_page_size) {
uint64_t *vptr = (typeof(vptr))vm_gpa2hva(data, gptr, NULL);
uint64_t val = *vptr;
@@ -748,7 +793,7 @@ static bool test_execute(int nslots, uint64_t *maxslots,
struct timespec *slot_runtime,
struct timespec *guest_runtime)
{
- uint64_t mem_size = tdata->mem_size ? : MEM_SIZE_PAGES;
+ uint64_t mem_size = tdata->mem_size ? : MEM_SIZE;
struct vm_data *data;
struct sync_area *sync;
struct timespec tstart;
@@ -763,6 +808,7 @@ static bool test_execute(int nslots, uint64_t *maxslots,
sync = (typeof(sync))vm_gpa2hva(data, MEM_SYNC_GPA, NULL);
+ sync->guest_page_size = data->vm->page_size;
if (tdata->prepare &&
!tdata->prepare(data, sync, maxslots)) {
ret = false;
@@ -796,19 +842,19 @@ exit_free:
static const struct test_data tests[] = {
{
.name = "map",
- .mem_size = MEM_SIZE_MAP_PAGES,
+ .mem_size = MEM_SIZE_MAP,
.guest_code = guest_code_test_memslot_map,
.loop = test_memslot_map_loop,
},
{
.name = "unmap",
- .mem_size = MEM_TEST_UNMAP_SIZE_PAGES + 1,
+ .mem_size = MEM_TEST_UNMAP_SIZE + MEM_EXTRA_SIZE,
.guest_code = guest_code_test_memslot_unmap,
.loop = test_memslot_unmap_loop,
},
{
.name = "unmap chunked",
- .mem_size = MEM_TEST_UNMAP_SIZE_PAGES + 1,
+ .mem_size = MEM_TEST_UNMAP_SIZE + MEM_EXTRA_SIZE,
.guest_code = guest_code_test_memslot_unmap,
.loop = test_memslot_unmap_loop_chunked,
},
@@ -866,9 +912,46 @@ static void help(char *name, struct test_args *targs)
pr_info("%d: %s\n", ctr, tests[ctr].name);
}
+static bool check_memory_sizes(void)
+{
+ uint32_t host_page_size = getpagesize();
+ uint32_t guest_page_size = vm_guest_mode_params[VM_MODE_DEFAULT].page_size;
+
+ if (host_page_size > SZ_64K || guest_page_size > SZ_64K) {
+ pr_info("Unsupported page size on host (0x%x) or guest (0x%x)\n",
+ host_page_size, guest_page_size);
+ return false;
+ }
+
+ if (MEM_SIZE % guest_page_size ||
+ MEM_TEST_SIZE % guest_page_size) {
+ pr_info("invalid MEM_SIZE or MEM_TEST_SIZE\n");
+ return false;
+ }
+
+ if (MEM_SIZE_MAP % guest_page_size ||
+ MEM_TEST_MAP_SIZE % guest_page_size ||
+ (MEM_TEST_MAP_SIZE / guest_page_size) <= 2 ||
+ (MEM_TEST_MAP_SIZE / guest_page_size) % 2) {
+ pr_info("invalid MEM_SIZE_MAP or MEM_TEST_MAP_SIZE\n");
+ return false;
+ }
+
+ if (MEM_TEST_UNMAP_SIZE > MEM_TEST_SIZE ||
+ MEM_TEST_UNMAP_SIZE % guest_page_size ||
+ (MEM_TEST_UNMAP_SIZE / guest_page_size) %
+ (2 * MEM_TEST_UNMAP_CHUNK_SIZE / guest_page_size)) {
+ pr_info("invalid MEM_TEST_UNMAP_SIZE or MEM_TEST_UNMAP_CHUNK_SIZE\n");
+ return false;
+ }
+
+ return true;
+}
+
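For the unmap clause in check_memory_sizes() above, a worked check assuming 4KB guest pages:

	/*
	 * MEM_TEST_UNMAP_SIZE / 4KB = 32768 pages, and a chunk is
	 * SZ_2M / 4KB = 512 pages; 32768 % (2 * 512) == 0, so each
	 * half of the test area unmaps in whole chunks.
	 */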
static bool parse_args(int argc, char *argv[],
struct test_args *targs)
{
+ uint32_t max_mem_slots;
int opt;
while ((opt = getopt(argc, argv, "hvds:f:e:l:r:")) != -1) {
@@ -885,8 +968,8 @@ static bool parse_args(int argc, char *argv[],
break;
case 's':
targs->nslots = atoi_paranoid(optarg);
- if (targs->nslots <= 0 && targs->nslots != -1) {
- pr_info("Slot count cap has to be positive or -1 for no cap\n");
+ if (targs->nslots <= 1 && targs->nslots != -1) {
+ pr_info("Slot count cap must be larger than 1 or -1 for no cap\n");
return false;
}
break;
@@ -920,6 +1003,21 @@ static bool parse_args(int argc, char *argv[],
return false;
}
+ max_mem_slots = kvm_check_cap(KVM_CAP_NR_MEMSLOTS);
+ if (max_mem_slots <= 1) {
+ pr_info("KVM_CAP_NR_MEMSLOTS should be greater than 1\n");
+ return false;
+ }
+
+ /* Memory slot 0 is reserved */
+ if (targs->nslots == -1)
+ targs->nslots = max_mem_slots - 1;
+ else
+ targs->nslots = min_t(int, targs->nslots, max_mem_slots) - 1;
+
+ pr_info_v("Allowed Number of memory slots: %"PRIu32"\n",
+ targs->nslots + 1);
+
return true;
}
@@ -994,6 +1092,9 @@ int main(int argc, char *argv[])
struct test_result rbestslottime;
int tctr;
+ if (!check_memory_sizes())
+ return -1;
+
if (!parse_args(argc, argv, &targs))
return -1;