From 2bc48136224592fe4f8f50629d0469d6d0274207 Mon Sep 17 00:00:00 2001 From: Peter Xu Date: Wed, 28 Jun 2023 17:53:09 -0400 Subject: selftests/mm: add -a to run_vmtests.sh Allows to specify optional tests in run_vmtests.sh, where we can run time consuming test matrix only when user specified "-a". Link: https://lkml.kernel.org/r/20230628215310.73782-8-peterx@redhat.com Signed-off-by: Peter Xu Acked-by: David Hildenbrand Cc: Andrea Arcangeli Cc: Hugh Dickins Cc: James Houghton Cc: Jason Gunthorpe Cc: John Hubbard Cc: Kirill A . Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Vlastimil Babka Cc: Yang Shi Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/run_vmtests.sh | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/mm/run_vmtests.sh b/tools/testing/selftests/mm/run_vmtests.sh index 3f26f6e15b2a..824e651f62f4 100755 --- a/tools/testing/selftests/mm/run_vmtests.sh +++ b/tools/testing/selftests/mm/run_vmtests.sh @@ -12,11 +12,14 @@ exitcode=0 usage() { cat <"] +usage: ${BASH_SOURCE[0]:-$0} [ options ] + + -a: run all tests, including extra ones -t: specify specific categories to tests to run -h: display this message -The default behavior is to run all tests. +The default behavior is to run required tests only. If -a is specified, +will run all tests. Alternatively, specific groups tests can be run by passing a string to the -t argument containing one or more of the following categories @@ -60,9 +63,11 @@ EOF exit 0 } +RUN_ALL=false -while getopts "ht:" OPT; do +while getopts "aht:" OPT; do case ${OPT} in + "a") RUN_ALL=true ;; "h") usage ;; "t") VM_SELFTEST_ITEMS=${OPTARG} ;; esac -- cgit v1.2.3 From de4ec376df7bd2db4aa887b350a681686ea54064 Mon Sep 17 00:00:00 2001 From: Peter Xu Date: Wed, 28 Jun 2023 17:53:10 -0400 Subject: selftests/mm: add gup test matrix in run_vmtests.sh Add a matrix for testing gup based on the current gup_test. Only run the matrix when -a is specified because it's a bit slow. It covers: - Different types of huge pages: thp, hugetlb, or no huge page - Permissions: Write / Read-only - Fast-gup, with/without - Types of the GUP: pin / gup / longterm pins - Shared / Private memories - GUP size: 1 / 512 / random page sizes Link: https://lkml.kernel.org/r/20230628215310.73782-9-peterx@redhat.com Signed-off-by: Peter Xu Acked-by: David Hildenbrand Cc: Andrea Arcangeli Cc: Hugh Dickins Cc: James Houghton Cc: Jason Gunthorpe Cc: John Hubbard Cc: Kirill A . Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Vlastimil Babka Cc: Yang Shi Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/run_vmtests.sh | 37 ++++++++++++++++++++++++++----- 1 file changed, 32 insertions(+), 5 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/mm/run_vmtests.sh b/tools/testing/selftests/mm/run_vmtests.sh index 824e651f62f4..9666c0c171ab 100755 --- a/tools/testing/selftests/mm/run_vmtests.sh +++ b/tools/testing/selftests/mm/run_vmtests.sh @@ -90,6 +90,30 @@ test_selected() { fi } +run_gup_matrix() { + # -t: thp=on, -T: thp=off, -H: hugetlb=on + local hugetlb_mb=$(( needmem_KB / 1024 )) + + for huge in -t -T "-H -m $hugetlb_mb"; do + # -u: gup-fast, -U: gup-basic, -a: pin-fast, -b: pin-basic, -L: pin-longterm + for test_cmd in -u -U -a -b -L; do + # -w: write=1, -W: write=0 + for write in -w -W; do + # -S: shared + for share in -S " "; do + # -n: How many pages to fetch together? 
512 is special + # because it's default thp size (or 2M on x86), 123 to + # just test partial gup when hit a huge in whatever form + for num in "-n 1" "-n 512" "-n 123"; do + CATEGORY="gup_test" run_test ./gup_test \ + $huge $test_cmd $write $share $num + done + done + done + done + done +} + # get huge pagesize and freepages from /proc/meminfo while read -r name size unit; do if [ "$name" = "HugePages_Free:" ]; then @@ -194,13 +218,16 @@ fi CATEGORY="mmap" run_test ./map_fixed_noreplace -# get_user_pages_fast() benchmark -CATEGORY="gup_test" run_test ./gup_test -u -# pin_user_pages_fast() benchmark -CATEGORY="gup_test" run_test ./gup_test -a +if $RUN_ALL; then + run_gup_matrix +else + # get_user_pages_fast() benchmark + CATEGORY="gup_test" run_test ./gup_test -u + # pin_user_pages_fast() benchmark + CATEGORY="gup_test" run_test ./gup_test -a +fi # Dump pages 0, 19, and 4096, using pin_user_pages: CATEGORY="gup_test" run_test ./gup_test -ct -F 0x1 0 19 0x1000 - CATEGORY="gup_test" run_test ./gup_longterm CATEGORY="userfaultfd" run_test ./uffd-unit-tests -- cgit v1.2.3 From c38d9ff2cc6777f7dc5c02fcb4842402ab5f362a Mon Sep 17 00:00:00 2001 From: Peng Zhang Date: Wed, 28 Jun 2023 15:36:55 +0800 Subject: maple_tree: add test for expanding range in RCU mode Add test for expanding range in RCU mode. If we use the fast path of the slot store to expand range in RCU mode, this test will fail. Link: https://lkml.kernel.org/r/20230628073657.75314-3-zhangpeng.00@bytedance.com Signed-off-by: Peng Zhang Reviewed-by: Liam R. Howlett Signed-off-by: Andrew Morton --- tools/testing/radix-tree/maple.c | 75 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 75 insertions(+) (limited to 'tools') diff --git a/tools/testing/radix-tree/maple.c b/tools/testing/radix-tree/maple.c index 75ea2081a317..9901ae821911 100644 --- a/tools/testing/radix-tree/maple.c +++ b/tools/testing/radix-tree/maple.c @@ -45,6 +45,13 @@ struct rcu_test_struct2 { unsigned long last[RCU_RANGE_COUNT]; }; +struct rcu_test_struct3 { + struct maple_tree *mt; + unsigned long index; + unsigned long last; + bool stop; +}; + struct rcu_reader_struct { unsigned int id; int mod; @@ -34954,6 +34961,70 @@ void run_check_rcu(struct maple_tree *mt, struct rcu_test_struct *vals) MT_BUG_ON(mt, !vals->seen_entry2); } +static void *rcu_slot_store_reader(void *ptr) +{ + struct rcu_test_struct3 *test = ptr; + MA_STATE(mas, test->mt, test->index, test->index); + + rcu_register_thread(); + + rcu_read_lock(); + while (!test->stop) { + mas_walk(&mas); + /* The length of growth to both sides must be equal. 
*/ + RCU_MT_BUG_ON(test, (test->index - mas.index) != + (mas.last - test->last)); + } + rcu_read_unlock(); + + rcu_unregister_thread(); + return NULL; +} + +static noinline void run_check_rcu_slot_store(struct maple_tree *mt) +{ + pthread_t readers[20]; + int range_cnt = 200, i, limit = 10000; + unsigned long len = ULONG_MAX / range_cnt, start, end; + struct rcu_test_struct3 test = {.stop = false, .mt = mt}; + + start = range_cnt / 2 * len; + end = start + len - 1; + test.index = start; + test.last = end; + + for (i = 0; i < range_cnt; i++) { + mtree_store_range(mt, i * len, i * len + len - 1, + xa_mk_value(i * 100), GFP_KERNEL); + } + + mt_set_in_rcu(mt); + MT_BUG_ON(mt, !mt_in_rcu(mt)); + + for (i = 0; i < ARRAY_SIZE(readers); i++) { + if (pthread_create(&readers[i], NULL, rcu_slot_store_reader, + &test)) { + perror("creating reader thread"); + exit(1); + } + } + + usleep(5); + + while (limit--) { + /* Step by step, expand the most middle range to both sides. */ + mtree_store_range(mt, --start, ++end, xa_mk_value(100), + GFP_KERNEL); + } + + test.stop = true; + + while (i--) + pthread_join(readers[i], NULL); + + mt_validate(mt); +} + static noinline void run_check_rcu_slowread(struct maple_tree *mt, struct rcu_test_struct *vals) { @@ -35206,6 +35277,10 @@ static noinline void __init check_rcu_threaded(struct maple_tree *mt) run_check_rcu(mt, &vals); mtree_destroy(mt); + /* Check expanding range in RCU mode */ + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + run_check_rcu_slot_store(mt); + mtree_destroy(mt); /* Forward writer for rcu stress */ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); -- cgit v1.2.3 From fe3b1bf19bdf122ec3d3bff03329df2044ae0f18 Mon Sep 17 00:00:00 2001 From: Domenico Cerasuolo Date: Wed, 21 Jun 2023 17:35:46 +0200 Subject: selftests: cgroup: add test_zswap program Patch series "selftests: cgroup: add zswap test program". This series adds 2 zswap related selftests that verify known and fixed issues. A new dedicated test program (test_zswap) is proposed since the test cases are specific to zswap and hosts specific helpers. The first patch adds the (empty) test program, while the other 2 add an actual test function each. This patch (of 3): Add empty cgroup-zswap self test scaffold program, test functions to be added in the next commits. 
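For reference, a typical way to build and run the new program once the series is applied is sketched below; the paths and the use of sudo are assumptions based on the usual kselftest workflow, and the program itself additionally requires cgroup v2 mounted and zswap configured, as it checks in main():

    # build only the cgroup selftests, then run the new binary as root
    make -C tools/testing/selftests TARGETS=cgroup
    sudo ./tools/testing/selftests/cgroup/test_zswap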
Link: https://lkml.kernel.org/r/20230621153548.428093-1-cerasuolodomenico@gmail.com Link: https://lkml.kernel.org/r/20230621153548.428093-2-cerasuolodomenico@gmail.com Signed-off-by: Domenico Cerasuolo Cc: Dan Streetman Cc: Johannes Weiner Cc: Michal Hocko Cc: Muchun Song Cc: Nhat Pham Cc: Rik van Riel Cc: Roman Gushchin Cc: Seth Jennings Cc: Shakeel Butt Cc: Shuah Khan Cc: Tejun Heo Cc: Vitaly Wool Cc: Zefan Li Signed-off-by: Andrew Morton --- tools/testing/selftests/cgroup/.gitignore | 1 + tools/testing/selftests/cgroup/Makefile | 2 + tools/testing/selftests/cgroup/test_zswap.c | 62 +++++++++++++++++++++++++++++ 3 files changed, 65 insertions(+) create mode 100644 tools/testing/selftests/cgroup/test_zswap.c (limited to 'tools') diff --git a/tools/testing/selftests/cgroup/.gitignore b/tools/testing/selftests/cgroup/.gitignore index c4a57e69f749..4d556df4f77b 100644 --- a/tools/testing/selftests/cgroup/.gitignore +++ b/tools/testing/selftests/cgroup/.gitignore @@ -5,4 +5,5 @@ test_freezer test_kmem test_kill test_cpu +test_zswap wait_inotify diff --git a/tools/testing/selftests/cgroup/Makefile b/tools/testing/selftests/cgroup/Makefile index 3d263747d2ad..27dbdd7bb4bb 100644 --- a/tools/testing/selftests/cgroup/Makefile +++ b/tools/testing/selftests/cgroup/Makefile @@ -12,6 +12,7 @@ TEST_GEN_PROGS += test_core TEST_GEN_PROGS += test_freezer TEST_GEN_PROGS += test_kill TEST_GEN_PROGS += test_cpu +TEST_GEN_PROGS += test_zswap LOCAL_HDRS += $(selfdir)/clone3/clone3_selftests.h $(selfdir)/pidfd/pidfd.h @@ -23,3 +24,4 @@ $(OUTPUT)/test_core: cgroup_util.c $(OUTPUT)/test_freezer: cgroup_util.c $(OUTPUT)/test_kill: cgroup_util.c $(OUTPUT)/test_cpu: cgroup_util.c +$(OUTPUT)/test_zswap: cgroup_util.c diff --git a/tools/testing/selftests/cgroup/test_zswap.c b/tools/testing/selftests/cgroup/test_zswap.c new file mode 100644 index 000000000000..001533667322 --- /dev/null +++ b/tools/testing/selftests/cgroup/test_zswap.c @@ -0,0 +1,62 @@ +// SPDX-License-Identifier: GPL-2.0 +#define _GNU_SOURCE + +#include +#include +#include + +#include "../kselftest.h" +#include "cgroup_util.h" + +#define T(x) { x, #x } +struct zswap_test { + int (*fn)(const char *root); + const char *name; +} tests[] = { +}; +#undef T + +static bool zswap_configured(void) +{ + return access("/sys/module/zswap", F_OK) == 0; +} + +int main(int argc, char **argv) +{ + char root[PATH_MAX]; + int i, ret = EXIT_SUCCESS; + + if (cg_find_unified_root(root, sizeof(root))) + ksft_exit_skip("cgroup v2 isn't mounted\n"); + + if (!zswap_configured()) + ksft_exit_skip("zswap isn't configured\n"); + + /* + * Check that memory controller is available: + * memory is listed in cgroup.controllers + */ + if (cg_read_strstr(root, "cgroup.controllers", "memory")) + ksft_exit_skip("memory controller isn't available\n"); + + if (cg_read_strstr(root, "cgroup.subtree_control", "memory")) + if (cg_write(root, "cgroup.subtree_control", "+memory")) + ksft_exit_skip("Failed to set memory controller\n"); + + for (i = 0; i < ARRAY_SIZE(tests); i++) { + switch (tests[i].fn(root)) { + case KSFT_PASS: + ksft_test_result_pass("%s\n", tests[i].name); + break; + case KSFT_SKIP: + ksft_test_result_skip("%s\n", tests[i].name); + break; + default: + ret = EXIT_FAILURE; + ksft_test_result_fail("%s\n", tests[i].name); + break; + } + } + + return ret; +} -- cgit v1.2.3 From a549f9f31561a569e3c7a4cb6226a0b31d35fc89 Mon Sep 17 00:00:00 2001 From: Domenico Cerasuolo Date: Wed, 21 Jun 2023 17:35:47 +0200 Subject: selftests: cgroup: add test_zswap with no kmem bypass test Add a 
cgroup selftest that verifies memcg charging in zswap. The original issue was that kmem bypass was applied to pages swapped out to zswap by kswapd, resulting in zswapped memory not being charged. It was fixed by commit cd08d80ecdac("mm: correctly charge compressed memory to its memcg"). Link: https://lkml.kernel.org/r/20230621153548.428093-3-cerasuolodomenico@gmail.com Signed-off-by: Domenico Cerasuolo Cc: Dan Streetman Cc: Johannes Weiner Cc: Michal Hocko Cc: Muchun Song Cc: Nhat Pham Cc: Rik van Riel Cc: Roman Gushchin Cc: Seth Jennings Cc: Shakeel Butt Cc: Shuah Khan Cc: Tejun Heo Cc: Vitaly Wool Cc: Zefan Li Signed-off-by: Andrew Morton --- tools/testing/selftests/cgroup/test_zswap.c | 163 ++++++++++++++++++++++++++++ 1 file changed, 163 insertions(+) (limited to 'tools') diff --git a/tools/testing/selftests/cgroup/test_zswap.c b/tools/testing/selftests/cgroup/test_zswap.c index 001533667322..e859fecd310b 100644 --- a/tools/testing/selftests/cgroup/test_zswap.c +++ b/tools/testing/selftests/cgroup/test_zswap.c @@ -4,15 +4,178 @@ #include #include #include +#include +#include +#include +#include +#include #include "../kselftest.h" #include "cgroup_util.h" +static int read_int(const char *path, size_t *value) +{ + FILE *file; + int ret = 0; + + file = fopen(path, "r"); + if (!file) + return -1; + if (fscanf(file, "%ld", value) != 1) + ret = -1; + fclose(file); + return ret; +} + +static int set_min_free_kb(size_t value) +{ + FILE *file; + int ret; + + file = fopen("/proc/sys/vm/min_free_kbytes", "w"); + if (!file) + return -1; + ret = fprintf(file, "%ld\n", value); + fclose(file); + return ret; +} + +static int read_min_free_kb(size_t *value) +{ + return read_int("/proc/sys/vm/min_free_kbytes", value); +} + +static int get_zswap_stored_pages(size_t *value) +{ + return read_int("/sys/kernel/debug/zswap/stored_pages", value); +} + +struct no_kmem_bypass_child_args { + size_t target_alloc_bytes; + size_t child_allocated; +}; + +static int no_kmem_bypass_child(const char *cgroup, void *arg) +{ + struct no_kmem_bypass_child_args *values = arg; + void *allocation; + + allocation = malloc(values->target_alloc_bytes); + if (!allocation) { + values->child_allocated = true; + return -1; + } + for (long i = 0; i < values->target_alloc_bytes; i += 4095) + ((char *)allocation)[i] = 'a'; + values->child_allocated = true; + pause(); + free(allocation); + return 0; +} + +/* + * When pages owned by a memcg are pushed to zswap by kswapd, they should be + * charged to that cgroup. This wasn't the case before commit + * cd08d80ecdac("mm: correctly charge compressed memory to its memcg"). + * + * The test first allocates memory in a memcg, then raises min_free_kbytes to + * a very high value so that the allocation falls below low wm, then makes + * another allocation to trigger kswapd that should push the memcg-owned pages + * to zswap and verifies that the zswap pages are correctly charged. + * + * To be run on a VM with at most 4G of memory. 
+ */ +static int test_no_kmem_bypass(const char *root) +{ + size_t min_free_kb_high, min_free_kb_low, min_free_kb_original; + struct no_kmem_bypass_child_args *values; + size_t trigger_allocation_size; + int wait_child_iteration = 0; + long stored_pages_threshold; + struct sysinfo sys_info; + int ret = KSFT_FAIL; + int child_status; + char *test_group; + pid_t child_pid; + + /* Read sys info and compute test values accordingly */ + if (sysinfo(&sys_info) != 0) + return KSFT_FAIL; + if (sys_info.totalram > 5000000000) + return KSFT_SKIP; + values = mmap(0, sizeof(struct no_kmem_bypass_child_args), PROT_READ | + PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0); + if (values == MAP_FAILED) + return KSFT_FAIL; + if (read_min_free_kb(&min_free_kb_original)) + return KSFT_FAIL; + min_free_kb_high = sys_info.totalram / 2000; + min_free_kb_low = sys_info.totalram / 500000; + values->target_alloc_bytes = (sys_info.totalram - min_free_kb_high * 1000) + + sys_info.totalram * 5 / 100; + stored_pages_threshold = sys_info.totalram / 5 / 4096; + trigger_allocation_size = sys_info.totalram / 20; + + /* Set up test memcg */ + if (cg_write(root, "cgroup.subtree_control", "+memory")) + goto out; + test_group = cg_name(root, "kmem_bypass_test"); + if (!test_group) + goto out; + + /* Spawn memcg child and wait for it to allocate */ + set_min_free_kb(min_free_kb_low); + if (cg_create(test_group)) + goto out; + values->child_allocated = false; + child_pid = cg_run_nowait(test_group, no_kmem_bypass_child, values); + if (child_pid < 0) + goto out; + while (!values->child_allocated && wait_child_iteration++ < 10000) + usleep(1000); + + /* Try to wakeup kswapd and let it push child memory to zswap */ + set_min_free_kb(min_free_kb_high); + for (int i = 0; i < 20; i++) { + size_t stored_pages; + char *trigger_allocation = malloc(trigger_allocation_size); + + if (!trigger_allocation) + break; + for (int i = 0; i < trigger_allocation_size; i += 4095) + trigger_allocation[i] = 'b'; + usleep(100000); + free(trigger_allocation); + if (get_zswap_stored_pages(&stored_pages)) + break; + if (stored_pages < 0) + break; + /* If memory was pushed to zswap, verify it belongs to memcg */ + if (stored_pages > stored_pages_threshold) { + int zswapped = cg_read_key_long(test_group, "memory.stat", "zswapped "); + int delta = stored_pages * 4096 - zswapped; + int result_ok = delta < stored_pages * 4096 / 4; + + ret = result_ok ? KSFT_PASS : KSFT_FAIL; + break; + } + } + + kill(child_pid, SIGTERM); + waitpid(child_pid, &child_status, 0); +out: + set_min_free_kb(min_free_kb_original); + cg_destroy(test_group); + free(test_group); + return ret; +} + #define T(x) { x, #x } struct zswap_test { int (*fn)(const char *root); const char *name; } tests[] = { + T(test_no_kmem_bypass), }; #undef T -- cgit v1.2.3 From d9cfaf405b8ffe2c716b1ce4c82e0a19d50951da Mon Sep 17 00:00:00 2001 From: Domenico Cerasuolo Date: Wed, 21 Jun 2023 17:35:48 +0200 Subject: selftests: cgroup: add zswap-memcg unwanted writeback test Add a test to verify that when a memcg hits its limit in zswap, it doesn't trigger an unwanted writeback that would result in pages not owned by that memcg to be sent to disk, even if zswap isn't full. This was fixed by commit 0bdf0efa180a("zswap: do not shrink if cgroup may not zswap"). 
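As a rough manual equivalent of what this test automates (a sketch only; the cgroup2 mount point and the debugfs stat path are assumptions matching the helpers used by the patch):

    mkdir /sys/fs/cgroup/no_shrink_test
    echo 1M  > /sys/fs/cgroup/no_shrink_test/memory.max
    echo 10K > /sys/fs/cgroup/no_shrink_test/memory.zswap.max
    before=$(cat /sys/kernel/debug/zswap/written_back_pages)
    # ... run a workload in the cgroup that allocates ~10x memory.max ...
    after=$(cat /sys/kernel/debug/zswap/written_back_pages)
    [ "$after" -eq "$before" ] && echo "no unwanted writeback"

The test performs the same comparison from C, via cg_write() and the written_back_pages debugfs counter.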
Link: https://lkml.kernel.org/r/20230621153548.428093-4-cerasuolodomenico@gmail.com Signed-off-by: Domenico Cerasuolo Cc: Dan Streetman Cc: Johannes Weiner Cc: Michal Hocko Cc: Muchun Song Cc: Nhat Pham Cc: Rik van Riel Cc: Roman Gushchin Cc: Seth Jennings Cc: Shakeel Butt Cc: Shuah Khan Cc: Tejun Heo Cc: Vitaly Wool Cc: Zefan Li Signed-off-by: Andrew Morton --- tools/testing/selftests/cgroup/test_zswap.c | 61 +++++++++++++++++++++++++++++ 1 file changed, 61 insertions(+) (limited to 'tools') diff --git a/tools/testing/selftests/cgroup/test_zswap.c b/tools/testing/selftests/cgroup/test_zswap.c index e859fecd310b..49def87a909b 100644 --- a/tools/testing/selftests/cgroup/test_zswap.c +++ b/tools/testing/selftests/cgroup/test_zswap.c @@ -50,6 +50,66 @@ static int get_zswap_stored_pages(size_t *value) return read_int("/sys/kernel/debug/zswap/stored_pages", value); } +static int get_zswap_written_back_pages(size_t *value) +{ + return read_int("/sys/kernel/debug/zswap/written_back_pages", value); +} + +static int allocate_bytes(const char *cgroup, void *arg) +{ + size_t size = (size_t)arg; + char *mem = (char *)malloc(size); + + if (!mem) + return -1; + for (int i = 0; i < size; i += 4095) + mem[i] = 'a'; + free(mem); + return 0; +} + +/* + * When trying to store a memcg page in zswap, if the memcg hits its memory + * limit in zswap, writeback should not be triggered. + * + * This was fixed with commit 0bdf0efa180a("zswap: do not shrink if cgroup may + * not zswap"). Needs to be revised when a per memcg writeback mechanism is + * implemented. + */ +static int test_no_invasive_cgroup_shrink(const char *root) +{ + size_t written_back_before, written_back_after; + int ret = KSFT_FAIL; + char *test_group; + + /* Set up */ + test_group = cg_name(root, "no_shrink_test"); + if (!test_group) + goto out; + if (cg_create(test_group)) + goto out; + if (cg_write(test_group, "memory.max", "1M")) + goto out; + if (cg_write(test_group, "memory.zswap.max", "10K")) + goto out; + if (get_zswap_written_back_pages(&written_back_before)) + goto out; + + /* Allocate 10x memory.max to push memory into zswap */ + if (cg_run(test_group, allocate_bytes, (void *)MB(10))) + goto out; + + /* Verify that no writeback happened because of the memcg allocation */ + if (get_zswap_written_back_pages(&written_back_after)) + goto out; + if (written_back_after == written_back_before) + ret = KSFT_PASS; +out: + cg_destroy(test_group); + free(test_group); + return ret; +} + struct no_kmem_bypass_child_args { size_t target_alloc_bytes; size_t child_allocated; @@ -176,6 +236,7 @@ struct zswap_test { const char *name; } tests[] = { T(test_no_kmem_bypass), + T(test_no_invasive_cgroup_shrink), }; #undef T -- cgit v1.2.3 From 3d0745e59c840e64c6b7bf102c43bec29337605d Mon Sep 17 00:00:00 2001 From: xu xin Date: Tue, 13 Jun 2023 11:09:47 +0800 Subject: selftest: add a testcase of ksm zero pages Add a function test_unmerge_zero_page() to test the functionality on unsharing and counting ksm-placed zero pages and counting of this patch series. test_unmerge_zero_page() actually contains four subjct test objects: (1) whether the count of ksm zero pages can update correctly after merging; (2) whether the count of ksm zero pages can update correctly after unmerging by madvise(...MADV_UNMERGEABLE); (3) whether the count of ksm zero pages can update correctly after unmerging by triggering write fault. (4) whether ksm zero pages are really unmerged. 
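The interfaces the test relies on can also be inspected by hand; a minimal sketch (root required, CONFIG_KSM assumed, and <pid> is a placeholder for a process whose memory is registered with MADV_MERGEABLE):

    echo 1 > /sys/kernel/mm/ksm/use_zero_pages
    echo 1 > /sys/kernel/mm/ksm/run
    grep ksm_zero_pages /proc/<pid>/ksm_stat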
Link: https://lkml.kernel.org/r/20230613030947.186089-1-yang.yang29@zte.com.cn Signed-off-by: xu xin Acked-by: David Hildenbrand Reviewed-by: Xiaokai Ran Reviewed-by: Yang Yang Cc: Claudio Imbrenda Cc: Xuexin Jiang Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/ksm_functional_tests.c | 98 ++++++++++++++++++++++- 1 file changed, 97 insertions(+), 1 deletion(-) (limited to 'tools') diff --git a/tools/testing/selftests/mm/ksm_functional_tests.c b/tools/testing/selftests/mm/ksm_functional_tests.c index 26853badae70..0de9d33cd565 100644 --- a/tools/testing/selftests/mm/ksm_functional_tests.c +++ b/tools/testing/selftests/mm/ksm_functional_tests.c @@ -29,6 +29,8 @@ static int ksm_fd; static int ksm_full_scans_fd; +static int proc_self_ksm_stat_fd; +static int ksm_use_zero_pages_fd; static int pagemap_fd; static size_t pagesize; @@ -59,6 +61,33 @@ static bool range_maps_duplicates(char *addr, unsigned long size) return false; } +static long get_my_ksm_zero_pages(void) +{ + char buf[200]; + char *substr_ksm_zero; + size_t value_pos; + ssize_t read_size; + unsigned long my_ksm_zero_pages; + + if (!proc_self_ksm_stat_fd) + return 0; + + read_size = pread(proc_self_ksm_stat_fd, buf, sizeof(buf) - 1, 0); + if (read_size < 0) + return -errno; + + buf[read_size] = 0; + + substr_ksm_zero = strstr(buf, "ksm_zero_pages"); + if (!substr_ksm_zero) + return 0; + + value_pos = strcspn(substr_ksm_zero, "0123456789"); + my_ksm_zero_pages = strtol(substr_ksm_zero + value_pos, NULL, 10); + + return my_ksm_zero_pages; +} + static long ksm_get_full_scans(void) { char buf[10]; @@ -159,6 +188,70 @@ unmap: munmap(map, size); } +static void test_unmerge_zero_pages(void) +{ + const unsigned int size = 2 * MiB; + char *map; + unsigned int offs; + unsigned long pages_expected; + + ksft_print_msg("[RUN] %s\n", __func__); + + if (proc_self_ksm_stat_fd < 0) { + ksft_test_result_skip("open(\"/proc/self/ksm_stat\") failed\n"); + return; + } + if (ksm_use_zero_pages_fd < 0) { + ksft_test_result_skip("open \"/sys/kernel/mm/ksm/use_zero_pages\" failed\n"); + return; + } + if (write(ksm_use_zero_pages_fd, "1", 1) != 1) { + ksft_test_result_skip("write \"/sys/kernel/mm/ksm/use_zero_pages\" failed\n"); + return; + } + + /* Let KSM deduplicate zero pages. */ + map = mmap_and_merge_range(0x00, size, false); + if (map == MAP_FAILED) + return; + + /* Check if ksm_zero_pages is updated correctly after KSM merging */ + pages_expected = size / pagesize; + if (pages_expected != get_my_ksm_zero_pages()) { + ksft_test_result_fail("'ksm_zero_pages' updated after merging\n"); + goto unmap; + } + + /* Try to unmerge half of the region */ + if (madvise(map, size / 2, MADV_UNMERGEABLE)) { + ksft_test_result_fail("MADV_UNMERGEABLE failed\n"); + goto unmap; + } + + /* Check if ksm_zero_pages is updated correctly after unmerging */ + pages_expected /= 2; + if (pages_expected != get_my_ksm_zero_pages()) { + ksft_test_result_fail("'ksm_zero_pages' updated after unmerging\n"); + goto unmap; + } + + /* Trigger unmerging of the other half by writing to the pages. */ + for (offs = size / 2; offs < size; offs += pagesize) + *((unsigned int *)&map[offs]) = offs; + + /* Now we should have no zeropages remaining. 
*/ + if (get_my_ksm_zero_pages()) { + ksft_test_result_fail("'ksm_zero_pages' updated after write fault\n"); + goto unmap; + } + + /* Check if ksm zero pages are really unmerged */ + ksft_test_result(!range_maps_duplicates(map, size), + "KSM zero pages were unmerged\n"); +unmap: + munmap(map, size); +} + static void test_unmerge_discarded(void) { const unsigned int size = 2 * MiB; @@ -358,7 +451,7 @@ unmap: int main(int argc, char **argv) { - unsigned int tests = 5; + unsigned int tests = 6; int err; #ifdef __NR_userfaultfd @@ -379,8 +472,11 @@ int main(int argc, char **argv) pagemap_fd = open("/proc/self/pagemap", O_RDONLY); if (pagemap_fd < 0) ksft_exit_skip("open(\"/proc/self/pagemap\") failed\n"); + proc_self_ksm_stat_fd = open("/proc/self/ksm_stat", O_RDONLY); + ksm_use_zero_pages_fd = open("/sys/kernel/mm/ksm/use_zero_pages", O_RDWR); test_unmerge(); + test_unmerge_zero_pages(); test_unmerge_discarded(); #ifdef __NR_userfaultfd test_unmerge_uffd_wp(); -- cgit v1.2.3 From badbbcd76545c58eff64bb1548f7f834a30dc52a Mon Sep 17 00:00:00 2001 From: Jeff Xu Date: Wed, 5 Jul 2023 06:33:15 +0000 Subject: selftests/memfd: sysctl: fix MEMFD_NOEXEC_SCOPE_NOEXEC_ENFORCED Add selftest for sysctl vm.memfd_noexec is 2 (MEMFD_NOEXEC_SCOPE_NOEXEC_ENFORCED) memfd_create(.., MFD_EXEC) should fail in this case. Link: https://lkml.kernel.org/r/20230705063315.3680666-3-jeffxu@google.com Reported-by: Dominique Martinet Closes: https://lore.kernel.org/linux-mm/CABi2SkXUX_QqTQ10Yx9bBUGpN1wByOi_=gZU6WEy5a8MaQY3Jw@mail.gmail.com/T/ Signed-off-by: Jeff Xu Cc: Daniel Verkamp Cc: Dmitry Torokhov Cc: Hugh Dickins Cc: Jann Horn Cc: Jorge Lucangeli Obes Cc: Kees Cook Cc: kernel test robot Cc: Mike Kravetz Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/testing/selftests/memfd/memfd_test.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'tools') diff --git a/tools/testing/selftests/memfd/memfd_test.c b/tools/testing/selftests/memfd/memfd_test.c index dba0e8ba002f..dbdd9ec5e397 100644 --- a/tools/testing/selftests/memfd/memfd_test.c +++ b/tools/testing/selftests/memfd/memfd_test.c @@ -1147,6 +1147,11 @@ static void test_sysctl_child(void) sysctl_assert_write("2"); mfd_fail_new("kern_memfd_sysctl_2", MFD_CLOEXEC | MFD_ALLOW_SEALING); + mfd_fail_new("kern_memfd_sysctl_2_MFD_EXEC", + MFD_CLOEXEC | MFD_EXEC); + fd = mfd_assert_new("", 0, MFD_NOEXEC_SEAL); + close(fd); + sysctl_fail_write("0"); sysctl_fail_write("1"); } -- cgit v1.2.3 From 7cf0f9e83769cb7862dff0221a3ace67d9b2ed9f Mon Sep 17 00:00:00 2001 From: Axel Rasmussen Date: Fri, 7 Jul 2023 14:55:39 -0700 Subject: selftests/mm: refactor uffd_poll_thread to allow custom fault handlers Previously, we had "one fault handler to rule them all", which used several branches to deal with all of the scenarios required by all of the various tests. In upcoming patches, I plan to add a new test, which has its own slightly different fault handling logic. Instead of continuing to add cruft to the existing fault handler, let's allow tests to define custom ones, separate from other tests. Link: https://lkml.kernel.org/r/20230707215540.2324998-8-axelrasmussen@google.com Signed-off-by: Axel Rasmussen Acked-by: Peter Xu Cc: Al Viro Cc: Brian Geffon Cc: Christian Brauner Cc: David Hildenbrand Cc: Gaosheng Cui Cc: Huang, Ying Cc: Hugh Dickins Cc: James Houghton Cc: Jan Alexander Steffens (heftig) Cc: Jiaqi Yan Cc: Jonathan Corbet Cc: Kefeng Wang Cc: Liam R. 
Howlett Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Muchun Song Cc: Nadav Amit Cc: Naoya Horiguchi Cc: Ryan Roberts Cc: Shuah Khan Cc: Suleiman Souhlal Cc: Suren Baghdasaryan Cc: T.J. Alumbaugh Cc: Yu Zhao Cc: ZhangPeng Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/uffd-common.c | 5 ++++- tools/testing/selftests/mm/uffd-common.h | 3 +++ tools/testing/selftests/mm/uffd-stress.c | 8 ++++---- 3 files changed, 11 insertions(+), 5 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/mm/uffd-common.c b/tools/testing/selftests/mm/uffd-common.c index ba20d7504022..02b89860e193 100644 --- a/tools/testing/selftests/mm/uffd-common.c +++ b/tools/testing/selftests/mm/uffd-common.c @@ -499,6 +499,9 @@ void *uffd_poll_thread(void *arg) int ret; char tmp_chr; + if (!args->handle_fault) + args->handle_fault = uffd_handle_page_fault; + pollfd[0].fd = uffd; pollfd[0].events = POLLIN; pollfd[1].fd = pipefd[cpu*2]; @@ -527,7 +530,7 @@ void *uffd_poll_thread(void *arg) err("unexpected msg event %u\n", msg.event); break; case UFFD_EVENT_PAGEFAULT: - uffd_handle_page_fault(&msg, args); + args->handle_fault(&msg, args); break; case UFFD_EVENT_FORK: close(uffd); diff --git a/tools/testing/selftests/mm/uffd-common.h b/tools/testing/selftests/mm/uffd-common.h index 197f5262fe0d..7c4fa964c3b0 100644 --- a/tools/testing/selftests/mm/uffd-common.h +++ b/tools/testing/selftests/mm/uffd-common.h @@ -77,6 +77,9 @@ struct uffd_args { unsigned long missing_faults; unsigned long wp_faults; unsigned long minor_faults; + + /* A custom fault handler; defaults to uffd_handle_page_fault. */ + void (*handle_fault)(struct uffd_msg *msg, struct uffd_args *args); }; struct uffd_test_ops { diff --git a/tools/testing/selftests/mm/uffd-stress.c b/tools/testing/selftests/mm/uffd-stress.c index 995ff13e74c7..73ebb97c7026 100644 --- a/tools/testing/selftests/mm/uffd-stress.c +++ b/tools/testing/selftests/mm/uffd-stress.c @@ -189,10 +189,8 @@ static int stress(struct uffd_args *args) locking_thread, (void *)cpu)) return 1; if (bounces & BOUNCE_POLL) { - if (pthread_create(&uffd_threads[cpu], &attr, - uffd_poll_thread, - (void *)&args[cpu])) - return 1; + if (pthread_create(&uffd_threads[cpu], &attr, uffd_poll_thread, &args[cpu])) + err("uffd_poll_thread create"); } else { if (pthread_create(&uffd_threads[cpu], &attr, uffd_read_thread, @@ -250,6 +248,8 @@ static int userfaultfd_stress(void) struct uffd_args args[nr_cpus]; uint64_t mem_size = nr_pages * page_size; + memset(args, 0, sizeof(struct uffd_args) * nr_cpus); + if (uffd_test_ctx_init(UFFD_FEATURE_WP_UNPOPULATED, NULL)) err("context init failed"); -- cgit v1.2.3 From 99aa77215ad0254bf15f84e58fe9f3f1d942ff0f Mon Sep 17 00:00:00 2001 From: Axel Rasmussen Date: Fri, 7 Jul 2023 14:55:40 -0700 Subject: selftests/mm: add uffd unit test for UFFDIO_POISON The test is pretty basic, and exercises UFFDIO_POISON straightforwardly. We register a region with userfaultfd, in missing fault mode. For each fault, we either UFFDIO_COPY a zeroed page (odd pages) or UFFDIO_POISON (even pages). We do this mix to test "something like a real use case", where guest memory would be some mix of poisoned and non-poisoned pages. We read each page in the region, and assert that the odd pages are zeroed as expected, and the even pages yield a SIGBUS as expected. Why UFFDIO_COPY instead of UFFDIO_ZEROPAGE? Because hugetlb doesn't support UFFDIO_ZEROPAGE, and we don't want to have special case code. 
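Once merged, the new case is exercised through the existing unit-test binary; a sketch of a typical invocation (the category name matches run_vmtests.sh, and the case is skipped on kernels that do not advertise UFFD_FEATURE_POISON):

    ./run_vmtests.sh -t userfaultfd   # runs uffd-unit-tests, including "poison"
    ./uffd-unit-tests                 # or run the binary directly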
Link: https://lkml.kernel.org/r/20230707215540.2324998-9-axelrasmussen@google.com Signed-off-by: Axel Rasmussen Acked-by: Peter Xu Cc: Al Viro Cc: Brian Geffon Cc: Christian Brauner Cc: David Hildenbrand Cc: Gaosheng Cui Cc: Huang, Ying Cc: Hugh Dickins Cc: James Houghton Cc: Jan Alexander Steffens (heftig) Cc: Jiaqi Yan Cc: Jonathan Corbet Cc: Kefeng Wang Cc: Liam R. Howlett Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Muchun Song Cc: Nadav Amit Cc: Naoya Horiguchi Cc: Ryan Roberts Cc: Shuah Khan Cc: Suleiman Souhlal Cc: Suren Baghdasaryan Cc: T.J. Alumbaugh Cc: Yu Zhao Cc: ZhangPeng Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/uffd-unit-tests.c | 117 +++++++++++++++++++++++++++ 1 file changed, 117 insertions(+) (limited to 'tools') diff --git a/tools/testing/selftests/mm/uffd-unit-tests.c b/tools/testing/selftests/mm/uffd-unit-tests.c index 04d91f144d1c..2709a34a39c5 100644 --- a/tools/testing/selftests/mm/uffd-unit-tests.c +++ b/tools/testing/selftests/mm/uffd-unit-tests.c @@ -951,6 +951,117 @@ static void uffd_zeropage_test(uffd_test_args_t *args) uffd_test_pass(); } +static void uffd_register_poison(int uffd, void *addr, uint64_t len) +{ + uint64_t ioctls = 0; + uint64_t expected = (1 << _UFFDIO_COPY) | (1 << _UFFDIO_POISON); + + if (uffd_register_with_ioctls(uffd, addr, len, true, + false, false, &ioctls)) + err("poison register fail"); + + if ((ioctls & expected) != expected) + err("registered area doesn't support COPY and POISON ioctls"); +} + +static void do_uffdio_poison(int uffd, unsigned long offset) +{ + struct uffdio_poison uffdio_poison = { 0 }; + int ret; + __s64 res; + + uffdio_poison.range.start = (unsigned long) area_dst + offset; + uffdio_poison.range.len = page_size; + uffdio_poison.mode = 0; + ret = ioctl(uffd, UFFDIO_POISON, &uffdio_poison); + res = uffdio_poison.updated; + + if (ret) + err("UFFDIO_POISON error: %"PRId64, (int64_t)res); + else if (res != page_size) + err("UFFDIO_POISON unexpected size: %"PRId64, (int64_t)res); +} + +static void uffd_poison_handle_fault( + struct uffd_msg *msg, struct uffd_args *args) +{ + unsigned long offset; + + if (msg->event != UFFD_EVENT_PAGEFAULT) + err("unexpected msg event %u", msg->event); + + if (msg->arg.pagefault.flags & + (UFFD_PAGEFAULT_FLAG_WP | UFFD_PAGEFAULT_FLAG_MINOR)) + err("unexpected fault type %llu", msg->arg.pagefault.flags); + + offset = (char *)(unsigned long)msg->arg.pagefault.address - area_dst; + offset &= ~(page_size-1); + + /* Odd pages -> copy zeroed page; even pages -> poison. 
*/ + if (offset & page_size) + copy_page(uffd, offset, false); + else + do_uffdio_poison(uffd, offset); +} + +static void uffd_poison_test(uffd_test_args_t *targs) +{ + pthread_t uffd_mon; + char c; + struct uffd_args args = { 0 }; + struct sigaction act = { 0 }; + unsigned long nr_sigbus = 0; + unsigned long nr; + + fcntl(uffd, F_SETFL, uffd_flags | O_NONBLOCK); + + uffd_register_poison(uffd, area_dst, nr_pages * page_size); + memset(area_src, 0, nr_pages * page_size); + + args.handle_fault = uffd_poison_handle_fault; + if (pthread_create(&uffd_mon, NULL, uffd_poll_thread, &args)) + err("uffd_poll_thread create"); + + sigbuf = &jbuf; + act.sa_sigaction = sighndl; + act.sa_flags = SA_SIGINFO; + if (sigaction(SIGBUS, &act, 0)) + err("sigaction"); + + for (nr = 0; nr < nr_pages; ++nr) { + unsigned long offset = nr * page_size; + const char *bytes = (const char *) area_dst + offset; + const char *i; + + if (sigsetjmp(*sigbuf, 1)) { + /* + * Access below triggered a SIGBUS, which was caught by + * sighndl, which then jumped here. Count this SIGBUS, + * and move on to next page. + */ + ++nr_sigbus; + continue; + } + + for (i = bytes; i < bytes + page_size; ++i) { + if (*i) + err("nonzero byte in area_dst (%p) at %p: %u", + area_dst, i, *i); + } + } + + if (write(pipefd[1], &c, sizeof(c)) != sizeof(c)) + err("pipe write"); + if (pthread_join(uffd_mon, NULL)) + err("pthread_join()"); + + if (nr_sigbus != nr_pages / 2) + err("expected to receive %lu SIGBUS, actually received %lu", + nr_pages / 2, nr_sigbus); + + uffd_test_pass(); +} + /* * Test the returned uffdio_register.ioctls with different register modes. * Note that _UFFDIO_ZEROPAGE is tested separately in the zeropage test. @@ -1126,6 +1237,12 @@ uffd_test_case_t uffd_tests[] = { UFFD_FEATURE_PAGEFAULT_FLAG_WP | UFFD_FEATURE_WP_HUGETLBFS_SHMEM, }, + { + .name = "poison", + .uffd_fn = uffd_poison_test, + .mem_targets = MEM_ALL, + .uffd_feature_required = UFFD_FEATURE_POISON, + }, }; static void usage(const char *prog) -- cgit v1.2.3 From ba91e7e5d15a22946e6531c898e197e128bb6634 Mon Sep 17 00:00:00 2001 From: Jiaqi Yan Date: Thu, 13 Jul 2023 00:18:33 +0000 Subject: selftests/mm: add tests for HWPOISON hugetlbfs read Add tests for the improvement made to read operation on HWPOISON hugetlb page with different read granularities. For each chunk size, three read scenarios are tested: 1. Simple regression test on read without HWPOISON. 2. Sequential read page by page should succeed until encounters the 1st raw HWPOISON subpage. 3. After skip a raw HWPOISON subpage by lseek, read()s always succeed. 
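A sketch of the runtime prerequisites (the count is illustrative): the test creates its file with memfd_create(MFD_HUGETLB), so the default hugepage pool must be populated, and MADV_HWPOISON additionally needs CONFIG_MEMORY_FAILURE and root:

    echo 8 > /proc/sys/vm/nr_hugepages
    sudo ./hugetlb-read-hwpoison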
Link: https://lkml.kernel.org/r/20230713001833.3778937-5-jiaqiyan@google.com Signed-off-by: Jiaqi Yan Acked-by: Mike Kravetz Reviewed-by: Naoya Horiguchi Cc: James Houghton Cc: Miaohe Lin Cc: Muchun Song Cc: Yang Shi Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/.gitignore | 1 + tools/testing/selftests/mm/Makefile | 1 + tools/testing/selftests/mm/hugetlb-read-hwpoison.c | 322 +++++++++++++++++++++ 3 files changed, 324 insertions(+) create mode 100644 tools/testing/selftests/mm/hugetlb-read-hwpoison.c (limited to 'tools') diff --git a/tools/testing/selftests/mm/.gitignore b/tools/testing/selftests/mm/.gitignore index 7e2a982383c0..cdc9ce4426b9 100644 --- a/tools/testing/selftests/mm/.gitignore +++ b/tools/testing/selftests/mm/.gitignore @@ -5,6 +5,7 @@ hugepage-mremap hugepage-shm hugepage-vmemmap hugetlb-madvise +hugetlb-read-hwpoison khugepaged map_hugetlb map_populate diff --git a/tools/testing/selftests/mm/Makefile b/tools/testing/selftests/mm/Makefile index 66d7c07dc177..b7fce9073279 100644 --- a/tools/testing/selftests/mm/Makefile +++ b/tools/testing/selftests/mm/Makefile @@ -41,6 +41,7 @@ TEST_GEN_PROGS += gup_longterm TEST_GEN_PROGS += gup_test TEST_GEN_PROGS += hmm-tests TEST_GEN_PROGS += hugetlb-madvise +TEST_GEN_PROGS += hugetlb-read-hwpoison TEST_GEN_PROGS += hugepage-mmap TEST_GEN_PROGS += hugepage-mremap TEST_GEN_PROGS += hugepage-shm diff --git a/tools/testing/selftests/mm/hugetlb-read-hwpoison.c b/tools/testing/selftests/mm/hugetlb-read-hwpoison.c new file mode 100644 index 000000000000..ba6cc6f9cabc --- /dev/null +++ b/tools/testing/selftests/mm/hugetlb-read-hwpoison.c @@ -0,0 +1,322 @@ +// SPDX-License-Identifier: GPL-2.0 + +#define _GNU_SOURCE +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "../kselftest.h" + +#define PREFIX " ... " +#define ERROR_PREFIX " !!! " + +#define MAX_WRITE_READ_CHUNK_SIZE (getpagesize() * 16) +#define MAX(a, b) (((a) > (b)) ? 
(a) : (b)) + +enum test_status { + TEST_PASSED = 0, + TEST_FAILED = 1, + TEST_SKIPPED = 2, +}; + +static char *status_to_str(enum test_status status) +{ + switch (status) { + case TEST_PASSED: + return "TEST_PASSED"; + case TEST_FAILED: + return "TEST_FAILED"; + case TEST_SKIPPED: + return "TEST_SKIPPED"; + default: + return "TEST_???"; + } +} + +static int setup_filemap(char *filemap, size_t len, size_t wr_chunk_size) +{ + char iter = 0; + + for (size_t offset = 0; offset < len; + offset += wr_chunk_size) { + iter++; + memset(filemap + offset, iter, wr_chunk_size); + } + + return 0; +} + +static bool verify_chunk(char *buf, size_t len, char val) +{ + size_t i; + + for (i = 0; i < len; ++i) { + if (buf[i] != val) { + printf(PREFIX ERROR_PREFIX "check fail: buf[%lu] = %u != %u\n", + i, buf[i], val); + return false; + } + } + + return true; +} + +static bool seek_read_hugepage_filemap(int fd, size_t len, size_t wr_chunk_size, + off_t offset, size_t expected) +{ + char buf[MAX_WRITE_READ_CHUNK_SIZE]; + ssize_t ret_count = 0; + ssize_t total_ret_count = 0; + char val = offset / wr_chunk_size + offset % wr_chunk_size; + + printf(PREFIX PREFIX "init val=%u with offset=0x%lx\n", val, offset); + printf(PREFIX PREFIX "expect to read 0x%lx bytes of data in total\n", + expected); + if (lseek(fd, offset, SEEK_SET) < 0) { + perror(PREFIX ERROR_PREFIX "seek failed"); + return false; + } + + while (offset + total_ret_count < len) { + ret_count = read(fd, buf, wr_chunk_size); + if (ret_count == 0) { + printf(PREFIX PREFIX "read reach end of the file\n"); + break; + } else if (ret_count < 0) { + perror(PREFIX ERROR_PREFIX "read failed"); + break; + } + ++val; + if (!verify_chunk(buf, ret_count, val)) + return false; + + total_ret_count += ret_count; + } + printf(PREFIX PREFIX "actually read 0x%lx bytes of data in total\n", + total_ret_count); + + return total_ret_count == expected; +} + +static bool read_hugepage_filemap(int fd, size_t len, + size_t wr_chunk_size, size_t expected) +{ + char buf[MAX_WRITE_READ_CHUNK_SIZE]; + ssize_t ret_count = 0; + ssize_t total_ret_count = 0; + char val = 0; + + printf(PREFIX PREFIX "expect to read 0x%lx bytes of data in total\n", + expected); + while (total_ret_count < len) { + ret_count = read(fd, buf, wr_chunk_size); + if (ret_count == 0) { + printf(PREFIX PREFIX "read reach end of the file\n"); + break; + } else if (ret_count < 0) { + perror(PREFIX ERROR_PREFIX "read failed"); + break; + } + ++val; + if (!verify_chunk(buf, ret_count, val)) + return false; + + total_ret_count += ret_count; + } + printf(PREFIX PREFIX "actually read 0x%lx bytes of data in total\n", + total_ret_count); + + return total_ret_count == expected; +} + +static enum test_status +test_hugetlb_read(int fd, size_t len, size_t wr_chunk_size) +{ + enum test_status status = TEST_SKIPPED; + char *filemap = NULL; + + if (ftruncate(fd, len) < 0) { + perror(PREFIX ERROR_PREFIX "ftruncate failed"); + return status; + } + + filemap = mmap(NULL, len, PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_POPULATE, fd, 0); + if (filemap == MAP_FAILED) { + perror(PREFIX ERROR_PREFIX "mmap for primary mapping failed"); + goto done; + } + + setup_filemap(filemap, len, wr_chunk_size); + status = TEST_FAILED; + + if (read_hugepage_filemap(fd, len, wr_chunk_size, len)) + status = TEST_PASSED; + + munmap(filemap, len); +done: + if (ftruncate(fd, 0) < 0) { + perror(PREFIX ERROR_PREFIX "ftruncate back to 0 failed"); + status = TEST_FAILED; + } + + return status; +} + +static enum test_status +test_hugetlb_read_hwpoison(int fd, 
size_t len, size_t wr_chunk_size, + bool skip_hwpoison_page) +{ + enum test_status status = TEST_SKIPPED; + char *filemap = NULL; + char *hwp_addr = NULL; + const unsigned long pagesize = getpagesize(); + + if (ftruncate(fd, len) < 0) { + perror(PREFIX ERROR_PREFIX "ftruncate failed"); + return status; + } + + filemap = mmap(NULL, len, PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_POPULATE, fd, 0); + if (filemap == MAP_FAILED) { + perror(PREFIX ERROR_PREFIX "mmap for primary mapping failed"); + goto done; + } + + setup_filemap(filemap, len, wr_chunk_size); + status = TEST_FAILED; + + /* + * Poisoned hugetlb page layout (assume hugepagesize=2MB): + * |<---------------------- 1MB ---------------------->| + * |<---- healthy page ---->|<---- HWPOISON page ----->| + * |<------------------- (1MB - 8KB) ----------------->| + */ + hwp_addr = filemap + len / 2 + pagesize; + if (madvise(hwp_addr, pagesize, MADV_HWPOISON) < 0) { + perror(PREFIX ERROR_PREFIX "MADV_HWPOISON failed"); + goto unmap; + } + + if (!skip_hwpoison_page) { + /* + * Userspace should be able to read (1MB + 1 page) from + * the beginning of the HWPOISONed hugepage. + */ + if (read_hugepage_filemap(fd, len, wr_chunk_size, + len / 2 + pagesize)) + status = TEST_PASSED; + } else { + /* + * Userspace should be able to read (1MB - 2 pages) from + * HWPOISONed hugepage. + */ + if (seek_read_hugepage_filemap(fd, len, wr_chunk_size, + len / 2 + MAX(2 * pagesize, wr_chunk_size), + len / 2 - MAX(2 * pagesize, wr_chunk_size))) + status = TEST_PASSED; + } + +unmap: + munmap(filemap, len); +done: + if (ftruncate(fd, 0) < 0) { + perror(PREFIX ERROR_PREFIX "ftruncate back to 0 failed"); + status = TEST_FAILED; + } + + return status; +} + +static int create_hugetlbfs_file(struct statfs *file_stat) +{ + int fd; + + fd = memfd_create("hugetlb_tmp", MFD_HUGETLB); + if (fd < 0) { + perror(PREFIX ERROR_PREFIX "could not open hugetlbfs file"); + return -1; + } + + memset(file_stat, 0, sizeof(*file_stat)); + if (fstatfs(fd, file_stat)) { + perror(PREFIX ERROR_PREFIX "fstatfs failed"); + goto close; + } + if (file_stat->f_type != HUGETLBFS_MAGIC) { + printf(PREFIX ERROR_PREFIX "not hugetlbfs file\n"); + goto close; + } + + return fd; +close: + close(fd); + return -1; +} + +int main(void) +{ + int fd; + struct statfs file_stat; + enum test_status status; + /* Test read() in different granularity. 
*/ + size_t wr_chunk_sizes[] = { + getpagesize() / 2, getpagesize(), + getpagesize() * 2, getpagesize() * 4 + }; + size_t i; + + for (i = 0; i < ARRAY_SIZE(wr_chunk_sizes); ++i) { + printf("Write/read chunk size=0x%lx\n", + wr_chunk_sizes[i]); + + fd = create_hugetlbfs_file(&file_stat); + if (fd < 0) + goto create_failure; + printf(PREFIX "HugeTLB read regression test...\n"); + status = test_hugetlb_read(fd, file_stat.f_bsize, + wr_chunk_sizes[i]); + printf(PREFIX "HugeTLB read regression test...%s\n", + status_to_str(status)); + close(fd); + if (status == TEST_FAILED) + return -1; + + fd = create_hugetlbfs_file(&file_stat); + if (fd < 0) + goto create_failure; + printf(PREFIX "HugeTLB read HWPOISON test...\n"); + status = test_hugetlb_read_hwpoison(fd, file_stat.f_bsize, + wr_chunk_sizes[i], false); + printf(PREFIX "HugeTLB read HWPOISON test...%s\n", + status_to_str(status)); + close(fd); + if (status == TEST_FAILED) + return -1; + + fd = create_hugetlbfs_file(&file_stat); + if (fd < 0) + goto create_failure; + printf(PREFIX "HugeTLB seek then read HWPOISON test...\n"); + status = test_hugetlb_read_hwpoison(fd, file_stat.f_bsize, + wr_chunk_sizes[i], true); + printf(PREFIX "HugeTLB seek then read HWPOISON test...%s\n", + status_to_str(status)); + close(fd); + if (status == TEST_FAILED) + return -1; + } + + return 0; + +create_failure: + printf(ERROR_PREFIX "Abort test: failed to create hugetlbfs file\n"); + return -1; +} -- cgit v1.2.3 From 58e2847ad2e6322a25dedf8b4549ff924baf8395 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Mon, 24 Jul 2023 09:25:15 +0100 Subject: selftests: line buffer test program's stdout MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Patch series "selftests/mm fixes for arm64", v3. Given my on-going work on large anon folios and contpte mappings, I decided it would be a good idea to start running mm selftests to help guard against regressions. However, it soon became clear that I couldn't get the suite to run cleanly on arm64 with a vanilla v6.5-rc1 kernel (perhaps I'm just doing it wrong??), so got stuck in a rabbit hole trying to debug and fix all the issues. Some were down to misconfigurations, but I also found a number of issues with the tests and even a couple of issues with the kernel. This patch (of 8): The selftests runner pipes the test program's stdout to tap_prefix. The presence of the pipe means that the test program sets its stdout to be fully buffered (as aposed to line buffered when directly connected to the terminal). The block buffering means that there is often content in the buffer at fork() time, which causes the output to end up duplicated. This was causing problems for mm:cow where test results were duplicated 20-30x. Solve this by using `stdbuf`, when available to force the test program to use line buffered mode. This means previously printf'ed results are flushed out of the program before any fork(). Additionally, explicitly set line buffer mode in ksft_print_header(), which means that all test programs that use the ksft framework will benefit even if stdbuf is not present on the system. 
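For illustration, the wrapper the runner now prepends when available behaves like the manual invocation below (the test name is a placeholder); piping is what would otherwise switch stdio to full buffering, and stdbuf forces it back to line buffering:

    /usr/bin/stdbuf --output=L ./some-mm-test | sed -e 's/^/# /'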
[ryan.roberts@arm.com: add setvbuf() to set buffering mode] Link: https://lkml.kernel.org/r/20230726070655.2713530-1-ryan.roberts@arm.com Link: https://lkml.kernel.org/r/20230724082522.1202616-1-ryan.roberts@arm.com Link: https://lkml.kernel.org/r/20230724082522.1202616-2-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Reviewed-by: Mark Brown Cc: David Hildenbrand Cc: Florent Revest Cc: Jérôme Glisse Cc: John Hubbard Cc: Peter Xu Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/include/nolibc/stdio.h | 24 ++++++++++++++++++++++++ tools/testing/selftests/kselftest.h | 9 +++++++++ tools/testing/selftests/kselftest/runner.sh | 7 +++++-- 3 files changed, 38 insertions(+), 2 deletions(-) (limited to 'tools') diff --git a/tools/include/nolibc/stdio.h b/tools/include/nolibc/stdio.h index 0eef91daf289..a3778aff4fa9 100644 --- a/tools/include/nolibc/stdio.h +++ b/tools/include/nolibc/stdio.h @@ -21,6 +21,11 @@ #define EOF (-1) #endif +/* Buffering mode used by setvbuf. */ +#define _IOFBF 0 /* Fully buffered. */ +#define _IOLBF 1 /* Line buffered. */ +#define _IONBF 2 /* No buffering. */ + /* just define FILE as a non-empty type. The value of the pointer gives * the FD: FILE=~fd for fd>=0 or NULL for fd<0. This way positive FILE * are immediately identified as abnormal entries (i.e. possible copies @@ -350,6 +355,25 @@ void perror(const char *msg) fprintf(stderr, "%s%serrno=%d\n", (msg && *msg) ? msg : "", (msg && *msg) ? ": " : "", errno); } +static __attribute__((unused)) +int setvbuf(FILE *stream, char *buf, int mode, size_t size) +{ + /* + * nolibc does not support buffering so this is a nop. Just check mode + * is valid as required by the spec. + */ + switch (mode) { + case _IOFBF: + case _IOLBF: + case _IONBF: + break; + default: + return EOF; + } + + return 0; +} + /* make sure to include all global symbols */ #include "nolibc.h" diff --git a/tools/testing/selftests/kselftest.h b/tools/testing/selftests/kselftest.h index 829be379545a..529d29a35900 100644 --- a/tools/testing/selftests/kselftest.h +++ b/tools/testing/selftests/kselftest.h @@ -113,6 +113,15 @@ static inline int ksft_get_error_cnt(void) { return ksft_cnt.ksft_error; } static inline void ksft_print_header(void) { + /* + * Force line buffering; If stdout is not connected to a terminal, it + * will otherwise default to fully buffered, which can cause output + * duplication if there is content in the buffer when fork()ing. If + * there is a crash, line buffering also means the most recent output + * line will be visible. + */ + setvbuf(stdout, NULL, _IOLBF, 0); + if (!(getenv("KSFT_TAP_LEVEL"))) printf("TAP version 13\n"); } diff --git a/tools/testing/selftests/kselftest/runner.sh b/tools/testing/selftests/kselftest/runner.sh index 1c952d1401d4..261c73cab41b 100644 --- a/tools/testing/selftests/kselftest/runner.sh +++ b/tools/testing/selftests/kselftest/runner.sh @@ -105,15 +105,18 @@ run_one() echo "# Warning: file $TEST is missing!" echo "not ok $test_num $TEST_HDR_MSG" else + if [ -x /usr/bin/stdbuf ]; then + stdbuf="/usr/bin/stdbuf --output=L " + fi eval kselftest_cmd_args="\$${kselftest_cmd_args_ref:-}" - cmd="./$BASENAME_TEST $kselftest_cmd_args" + cmd="$stdbuf ./$BASENAME_TEST $kselftest_cmd_args" if [ ! -x "$TEST" ]; then echo "# Warning: file $TEST is not executable" if [ $(head -n 1 "$TEST" | cut -c -2) = "#!" 
] then interpreter=$(head -n 1 "$TEST" | cut -c 3-) - cmd="$interpreter ./$BASENAME_TEST" + cmd="$stdbuf $interpreter ./$BASENAME_TEST" else echo "not ok $test_num $TEST_HDR_MSG" return -- cgit v1.2.3 From f6dd4e223d8798319d0e2815a468b9fb0a276446 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Mon, 24 Jul 2023 09:25:16 +0100 Subject: selftests/mm: skip soft-dirty tests on arm64 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit arm64 does not support the soft-dirty PTE bit. However, the `soft-dirty` test suite is currently run unconditionally and therefore generates spurious test failures on arm64. There are also some tests in `madv_populate` which assume it is supported. For `soft-dirty` lets disable the whole suite for arm64; it is no longer built and run_vmtests.sh will skip it if its not present. For `madv_populate`, we need a runtime mechanism so that the remaining tests continue to be run. Unfortunately, the only way to determine if the soft-dirty dirty bit is supported is to write to a page, then see if the bit is set in /proc/self/pagemap. But the tests that we want to conditionally execute are testing precicesly this. So if we introduced this feature check, we could accedentally turn a real failure (on a system that claims to support soft-dirty) into a skip. So instead, do the check based on architecture; for arm64, we report that soft-dirty is not supported. Link: https://lkml.kernel.org/r/20230724082522.1202616-3-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Acked-by: David Hildenbrand Cc: Florent Revest Cc: Jérôme Glisse Cc: John Hubbard Cc: Mark Brown Cc: Peter Xu Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/Makefile | 5 ++++- tools/testing/selftests/mm/madv_populate.c | 26 ++++++++++++++++++++++++-- tools/testing/selftests/mm/run_vmtests.sh | 5 ++++- 3 files changed, 32 insertions(+), 4 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/mm/Makefile b/tools/testing/selftests/mm/Makefile index b7fce9073279..f35977a2bbf3 100644 --- a/tools/testing/selftests/mm/Makefile +++ b/tools/testing/selftests/mm/Makefile @@ -64,12 +64,15 @@ TEST_GEN_PROGS += thuge-gen TEST_GEN_PROGS += transhuge-stress TEST_GEN_PROGS += uffd-stress TEST_GEN_PROGS += uffd-unit-tests -TEST_GEN_PROGS += soft-dirty TEST_GEN_PROGS += split_huge_page_test TEST_GEN_PROGS += ksm_tests TEST_GEN_PROGS += ksm_functional_tests TEST_GEN_PROGS += mdwe_test +ifneq ($(ARCH),arm64) +TEST_GEN_PROGS += soft-dirty +endif + ifeq ($(ARCH),x86_64) CAN_BUILD_I386 := $(shell ./../x86/check_cc.sh "$(CC)" ../x86/trivial_32bit_program.c -m32) CAN_BUILD_X86_64 := $(shell ./../x86/check_cc.sh "$(CC)" ../x86/trivial_64bit_program.c) diff --git a/tools/testing/selftests/mm/madv_populate.c b/tools/testing/selftests/mm/madv_populate.c index 60547245e479..17bcb07f19f3 100644 --- a/tools/testing/selftests/mm/madv_populate.c +++ b/tools/testing/selftests/mm/madv_populate.c @@ -264,14 +264,35 @@ static void test_softdirty(void) munmap(addr, SIZE); } +static int system_has_softdirty(void) +{ + /* + * There is no way to check if the kernel supports soft-dirty, other + * than by writing to a page and seeing if the bit was set. But the + * tests are intended to check that the bit gets set when it should, so + * doing that check would turn a potentially legitimate fail into a + * skip. Fortunately, we know for sure that arm64 does not support + * soft-dirty. So for now, let's just use the arch as a corse guide. 
+ */ +#if defined(__aarch64__) + return 0; +#else + return 1; +#endif +} + int main(int argc, char **argv) { + int nr_tests = 16; int err; pagesize = getpagesize(); + if (system_has_softdirty()) + nr_tests += 5; + ksft_print_header(); - ksft_set_plan(21); + ksft_set_plan(nr_tests); sense_support(); test_prot_read(); @@ -279,7 +300,8 @@ int main(int argc, char **argv) test_holes(); test_populate_read(); test_populate_write(); - test_softdirty(); + if (system_has_softdirty()) + test_softdirty(); err = ksft_get_fail_cnt(); if (err) diff --git a/tools/testing/selftests/mm/run_vmtests.sh b/tools/testing/selftests/mm/run_vmtests.sh index 9666c0c171ab..4eaefc3d2e59 100755 --- a/tools/testing/selftests/mm/run_vmtests.sh +++ b/tools/testing/selftests/mm/run_vmtests.sh @@ -322,7 +322,10 @@ then CATEGORY="pkey" run_test ./protection_keys_64 fi -CATEGORY="soft_dirty" run_test ./soft-dirty +if [ -x ./soft-dirty ] +then + CATEGORY="soft_dirty" run_test ./soft-dirty +fi # COW tests CATEGORY="cow" run_test ./cow -- cgit v1.2.3 From e515bce98deb06b392ca79b5c342a849372be7ae Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Mon, 24 Jul 2023 09:25:17 +0100 Subject: selftests/mm: enable mrelease_test for arm64 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit mrelease_test defaults to defining __NR_pidfd_open and __NR_process_mrelease syscall numbers to -1, if they are not defined anywhere else, and the suite would then be marked as skipped as a result. arm64 (at least the stock debian toolchain that I'm using) requires including to pull in the defines for these syscalls. So let's add this header. With this in place, the test is passing on arm64. Link: https://lkml.kernel.org/r/20230724082522.1202616-4-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Reviewed-by: David Hildenbrand Cc: Florent Revest Cc: Jérôme Glisse Cc: John Hubbard Cc: Mark Brown Cc: Peter Xu Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/mrelease_test.c | 1 + 1 file changed, 1 insertion(+) (limited to 'tools') diff --git a/tools/testing/selftests/mm/mrelease_test.c b/tools/testing/selftests/mm/mrelease_test.c index dca21042b679..d822004a374e 100644 --- a/tools/testing/selftests/mm/mrelease_test.c +++ b/tools/testing/selftests/mm/mrelease_test.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include -- cgit v1.2.3 From 6e16f5133501440699dcca3c5aba367cf6f9c227 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Mon, 24 Jul 2023 09:25:18 +0100 Subject: selftests/mm: fix thuge-gen test bugs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit thuge-gen was previously only munmapping part of the mmapped buffer, which caused us to run out of 1G huge pages for a later part of the test. Fix this by munmapping the whole buffer. Based on the code, it looks like a typo rather than an intention to keep some of the buffer mapped. thuge-gen was also calling mmap with SHM_HUGETLB flag (bit 11 set), which is actually MAP_DENYWRITE in mmap context. The man page says this flag is ignored in modern kernels. I'm pretty sure from the context that the author intended to pass the MAP_HUGETLB flag so I've fixed that up too. 
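For context, thuge-gen expects pre-reserved pools for the hugepage sizes it finds under /sys/kernel/mm/hugepages/; a sketch of setting that up on x86_64 (counts are illustrative, and 1G pages may need to be reserved early at boot on fragmented systems):

    echo 4 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
    echo 4 > /sys/kernel/mm/hugepages/hugepages-1048576kB/nr_hugepages
    ./thuge-gen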
Link: https://lkml.kernel.org/r/20230724082522.1202616-5-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Reviewed-by: David Hildenbrand Cc: Florent Revest Cc: Jérôme Glisse Cc: John Hubbard Cc: Mark Brown Cc: Peter Xu Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/thuge-gen.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/mm/thuge-gen.c b/tools/testing/selftests/mm/thuge-gen.c index 380ab5f0a534..16ed4dfa7359 100644 --- a/tools/testing/selftests/mm/thuge-gen.c +++ b/tools/testing/selftests/mm/thuge-gen.c @@ -139,7 +139,7 @@ void test_mmap(unsigned long size, unsigned flags) before, after, before - after, size); assert(size == getpagesize() || (before - after) == NUM_PAGES); show(size); - err = munmap(map, size); + err = munmap(map, size * NUM_PAGES); assert(!err); } @@ -222,7 +222,7 @@ int main(void) test_mmap(ps, MAP_HUGETLB | arg); } printf("Testing default huge mmap\n"); - test_mmap(default_hps, SHM_HUGETLB); + test_mmap(default_hps, MAP_HUGETLB); puts("Testing non-huge shmget"); test_shmget(getpagesize(), 0); -- cgit v1.2.3 From 49f09526b16361db6af387726082ffdda796c101 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Mon, 24 Jul 2023 09:25:19 +0100 Subject: selftests/mm: va_high_addr_switch should skip unsupported arm64 configs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit va_high_addr_switch has a mechanism to determine if the tests should be run or skipped (supported_arch()). This currently returns unconditionally true for arm64. However, va_high_addr_switch also requires a large virtual address space for the tests to run, otherwise they spuriously fail. Since arm64 can only support VA > 48 bits when the page size is 64K, let's decide whether we should skip the test suite based on the page size. This reduces noise when running on 4K and 16K kernels. Link: https://lkml.kernel.org/r/20230724082522.1202616-6-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Reviewed-by: David Hildenbrand Cc: Florent Revest Cc: Jérôme Glisse Cc: John Hubbard Cc: Mark Brown Cc: Peter Xu Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/va_high_addr_switch.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'tools') diff --git a/tools/testing/selftests/mm/va_high_addr_switch.c b/tools/testing/selftests/mm/va_high_addr_switch.c index 7cfaf4a74c57..cfbc501290d3 100644 --- a/tools/testing/selftests/mm/va_high_addr_switch.c +++ b/tools/testing/selftests/mm/va_high_addr_switch.c @@ -292,7 +292,7 @@ static int supported_arch(void) #elif defined(__x86_64__) return 1; #elif defined(__aarch64__) - return 1; + return getpagesize() == PAGE_SIZE; #else return 0; #endif -- cgit v1.2.3 From 000303329752ffff08a6c120d21b8dc382bc9575 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Mon, 24 Jul 2023 09:25:20 +0100 Subject: selftests/mm: make migration test robust to failure MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The `migration` test currently has a number of robustness problems that cause it to hang and leak resources. Timeout: There are 3 tests, which each previously ran for 60 seconds. However, the timeout in mm/settings for a single test binary was set to 45 seconds. So when run using run_kselftest.sh, the top level timeout would trigger before the test binary was finished. 
Solve this by meeting in the middle; each of the 3 tests now runs for 20 seconds (for a total of 60), and the top level timeout is set to 90 seconds. Leaking child processes: the `shared_anon` test fork()s some children but then an ASSERT() fires before the test kills those children. The assert causes immediate exit of the parent and leaking of the children. Furthermore, if run using the run_kselftest.sh wrapper, the wrapper would get stuck waiting for those children to exit, which never happens. Solve this by setting the "parent death signal" to SIGHUP in the child, so that the child is killed automatically if the parent dies. With these changes, the test binary now runs to completion on arm64, with 2 tests passing and the `shared_anon` test failing. Link: https://lkml.kernel.org/r/20230724082522.1202616-7-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Reviewed-by: David Hildenbrand Cc: Florent Revest Cc: Jérôme Glisse Cc: John Hubbard Cc: Mark Brown Cc: Peter Xu Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/migration.c | 12 +++++++++--- tools/testing/selftests/mm/settings | 2 +- 2 files changed, 10 insertions(+), 4 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/mm/migration.c b/tools/testing/selftests/mm/migration.c index 379581567f27..6908569ef406 100644 --- a/tools/testing/selftests/mm/migration.c +++ b/tools/testing/selftests/mm/migration.c @@ -10,12 +10,13 @@ #include #include #include +#include #include #include #include #define TWOMEG (2<<20) -#define RUNTIME (60) +#define RUNTIME (20) #define ALIGN(x, a) (((x) + (a - 1)) & (~((a) - 1))) @@ -155,10 +156,15 @@ TEST_F_TIMEOUT(migration, shared_anon, 2*RUNTIME) memset(ptr, 0xde, TWOMEG); for (i = 0; i < self->nthreads - 1; i++) { pid = fork(); - if (!pid) + if (!pid) { + prctl(PR_SET_PDEATHSIG, SIGHUP); + /* Parent may have died before prctl so check now. */ + if (getppid() == 1) + kill(getpid(), SIGHUP); access_mem(ptr); - else + } else { self->pids[i] = pid; + } } ASSERT_EQ(migrate(ptr, self->n1, self->n2), 0); diff --git a/tools/testing/selftests/mm/settings b/tools/testing/selftests/mm/settings index 9abfc60e9e6f..ba4d85f74cd6 100644 --- a/tools/testing/selftests/mm/settings +++ b/tools/testing/selftests/mm/settings @@ -1 +1 @@ -timeout=45 +timeout=90 -- cgit v1.2.3 From e170621027161a8cd00132a55fe92b41e37a9203 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Mon, 24 Jul 2023 09:25:21 +0100 Subject: selftests/mm: optionally pass duration to transhuge-stress MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Until now, transhuge-stress runs until it's explicitly killed, so when invoked by run_kselftest.sh, it would run until the test timeout, then it would be killed and the test would be marked as failed. Add a new, optional command line parameter that allows the user to specify the duration in seconds that the program should run. The program exits after this duration with a success (0) exit code. If the argument is omitted the old behavior remains. On its own, this doesn't quite solve our problem because run_kselftest.sh does not allow passing parameters to the program under test. But we will shortly move this to run_vmtests.sh, which does allow parameter passing.
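The duration handling added by the diff below follows a familiar pattern; a rough standalone sketch (the argument handling and names here are illustrative, not the actual transhuge-stress code):

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

int main(int argc, char **argv)
{
	/* Hypothetical: optional first argument is the run duration in seconds. */
	int duration = argc > 1 ? atoi(argv[1]) : 0;
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		/* ... one iteration of the stress workload would go here ... */

		clock_gettime(CLOCK_MONOTONIC, &now);
		if (duration > 0 && now.tv_sec - start.tv_sec >= duration)
			return 0;	/* bounded run completed successfully */
	}
}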
Link: https://lkml.kernel.org/r/20230724082522.1202616-8-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Acked-by: David Hildenbrand Cc: Florent Revest Cc: Jérôme Glisse Cc: John Hubbard Cc: Mark Brown Cc: Peter Xu Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/transhuge-stress.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/mm/transhuge-stress.c b/tools/testing/selftests/mm/transhuge-stress.c index ba9d37ad3a89..c61fb9350b8c 100644 --- a/tools/testing/selftests/mm/transhuge-stress.c +++ b/tools/testing/selftests/mm/transhuge-stress.c @@ -25,13 +25,14 @@ int main(int argc, char **argv) { size_t ram, len; void *ptr, *p; - struct timespec a, b; + struct timespec start, a, b; int i = 0; char *name = NULL; double s; uint8_t *map; size_t map_len; int pagemap_fd; + int duration = 0; ram = sysconf(_SC_PHYS_PAGES); if (ram > SIZE_MAX / psize() / 4) @@ -42,9 +43,11 @@ int main(int argc, char **argv) while (++i < argc) { if (!strcmp(argv[i], "-h")) - errx(1, "usage: %s [size in MiB]", argv[0]); + errx(1, "usage: %s [-f ] [-d ] [size in MiB]", argv[0]); else if (!strcmp(argv[i], "-f")) name = argv[++i]; + else if (!strcmp(argv[i], "-d")) + duration = atoi(argv[++i]); else len = atoll(argv[i]) << 20; } @@ -78,6 +81,8 @@ int main(int argc, char **argv) if (!map) errx(2, "map malloc"); + clock_gettime(CLOCK_MONOTONIC, &start); + while (1) { int nr_succeed = 0, nr_failed = 0, nr_pages = 0; @@ -118,5 +123,8 @@ int main(int argc, char **argv) "%4d succeed, %4d failed, %4d different pages", s, s * 1000 / (len >> HPAGE_SHIFT), len / s / (1 << 20), nr_succeed, nr_failed, nr_pages); + + if (duration > 0 && b.tv_sec - start.tv_sec >= duration) + return 0; } } -- cgit v1.2.3 From 05f1edac80095634638a2ba8ae8d0f3d2b3e653b Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Mon, 24 Jul 2023 09:25:22 +0100 Subject: selftests/mm: run all tests from run_vmtests.sh MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It is very unclear to me how one is supposed to run all the mm selftests consistently and get clear results. Most of the test programs are launched by both run_vmtests.sh and run_kselftest.sh: hugepage-mmap hugepage-shm map_hugetlb hugepage-mremap hugepage-vmemmap hugetlb-madvise map_fixed_noreplace gup_test gup_longterm uffd-unit-tests uffd-stress compaction_test on-fault-limit map_populate mlock-random-test mlock2-tests mrelease_test mremap_test thuge-gen virtual_address_range va_high_addr_switch mremap_dontunmap hmm-tests madv_populate memfd_secret ksm_tests ksm_functional_tests soft-dirty cow However, of this set, when launched by run_vmtests.sh, some of the programs are invoked multiple times with different arguments. When invoked by run_kselftest.sh, they are invoked without arguments (and as a consequence, some fail immediately). Some test programs are only launched by run_vmtests.sh: test_vmalloc.sh And some test programs and only launched by run_kselftest.sh: khugepaged migration mkdirty transhuge-stress split_huge_page_test mdwe_test write_to_hugetlbfs Furthermore, run_vmtests.sh is invoked by run_kselftest.sh, so in this case all the test programs invoked by both scripts are run twice! Needless to say, this is a bit of a mess. In the absence of fully understanding the history here, it looks to me like the best solution is to launch ALL test programs from run_vmtests.sh, and ONLY invoke run_vmtests.sh from run_kselftest.sh. 
This way, we get full control over the parameters, each program is only invoked the intended number of times, and regardless of which script is used, the same tests get run in the same way. The only drawback is that if using run_kselftest.sh, it's top-level tap result reporting reports only a single test and it fails if any of the contained tests fail. I don't see this as a big deal though since we still see all the nested reporting from multiple layers. The other issue with this is that all of run_vmtests.sh must execute within a single kselftest timeout period, so let's increase that to something more suitable. In the Makefile, TEST_GEN_PROGS will compile and install the tests and will add them to the list of tests that run_kselftest.sh will run. TEST_GEN_FILES will compile and install the tests but will not add them to the test list. So let's move all the programs from TEST_GEN_PROGS to TEST_GEN_FILES so that they are built but not executed by run_kselftest.sh. Note that run_vmtests.sh is added to TEST_PROGS, which means it ends up in the test list. (the lack of "_GEN" means it won't be compiled, but simply copied). Link: https://lkml.kernel.org/r/20230724082522.1202616-9-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Acked-by: David Hildenbrand Acked-by: Peter Xu Cc: Florent Revest Cc: Jérôme Glisse Cc: John Hubbard Cc: Mark Brown Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/Makefile | 79 ++++++++++++++++--------------- tools/testing/selftests/mm/run_vmtests.sh | 23 +++++++++ tools/testing/selftests/mm/settings | 2 +- 3 files changed, 64 insertions(+), 40 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/mm/Makefile b/tools/testing/selftests/mm/Makefile index f35977a2bbf3..6a9fc5693145 100644 --- a/tools/testing/selftests/mm/Makefile +++ b/tools/testing/selftests/mm/Makefile @@ -35,39 +35,39 @@ MAKEFLAGS += --no-builtin-rules CFLAGS = -Wall -I $(top_srcdir) $(EXTRA_CFLAGS) $(KHDR_INCLUDES) LDLIBS = -lrt -lpthread -TEST_GEN_PROGS = cow -TEST_GEN_PROGS += compaction_test -TEST_GEN_PROGS += gup_longterm -TEST_GEN_PROGS += gup_test -TEST_GEN_PROGS += hmm-tests -TEST_GEN_PROGS += hugetlb-madvise -TEST_GEN_PROGS += hugetlb-read-hwpoison -TEST_GEN_PROGS += hugepage-mmap -TEST_GEN_PROGS += hugepage-mremap -TEST_GEN_PROGS += hugepage-shm -TEST_GEN_PROGS += hugepage-vmemmap -TEST_GEN_PROGS += khugepaged -TEST_GEN_PROGS += madv_populate -TEST_GEN_PROGS += map_fixed_noreplace -TEST_GEN_PROGS += map_hugetlb -TEST_GEN_PROGS += map_populate -TEST_GEN_PROGS += memfd_secret -TEST_GEN_PROGS += migration -TEST_GEN_PROGS += mkdirty -TEST_GEN_PROGS += mlock-random-test -TEST_GEN_PROGS += mlock2-tests -TEST_GEN_PROGS += mrelease_test -TEST_GEN_PROGS += mremap_dontunmap -TEST_GEN_PROGS += mremap_test -TEST_GEN_PROGS += on-fault-limit -TEST_GEN_PROGS += thuge-gen -TEST_GEN_PROGS += transhuge-stress -TEST_GEN_PROGS += uffd-stress -TEST_GEN_PROGS += uffd-unit-tests -TEST_GEN_PROGS += split_huge_page_test -TEST_GEN_PROGS += ksm_tests -TEST_GEN_PROGS += ksm_functional_tests -TEST_GEN_PROGS += mdwe_test +TEST_GEN_FILES = cow +TEST_GEN_FILES += compaction_test +TEST_GEN_FILES += gup_longterm +TEST_GEN_FILES += gup_test +TEST_GEN_FILES += hmm-tests +TEST_GEN_FILES += hugetlb-madvise +TEST_GEN_FILES += hugetlb-read-hwpoison +TEST_GEN_FILES += hugepage-mmap +TEST_GEN_FILES += hugepage-mremap +TEST_GEN_FILES += hugepage-shm +TEST_GEN_FILES += hugepage-vmemmap +TEST_GEN_FILES += khugepaged +TEST_GEN_FILES += madv_populate +TEST_GEN_FILES += map_fixed_noreplace 
+TEST_GEN_FILES += map_hugetlb +TEST_GEN_FILES += map_populate +TEST_GEN_FILES += memfd_secret +TEST_GEN_FILES += migration +TEST_GEN_FILES += mkdirty +TEST_GEN_FILES += mlock-random-test +TEST_GEN_FILES += mlock2-tests +TEST_GEN_FILES += mrelease_test +TEST_GEN_FILES += mremap_dontunmap +TEST_GEN_FILES += mremap_test +TEST_GEN_FILES += on-fault-limit +TEST_GEN_FILES += thuge-gen +TEST_GEN_FILES += transhuge-stress +TEST_GEN_FILES += uffd-stress +TEST_GEN_FILES += uffd-unit-tests +TEST_GEN_FILES += split_huge_page_test +TEST_GEN_FILES += ksm_tests +TEST_GEN_FILES += ksm_functional_tests +TEST_GEN_FILES += mdwe_test ifneq ($(ARCH),arm64) TEST_GEN_PROGS += soft-dirty @@ -87,24 +87,24 @@ CFLAGS += -no-pie endif ifeq ($(CAN_BUILD_I386),1) -TEST_GEN_PROGS += $(BINARIES_32) +TEST_GEN_FILES += $(BINARIES_32) endif ifeq ($(CAN_BUILD_X86_64),1) -TEST_GEN_PROGS += $(BINARIES_64) +TEST_GEN_FILES += $(BINARIES_64) endif else ifneq (,$(findstring $(ARCH),ppc64)) -TEST_GEN_PROGS += protection_keys +TEST_GEN_FILES += protection_keys endif endif ifneq (,$(filter $(ARCH),arm64 ia64 mips64 parisc64 ppc64 riscv64 s390x sparc64 x86_64)) -TEST_GEN_PROGS += va_high_addr_switch -TEST_GEN_PROGS += virtual_address_range -TEST_GEN_PROGS += write_to_hugetlbfs +TEST_GEN_FILES += va_high_addr_switch +TEST_GEN_FILES += virtual_address_range +TEST_GEN_FILES += write_to_hugetlbfs endif TEST_PROGS := run_vmtests.sh @@ -116,6 +116,7 @@ TEST_FILES += va_high_addr_switch.sh include ../lib.mk $(TEST_GEN_PROGS): vm_util.c +$(TEST_GEN_FILES): vm_util.c $(OUTPUT)/uffd-stress: uffd-common.c $(OUTPUT)/uffd-unit-tests: uffd-common.c diff --git a/tools/testing/selftests/mm/run_vmtests.sh b/tools/testing/selftests/mm/run_vmtests.sh index 4eaefc3d2e59..6de90c0adf92 100755 --- a/tools/testing/selftests/mm/run_vmtests.sh +++ b/tools/testing/selftests/mm/run_vmtests.sh @@ -58,6 +58,17 @@ separated by spaces: test soft dirty page bit semantics - cow test copy-on-write semantics +- thp + test transparent huge pages +- migration + invoke move_pages(2) to exercise the migration entry code + paths in the kernel +- mkdirty + test handling of code that might set PTE/PMD dirty in + read-only VMAs +- mdwe + test prctl(PR_SET_MDWE, ...) + example: ./run_vmtests.sh -t "hmm mmap ksm" EOF exit 0 @@ -330,6 +341,18 @@ fi # COW tests CATEGORY="cow" run_test ./cow +CATEGORY="thp" run_test ./khugepaged + +CATEGORY="thp" run_test ./transhuge-stress -d 20 + +CATEGORY="thp" run_test ./split_huge_page_test + +CATEGORY="migration" run_test ./migration + +CATEGORY="mkdirty" run_test ./mkdirty + +CATEGORY="mdwe" run_test ./mdwe_test + echo "SUMMARY: PASS=${count_pass} SKIP=${count_skip} FAIL=${count_fail}" exit $exitcode diff --git a/tools/testing/selftests/mm/settings b/tools/testing/selftests/mm/settings index ba4d85f74cd6..a953c96aa16e 100644 --- a/tools/testing/selftests/mm/settings +++ b/tools/testing/selftests/mm/settings @@ -1 +1 @@ -timeout=90 +timeout=180 -- cgit v1.2.3 From da0892547b101df6e13255b378380d077975368d Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Mon, 24 Jul 2023 14:31:49 -0400 Subject: maple_tree: re-introduce entry to mas_preallocate() arguments The current preallocation strategy is to preallocate the absolute worst-case allocation for a tree modification. The entry (or NULL) is needed to know how many nodes are needed to write to the tree. Start by adding the argument to the mas_preallocate() definition. Link: https://lkml.kernel.org/r/20230724183157.3939892-8-Liam.Howlett@oracle.com Signed-off-by: Liam R. 
Howlett Cc: Peng Zhang Cc: Suren Baghdasaryan Signed-off-by: Andrew Morton --- tools/testing/radix-tree/maple.c | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) (limited to 'tools') diff --git a/tools/testing/radix-tree/maple.c b/tools/testing/radix-tree/maple.c index 9901ae821911..c6c1c5109deb 100644 --- a/tools/testing/radix-tree/maple.c +++ b/tools/testing/radix-tree/maple.c @@ -35458,7 +35458,7 @@ static noinline void __init check_prealloc(struct maple_tree *mt) for (i = 0; i <= max; i++) mtree_test_store_range(mt, i * 10, i * 10 + 5, &i); - MT_BUG_ON(mt, mas_preallocate(&mas, GFP_KERNEL) != 0); + MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0); allocated = mas_allocated(&mas); height = mas_mt_height(&mas); MT_BUG_ON(mt, allocated == 0); @@ -35467,18 +35467,18 @@ static noinline void __init check_prealloc(struct maple_tree *mt) allocated = mas_allocated(&mas); MT_BUG_ON(mt, allocated != 0); - MT_BUG_ON(mt, mas_preallocate(&mas, GFP_KERNEL) != 0); + MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0); allocated = mas_allocated(&mas); height = mas_mt_height(&mas); MT_BUG_ON(mt, allocated == 0); MT_BUG_ON(mt, allocated != 1 + height * 3); - MT_BUG_ON(mt, mas_preallocate(&mas, GFP_KERNEL) != 0); + MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0); mas_destroy(&mas); allocated = mas_allocated(&mas); MT_BUG_ON(mt, allocated != 0); - MT_BUG_ON(mt, mas_preallocate(&mas, GFP_KERNEL) != 0); + MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0); allocated = mas_allocated(&mas); height = mas_mt_height(&mas); MT_BUG_ON(mt, allocated == 0); @@ -35487,26 +35487,26 @@ static noinline void __init check_prealloc(struct maple_tree *mt) MT_BUG_ON(mt, mas_allocated(&mas) != allocated - 1); mn->parent = ma_parent_ptr(mn); ma_free_rcu(mn); - MT_BUG_ON(mt, mas_preallocate(&mas, GFP_KERNEL) != 0); + MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0); mas_destroy(&mas); allocated = mas_allocated(&mas); MT_BUG_ON(mt, allocated != 0); - MT_BUG_ON(mt, mas_preallocate(&mas, GFP_KERNEL) != 0); + MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0); allocated = mas_allocated(&mas); height = mas_mt_height(&mas); MT_BUG_ON(mt, allocated == 0); MT_BUG_ON(mt, allocated != 1 + height * 3); mn = mas_pop_node(&mas); MT_BUG_ON(mt, mas_allocated(&mas) != allocated - 1); - MT_BUG_ON(mt, mas_preallocate(&mas, GFP_KERNEL) != 0); + MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0); mas_destroy(&mas); allocated = mas_allocated(&mas); MT_BUG_ON(mt, allocated != 0); mn->parent = ma_parent_ptr(mn); ma_free_rcu(mn); - MT_BUG_ON(mt, mas_preallocate(&mas, GFP_KERNEL) != 0); + MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0); allocated = mas_allocated(&mas); height = mas_mt_height(&mas); MT_BUG_ON(mt, allocated == 0); @@ -35515,12 +35515,12 @@ static noinline void __init check_prealloc(struct maple_tree *mt) MT_BUG_ON(mt, mas_allocated(&mas) != allocated - 1); mas_push_node(&mas, mn); MT_BUG_ON(mt, mas_allocated(&mas) != allocated); - MT_BUG_ON(mt, mas_preallocate(&mas, GFP_KERNEL) != 0); + MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0); mas_destroy(&mas); allocated = mas_allocated(&mas); MT_BUG_ON(mt, allocated != 0); - MT_BUG_ON(mt, mas_preallocate(&mas, GFP_KERNEL) != 0); + MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0); allocated = mas_allocated(&mas); height = mas_mt_height(&mas); MT_BUG_ON(mt, allocated == 0); @@ -35528,21 +35528,21 @@ static noinline void __init check_prealloc(struct maple_tree *mt) 
mas_store_prealloc(&mas, ptr); MT_BUG_ON(mt, mas_allocated(&mas) != 0); - MT_BUG_ON(mt, mas_preallocate(&mas, GFP_KERNEL) != 0); + MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0); allocated = mas_allocated(&mas); height = mas_mt_height(&mas); MT_BUG_ON(mt, allocated == 0); MT_BUG_ON(mt, allocated != 1 + height * 3); mas_store_prealloc(&mas, ptr); MT_BUG_ON(mt, mas_allocated(&mas) != 0); - MT_BUG_ON(mt, mas_preallocate(&mas, GFP_KERNEL) != 0); + MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0); allocated = mas_allocated(&mas); height = mas_mt_height(&mas); MT_BUG_ON(mt, allocated == 0); MT_BUG_ON(mt, allocated != 1 + height * 3); mas_store_prealloc(&mas, ptr); - MT_BUG_ON(mt, mas_preallocate(&mas, GFP_KERNEL) != 0); + MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0); allocated = mas_allocated(&mas); height = mas_mt_height(&mas); MT_BUG_ON(mt, allocated == 0); @@ -35550,14 +35550,14 @@ static noinline void __init check_prealloc(struct maple_tree *mt) mas_store_prealloc(&mas, ptr); MT_BUG_ON(mt, mas_allocated(&mas) != 0); mt_set_non_kernel(1); - MT_BUG_ON(mt, mas_preallocate(&mas, GFP_KERNEL & GFP_NOWAIT) == 0); + MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL & GFP_NOWAIT) == 0); allocated = mas_allocated(&mas); height = mas_mt_height(&mas); MT_BUG_ON(mt, allocated != 0); mas_destroy(&mas); - MT_BUG_ON(mt, mas_preallocate(&mas, GFP_KERNEL) != 0); + MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0); allocated = mas_allocated(&mas); height = mas_mt_height(&mas); MT_BUG_ON(mt, allocated == 0); @@ -35565,7 +35565,7 @@ static noinline void __init check_prealloc(struct maple_tree *mt) mas_store_prealloc(&mas, ptr); MT_BUG_ON(mt, mas_allocated(&mas) != 0); mt_set_non_kernel(1); - MT_BUG_ON(mt, mas_preallocate(&mas, GFP_KERNEL & GFP_NOWAIT) == 0); + MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL & GFP_NOWAIT) == 0); allocated = mas_allocated(&mas); height = mas_mt_height(&mas); MT_BUG_ON(mt, allocated != 0); -- cgit v1.2.3 From 0b8bb544b1a7051c9072c26ca03a7840fb9573ad Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Mon, 24 Jul 2023 14:31:54 -0400 Subject: maple_tree: update mas_preallocate() testing Since the mas_preallocate() calculation has been updated to be more precise, the testing must also be updated to check for what is expected. Link: https://lkml.kernel.org/r/20230724183157.3939892-13-Liam.Howlett@oracle.com Signed-off-by: Liam R. 
Howlett Cc: Peng Zhang Cc: Suren Baghdasaryan Signed-off-by: Andrew Morton --- tools/testing/radix-tree/maple.c | 27 ++++++++++++++++----------- 1 file changed, 16 insertions(+), 11 deletions(-) (limited to 'tools') diff --git a/tools/testing/radix-tree/maple.c b/tools/testing/radix-tree/maple.c index c6c1c5109deb..e5da1cad70ba 100644 --- a/tools/testing/radix-tree/maple.c +++ b/tools/testing/radix-tree/maple.c @@ -35458,6 +35458,8 @@ static noinline void __init check_prealloc(struct maple_tree *mt) for (i = 0; i <= max; i++) mtree_test_store_range(mt, i * 10, i * 10 + 5, &i); + /* Spanning store */ + mas_set_range(&mas, 470, 500); MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0); allocated = mas_allocated(&mas); height = mas_mt_height(&mas); @@ -35481,7 +35483,6 @@ static noinline void __init check_prealloc(struct maple_tree *mt) MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0); allocated = mas_allocated(&mas); height = mas_mt_height(&mas); - MT_BUG_ON(mt, allocated == 0); MT_BUG_ON(mt, allocated != 1 + height * 3); mn = mas_pop_node(&mas); MT_BUG_ON(mt, mas_allocated(&mas) != allocated - 1); @@ -35495,7 +35496,6 @@ static noinline void __init check_prealloc(struct maple_tree *mt) MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0); allocated = mas_allocated(&mas); height = mas_mt_height(&mas); - MT_BUG_ON(mt, allocated == 0); MT_BUG_ON(mt, allocated != 1 + height * 3); mn = mas_pop_node(&mas); MT_BUG_ON(mt, mas_allocated(&mas) != allocated - 1); @@ -35509,7 +35509,6 @@ static noinline void __init check_prealloc(struct maple_tree *mt) MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0); allocated = mas_allocated(&mas); height = mas_mt_height(&mas); - MT_BUG_ON(mt, allocated == 0); MT_BUG_ON(mt, allocated != 1 + height * 3); mn = mas_pop_node(&mas); MT_BUG_ON(mt, mas_allocated(&mas) != allocated - 1); @@ -35523,33 +35522,37 @@ static noinline void __init check_prealloc(struct maple_tree *mt) MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0); allocated = mas_allocated(&mas); height = mas_mt_height(&mas); - MT_BUG_ON(mt, allocated == 0); MT_BUG_ON(mt, allocated != 1 + height * 3); mas_store_prealloc(&mas, ptr); MT_BUG_ON(mt, mas_allocated(&mas) != 0); + /* Slot store does not need allocations */ + mas_set_range(&mas, 6, 9); MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0); allocated = mas_allocated(&mas); - height = mas_mt_height(&mas); - MT_BUG_ON(mt, allocated == 0); - MT_BUG_ON(mt, allocated != 1 + height * 3); + MT_BUG_ON(mt, allocated != 0); mas_store_prealloc(&mas, ptr); MT_BUG_ON(mt, mas_allocated(&mas) != 0); + + mas_set_range(&mas, 6, 10); MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0); allocated = mas_allocated(&mas); height = mas_mt_height(&mas); - MT_BUG_ON(mt, allocated == 0); - MT_BUG_ON(mt, allocated != 1 + height * 3); + MT_BUG_ON(mt, allocated != 1); mas_store_prealloc(&mas, ptr); + MT_BUG_ON(mt, mas_allocated(&mas) != 0); + /* Split */ + mas_set_range(&mas, 54, 54); MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0); allocated = mas_allocated(&mas); height = mas_mt_height(&mas); - MT_BUG_ON(mt, allocated == 0); - MT_BUG_ON(mt, allocated != 1 + height * 3); + MT_BUG_ON(mt, allocated != 1 + height * 2); mas_store_prealloc(&mas, ptr); MT_BUG_ON(mt, mas_allocated(&mas) != 0); mt_set_non_kernel(1); + /* Spanning store */ + mas_set_range(&mas, 1, 100); MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL & GFP_NOWAIT) == 0); allocated = mas_allocated(&mas); height = mas_mt_height(&mas); @@ -35557,6 +35560,7 
@@ static noinline void __init check_prealloc(struct maple_tree *mt) mas_destroy(&mas); + /* Spanning store */ MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0); allocated = mas_allocated(&mas); height = mas_mt_height(&mas); @@ -35564,6 +35568,7 @@ static noinline void __init check_prealloc(struct maple_tree *mt) MT_BUG_ON(mt, allocated != 1 + height * 3); mas_store_prealloc(&mas, ptr); MT_BUG_ON(mt, mas_allocated(&mas) != 0); + mas_set_range(&mas, 0, 200); mt_set_non_kernel(1); MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL & GFP_NOWAIT) == 0); allocated = mas_allocated(&mas); -- cgit v1.2.3 From edb72f4e4fc2a41bb2dc313ac377d16b78d61dea Mon Sep 17 00:00:00 2001 From: Ayush Jain Date: Fri, 28 Jul 2023 22:11:02 +0530 Subject: selftests: mm: add KSM_MERGE_TIME tests Add KSM_MERGE_TIME and KSM_MERGE_TIME_HUGE_PAGES tests with size of 100. ./run_vmtests.sh -t ksm ----------------------------- running ./ksm_tests -H -s 100 ----------------------------- Number of normal pages: 0 Number of huge pages: 50 Total size: 100 MiB Total time: 0.399844662 s Average speed: 250.097 MiB/s [PASS] ----------------------------- running ./ksm_tests -P -s 100 ----------------------------- Total size: 100 MiB Total time: 0.451931496 s Average speed: 221.272 MiB/s [PASS] Link: https://lkml.kernel.org/r/20230728164102.4655-1-ayush.jain3@amd.com Signed-off-by: Ayush Jain Reviewed-by: David Hildenbrand Cc: Stefan Roesch Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/run_vmtests.sh | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'tools') diff --git a/tools/testing/selftests/mm/run_vmtests.sh b/tools/testing/selftests/mm/run_vmtests.sh index 6de90c0adf92..3e2bc818d566 100755 --- a/tools/testing/selftests/mm/run_vmtests.sh +++ b/tools/testing/selftests/mm/run_vmtests.sh @@ -305,6 +305,10 @@ CATEGORY="madv_populate" run_test ./madv_populate CATEGORY="memfd_secret" run_test ./memfd_secret +# KSM KSM_MERGE_TIME_HUGE_PAGES test with size of 100 +CATEGORY="ksm" run_test ./ksm_tests -H -s 100 +# KSM KSM_MERGE_TIME test with size of 100 +CATEGORY="ksm" run_test ./ksm_tests -P -s 100 # KSM MADV_MERGEABLE test with 10 identical pages CATEGORY="ksm" run_test ./ksm_tests -M -p 10 # KSM unmerge test -- cgit v1.2.3 From b823cb08e66212f5d90b5eaad59b2371b930e33f Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Wed, 2 Aug 2023 21:32:19 +0000 Subject: selftests/damon/sysfs: test tried_regions/total_bytes file Update sysfs.sh DAMON selftest for checking existence of 'total_bytes' file under the 'tried_regions' directory of DAMON sysfs interface. 
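A C analogue of the check being added: confirm the new sysfs file exists with the expected 0400 mode. The kdamond/context/scheme indices in the path are assumptions for illustration; the script itself derives the directory from its arguments:

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	/* Hypothetical path; the indices depend on how the sysfs dirs were set up. */
	const char *path = "/sys/kernel/mm/damon/admin/kdamonds/0/contexts/0/"
			   "schemes/0/tried_regions/total_bytes";
	struct stat st;

	if (stat(path, &st)) {
		perror(path);
		return 1;
	}
	if ((st.st_mode & 0777) != 0400) {
		fprintf(stderr, "unexpected mode %o\n", st.st_mode & 0777);
		return 1;
	}
	printf("%s: ok\n", path);
	return 0;
}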
Link: https://lkml.kernel.org/r/20230802213222.109841-4-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/testing/selftests/damon/sysfs.sh | 1 + 1 file changed, 1 insertion(+) (limited to 'tools') diff --git a/tools/testing/selftests/damon/sysfs.sh b/tools/testing/selftests/damon/sysfs.sh index bcd4734ca094..967e27267549 100644 --- a/tools/testing/selftests/damon/sysfs.sh +++ b/tools/testing/selftests/damon/sysfs.sh @@ -84,6 +84,7 @@ test_tried_regions() { tried_regions_dir=$1 ensure_dir "$tried_regions_dir" "exist" + ensure_file "$tried_regions_dir/total_bytes" "exist" "400" } test_stats() -- cgit v1.2.3 From 4c45c20d53488d05da6e240f3ac4dffbd3963f94 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Wed, 2 Aug 2023 21:43:03 +0000 Subject: selftests/damon/sysfs: test address range damos filter Add a selftest for checking existence of addr_{start,end} files under DAMOS filter directory, and 'addr' damos filter type input of DAMON sysfs interface. Link: https://lkml.kernel.org/r/20230802214312.110532-5-sj@kernel.org Signed-off-by: SeongJae Park Cc: Brendan Higgins Cc: Jonathan Corbet Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/testing/selftests/damon/sysfs.sh | 3 +++ 1 file changed, 3 insertions(+) (limited to 'tools') diff --git a/tools/testing/selftests/damon/sysfs.sh b/tools/testing/selftests/damon/sysfs.sh index 967e27267549..5677cfd342fc 100644 --- a/tools/testing/selftests/damon/sysfs.sh +++ b/tools/testing/selftests/damon/sysfs.sh @@ -103,9 +103,12 @@ test_filter() ensure_file "$filter_dir/type" "exist" "600" ensure_write_succ "$filter_dir/type" "anon" "valid input" ensure_write_succ "$filter_dir/type" "memcg" "valid input" + ensure_write_succ "$filter_dir/type" "addr" "valid input" ensure_write_fail "$filter_dir/type" "foo" "invalid input" ensure_file "$filter_dir/matching" "exist" "600" ensure_file "$filter_dir/memcg_path" "exist" "600" + ensure_file "$filter_dir/addr_start" "exist" "600" + ensure_file "$filter_dir/addr_end" "exist" "600" } test_filters() -- cgit v1.2.3 From 9628ace840614abcdd99abc0b313edc4c6d0b1ad Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Wed, 2 Aug 2023 21:43:09 +0000 Subject: selftests/damon/sysfs: test damon_target filter Test existence of files and validity of input keyword for DAMON monitoring target based DAMOS filter on DAMON sysfs interface. 
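The keyword checks amount to writing a string into the filter's 'type' file and seeing whether the kernel accepts it; a hedged C sketch of that idea, with an assumed filter directory path:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Write val to path; returns 0 on success, -errno if the kernel rejected it. */
static int try_write(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);
	int ret = 0;

	if (fd < 0)
		return -errno;
	if (write(fd, val, strlen(val)) < 0)
		ret = -errno;
	close(fd);
	return ret;
}

int main(void)
{
	/* Hypothetical path; real indices depend on the configured scheme/filter. */
	const char *type = "/sys/kernel/mm/damon/admin/kdamonds/0/contexts/0/"
			   "schemes/0/filters/0/type";

	printf("write 'target': %d (expect 0)\n", try_write(type, "target"));
	printf("write 'foo':    %d (expect negative)\n", try_write(type, "foo"));
	return 0;
}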
Link: https://lkml.kernel.org/r/20230802214312.110532-11-sj@kernel.org Signed-off-by: SeongJae Park Cc: Brendan Higgins Cc: Jonathan Corbet Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/testing/selftests/damon/sysfs.sh | 2 ++ 1 file changed, 2 insertions(+) (limited to 'tools') diff --git a/tools/testing/selftests/damon/sysfs.sh b/tools/testing/selftests/damon/sysfs.sh index 5677cfd342fc..60a9a305aef0 100644 --- a/tools/testing/selftests/damon/sysfs.sh +++ b/tools/testing/selftests/damon/sysfs.sh @@ -104,11 +104,13 @@ test_filter() ensure_write_succ "$filter_dir/type" "anon" "valid input" ensure_write_succ "$filter_dir/type" "memcg" "valid input" ensure_write_succ "$filter_dir/type" "addr" "valid input" + ensure_write_succ "$filter_dir/type" "target" "valid input" ensure_write_fail "$filter_dir/type" "foo" "invalid input" ensure_file "$filter_dir/matching" "exist" "600" ensure_file "$filter_dir/memcg_path" "exist" "600" ensure_file "$filter_dir/addr_start" "exist" "600" ensure_file "$filter_dir/addr_end" "exist" "600" + ensure_file "$filter_dir/damon_target_idx" "exist" "600" } test_filters() -- cgit v1.2.3 From 708879a1b44216f6c12a3d61328c5259078fc1b1 Mon Sep 17 00:00:00 2001 From: Rong Tao Date: Mon, 14 Aug 2023 18:45:50 +0800 Subject: selftests/mm: fix uffd-stress help information commit 686a8bb72349("selftests/mm: split uffd tests into uffd-stress and uffd-unit-tests") split uffd tests into uffd-stress and uffd-unit-tests, obviously we need to modify the help information synchronously. Also modify code indentation. Link: https://lkml.kernel.org/r/tencent_64FC724AC5F05568F41BD1C68058E83CEB05@qq.com Signed-off-by: Rong Tao Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/uffd-stress.c | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/mm/uffd-stress.c b/tools/testing/selftests/mm/uffd-stress.c index 73ebb97c7026..469e0476af26 100644 --- a/tools/testing/selftests/mm/uffd-stress.c +++ b/tools/testing/selftests/mm/uffd-stress.c @@ -53,21 +53,21 @@ pthread_attr_t attr; do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0) const char *examples = - "# Run anonymous memory test on 100MiB region with 99999 bounces:\n" - "./userfaultfd anon 100 99999\n\n" - "# Run share memory test on 1GiB region with 99 bounces:\n" - "./userfaultfd shmem 1000 99\n\n" - "# Run hugetlb memory test on 256MiB region with 50 bounces:\n" - "./userfaultfd hugetlb 256 50\n\n" - "# Run the same hugetlb test but using private file:\n" - "./userfaultfd hugetlb-private 256 50\n\n" - "# 10MiB-~6GiB 999 bounces anonymous test, " - "continue forever unless an error triggers\n" - "while ./userfaultfd anon $[RANDOM % 6000 + 10] 999; do true; done\n\n"; + "# Run anonymous memory test on 100MiB region with 99999 bounces:\n" + "./uffd-stress anon 100 99999\n\n" + "# Run share memory test on 1GiB region with 99 bounces:\n" + "./uffd-stress shmem 1000 99\n\n" + "# Run hugetlb memory test on 256MiB region with 50 bounces:\n" + "./uffd-stress hugetlb 256 50\n\n" + "# Run the same hugetlb test but using private file:\n" + "./uffd-stress hugetlb-private 256 50\n\n" + "# 10MiB-~6GiB 999 bounces anonymous test, " + "continue forever unless an error triggers\n" + "while ./uffd-stress anon $[RANDOM % 6000 + 10] 999; do true; done\n\n"; static void usage(void) { - fprintf(stderr, "\nUsage: ./userfaultfd \n\n"); + fprintf(stderr, "\nUsage: ./uffd-stress \n\n"); fprintf(stderr, "Supported : anon, hugetlb, " 
"hugetlb-private, shmem, shmem-private\n\n"); fprintf(stderr, "Examples:\n\n"); -- cgit v1.2.3 From 99f34659e78b9b781a3248e0b080b4dfca4957e2 Mon Sep 17 00:00:00 2001 From: Aleksa Sarai Date: Mon, 14 Aug 2023 18:40:57 +1000 Subject: selftests: memfd: error out test process when child test fails Patch series "memfd: cleanups for vm.memfd_noexec", v2. The most critical issue with vm.memfd_noexec=2 (the fact that passing MFD_EXEC would bypass it entirely[1]) has been fixed in Andrew's tree[2], but there are still some outstanding issues that need to be addressed: * vm.memfd_noexec=2 shouldn't reject old-style memfd_create(2) syscalls because it will make it far to difficult to ever migrate. Instead it should imply MFD_EXEC. * The dmesg warnings are pr_warn_once(), which on most systems means that they will be used up by systemd or some other boot process and userspace developers will never see it. - For the !(flags & (MFD_EXEC | MFD_NOEXEC_SEAL)) case, outputting a rate-limited message to the kernel log is necessary to tell userspace that they should add the new flags. Arguably the most ideal way to deal with the spam concern[3,4] while still prompting userspace to switch to the new flags would be to only log the warning once per task or something similar. However, adding something to task_struct for tracking this would be needless bloat for a single pr_warn_ratelimited(). So just switch to pr_info_ratelimited() to avoid spamming the log with something that isn't a real warning. There's lots of info-level stuff in dmesg, it seems really unlikely that this should be an actual problem. Most programs are already switching to the new flags anyway. - For the vm.memfd_noexec=2 case, we need to log a warning for every failure because otherwise userspace will have no idea why their previously working program started returning -EACCES (previously -EINVAL) from memfd_create(2). pr_warn_once() is simply wrong here. * The racheting mechanism for vm.memfd_noexec makes it incredibly unappealing for most users to enable the sysctl because enabling it on &init_pid_ns means you need a system reboot to unset it. Given the actual security threat being protected against, CAP_SYS_ADMIN users being restricted in this way makes little sense. The argument for this ratcheting by the original author was that it allows you to have a hierarchical setting that cannot be unset by child pidnses, but this is not accurate -- changing the parent pidns's vm.memfd_noexec setting to be more restrictive didn't affect children. Instead, switch the vm.memfd_noexec sysctl to be properly hierarchical and allow CAP_SYS_ADMIN users (in the pidns's owning userns) to lower the setting as long as it is not lower than the parent's effective setting. This change also makes it so that changing a parent pidns's vm.memfd_noexec will affect all descendants, providing a properly hierarchical setting. The performance impact of this is incredibly minimal since the maximum depth of pidns is 32 and it is only checked during memfd_create(2) and unshare(CLONE_NEWPID). * The memfd selftests would not exit with a non-zero error code when certain tests that ran in a forked process (specifically the ones related to MFD_EXEC and MFD_NOEXEC_SEAL) failed. 
[1]: https://lore.kernel.org/all/ZJwcsU0vI-nzgOB_@codewreck.org/ [2]: https://lore.kernel.org/all/20230705063315.3680666-1-jeffxu@google.com/ [3]: https://lore.kernel.org/Y5yS8wCnuYGLHMj4@x1n/ [4]: https://lore.kernel.org/f185bb42-b29c-977e-312e-3349eea15383@linuxfoundation.org/ This patch (of 5): Before this change, a test runner using this self test would see a return code of 0 when the tests using a child process (namely the MFD_NOEXEC_SEAL and MFD_EXEC tests) failed, masking test failures. Link: https://lkml.kernel.org/r/20230814-memfd-vm-noexec-uapi-fixes-v2-0-7ff9e3e10ba6@cyphar.com Link: https://lkml.kernel.org/r/20230814-memfd-vm-noexec-uapi-fixes-v2-1-7ff9e3e10ba6@cyphar.com Fixes: 11f75a01448f ("selftests/memfd: add tests for MFD_NOEXEC_SEAL MFD_EXEC") Signed-off-by: Aleksa Sarai Reviewed-by: Jeff Xu Cc: "Christian Brauner (Microsoft)" Cc: Daniel Verkamp Cc: Dominique Martinet Cc: Kees Cook Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/testing/selftests/memfd/memfd_test.c | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) (limited to 'tools') diff --git a/tools/testing/selftests/memfd/memfd_test.c b/tools/testing/selftests/memfd/memfd_test.c index dbdd9ec5e397..8eb49204f9ea 100644 --- a/tools/testing/selftests/memfd/memfd_test.c +++ b/tools/testing/selftests/memfd/memfd_test.c @@ -1207,7 +1207,24 @@ static pid_t spawn_newpid_thread(unsigned int flags, int (*fn)(void *)) static void join_newpid_thread(pid_t pid) { - waitpid(pid, NULL, 0); + int wstatus; + + if (waitpid(pid, &wstatus, 0) < 0) { + printf("newpid thread: waitpid() failed: %m\n"); + abort(); + } + + if (WIFEXITED(wstatus) && WEXITSTATUS(wstatus) != 0) { + printf("newpid thread: exited with non-zero error code %d\n", + WEXITSTATUS(wstatus)); + abort(); + } + + if (WIFSIGNALED(wstatus)) { + printf("newpid thread: killed by signal %d\n", + WTERMSIG(wstatus)); + abort(); + } } /* -- cgit v1.2.3 From 202e14222fadb246dfdf182e67de1518e86a1e20 Mon Sep 17 00:00:00 2001 From: Aleksa Sarai Date: Mon, 14 Aug 2023 18:40:58 +1000 Subject: memfd: do not -EACCES old memfd_create() users with vm.memfd_noexec=2 Given the difficulty of auditing all of userspace to figure out whether every memfd_create() user has switched to passing MFD_EXEC and MFD_NOEXEC_SEAL flags, it seems far less distruptive to make it possible for older programs that don't make use of executable memfds to run under vm.memfd_noexec=2. Otherwise, a small dependency change can result in spurious errors. For programs that don't use executable memfds, passing MFD_NOEXEC_SEAL is functionally a no-op and thus having the same In addition, every failure under vm.memfd_noexec=2 needs to print to the kernel log so that userspace can figure out where the error came from. The concerns about pr_warn_ratelimited() spam that caused the switch to pr_warn_once()[1,2] do not apply to the vm.memfd_noexec=2 case. This is a user-visible API change, but as it allows programs to do something that would be blocked before, and the sysctl itself was broken and recently released, it seems unlikely this will cause any issues. 
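As a rough illustration of the behaviour the tests below expect on a kernel with this change and vm.memfd_noexec=2: an old-style memfd_create() call should now succeed and return a descriptor that already carries F_SEAL_EXEC. This sketch only prints the seals and is not the selftest itself:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef F_SEAL_EXEC
#define F_SEAL_EXEC 0x0020	/* fallback for older userspace headers */
#endif

int main(void)
{
	/* Old-style call: neither MFD_EXEC nor MFD_NOEXEC_SEAL is passed. */
	int fd = memfd_create("legacy-style", MFD_CLOEXEC | MFD_ALLOW_SEALING);
	int seals;

	if (fd < 0) {
		perror("memfd_create");
		return 1;
	}
	seals = fcntl(fd, F_GET_SEALS);
	printf("seals: %#x (F_SEAL_EXEC is %#x)\n", seals, F_SEAL_EXEC);
	close(fd);
	return 0;
}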
[1]: https://lore.kernel.org/Y5yS8wCnuYGLHMj4@x1n/ [2]: https://lore.kernel.org/202212161233.85C9783FB@keescook/ Link: https://lkml.kernel.org/r/20230814-memfd-vm-noexec-uapi-fixes-v2-2-7ff9e3e10ba6@cyphar.com Fixes: 105ff5339f49 ("mm/memfd: add MFD_NOEXEC_SEAL and MFD_EXEC") Signed-off-by: Aleksa Sarai Cc: Dominique Martinet Cc: Christian Brauner Cc: Daniel Verkamp Cc: Jeff Xu Cc: Kees Cook Cc: Shuah Khan Cc: Signed-off-by: Andrew Morton --- tools/testing/selftests/memfd/memfd_test.c | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/memfd/memfd_test.c b/tools/testing/selftests/memfd/memfd_test.c index 8eb49204f9ea..8b7390ad81d1 100644 --- a/tools/testing/selftests/memfd/memfd_test.c +++ b/tools/testing/selftests/memfd/memfd_test.c @@ -1145,11 +1145,23 @@ static void test_sysctl_child(void) printf("%s sysctl 2\n", memfd_str); sysctl_assert_write("2"); - mfd_fail_new("kern_memfd_sysctl_2", - MFD_CLOEXEC | MFD_ALLOW_SEALING); - mfd_fail_new("kern_memfd_sysctl_2_MFD_EXEC", - MFD_CLOEXEC | MFD_EXEC); - fd = mfd_assert_new("", 0, MFD_NOEXEC_SEAL); + mfd_fail_new("kern_memfd_sysctl_2_exec", + MFD_EXEC | MFD_CLOEXEC | MFD_ALLOW_SEALING); + + fd = mfd_assert_new("kern_memfd_sysctl_2_dfl", + mfd_def_size, + MFD_CLOEXEC | MFD_ALLOW_SEALING); + mfd_assert_mode(fd, 0666); + mfd_assert_has_seals(fd, F_SEAL_EXEC); + mfd_fail_chmod(fd, 0777); + close(fd); + + fd = mfd_assert_new("kern_memfd_sysctl_2_noexec_seal", + mfd_def_size, + MFD_NOEXEC_SEAL | MFD_CLOEXEC | MFD_ALLOW_SEALING); + mfd_assert_mode(fd, 0666); + mfd_assert_has_seals(fd, F_SEAL_EXEC); + mfd_fail_chmod(fd, 0777); close(fd); sysctl_fail_write("0"); -- cgit v1.2.3 From 6469b66e3f5a38214bf1d1220d54d78d9cd08ebd Mon Sep 17 00:00:00 2001 From: Aleksa Sarai Date: Mon, 14 Aug 2023 18:41:01 +1000 Subject: selftests: improve vm.memfd_noexec sysctl tests This adds proper tests for the nesting functionality of vm.memfd_noexec as well as some minor cleanups to spawn_*_thread(). 
Link: https://lkml.kernel.org/r/20230814-memfd-vm-noexec-uapi-fixes-v2-5-7ff9e3e10ba6@cyphar.com Signed-off-by: Aleksa Sarai Cc: Christian Brauner Cc: Daniel Verkamp Cc: Dominique Martinet Cc: Jeff Xu Cc: Kees Cook Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/testing/selftests/memfd/memfd_test.c | 339 +++++++++++++++++++++-------- 1 file changed, 254 insertions(+), 85 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/memfd/memfd_test.c b/tools/testing/selftests/memfd/memfd_test.c index 8b7390ad81d1..3df008677239 100644 --- a/tools/testing/selftests/memfd/memfd_test.c +++ b/tools/testing/selftests/memfd/memfd_test.c @@ -18,6 +18,7 @@ #include #include #include +#include #include "common.h" @@ -43,7 +44,6 @@ */ static size_t mfd_def_size = MFD_DEF_SIZE; static const char *memfd_str = MEMFD_STR; -static pid_t spawn_newpid_thread(unsigned int flags, int (*fn)(void *)); static int newpid_thread_fn2(void *arg); static void join_newpid_thread(pid_t pid); @@ -96,12 +96,12 @@ static void sysctl_assert_write(const char *val) int fd = open("/proc/sys/vm/memfd_noexec", O_WRONLY | O_CLOEXEC); if (fd < 0) { - printf("open sysctl failed\n"); + printf("open sysctl failed: %m\n"); abort(); } if (write(fd, val, strlen(val)) < 0) { - printf("write sysctl failed\n"); + printf("write sysctl %s failed: %m\n", val); abort(); } } @@ -111,7 +111,7 @@ static void sysctl_fail_write(const char *val) int fd = open("/proc/sys/vm/memfd_noexec", O_WRONLY | O_CLOEXEC); if (fd < 0) { - printf("open sysctl failed\n"); + printf("open sysctl failed: %m\n"); abort(); } @@ -122,6 +122,33 @@ static void sysctl_fail_write(const char *val) } } +static void sysctl_assert_equal(const char *val) +{ + char *p, buf[128] = {}; + int fd = open("/proc/sys/vm/memfd_noexec", O_RDONLY | O_CLOEXEC); + + if (fd < 0) { + printf("open sysctl failed: %m\n"); + abort(); + } + + if (read(fd, buf, sizeof(buf)) < 0) { + printf("read sysctl failed: %m\n"); + abort(); + } + + /* Strip trailing whitespace. 
*/ + p = buf; + while (!isspace(*p)) + p++; + *p = '\0'; + + if (strcmp(buf, val) != 0) { + printf("unexpected sysctl value: expected %s, got %s\n", val, buf); + abort(); + } +} + static int mfd_assert_reopen_fd(int fd_in) { int fd; @@ -736,7 +763,7 @@ static int idle_thread_fn(void *arg) return 0; } -static pid_t spawn_idle_thread(unsigned int flags) +static pid_t spawn_thread(unsigned int flags, int (*fn)(void *), void *arg) { uint8_t *stack; pid_t pid; @@ -747,10 +774,7 @@ static pid_t spawn_idle_thread(unsigned int flags) abort(); } - pid = clone(idle_thread_fn, - stack + STACK_SIZE, - SIGCHLD | flags, - NULL); + pid = clone(fn, stack + STACK_SIZE, SIGCHLD | flags, arg); if (pid < 0) { printf("clone() failed: %m\n"); abort(); @@ -759,6 +783,33 @@ static pid_t spawn_idle_thread(unsigned int flags) return pid; } +static void join_thread(pid_t pid) +{ + int wstatus; + + if (waitpid(pid, &wstatus, 0) < 0) { + printf("newpid thread: waitpid() failed: %m\n"); + abort(); + } + + if (WIFEXITED(wstatus) && WEXITSTATUS(wstatus) != 0) { + printf("newpid thread: exited with non-zero error code %d\n", + WEXITSTATUS(wstatus)); + abort(); + } + + if (WIFSIGNALED(wstatus)) { + printf("newpid thread: killed by signal %d\n", + WTERMSIG(wstatus)); + abort(); + } +} + +static pid_t spawn_idle_thread(unsigned int flags) +{ + return spawn_thread(flags, idle_thread_fn, NULL); +} + static void join_idle_thread(pid_t pid) { kill(pid, SIGTERM); @@ -1111,42 +1162,69 @@ static void test_noexec_seal(void) close(fd); } -static void test_sysctl_child(void) +static void test_sysctl_sysctl0(void) { int fd; - int pid; - printf("%s sysctl 0\n", memfd_str); - sysctl_assert_write("0"); - fd = mfd_assert_new("kern_memfd_sysctl_0", + sysctl_assert_equal("0"); + + fd = mfd_assert_new("kern_memfd_sysctl_0_dfl", mfd_def_size, MFD_CLOEXEC | MFD_ALLOW_SEALING); - mfd_assert_mode(fd, 0777); mfd_assert_has_seals(fd, 0); mfd_assert_chmod(fd, 0644); close(fd); +} - printf("%s sysctl 1\n", memfd_str); - sysctl_assert_write("1"); - fd = mfd_assert_new("kern_memfd_sysctl_1", +static void test_sysctl_set_sysctl0(void) +{ + sysctl_assert_write("0"); + test_sysctl_sysctl0(); +} + +static void test_sysctl_sysctl1(void) +{ + int fd; + + sysctl_assert_equal("1"); + + fd = mfd_assert_new("kern_memfd_sysctl_1_dfl", mfd_def_size, MFD_CLOEXEC | MFD_ALLOW_SEALING); + mfd_assert_mode(fd, 0666); + mfd_assert_has_seals(fd, F_SEAL_EXEC); + mfd_fail_chmod(fd, 0777); + close(fd); - printf("%s child ns\n", memfd_str); - pid = spawn_newpid_thread(CLONE_NEWPID, newpid_thread_fn2); - join_newpid_thread(pid); + fd = mfd_assert_new("kern_memfd_sysctl_1_exec", + mfd_def_size, + MFD_CLOEXEC | MFD_EXEC | MFD_ALLOW_SEALING); + mfd_assert_mode(fd, 0777); + mfd_assert_has_seals(fd, 0); + mfd_assert_chmod(fd, 0644); + close(fd); + fd = mfd_assert_new("kern_memfd_sysctl_1_noexec", + mfd_def_size, + MFD_CLOEXEC | MFD_NOEXEC_SEAL | MFD_ALLOW_SEALING); mfd_assert_mode(fd, 0666); mfd_assert_has_seals(fd, F_SEAL_EXEC); mfd_fail_chmod(fd, 0777); - sysctl_fail_write("0"); close(fd); +} - printf("%s sysctl 2\n", memfd_str); - sysctl_assert_write("2"); - mfd_fail_new("kern_memfd_sysctl_2_exec", - MFD_EXEC | MFD_CLOEXEC | MFD_ALLOW_SEALING); +static void test_sysctl_set_sysctl1(void) +{ + sysctl_assert_write("1"); + test_sysctl_sysctl1(); +} + +static void test_sysctl_sysctl2(void) +{ + int fd; + + sysctl_assert_equal("2"); fd = mfd_assert_new("kern_memfd_sysctl_2_dfl", mfd_def_size, @@ -1156,98 +1234,188 @@ static void test_sysctl_child(void) mfd_fail_chmod(fd, 0777); 
close(fd); - fd = mfd_assert_new("kern_memfd_sysctl_2_noexec_seal", + mfd_fail_new("kern_memfd_sysctl_2_exec", + MFD_CLOEXEC | MFD_EXEC | MFD_ALLOW_SEALING); + + fd = mfd_assert_new("kern_memfd_sysctl_2_noexec", mfd_def_size, - MFD_NOEXEC_SEAL | MFD_CLOEXEC | MFD_ALLOW_SEALING); + MFD_CLOEXEC | MFD_NOEXEC_SEAL | MFD_ALLOW_SEALING); mfd_assert_mode(fd, 0666); mfd_assert_has_seals(fd, F_SEAL_EXEC); mfd_fail_chmod(fd, 0777); close(fd); - - sysctl_fail_write("0"); - sysctl_fail_write("1"); } -static int newpid_thread_fn(void *arg) +static void test_sysctl_set_sysctl2(void) { - test_sysctl_child(); - return 0; + sysctl_assert_write("2"); + test_sysctl_sysctl2(); } -static void test_sysctl_child2(void) +static int sysctl_simple_child(void *arg) { int fd; + int pid; - sysctl_fail_write("0"); - fd = mfd_assert_new("kern_memfd_sysctl_1", - mfd_def_size, - MFD_CLOEXEC | MFD_ALLOW_SEALING); + printf("%s sysctl 0\n", memfd_str); + test_sysctl_set_sysctl0(); - mfd_assert_mode(fd, 0666); - mfd_assert_has_seals(fd, F_SEAL_EXEC); - mfd_fail_chmod(fd, 0777); - close(fd); + printf("%s sysctl 1\n", memfd_str); + test_sysctl_set_sysctl1(); + + printf("%s sysctl 0\n", memfd_str); + test_sysctl_set_sysctl0(); + + printf("%s sysctl 2\n", memfd_str); + test_sysctl_set_sysctl2(); + + printf("%s sysctl 1\n", memfd_str); + test_sysctl_set_sysctl1(); + + printf("%s sysctl 0\n", memfd_str); + test_sysctl_set_sysctl0(); + + return 0; +} + +/* + * Test sysctl + * A very basic test to make sure the core sysctl semantics work. + */ +static void test_sysctl_simple(void) +{ + int pid = spawn_thread(CLONE_NEWPID, sysctl_simple_child, NULL); + + join_thread(pid); } -static int newpid_thread_fn2(void *arg) +static int sysctl_nested(void *arg) { - test_sysctl_child2(); + void (*fn)(void) = arg; + + fn(); return 0; } -static pid_t spawn_newpid_thread(unsigned int flags, int (*fn)(void *)) + +static int sysctl_nested_wait(void *arg) { - uint8_t *stack; - pid_t pid; + /* Wait for a SIGCONT. */ + kill(getpid(), SIGSTOP); + return sysctl_nested(arg); +} - stack = malloc(STACK_SIZE); - if (!stack) { - printf("malloc(STACK_SIZE) failed: %m\n"); - abort(); - } +static void test_sysctl_sysctl1_failset(void) +{ + sysctl_fail_write("0"); + test_sysctl_sysctl1(); +} - pid = clone(fn, - stack + STACK_SIZE, - SIGCHLD | flags, - NULL); - if (pid < 0) { - printf("clone() failed: %m\n"); - abort(); - } +static void test_sysctl_sysctl2_failset(void) +{ + sysctl_fail_write("1"); + test_sysctl_sysctl2(); - return pid; + sysctl_fail_write("0"); + test_sysctl_sysctl2(); } -static void join_newpid_thread(pid_t pid) +static int sysctl_nested_child(void *arg) { - int wstatus; + int fd; + int pid; - if (waitpid(pid, &wstatus, 0) < 0) { - printf("newpid thread: waitpid() failed: %m\n"); - abort(); - } + printf("%s nested sysctl 0\n", memfd_str); + sysctl_assert_write("0"); + /* A further nested pidns works the same. */ + pid = spawn_thread(CLONE_NEWPID, sysctl_simple_child, NULL); + join_thread(pid); - if (WIFEXITED(wstatus) && WEXITSTATUS(wstatus) != 0) { - printf("newpid thread: exited with non-zero error code %d\n", - WEXITSTATUS(wstatus)); - abort(); - } + printf("%s nested sysctl 1\n", memfd_str); + sysctl_assert_write("1"); + /* Child inherits our setting. */ + pid = spawn_thread(CLONE_NEWPID, sysctl_nested, test_sysctl_sysctl1); + join_thread(pid); + /* Child cannot raise the setting. */ + pid = spawn_thread(CLONE_NEWPID, sysctl_nested, + test_sysctl_sysctl1_failset); + join_thread(pid); + /* Child can lower the setting. 
*/ + pid = spawn_thread(CLONE_NEWPID, sysctl_nested, + test_sysctl_set_sysctl2); + join_thread(pid); + /* Child lowering the setting has no effect on our setting. */ + test_sysctl_sysctl1(); + + printf("%s nested sysctl 2\n", memfd_str); + sysctl_assert_write("2"); + /* Child inherits our setting. */ + pid = spawn_thread(CLONE_NEWPID, sysctl_nested, test_sysctl_sysctl2); + join_thread(pid); + /* Child cannot raise the setting. */ + pid = spawn_thread(CLONE_NEWPID, sysctl_nested, + test_sysctl_sysctl2_failset); + join_thread(pid); + + /* Verify that the rules are actually inherited after fork. */ + printf("%s nested sysctl 0 -> 1 after fork\n", memfd_str); + sysctl_assert_write("0"); - if (WIFSIGNALED(wstatus)) { - printf("newpid thread: killed by signal %d\n", - WTERMSIG(wstatus)); - abort(); - } + pid = spawn_thread(CLONE_NEWPID, sysctl_nested_wait, + test_sysctl_sysctl1_failset); + sysctl_assert_write("1"); + kill(pid, SIGCONT); + join_thread(pid); + + printf("%s nested sysctl 0 -> 2 after fork\n", memfd_str); + sysctl_assert_write("0"); + + pid = spawn_thread(CLONE_NEWPID, sysctl_nested_wait, + test_sysctl_sysctl2_failset); + sysctl_assert_write("2"); + kill(pid, SIGCONT); + join_thread(pid); + + /* + * Verify that the current effective setting is saved on fork, meaning + * that the parent lowering the sysctl doesn't affect already-forked + * children. + */ + printf("%s nested sysctl 2 -> 1 after fork\n", memfd_str); + sysctl_assert_write("2"); + pid = spawn_thread(CLONE_NEWPID, sysctl_nested_wait, + test_sysctl_sysctl2); + sysctl_assert_write("1"); + kill(pid, SIGCONT); + join_thread(pid); + + printf("%s nested sysctl 2 -> 0 after fork\n", memfd_str); + sysctl_assert_write("2"); + pid = spawn_thread(CLONE_NEWPID, sysctl_nested_wait, + test_sysctl_sysctl2); + sysctl_assert_write("0"); + kill(pid, SIGCONT); + join_thread(pid); + + printf("%s nested sysctl 1 -> 0 after fork\n", memfd_str); + sysctl_assert_write("1"); + pid = spawn_thread(CLONE_NEWPID, sysctl_nested_wait, + test_sysctl_sysctl1); + sysctl_assert_write("0"); + kill(pid, SIGCONT); + join_thread(pid); + + return 0; } /* - * Test sysctl - * A very basic sealing test to see whether setting/retrieving seals works. + * Test sysctl with nested pid namespaces + * Make sure that the sysctl nesting semantics work correctly. */ -static void test_sysctl(void) +static void test_sysctl_nested(void) { - int pid = spawn_newpid_thread(CLONE_NEWPID, newpid_thread_fn); + int pid = spawn_thread(CLONE_NEWPID, sysctl_nested_child, NULL); - join_newpid_thread(pid); + join_thread(pid); } /* @@ -1433,6 +1601,9 @@ int main(int argc, char **argv) test_seal_grow(); test_seal_resize(); + test_sysctl_simple(); + test_sysctl_nested(); + test_share_dup("SHARE-DUP", ""); test_share_mmap("SHARE-MMAP", ""); test_share_open("SHARE-OPEN", ""); @@ -1447,8 +1618,6 @@ int main(int argc, char **argv) test_share_fork("SHARE-FORK", SHARED_FT_STR); join_idle_thread(pid); - test_sysctl(); - printf("memfd: DONE\n"); return 0; -- cgit v1.2.3 From daa60ae64c6587e2be2cdf02bca21b59551ee0f6 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Mon, 14 Aug 2023 13:02:08 -0700 Subject: mm,thp: fix smaps THPeligible output alignment Extract from current /proc/self/smaps output: Swap: 0 kB SwapPss: 0 kB Locked: 0 kB THPeligible: 0 ProtectionKey: 0 That's not the alignment shown in Documentation/filesystems/proc.rst: it's an ugly artifact from missing out the %8 other fields are using; but there's even one selftest which expects it to look that way. 
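Parsers that match on the field name rather than on column position are unaffected either way; a small whitespace-tolerant reader, for illustration only, looks like:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/self/smaps", "r");
	char line[256];
	long val;

	if (!f) {
		perror("smaps");
		return 1;
	}
	/* A space in the sscanf format matches any amount of padding. */
	while (fgets(line, sizeof(line), f)) {
		if (sscanf(line, "THPeligible: %ld", &val) == 1)
			printf("THPeligible: %ld\n", val);
	}
	fclose(f);
	return 0;
}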
Hoping no other smaps parsers depend on THPeligible to look so ugly, fix these. Link: https://lkml.kernel.org/r/cfb81f7a-f448-5bc2-b0e1-8136fcd1dd8c@google.com Signed-off-by: Hugh Dickins Cc: Alexey Dobriyan Signed-off-by: Andrew Morton --- tools/testing/selftests/proc/proc-empty-vm.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/proc/proc-empty-vm.c b/tools/testing/selftests/proc/proc-empty-vm.c index 7588428b8fcd..e1052443d070 100644 --- a/tools/testing/selftests/proc/proc-empty-vm.c +++ b/tools/testing/selftests/proc/proc-empty-vm.c @@ -77,7 +77,7 @@ static const char proc_pid_smaps_vsyscall_1[] = "Swap: 0 kB\n" "SwapPss: 0 kB\n" "Locked: 0 kB\n" -"THPeligible: 0\n" +"THPeligible: 0\n" /* * "ProtectionKey:" field is conditional. It is possible to check it as well, * but I don't have such machine. @@ -107,7 +107,7 @@ static const char proc_pid_smaps_vsyscall_2[] = "Swap: 0 kB\n" "SwapPss: 0 kB\n" "Locked: 0 kB\n" -"THPeligible: 0\n" +"THPeligible: 0\n" /* * "ProtectionKey:" field is conditional. It is possible to check it as well, * but I'm too tired. -- cgit v1.2.3 From 42096aa24b82f54d486c501148afb6048e3830a1 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Thu, 3 Aug 2023 16:32:07 +0200 Subject: selftest/mm: ksm_functional_tests: test in mmap_and_merge_range() if anything got merged Let's extend mmap_and_merge_range() to test if anything in the current process was merged. range_maps_duplicates() is too unreliable for that use case, so instead look at KSM stats. Trigger a complete unmerge first, to cleanup the stable tree and stabilize accounting of merged pages. Note that we're using /proc/self/ksm_merging_pages instead of /proc/self/ksm_stat, because that one is available in more existing kernels. If /proc/self/ksm_merging_pages can't be opened, we can't perform any checks and simply skip them. We have to special-case the shared zeropage for now. But the only user -- test_unmerge_zero_pages() -- performs its own merge checks. 
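Stripped of the test plumbing, the accounting read described above boils down to the following sketch, where -1 stands for "file not available, skip the check":

#include <stdio.h>

/* Read how many of the calling process's pages KSM has currently merged. */
static long my_ksm_merging_pages(void)
{
	FILE *f = fopen("/proc/self/ksm_merging_pages", "r");
	long pages;

	if (!f)
		return -1;	/* older kernel: caller should skip the check */
	if (fscanf(f, "%ld", &pages) != 1)
		pages = -1;
	fclose(f);
	return pages;
}

int main(void)
{
	printf("ksm_merging_pages: %ld\n", my_ksm_merging_pages());
	return 0;
}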
Link: https://lkml.kernel.org/r/20230803143208.383663-7-david@redhat.com Signed-off-by: David Hildenbrand Acked-by: Peter Xu Cc: Hugh Dickins Cc: Jason Gunthorpe Cc: John Hubbard Cc: Linus Torvalds Cc: liubo Cc: Matthew Wilcox (Oracle) Cc: Mel Gorman Cc: Mel Gorman Cc: Paolo Bonzini Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/ksm_functional_tests.c | 47 +++++++++++++++++++++++ 1 file changed, 47 insertions(+) (limited to 'tools') diff --git a/tools/testing/selftests/mm/ksm_functional_tests.c b/tools/testing/selftests/mm/ksm_functional_tests.c index 0de9d33cd565..cb63b600cb4f 100644 --- a/tools/testing/selftests/mm/ksm_functional_tests.c +++ b/tools/testing/selftests/mm/ksm_functional_tests.c @@ -30,6 +30,7 @@ static int ksm_fd; static int ksm_full_scans_fd; static int proc_self_ksm_stat_fd; +static int proc_self_ksm_merging_pages_fd; static int ksm_use_zero_pages_fd; static int pagemap_fd; static size_t pagesize; @@ -88,6 +89,22 @@ static long get_my_ksm_zero_pages(void) return my_ksm_zero_pages; } +static long get_my_merging_pages(void) +{ + char buf[10]; + ssize_t ret; + + if (proc_self_ksm_merging_pages_fd < 0) + return proc_self_ksm_merging_pages_fd; + + ret = pread(proc_self_ksm_merging_pages_fd, buf, sizeof(buf) - 1, 0); + if (ret <= 0) + return -errno; + buf[ret] = 0; + + return strtol(buf, NULL, 10); +} + static long ksm_get_full_scans(void) { char buf[10]; @@ -120,11 +137,29 @@ static int ksm_merge(void) return 0; } +static int ksm_unmerge(void) +{ + if (write(ksm_fd, "2", 1) != 1) + return -errno; + return 0; +} + static char *mmap_and_merge_range(char val, unsigned long size, bool use_prctl) { char *map; int ret; + /* Stabilize accounting by disabling KSM completely. */ + if (ksm_unmerge()) { + ksft_test_result_fail("Disabling (unmerging) KSM failed\n"); + goto unmap; + } + + if (get_my_merging_pages() > 0) { + ksft_test_result_fail("Still pages merged\n"); + goto unmap; + } + map = mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, 0); if (map == MAP_FAILED) { @@ -160,6 +195,16 @@ static char *mmap_and_merge_range(char val, unsigned long size, bool use_prctl) ksft_test_result_fail("Running KSM failed\n"); goto unmap; } + + /* + * Check if anything was merged at all. Ignore the zero page that is + * accounted differently (depending on kernel support). + */ + if (val && !get_my_merging_pages()) { + ksft_test_result_fail("No pages got merged\n"); + goto unmap; + } + return map; unmap: munmap(map, size); @@ -473,6 +518,8 @@ int main(int argc, char **argv) if (pagemap_fd < 0) ksft_exit_skip("open(\"/proc/self/pagemap\") failed\n"); proc_self_ksm_stat_fd = open("/proc/self/ksm_stat", O_RDONLY); + proc_self_ksm_merging_pages_fd = open("/proc/self/ksm_merging_pages", + O_RDONLY); ksm_use_zero_pages_fd = open("/sys/kernel/mm/ksm/use_zero_pages", O_RDWR); test_unmerge(); -- cgit v1.2.3 From e5013f11c6c92f58134418d7caf3098c13413c4c Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Thu, 3 Aug 2023 16:32:08 +0200 Subject: selftest/mm: ksm_functional_tests: Add PROT_NONE test Let's test whether merging and unmerging in PROT_NONE areas works as expected. Pass a page protection to mmap_and_merge_range(), which will trigger an mprotect() after writing to the pages, but before enabling merging. Make sure that unsharing works as expected, by performing a ptrace write (using /proc/self/mem) and by setting MADV_UNMERGEABLE. Note that this implicitly tests that ptrace writes in an inaccessible (PROT_NONE) mapping work as expected. 
[david@redhat.com: use sizeof(i) in test_prot_none(), per Peter] Link: https://lkml.kernel.org/r/e9cdb144-70c7-6596-2377-e675635c94e0@redhat.com Link: https://lkml.kernel.org/r/20230803143208.383663-8-david@redhat.com Signed-off-by: David Hildenbrand Acked-by: Peter Xu Cc: Hugh Dickins Cc: Jason Gunthorpe Cc: John Hubbard Cc: Linus Torvalds Cc: liubo Cc: Matthew Wilcox (Oracle) Cc: Mel Gorman Cc: Mel Gorman Cc: Paolo Bonzini Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/ksm_functional_tests.c | 59 ++++++++++++++++++++--- 1 file changed, 52 insertions(+), 7 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/mm/ksm_functional_tests.c b/tools/testing/selftests/mm/ksm_functional_tests.c index cb63b600cb4f..901e950f9138 100644 --- a/tools/testing/selftests/mm/ksm_functional_tests.c +++ b/tools/testing/selftests/mm/ksm_functional_tests.c @@ -27,6 +27,7 @@ #define KiB 1024u #define MiB (1024 * KiB) +static int mem_fd; static int ksm_fd; static int ksm_full_scans_fd; static int proc_self_ksm_stat_fd; @@ -144,7 +145,8 @@ static int ksm_unmerge(void) return 0; } -static char *mmap_and_merge_range(char val, unsigned long size, bool use_prctl) +static char *mmap_and_merge_range(char val, unsigned long size, int prot, + bool use_prctl) { char *map; int ret; @@ -176,6 +178,11 @@ static char *mmap_and_merge_range(char val, unsigned long size, bool use_prctl) /* Make sure each page contains the same values to merge them. */ memset(map, val, size); + if (mprotect(map, size, prot)) { + ksft_test_result_skip("mprotect() failed\n"); + goto unmap; + } + if (use_prctl) { ret = prctl(PR_SET_MEMORY_MERGE, 1, 0, 0, 0); if (ret < 0 && errno == EINVAL) { @@ -218,7 +225,7 @@ static void test_unmerge(void) ksft_print_msg("[RUN] %s\n", __func__); - map = mmap_and_merge_range(0xcf, size, false); + map = mmap_and_merge_range(0xcf, size, PROT_READ | PROT_WRITE, false); if (map == MAP_FAILED) return; @@ -256,7 +263,7 @@ static void test_unmerge_zero_pages(void) } /* Let KSM deduplicate zero pages. 
*/ - map = mmap_and_merge_range(0x00, size, false); + map = mmap_and_merge_range(0x00, size, PROT_READ | PROT_WRITE, false); if (map == MAP_FAILED) return; @@ -304,7 +311,7 @@ static void test_unmerge_discarded(void) ksft_print_msg("[RUN] %s\n", __func__); - map = mmap_and_merge_range(0xcf, size, false); + map = mmap_and_merge_range(0xcf, size, PROT_READ | PROT_WRITE, false); if (map == MAP_FAILED) return; @@ -336,7 +343,7 @@ static void test_unmerge_uffd_wp(void) ksft_print_msg("[RUN] %s\n", __func__); - map = mmap_and_merge_range(0xcf, size, false); + map = mmap_and_merge_range(0xcf, size, PROT_READ | PROT_WRITE, false); if (map == MAP_FAILED) return; @@ -479,7 +486,7 @@ static void test_prctl_unmerge(void) ksft_print_msg("[RUN] %s\n", __func__); - map = mmap_and_merge_range(0xcf, size, true); + map = mmap_and_merge_range(0xcf, size, PROT_READ | PROT_WRITE, true); if (map == MAP_FAILED) return; @@ -494,9 +501,42 @@ unmap: munmap(map, size); } +static void test_prot_none(void) +{ + const unsigned int size = 2 * MiB; + char *map; + int i; + + ksft_print_msg("[RUN] %s\n", __func__); + + map = mmap_and_merge_range(0x11, size, PROT_NONE, false); + if (map == MAP_FAILED) + goto unmap; + + /* Store a unique value in each page on one half using ptrace */ + for (i = 0; i < size / 2; i += pagesize) { + lseek(mem_fd, (uintptr_t) map + i, SEEK_SET); + if (write(mem_fd, &i, sizeof(i)) != sizeof(i)) { + ksft_test_result_fail("ptrace write failed\n"); + goto unmap; + } + } + + /* Trigger unsharing on the other half. */ + if (madvise(map + size / 2, size / 2, MADV_UNMERGEABLE)) { + ksft_test_result_fail("MADV_UNMERGEABLE failed\n"); + goto unmap; + } + + ksft_test_result(!range_maps_duplicates(map, size), + "Pages were unmerged\n"); +unmap: + munmap(map, size); +} + int main(int argc, char **argv) { - unsigned int tests = 6; + unsigned int tests = 7; int err; #ifdef __NR_userfaultfd @@ -508,6 +548,9 @@ int main(int argc, char **argv) pagesize = getpagesize(); + mem_fd = open("/proc/self/mem", O_RDWR); + if (mem_fd < 0) + ksft_exit_fail_msg("opening /proc/self/mem failed\n"); ksm_fd = open("/sys/kernel/mm/ksm/run", O_RDWR); if (ksm_fd < 0) ksft_exit_skip("open(\"/sys/kernel/mm/ksm/run\") failed\n"); @@ -529,6 +572,8 @@ int main(int argc, char **argv) test_unmerge_uffd_wp(); #endif + test_prot_none(); + test_prctl(); test_prctl_fork(); test_prctl_unmerge(); -- cgit v1.2.3 From f9bff0e31881d03badf191d3b0005839391f5f2b Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 2 Aug 2023 16:13:29 +0100 Subject: minmax: add in_range() macro Patch series "New page table range API", v6. This patchset changes the API used by the MM to set up page table entries. The four APIs are: set_ptes(mm, addr, ptep, pte, nr) update_mmu_cache_range(vma, addr, ptep, nr) flush_dcache_folio(folio) flush_icache_pages(vma, page, nr) flush_dcache_folio() isn't technically new, but no architecture implemented it, so I've done that for them. The old APIs remain around but are mostly implemented by calling the new interfaces. The new APIs are based around setting up N page table entries at once. The N entries belong to the same PMD, the same folio and the same VMA, so ptep++ is a legitimate operation, and locking is taken care of for you. Some architectures can do a better job of it than just a loop, but I have hesitated to make too deep a change to architectures I don't understand well. 
One thing I have changed in every architecture is that PG_arch_1 is now a per-folio bit instead of a per-page bit when used for dcache clean/dirty tracking. This was something that would have to happen eventually, and it makes sense to do it now rather than iterate over every page involved in a cache flush and figure out if it needs to happen. The point of all this is better performance, and Fengwei Yin has measured improvement on x86. I suspect you'll see improvement on your architecture too. Try the new will-it-scale test mentioned here: https://lore.kernel.org/linux-mm/20230206140639.538867-5-fengwei.yin@intel.com/ You'll need to run it on an XFS filesystem and have CONFIG_TRANSPARENT_HUGEPAGE set. This patchset is the basis for much of the anonymous large folio work being done by Ryan, so it's received quite a lot of testing over the last few months. This patch (of 38): Determine if a value lies within a range more efficiently (subtraction + comparison vs two comparisons and an AND). It also has useful (under some circumstances) behaviour if the range exceeds the maximum value of the type. Convert all the conflicting definitions of in_range() within the kernel; some can use the generic definition while others need their own definition. Link: https://lkml.kernel.org/r/20230802151406.3735276-1-willy@infradead.org Link: https://lkml.kernel.org/r/20230802151406.3735276-2-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton --- tools/testing/selftests/bpf/progs/get_branch_snapshot.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/progs/get_branch_snapshot.c b/tools/testing/selftests/bpf/progs/get_branch_snapshot.c index a1b139888048..511ac634eef0 100644 --- a/tools/testing/selftests/bpf/progs/get_branch_snapshot.c +++ b/tools/testing/selftests/bpf/progs/get_branch_snapshot.c @@ -15,7 +15,7 @@ long total_entries = 0; #define ENTRY_CNT 32 struct perf_branch_entry entries[ENTRY_CNT] = {}; -static inline bool in_range(__u64 val) +static inline bool gbs_in_range(__u64 val) { return (val >= address_low) && (val < address_high); } @@ -31,7 +31,7 @@ int BPF_PROG(test1, int n, int ret) for (i = 0; i < ENTRY_CNT; i++) { if (i >= total_entries) break; - if (in_range(entries[i].from) && in_range(entries[i].to)) + if (gbs_in_range(entries[i].from) && gbs_in_range(entries[i].to)) test1_hits++; else if (!test1_hits) wasted_entries++; -- cgit v1.2.3 From 7131fd7e30b20c3de03ca5d674546675dfffce5f Mon Sep 17 00:00:00 2001 From: Lucas Karpinski Date: Thu, 17 Aug 2023 15:57:48 -0400 Subject: selftests: cgroup: fix test_kmem_memcg_deletion kernel mem check Currently, not all kernel memory usage is being accounted for. This commit switches to using the kernel entry within memory.stat which already includes kernel_stack, pagetables, and slab. The kernel entry also includes vmalloc and other additional kernel memory use cases which were missing. 
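A minimal user-space sketch of the subtraction-plus-comparison idea described in the in_range() changelog above (the generic helper itself lives outside tools/ and is not part of the diff shown here). The function names and the wraparound demo below are illustrative assumptions, not the kernel's actual macro:

/*
 * Sketch only: for unsigned arithmetic, "val - start < len" needs one
 * subtraction and one comparison, whereas the naive form needs two
 * comparisons plus an AND, and the subtraction form keeps working when
 * start + len would overflow the type.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool in_range_naive(uint64_t val, uint64_t start, uint64_t len)
{
	return val >= start && val < start + len;	/* start + len may wrap */
}

static bool in_range_sub(uint64_t val, uint64_t start, uint64_t len)
{
	return val - start < len;			/* one sub, one compare */
}

int main(void)
{
	/* A range whose end wraps past UINT64_MAX. */
	uint64_t start = UINT64_MAX - 1, len = 4;

	printf("naive: %d  sub: %d\n",
	       in_range_naive(UINT64_MAX, start, len),	/* 0: start+len wrapped */
	       in_range_sub(UINT64_MAX, start, len));	/* 1: still in range */
	return 0;
}
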
Link: https://lkml.kernel.org/r/bvrhe2tpsts2azaroq4ubp2slawmop6orndsswrewuscw3ugvk@kmemmrttsnc7 Signed-off-by: Lucas Karpinski Acked-by: Shakeel Butt Acked-by: Roman Gushchin Cc: Johannes Weiner Cc: Michal Hocko Cc: Muchun Song Cc: Shuah Khan Cc: Tejun Heo Cc: Zefan Li Signed-off-by: Andrew Morton --- tools/testing/selftests/cgroup/test_kmem.c | 21 +++++++-------------- 1 file changed, 7 insertions(+), 14 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/cgroup/test_kmem.c b/tools/testing/selftests/cgroup/test_kmem.c index ed2e50bb1e76..c82f974b85c9 100644 --- a/tools/testing/selftests/cgroup/test_kmem.c +++ b/tools/testing/selftests/cgroup/test_kmem.c @@ -162,11 +162,11 @@ static int cg_run_in_subcgroups(const char *parent, * allocates some slab memory (mostly negative dentries) using 2 * NR_CPUS * threads. Then it checks the sanity of numbers on the parent level: * the total size of the cgroups should be roughly equal to - * anon + file + slab + kernel_stack. + * anon + file + kernel + sock. */ static int test_kmem_memcg_deletion(const char *root) { - long current, slab, anon, file, kernel_stack, pagetables, percpu, sock, sum; + long current, anon, file, kernel, sock, sum; int ret = KSFT_FAIL; char *parent; @@ -184,29 +184,22 @@ static int test_kmem_memcg_deletion(const char *root) goto cleanup; current = cg_read_long(parent, "memory.current"); - slab = cg_read_key_long(parent, "memory.stat", "slab "); anon = cg_read_key_long(parent, "memory.stat", "anon "); file = cg_read_key_long(parent, "memory.stat", "file "); - kernel_stack = cg_read_key_long(parent, "memory.stat", "kernel_stack "); - pagetables = cg_read_key_long(parent, "memory.stat", "pagetables "); - percpu = cg_read_key_long(parent, "memory.stat", "percpu "); + kernel = cg_read_key_long(parent, "memory.stat", "kernel "); sock = cg_read_key_long(parent, "memory.stat", "sock "); - if (current < 0 || slab < 0 || anon < 0 || file < 0 || - kernel_stack < 0 || pagetables < 0 || percpu < 0 || sock < 0) + if (current < 0 || anon < 0 || file < 0 || kernel < 0 || sock < 0) goto cleanup; - sum = slab + anon + file + kernel_stack + pagetables + percpu + sock; + sum = anon + file + kernel + sock; if (abs(sum - current) < MAX_VMSTAT_ERROR) { ret = KSFT_PASS; } else { printf("memory.current = %ld\n", current); - printf("slab + anon + file + kernel_stack = %ld\n", sum); - printf("slab = %ld\n", slab); + printf("anon + file + kernel + sock = %ld\n", sum); printf("anon = %ld\n", anon); printf("file = %ld\n", file); - printf("kernel_stack = %ld\n", kernel_stack); - printf("pagetables = %ld\n", pagetables); - printf("percpu = %ld\n", percpu); + printf("kernel = %ld\n", kernel); printf("sock = %ld\n", sock); } -- cgit v1.2.3 From bad5a3a42a31859705baf39c4fc92173b2716760 Mon Sep 17 00:00:00 2001 From: Anh Tuan Phan Date: Thu, 17 Aug 2023 23:00:33 +0700 Subject: selftests/mm: fix WARNING comparing pointer to 0 Remove comparing pointer to 0 to avoid this warning from coccinelle: ./tools/testing/selftests/mm/map_populate.c:80:16-17: WARNING comparing pointer to 0, suggest !E ./tools/testing/selftests/mm/map_populate.c:80:16-17: WARNING comparing pointer to 0 Link: https://lkml.kernel.org/r/20230817160033.90079-1-tuananhlfc@gmail.com Signed-off-by: Anh Tuan Phan Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/map_populate.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'tools') diff --git a/tools/testing/selftests/mm/map_populate.c b/tools/testing/selftests/mm/map_populate.c index 
240f2d9dae7a..7945d0754875 100644 --- a/tools/testing/selftests/mm/map_populate.c +++ b/tools/testing/selftests/mm/map_populate.c @@ -77,7 +77,7 @@ int main(int argc, char **argv) unsigned long *smap; ftmp = tmpfile(); - BUG_ON(ftmp == 0, "tmpfile()"); + BUG_ON(!ftmp, "tmpfile()"); ret = ftruncate(fileno(ftmp), MMAP_SZ); BUG_ON(ret, "ftruncate()"); -- cgit v1.2.3
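
As a self-contained illustration of the pointer check the map_populate patch above settles on, the snippet below uses a simplified stand-in for the selftest's BUG_ON() macro (an assumption, not the actual helper) to show that "!ftmp" and a literal-0 comparison test the same condition; coccinelle only objects to spelling the null pointer as 0:

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for the selftest's BUG_ON(), for illustration only. */
#define BUG_ON(condition, description)					\
	do {								\
		if (condition) {					\
			fprintf(stderr, "[FAIL]\t%s\n", description);	\
			exit(EXIT_FAILURE);				\
		}							\
	} while (0)

int main(void)
{
	FILE *ftmp = tmpfile();

	/* Equivalent to "ftmp == NULL"; avoids comparing a pointer to 0. */
	BUG_ON(!ftmp, "tmpfile()");

	fclose(ftmp);
	return 0;
}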