Diffstat (limited to 'lib')
-rw-r--r-- lib/Kconfig | 3
-rw-r--r-- lib/Kconfig.debug | 45
-rw-r--r-- lib/Kconfig.kasan | 2
-rw-r--r-- lib/Kconfig.kgdb | 2
-rw-r--r-- lib/Kconfig.ubsan | 28
-rw-r--r-- lib/Makefile | 10
-rw-r--r-- lib/assoc_array.c | 2
-rw-r--r-- lib/bitmap.c | 7
-rw-r--r-- lib/buildid.c | 6
-rw-r--r-- lib/checksum_kunit.c | 17
-rw-r--r-- lib/cmdline_kunit.c | 2
-rw-r--r-- lib/devres.c | 208
-rw-r--r-- lib/dhry_1.c | 2
-rw-r--r-- lib/dhry_run.c | 1
-rw-r--r-- lib/dump_stack.c | 16
-rw-r--r-- lib/dynamic_debug.c | 7
-rw-r--r-- lib/dynamic_queue_limits.c | 74
-rw-r--r-- lib/flex_proportions.c | 77
-rw-r--r-- lib/fonts/Kconfig | 3
-rw-r--r-- lib/fortify_kunit.c | 662
-rw-r--r-- lib/fw_table.c | 15
-rw-r--r-- lib/generic-radix-tree.c | 35
-rw-r--r-- lib/iov_iter.c | 83
-rw-r--r-- lib/kunit/device.c | 6
-rw-r--r-- lib/kunit/executor.c | 6
-rw-r--r-- lib/kunit/executor_test.c | 2
-rw-r--r-- lib/livepatch/Makefile | 14
-rw-r--r-- lib/livepatch/test_klp_atomic_replace.c | 57
-rw-r--r-- lib/livepatch/test_klp_callbacks_busy.c | 70
-rw-r--r-- lib/livepatch/test_klp_callbacks_demo.c | 121
-rw-r--r-- lib/livepatch/test_klp_callbacks_demo2.c | 93
-rw-r--r-- lib/livepatch/test_klp_callbacks_mod.c | 24
-rw-r--r-- lib/livepatch/test_klp_livepatch.c | 51
-rw-r--r-- lib/livepatch/test_klp_shadow_vars.c | 301
-rw-r--r-- lib/livepatch/test_klp_state.c | 162
-rw-r--r-- lib/livepatch/test_klp_state2.c | 191
-rw-r--r-- lib/livepatch/test_klp_state3.c | 5
-rw-r--r-- lib/maple_tree.c | 99
-rw-r--r-- lib/math/div64.c | 15
-rw-r--r-- lib/memcpy_kunit.c | 4
-rw-r--r-- lib/nlattr.c | 4
-rw-r--r-- lib/overflow_kunit.c | 67
-rw-r--r-- lib/pci_iomap.c | 180
-rw-r--r-- lib/raid6/Makefile | 2
-rw-r--r-- lib/raid6/s390vx.uc | 62
-rw-r--r-- lib/sort.c | 20
-rw-r--r-- lib/stackdepot.c | 313
-rw-r--r-- lib/stackinit_kunit.c | 21
-rw-r--r-- lib/string.c | 23
-rw-r--r-- lib/string_helpers.c | 89
-rw-r--r-- lib/string_helpers_kunit.c (renamed from lib/test-string_helpers.c) | 255
-rw-r--r-- lib/string_kunit.c | 199
-rw-r--r-- lib/test_bitmap.c | 42
-rw-r--r-- lib/test_blackhole_dev.c | 3
-rw-r--r-- lib/test_kmod.c | 6
-rw-r--r-- lib/test_maple_tree.c | 44
-rw-r--r-- lib/test_string.c | 257
-rw-r--r-- lib/test_ubsan.c | 41
-rw-r--r-- lib/test_vmalloc.c | 11
-rw-r--r-- lib/test_xarray.c | 230
-rw-r--r-- lib/ubsan.c | 68
-rw-r--r-- lib/ubsan.h | 4
62 files changed, 2062 insertions(+), 2407 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index 5ddda7c2ed9b..4557bb8a5256 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -70,9 +70,6 @@ source "lib/math/Kconfig"
config NO_GENERIC_PCI_IOPORT_MAP
bool
-config GENERIC_PCI_IOMAP
- bool
-
config GENERIC_IOMAP
bool
select GENERIC_PCI_IOMAP
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 975a07f9f1cc..733ee2ac0138 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1303,7 +1303,7 @@ config PROVE_LOCKING
select DEBUG_SPINLOCK
select DEBUG_MUTEXES if !PREEMPT_RT
select DEBUG_RT_MUTEXES if RT_MUTEXES
- select DEBUG_RWSEMS
+ select DEBUG_RWSEMS if !PREEMPT_RT
select DEBUG_WW_MUTEX_SLOWPATH
select DEBUG_LOCK_ALLOC
select PREEMPT_COUNT if !ARCH_NO_PREEMPT
@@ -1426,7 +1426,7 @@ config DEBUG_WW_MUTEX_SLOWPATH
config DEBUG_RWSEMS
bool "RW Semaphore debugging: basic checks"
- depends on DEBUG_KERNEL
+ depends on DEBUG_KERNEL && !PREEMPT_RT
help
This debugging feature allows mismatched rw semaphore locks
and unlocks to be detected and reported.
@@ -2085,7 +2085,7 @@ config KCOV
depends on ARCH_HAS_KCOV
depends on CC_HAS_SANCOV_TRACE_PC || GCC_PLUGINS
depends on !ARCH_WANTS_NO_INSTR || HAVE_NOINSTR_HACK || \
- GCC_VERSION >= 120000 || CLANG_VERSION >= 130000
+ GCC_VERSION >= 120000 || CC_IS_CLANG
select DEBUG_FS
select GCC_PLUGIN_SANCOV if !CC_HAS_SANCOV_TRACE_PC
select OBJTOOL if HAVE_NOINSTR_HACK
@@ -2142,7 +2142,7 @@ config TEST_DHRY
To run the benchmark, it needs to be enabled explicitly, either from
the kernel command line (when built-in), or from userspace (when
- built-in or modular.
+ built-in or modular).
Run once during kernel boot:
@@ -2235,6 +2235,7 @@ config TEST_DIV64
config TEST_IOV_ITER
tristate "Test iov_iter operation" if !KUNIT_ALL_TESTS
depends on KUNIT
+ depends on MMU
default KUNIT_ALL_TESTS
help
Enable this to turn on testing of the operation of the I/O iterator
@@ -2352,11 +2353,15 @@ config ASYNC_RAID6_TEST
config TEST_HEXDUMP
tristate "Test functions located in the hexdump module at runtime"
-config STRING_SELFTEST
- tristate "Test string functions at runtime"
+config STRING_KUNIT_TEST
+ tristate "KUnit test string functions at runtime" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
-config TEST_STRING_HELPERS
- tristate "Test functions located in the string_helpers module at runtime"
+config STRING_HELPERS_KUNIT_TEST
+ tristate "KUnit test string helpers at runtime" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
config TEST_KSTRTOX
tristate "Test kstrto*() family of functions at runtime"
@@ -2748,7 +2753,7 @@ config STACKINIT_KUNIT_TEST
config FORTIFY_KUNIT_TEST
tristate "Test fortified str*() and mem*() function internals at runtime" if !KUNIT_ALL_TESTS
- depends on KUNIT && FORTIFY_SOURCE
+ depends on KUNIT
default KUNIT_ALL_TESTS
help
Builds unit tests for checking internals of FORTIFY_SOURCE as used
@@ -2857,28 +2862,6 @@ config TEST_MEMCAT_P
If unsure, say N.
-config TEST_LIVEPATCH
- tristate "Test livepatching"
- default n
- depends on DYNAMIC_DEBUG
- depends on LIVEPATCH
- depends on m
- help
- Test kernel livepatching features for correctness. The tests will
- load test modules that will be livepatched in various scenarios.
-
- To run all the livepatching tests:
-
- make -C tools/testing/selftests TARGETS=livepatch run_tests
-
- Alternatively, individual tests may be invoked:
-
- tools/testing/selftests/livepatch/test-callbacks.sh
- tools/testing/selftests/livepatch/test-livepatch.sh
- tools/testing/selftests/livepatch/test-shadow-vars.sh
-
- If unsure, say N.
-
config TEST_OBJAGG
tristate "Perform selftest on object aggreration manager"
default n
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index e6eda054ab27..98016e137b7f 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -158,7 +158,7 @@ config KASAN_STACK
out-of-bounds bugs in stack variables.
With Clang, stack instrumentation has a problem that causes excessive
- stack usage, see https://bugs.llvm.org/show_bug.cgi?id=38809. Thus,
+ stack usage, see https://llvm.org/pr38809. Thus,
with Clang, this option is deemed unsafe.
This option is always disabled when compile-testing with Clang to
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
index 3b9a44008433..b5c0e6576749 100644
--- a/lib/Kconfig.kgdb
+++ b/lib/Kconfig.kgdb
@@ -43,7 +43,7 @@ config KGDB_SERIAL_CONSOLE
tristate "KGDB: use kgdb over the serial console"
select CONSOLE_POLL
select MAGIC_SYSRQ
- depends on TTY && HW_CONSOLE
+ depends on TTY && VT
default y
help
Share a serial console with kgdb. Sysrq-g must be used
diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan
index 59e21bfec188..48a67058f84e 100644
--- a/lib/Kconfig.ubsan
+++ b/lib/Kconfig.ubsan
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
-config ARCH_HAS_UBSAN_SANITIZE_ALL
+config ARCH_HAS_UBSAN
bool
menuconfig UBSAN
@@ -87,7 +87,6 @@ config UBSAN_LOCAL_BOUNDS
config UBSAN_SHIFT
bool "Perform checking for bit-shift overflows"
- default UBSAN
depends on $(cc-option,-fsanitize=shift)
help
This option enables -fsanitize=shift which checks for bit-shift
@@ -116,6 +115,20 @@ config UBSAN_UNREACHABLE
This option enables -fsanitize=unreachable which checks for control
flow reaching an expected-to-be-unreachable position.
+config UBSAN_SIGNED_WRAP
+ bool "Perform checking for signed arithmetic wrap-around"
+ default UBSAN
+ depends on !COMPILE_TEST
+ depends on $(cc-option,-fsanitize=signed-integer-overflow)
+ help
+ This option enables -fsanitize=signed-integer-overflow which checks
+ for wrap-around of any arithmetic operations with signed integers.
+ This currently performs nearly no instrumentation due to the
+ kernel's use of -fno-strict-overflow which converts all would-be
+ arithmetic undefined behavior into wrap-around arithmetic. Future
+ sanitizer versions will allow for wrap-around checking (rather than
+ exclusively undefined behavior).
+
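A user-space sketch (not part of this patch) of the wrap-around that -fsanitize=signed-integer-overflow reports. As the help text notes, the kernel builds with -fno-strict-overflow, so in-kernel instrumentation stays nearly empty until sanitizers grow a dedicated wrap-around mode:

/* Hypothetical demo; build with:
 *   gcc -fsanitize=signed-integer-overflow wrap.c && ./a.out
 */
#include <limits.h>
#include <stdio.h>

int main(void)
{
	int a = INT_MAX;

	a += 1;	/* UBSan: signed integer overflow: 2147483647 + 1 */
	printf("%d\n", a);
	return 0;
}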
config UBSAN_BOOL
bool "Perform checking for non-boolean values used as boolean"
default UBSAN
@@ -142,17 +155,6 @@ config UBSAN_ALIGNMENT
Enabling this option on architectures that support unaligned
accesses may produce a lot of false positives.
-config UBSAN_SANITIZE_ALL
- bool "Enable instrumentation for the entire kernel"
- depends on ARCH_HAS_UBSAN_SANITIZE_ALL
- default y
- help
- This option activates instrumentation for the entire kernel.
- If you don't enable this option, you have to explicitly specify
- UBSAN_SANITIZE := y for the files/directories you want to check for UB.
- Enabling this option will get kernel image size increased
- significantly.
-
config TEST_UBSAN
tristate "Module for testing for undefined behavior detection"
depends on m
diff --git a/lib/Makefile b/lib/Makefile
index 6b09731d8e61..ffc6b2341b45 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -49,9 +49,9 @@ obj-y += bcd.o sort.o parser.o debug_locks.o random32.o \
percpu-refcount.o rhashtable.o base64.o \
once.o refcount.o rcuref.o usercopy.o errseq.o bucket_locks.o \
generic-radix-tree.o bitmap-str.o
-obj-$(CONFIG_STRING_SELFTEST) += test_string.o
+obj-$(CONFIG_STRING_KUNIT_TEST) += string_kunit.o
obj-y += string_helpers.o
-obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
+obj-$(CONFIG_STRING_HELPERS_KUNIT_TEST) += string_helpers_kunit.o
obj-y += hexdump.o
obj-$(CONFIG_TEST_HEXDUMP) += test_hexdump.o
obj-y += kstrtox.o
@@ -69,6 +69,7 @@ obj-$(CONFIG_HASH_KUNIT_TEST) += test_hash.o
obj-$(CONFIG_TEST_IDA) += test_ida.o
obj-$(CONFIG_TEST_UBSAN) += test_ubsan.o
CFLAGS_test_ubsan.o += $(call cc-disable-warning, vla)
+CFLAGS_test_ubsan.o += $(call cc-disable-warning, unused-but-set-variable)
UBSAN_SANITIZE_test_ubsan.o := y
obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
obj-$(CONFIG_TEST_LIST_SORT) += test_list_sort.o
@@ -134,8 +135,6 @@ endif
obj-$(CONFIG_TEST_FPU) += test_fpu.o
CFLAGS_test_fpu.o += $(FPU_CFLAGS)
-obj-$(CONFIG_TEST_LIVEPATCH) += livepatch/
-
# Some KUnit files (hooks.o) need to be built-in even when KUnit is a module,
# so we can't just use obj-$(CONFIG_KUNIT).
ifdef CONFIG_KUNIT
@@ -153,7 +152,6 @@ CFLAGS_debug_info.o += $(call cc-option, -femit-struct-debug-detailed=any)
obj-y += math/ crypto/
obj-$(CONFIG_GENERIC_IOMAP) += iomap.o
-obj-$(CONFIG_GENERIC_PCI_IOMAP) += pci_iomap.o
obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o
obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o
obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
@@ -401,6 +399,8 @@ obj-$(CONFIG_OVERFLOW_KUNIT_TEST) += overflow_kunit.o
CFLAGS_stackinit_kunit.o += $(call cc-disable-warning, switch-unreachable)
obj-$(CONFIG_STACKINIT_KUNIT_TEST) += stackinit_kunit.o
CFLAGS_fortify_kunit.o += $(call cc-disable-warning, unsequenced)
+CFLAGS_fortify_kunit.o += $(call cc-disable-warning, stringop-overread)
+CFLAGS_fortify_kunit.o += $(call cc-disable-warning, stringop-truncation)
CFLAGS_fortify_kunit.o += $(DISABLE_STRUCTLEAK_PLUGIN)
obj-$(CONFIG_FORTIFY_KUNIT_TEST) += fortify_kunit.o
obj-$(CONFIG_STRCAT_KUNIT_TEST) += strcat_kunit.o
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index ca0b4f360c1a..388e656ac974 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -938,7 +938,7 @@ static bool assoc_array_insert_mid_shortcut(struct assoc_array_edit *edit,
edit->leaf_p = &new_n0->slots[0];
pr_devel("<--%s() = ok [split shortcut]\n", __func__);
- return edit;
+ return true;
}
/**
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 09522af227f1..b97692854966 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -348,6 +348,13 @@ unsigned int __bitmap_weight_and(const unsigned long *bitmap1,
}
EXPORT_SYMBOL(__bitmap_weight_and);
+unsigned int __bitmap_weight_andnot(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int bits)
+{
+ return BITMAP_WEIGHT(bitmap1[idx] & ~bitmap2[idx], bits);
+}
+EXPORT_SYMBOL(__bitmap_weight_andnot);
+
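A usage sketch for the new helper, which weighs the bits set in the first bitmap but clear in the second:

/* Sketch: count candidate CPUs that are not currently busy. */
static unsigned int count_free_cpus(void)
{
	DECLARE_BITMAP(candidates, 128);
	DECLARE_BITMAP(busy, 128);

	bitmap_fill(candidates, 128);
	bitmap_zero(busy, 128);
	__set_bit(3, busy);

	return __bitmap_weight_andnot(candidates, busy, 128); /* 127 */
}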
void __bitmap_set(unsigned long *map, unsigned int start, int len)
{
unsigned long *p = map + BIT_WORD(start);
diff --git a/lib/buildid.c b/lib/buildid.c
index e3a7acdeef0e..898301b49eb6 100644
--- a/lib/buildid.c
+++ b/lib/buildid.c
@@ -140,7 +140,7 @@ int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id,
return -EFAULT; /* page not mapped */
ret = -EINVAL;
- page_addr = kmap_atomic(page);
+ page_addr = kmap_local_page(page);
ehdr = (Elf32_Ehdr *)page_addr;
/* compare magic x7f "ELF" */
@@ -156,7 +156,7 @@ int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id,
else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64)
ret = get_build_id_64(page_addr, build_id, size);
out:
- kunmap_atomic(page_addr);
+ kunmap_local(page_addr);
put_page(page);
return ret;
}
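This is the standard kmap_atomic() to kmap_local_page() migration: local mappings may nest, are unmapped in reverse order, and do not disable preemption. The idiom, as a minimal sketch:

/* Minimal sketch of the local-mapping idiom used above. */
static u8 first_byte(struct page *page)
{
	u8 *addr = kmap_local_page(page);
	u8 b = addr[0];

	kunmap_local(addr);	/* pass back the mapped address */
	return b;
}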
@@ -174,7 +174,7 @@ int build_id_parse_buf(const void *buf, unsigned char *build_id, u32 buf_size)
return parse_build_id_buf(build_id, NULL, buf, buf_size);
}
-#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) || IS_ENABLED(CONFIG_CRASH_CORE)
+#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) || IS_ENABLED(CONFIG_VMCORE_INFO)
unsigned char vmlinux_build_id[BUILD_ID_SIZE_MAX] __ro_after_init;
/**
diff --git a/lib/checksum_kunit.c b/lib/checksum_kunit.c
index 225bb7701460..bf70850035c7 100644
--- a/lib/checksum_kunit.c
+++ b/lib/checksum_kunit.c
@@ -215,7 +215,7 @@ static const u32 init_sums_no_overflow[] = {
0xffff0000, 0xfffffffb,
};
-static const __sum16 expected_csum_ipv6_magic[] = {
+static const u16 expected_csum_ipv6_magic[] = {
0x18d4, 0x3085, 0x2e4b, 0xd9f4, 0xbdc8, 0x78f, 0x1034, 0x8422, 0x6fc0,
0xd2f6, 0xbeb5, 0x9d3, 0x7e2a, 0x312e, 0x778e, 0xc1bb, 0x7cf2, 0x9d1e,
0xca21, 0xf3ff, 0x7569, 0xb02e, 0xca86, 0x7e76, 0x4539, 0x45e3, 0xf28d,
@@ -241,7 +241,7 @@ static const __sum16 expected_csum_ipv6_magic[] = {
0x3845, 0x1014
};
-static const __sum16 expected_fast_csum[] = {
+static const u16 expected_fast_csum[] = {
0xda83, 0x45da, 0x4f46, 0x4e4f, 0x34e, 0xe902, 0xa5e9, 0x87a5, 0x7187,
0x5671, 0xf556, 0x6df5, 0x816d, 0x8f81, 0xbb8f, 0xfbba, 0x5afb, 0xbe5a,
0xedbe, 0xabee, 0x6aac, 0xe6b, 0xea0d, 0x67ea, 0x7e68, 0x8a7e, 0x6f8a,
@@ -577,7 +577,8 @@ static void test_csum_no_carry_inputs(struct kunit *test)
static void test_ip_fast_csum(struct kunit *test)
{
- __sum16 csum_result, expected;
+ __sum16 csum_result;
+ u16 expected;
for (int len = IPv4_MIN_WORDS; len < IPv4_MAX_WORDS; len++) {
for (int index = 0; index < NUM_IP_FAST_CSUM_TESTS; index++) {
@@ -586,7 +587,7 @@ static void test_ip_fast_csum(struct kunit *test)
expected_fast_csum[(len - IPv4_MIN_WORDS) *
NUM_IP_FAST_CSUM_TESTS +
index];
- CHECK_EQ(expected, csum_result);
+ CHECK_EQ(to_sum16(expected), csum_result);
}
}
}
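The expected values are now plain u16 and only become the bitwise __sum16 type at the comparison, keeping sparse quiet. to_sum16() is presumably a small __force-cast helper defined earlier in this file, along the lines of:

/* Assumed shape of the helper: an intentional, sparse-visible cast
 * from a plain integer to the bitwise checksum type.
 */
static __sum16 to_sum16(u16 x)
{
	return (__force __sum16)x;
}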
@@ -598,7 +599,7 @@ static void test_csum_ipv6_magic(struct kunit *test)
const struct in6_addr *daddr;
unsigned int len;
unsigned char proto;
- unsigned int csum;
+ __wsum csum;
const int daddr_offset = sizeof(struct in6_addr);
const int len_offset = sizeof(struct in6_addr) + sizeof(struct in6_addr);
@@ -611,10 +612,10 @@ static void test_csum_ipv6_magic(struct kunit *test)
saddr = (const struct in6_addr *)(random_buf + i);
daddr = (const struct in6_addr *)(random_buf + i +
daddr_offset);
- len = *(unsigned int *)(random_buf + i + len_offset);
+ len = le32_to_cpu(*(__le32 *)(random_buf + i + len_offset));
proto = *(random_buf + i + proto_offset);
- csum = *(unsigned int *)(random_buf + i + csum_offset);
- CHECK_EQ(expected_csum_ipv6_magic[i],
+ csum = *(__wsum *)(random_buf + i + csum_offset);
+ CHECK_EQ(to_sum16(expected_csum_ipv6_magic[i]),
csum_ipv6_magic(saddr, daddr, len, proto, csum));
}
#endif /* !CONFIG_NET */
diff --git a/lib/cmdline_kunit.c b/lib/cmdline_kunit.c
index d4572dbc9145..705b82736be0 100644
--- a/lib/cmdline_kunit.c
+++ b/lib/cmdline_kunit.c
@@ -124,7 +124,7 @@ static void cmdline_do_one_range_test(struct kunit *test, const char *in,
n, e[0], r[0]);
p = memchr_inv(&r[1], 0, sizeof(r) - sizeof(r[0]));
- KUNIT_EXPECT_PTR_EQ_MSG(test, p, NULL, "in test %u at %u out of bound", n, p - r);
+ KUNIT_EXPECT_PTR_EQ_MSG(test, p, NULL, "in test %u at %td out of bound", n, p - r);
}
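%td is the conversion for a pointer difference, which has type ptrdiff_t; the old %u triggered a format warning. For example:

static void fmt_demo(void)
{
	int r[8], *p = &r[5];

	printk("at %td out of bound\n", p - r);	/* prints "at 5 ..." */
}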
static void cmdline_test_range(struct kunit *test)
diff --git a/lib/devres.c b/lib/devres.c
index c44f104b58d5..fe0c63caeb68 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/device.h>
#include <linux/err.h>
-#include <linux/pci.h>
#include <linux/io.h>
#include <linux/gfp.h>
#include <linux/export.h>
@@ -311,212 +311,6 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
EXPORT_SYMBOL(devm_ioport_unmap);
#endif /* CONFIG_HAS_IOPORT_MAP */
-#ifdef CONFIG_PCI
-/*
- * PCI iomap devres
- */
-#define PCIM_IOMAP_MAX PCI_STD_NUM_BARS
-
-struct pcim_iomap_devres {
- void __iomem *table[PCIM_IOMAP_MAX];
-};
-
-static void pcim_iomap_release(struct device *gendev, void *res)
-{
- struct pci_dev *dev = to_pci_dev(gendev);
- struct pcim_iomap_devres *this = res;
- int i;
-
- for (i = 0; i < PCIM_IOMAP_MAX; i++)
- if (this->table[i])
- pci_iounmap(dev, this->table[i]);
-}
-
-/**
- * pcim_iomap_table - access iomap allocation table
- * @pdev: PCI device to access iomap table for
- *
- * Access iomap allocation table for @dev. If iomap table doesn't
- * exist and @pdev is managed, it will be allocated. All iomaps
- * recorded in the iomap table are automatically unmapped on driver
- * detach.
- *
- * This function might sleep when the table is first allocated but can
- * be safely called without context and guaranteed to succeed once
- * allocated.
- */
-void __iomem * const *pcim_iomap_table(struct pci_dev *pdev)
-{
- struct pcim_iomap_devres *dr, *new_dr;
-
- dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL);
- if (dr)
- return dr->table;
-
- new_dr = devres_alloc_node(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL,
- dev_to_node(&pdev->dev));
- if (!new_dr)
- return NULL;
- dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
- return dr->table;
-}
-EXPORT_SYMBOL(pcim_iomap_table);
-
-/**
- * pcim_iomap - Managed pcim_iomap()
- * @pdev: PCI device to iomap for
- * @bar: BAR to iomap
- * @maxlen: Maximum length of iomap
- *
- * Managed pci_iomap(). Map is automatically unmapped on driver
- * detach.
- */
-void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
-{
- void __iomem **tbl;
-
- BUG_ON(bar >= PCIM_IOMAP_MAX);
-
- tbl = (void __iomem **)pcim_iomap_table(pdev);
- if (!tbl || tbl[bar]) /* duplicate mappings not allowed */
- return NULL;
-
- tbl[bar] = pci_iomap(pdev, bar, maxlen);
- return tbl[bar];
-}
-EXPORT_SYMBOL(pcim_iomap);
-
-/**
- * pcim_iounmap - Managed pci_iounmap()
- * @pdev: PCI device to iounmap for
- * @addr: Address to unmap
- *
- * Managed pci_iounmap(). @addr must have been mapped using pcim_iomap().
- */
-void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
-{
- void __iomem **tbl;
- int i;
-
- pci_iounmap(pdev, addr);
-
- tbl = (void __iomem **)pcim_iomap_table(pdev);
- BUG_ON(!tbl);
-
- for (i = 0; i < PCIM_IOMAP_MAX; i++)
- if (tbl[i] == addr) {
- tbl[i] = NULL;
- return;
- }
- WARN_ON(1);
-}
-EXPORT_SYMBOL(pcim_iounmap);
-
-/**
- * pcim_iomap_regions - Request and iomap PCI BARs
- * @pdev: PCI device to map IO resources for
- * @mask: Mask of BARs to request and iomap
- * @name: Name used when requesting regions
- *
- * Request and iomap regions specified by @mask.
- */
-int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name)
-{
- void __iomem * const *iomap;
- int i, rc;
-
- iomap = pcim_iomap_table(pdev);
- if (!iomap)
- return -ENOMEM;
-
- for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
- unsigned long len;
-
- if (!(mask & (1 << i)))
- continue;
-
- rc = -EINVAL;
- len = pci_resource_len(pdev, i);
- if (!len)
- goto err_inval;
-
- rc = pci_request_region(pdev, i, name);
- if (rc)
- goto err_inval;
-
- rc = -ENOMEM;
- if (!pcim_iomap(pdev, i, 0))
- goto err_region;
- }
-
- return 0;
-
- err_region:
- pci_release_region(pdev, i);
- err_inval:
- while (--i >= 0) {
- if (!(mask & (1 << i)))
- continue;
- pcim_iounmap(pdev, iomap[i]);
- pci_release_region(pdev, i);
- }
-
- return rc;
-}
-EXPORT_SYMBOL(pcim_iomap_regions);
-
-/**
- * pcim_iomap_regions_request_all - Request all BARs and iomap specified ones
- * @pdev: PCI device to map IO resources for
- * @mask: Mask of BARs to iomap
- * @name: Name used when requesting regions
- *
- * Request all PCI BARs and iomap regions specified by @mask.
- */
-int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask,
- const char *name)
-{
- int request_mask = ((1 << 6) - 1) & ~mask;
- int rc;
-
- rc = pci_request_selected_regions(pdev, request_mask, name);
- if (rc)
- return rc;
-
- rc = pcim_iomap_regions(pdev, mask, name);
- if (rc)
- pci_release_selected_regions(pdev, request_mask);
- return rc;
-}
-EXPORT_SYMBOL(pcim_iomap_regions_request_all);
-
-/**
- * pcim_iounmap_regions - Unmap and release PCI BARs
- * @pdev: PCI device to map IO resources for
- * @mask: Mask of BARs to unmap and release
- *
- * Unmap and release regions specified by @mask.
- */
-void pcim_iounmap_regions(struct pci_dev *pdev, int mask)
-{
- void __iomem * const *iomap;
- int i;
-
- iomap = pcim_iomap_table(pdev);
- if (!iomap)
- return;
-
- for (i = 0; i < PCIM_IOMAP_MAX; i++) {
- if (!(mask & (1 << i)))
- continue;
-
- pcim_iounmap(pdev, iomap[i]);
- pci_release_region(pdev, i);
- }
-}
-EXPORT_SYMBOL(pcim_iounmap_regions);
-#endif /* CONFIG_PCI */
-
static void devm_arch_phys_ac_add_release(struct device *dev, void *res)
{
arch_phys_wc_del(*((int *)res));
diff --git a/lib/dhry_1.c b/lib/dhry_1.c
index 08edbbb19f57..ca6c87232c58 100644
--- a/lib/dhry_1.c
+++ b/lib/dhry_1.c
@@ -277,7 +277,7 @@ int dhry(int n)
dhry_assert_string_eq(Str_1_Loc, "DHRYSTONE PROGRAM, 1'ST STRING");
dhry_assert_string_eq(Str_2_Loc, "DHRYSTONE PROGRAM, 2'ND STRING");
- User_Time = ktime_to_ms(ktime_sub(End_Time, Begin_Time));
+ User_Time = ktime_ms_delta(End_Time, Begin_Time);
kfree(Ptr_Glob);
kfree(Next_Ptr_Glob);
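ktime_ms_delta(later, earlier) is defined as exactly the open-coded form it replaces and returns the difference as s64 milliseconds:

/* Equivalent; the helper states the intent directly. */
s64 ms_open_coded = ktime_to_ms(ktime_sub(End_Time, Begin_Time));
s64 ms_helper = ktime_ms_delta(End_Time, Begin_Time);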
diff --git a/lib/dhry_run.c b/lib/dhry_run.c
index f15ac666e9d3..e6a279dabf84 100644
--- a/lib/dhry_run.c
+++ b/lib/dhry_run.c
@@ -10,7 +10,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/mutex.h>
#include <linux/smp.h>
#define DHRY_VAX 1757
diff --git a/lib/dump_stack.c b/lib/dump_stack.c
index 83471e81501a..222c6d6c8281 100644
--- a/lib/dump_stack.c
+++ b/lib/dump_stack.c
@@ -96,15 +96,25 @@ static void __dump_stack(const char *log_lvl)
*/
asmlinkage __visible void dump_stack_lvl(const char *log_lvl)
{
+ bool in_panic = this_cpu_in_panic();
unsigned long flags;
/*
* Permit this cpu to perform nested stack dumps while serialising
- * against other CPUs
+ * against other CPUs, unless this CPU is in panic.
+ *
+ * When in panic, non-panic CPUs are not permitted to store new
+ * printk messages so there is no need to synchronize the output.
+ * This avoids potential deadlock in panic() if another CPU is
+ * holding and unable to release the printk_cpu_sync.
*/
- printk_cpu_sync_get_irqsave(flags);
+ if (!in_panic)
+ printk_cpu_sync_get_irqsave(flags);
+
__dump_stack(log_lvl);
- printk_cpu_sync_put_irqrestore(flags);
+
+ if (!in_panic)
+ printk_cpu_sync_put_irqrestore(flags);
}
EXPORT_SYMBOL(dump_stack_lvl);
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 6fba6423cc10..c78f335fa981 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -640,10 +640,9 @@ static int param_set_dyndbg_classnames(const char *instr, const struct kernel_pa
int cls_id, totct = 0;
bool wanted;
- cl_str = tmp = kstrdup(instr, GFP_KERNEL);
- p = strchr(cl_str, '\n');
- if (p)
- *p = '\0';
+ cl_str = tmp = kstrdup_and_replace(instr, '\n', '\0', GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
/* start with previously set state-bits, then modify */
curr_bits = old_bits = *dcp->bits;
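kstrdup_and_replace(src, old, new, gfp) duplicates the string and replaces every occurrence of the old character in the copy, folding the previous kstrdup()-then-strchr() dance into one call whose NULL return makes the allocation failure explicit:

/* Sketch: strip a trailing newline from a modparam string. */
char *cl_str = kstrdup_and_replace("foo,bar\n", '\n', '\0', GFP_KERNEL);

if (!cl_str)
	return -ENOMEM;
/* cl_str now reads "foo,bar" to any C-string consumer */
kfree(cl_str);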
diff --git a/lib/dynamic_queue_limits.c b/lib/dynamic_queue_limits.c
index fde0aa244148..a1389db1c30a 100644
--- a/lib/dynamic_queue_limits.c
+++ b/lib/dynamic_queue_limits.c
@@ -10,10 +10,77 @@
#include <linux/dynamic_queue_limits.h>
#include <linux/compiler.h>
#include <linux/export.h>
+#include <trace/events/napi.h>
#define POSDIFF(A, B) ((int)((A) - (B)) > 0 ? (A) - (B) : 0)
#define AFTER_EQ(A, B) ((int)((A) - (B)) >= 0)
+static void dql_check_stall(struct dql *dql)
+{
+ unsigned short stall_thrs;
+ unsigned long now;
+
+ stall_thrs = READ_ONCE(dql->stall_thrs);
+ if (!stall_thrs)
+ return;
+
+ now = jiffies;
+ /* Check for a potential stall */
+ if (time_after_eq(now, dql->last_reap + stall_thrs)) {
+ unsigned long hist_head, t, start, end;
+
+ /* We are trying to detect a period of at least @stall_thrs
+ * jiffies without any Tx completions, during the first half
+ * of which some Tx was posted.
+ */
+dqs_again:
+ hist_head = READ_ONCE(dql->history_head);
+ /* pairs with smp_wmb() in dql_queued() */
+ smp_rmb();
+
+ /* Get the previous entry in the ring buffer, which is the
+ * oldest sample.
+ */
+ start = (hist_head - DQL_HIST_LEN + 1) * BITS_PER_LONG;
+
+ /* Advance start to continue from the last reap time */
+ if (time_before(start, dql->last_reap + 1))
+ start = dql->last_reap + 1;
+
+ /* Newest sample we should have already seen a completion for */
+ end = hist_head * BITS_PER_LONG + (BITS_PER_LONG - 1);
+
+ /* Shrink the search space to [start, (now - stall_thrs/2)] if
+ * `end` is beyond the stall zone
+ */
+ if (time_before(now, end + stall_thrs / 2))
+ end = now - stall_thrs / 2;
+
+ /* Search for the queued time in [start, end] */
+ for (t = start; time_before_eq(t, end); t++)
+ if (test_bit(t % (DQL_HIST_LEN * BITS_PER_LONG),
+ dql->history))
+ break;
+
+ /* Variable t contains the time of the queue */
+ if (!time_before_eq(t, end))
+ goto no_stall;
+
+ /* The ring buffer was modified in the meantime, retry */
+ if (hist_head != READ_ONCE(dql->history_head))
+ goto dqs_again;
+
+ dql->stall_cnt++;
+ dql->stall_max = max_t(unsigned short, dql->stall_max, now - t);
+
+ trace_dql_stall_detected(dql->stall_thrs, now - t,
+ dql->last_reap, dql->history_head,
+ now, dql->history);
+ }
+no_stall:
+ dql->last_reap = now;
+}
+
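For context, a simplified sketch of the enqueue side this reader pairs with (the assumed shape of a helper in the dynamic_queue_limits.h header, not code from this file): each queue event stamps the current jiffy as one bit in the history ring so dql_check_stall() can later find when the still-uncompleted Tx was posted.

/* Simplified sketch; a real implementation must also clear every
 * slot skipped when time jumps more than one word forward. The
 * smp_wmb() pairs with the smp_rmb() in dql_check_stall() above.
 */
static void dql_queue_stamp(struct dql *dql)
{
	unsigned long now = jiffies;
	unsigned long head = now / BITS_PER_LONG;

	if (READ_ONCE(dql->history_head) != head) {
		dql->history[head % DQL_HIST_LEN] = 0;	/* recycle slot */
		WRITE_ONCE(dql->history_head, head);
	}
	smp_wmb();
	__set_bit(now % (DQL_HIST_LEN * BITS_PER_LONG), dql->history);
}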
/* Records completed count and recalculates the queue limit */
void dql_completed(struct dql *dql, unsigned int count)
{
@@ -110,6 +177,8 @@ void dql_completed(struct dql *dql, unsigned int count)
dql->prev_last_obj_cnt = dql->last_obj_cnt;
dql->num_completed = completed;
dql->prev_num_queued = num_queued;
+
+ dql_check_stall(dql);
}
EXPORT_SYMBOL(dql_completed);
@@ -125,6 +194,10 @@ void dql_reset(struct dql *dql)
dql->prev_ovlimit = 0;
dql->lowest_slack = UINT_MAX;
dql->slack_start_time = jiffies;
+
+ dql->last_reap = jiffies;
+ dql->history_head = jiffies / BITS_PER_LONG;
+ memset(dql->history, 0, sizeof(dql->history));
}
EXPORT_SYMBOL(dql_reset);
@@ -133,6 +206,7 @@ void dql_init(struct dql *dql, unsigned int hold_time)
dql->max_limit = DQL_MAX_LIMIT;
dql->min_limit = 0;
dql->slack_hold_time = hold_time;
+ dql->stall_thrs = 0;
dql_reset(dql);
}
EXPORT_SYMBOL(dql_init);
diff --git a/lib/flex_proportions.c b/lib/flex_proportions.c
index 83332fefa6f4..84ecccddc771 100644
--- a/lib/flex_proportions.c
+++ b/lib/flex_proportions.c
@@ -84,83 +84,6 @@ bool fprop_new_period(struct fprop_global *p, int periods)
}
/*
- * ---- SINGLE ----
- */
-
-int fprop_local_init_single(struct fprop_local_single *pl)
-{
- pl->events = 0;
- pl->period = 0;
- raw_spin_lock_init(&pl->lock);
- return 0;
-}
-
-void fprop_local_destroy_single(struct fprop_local_single *pl)
-{
-}
-
-static void fprop_reflect_period_single(struct fprop_global *p,
- struct fprop_local_single *pl)
-{
- unsigned int period = p->period;
- unsigned long flags;
-
- /* Fast path - period didn't change */
- if (pl->period == period)
- return;
- raw_spin_lock_irqsave(&pl->lock, flags);
- /* Someone updated pl->period while we were spinning? */
- if (pl->period >= period) {
- raw_spin_unlock_irqrestore(&pl->lock, flags);
- return;
- }
- /* Aging zeroed our fraction? */
- if (period - pl->period < BITS_PER_LONG)
- pl->events >>= period - pl->period;
- else
- pl->events = 0;
- pl->period = period;
- raw_spin_unlock_irqrestore(&pl->lock, flags);
-}
-
-/* Event of type pl happened */
-void __fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl)
-{
- fprop_reflect_period_single(p, pl);
- pl->events++;
- percpu_counter_add(&p->events, 1);
-}
-
-/* Return fraction of events of type pl */
-void fprop_fraction_single(struct fprop_global *p,
- struct fprop_local_single *pl,
- unsigned long *numerator, unsigned long *denominator)
-{
- unsigned int seq;
- s64 num, den;
-
- do {
- seq = read_seqcount_begin(&p->sequence);
- fprop_reflect_period_single(p, pl);
- num = pl->events;
- den = percpu_counter_read_positive(&p->events);
- } while (read_seqcount_retry(&p->sequence, seq));
-
- /*
- * Make fraction <= 1 and denominator > 0 even in presence of percpu
- * counter errors
- */
- if (den <= num) {
- if (num)
- den = num;
- else
- den = 1;
- }
- *denominator = den;
- *numerator = num;
-}
-
-/*
* ---- PERCPU ----
*/
#define PROP_BATCH (8*(1+ilog2(nr_cpu_ids)))
diff --git a/lib/fonts/Kconfig b/lib/fonts/Kconfig
index 7ee468ef21ec..7e945fdcbf11 100644
--- a/lib/fonts/Kconfig
+++ b/lib/fonts/Kconfig
@@ -98,7 +98,8 @@ config FONT_10x18
config FONT_SUN8x16
bool "Sparc console 8x16 font"
- depends on (FRAMEBUFFER_CONSOLE && (FONTS || SPARC)) || BOOTX_TEXT
+ depends on (FRAMEBUFFER_CONSOLE && (FONTS || SPARC)) || \
+ BOOTX_TEXT || EARLYFB
help
This is the high resolution console font for Sun machines. Say Y.
diff --git a/lib/fortify_kunit.c b/lib/fortify_kunit.c
index 2e4fedc81621..493ec02dd5b3 100644
--- a/lib/fortify_kunit.c
+++ b/lib/fortify_kunit.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Runtime test cases for CONFIG_FORTIFY_SOURCE that aren't expected to
- * Oops the kernel on success. (For those, see drivers/misc/lkdtm/fortify.c)
+ * Runtime test cases for CONFIG_FORTIFY_SOURCE. For testing memcpy(),
+ * see FORTIFY_MEM_* tests in LKDTM (drivers/misc/lkdtm/fortify.c).
*
* For corner cases with UBSAN, try testing with:
*
@@ -15,17 +15,55 @@
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+/* Redefine fortify_panic() to track failures. */
+void fortify_add_kunit_error(int write);
+#define fortify_panic(func, write, avail, size, retfail) do { \
+ __fortify_report(FORTIFY_REASON(func, write), avail, size); \
+ fortify_add_kunit_error(write); \
+ return (retfail); \
+} while (0)
+
#include <kunit/device.h>
#include <kunit/test.h>
+#include <kunit/test-bug.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
+/* Handle being built without CONFIG_FORTIFY_SOURCE */
+#ifndef __compiletime_strlen
+# define __compiletime_strlen __builtin_strlen
+#endif
+
+static struct kunit_resource read_resource;
+static struct kunit_resource write_resource;
+static int fortify_read_overflows;
+static int fortify_write_overflows;
+
static const char array_of_10[] = "this is 10";
static const char *ptr_of_11 = "this is 11!";
static char array_unknown[] = "compiler thinks I might change";
+void fortify_add_kunit_error(int write)
+{
+ struct kunit_resource *resource;
+ struct kunit *current_test;
+
+ current_test = kunit_get_current_test();
+ if (!current_test)
+ return;
+
+ resource = kunit_find_named_resource(current_test,
+ write ? "fortify_write_overflows"
+ : "fortify_read_overflows");
+ if (!resource)
+ return;
+
+ (*(int *)resource->data)++;
+ kunit_put_resource(resource);
+}
+
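The effect of the override above, shown as a hypothetical fortified helper (not the real fortify-string.h code): on overflow the failure is reported, the matching per-test counter is bumped through the named resource, and the call returns its failure value instead of crashing the kernel.

/* Hypothetical helper; "0" stands in for a FORTIFY_FUNC_* id. */
static inline size_t checked_fill(char *dst, size_t avail, size_t size)
{
	if (size > avail)
		fortify_panic(0, true, avail, size, 0);
	memset(dst, 'A', size);
	return size;
}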
static void known_sizes_test(struct kunit *test)
{
KUNIT_EXPECT_EQ(test, __compiletime_strlen("88888888"), 8);
@@ -308,6 +346,610 @@ DEFINE_ALLOC_SIZE_TEST_PAIR(kvmalloc)
} while (0)
DEFINE_ALLOC_SIZE_TEST_PAIR(devm_kmalloc)
+/*
+ * We can't have an array at the end of a structure or else
+ * builds without -fstrict-flex-arrays=3 will report them as
+ * being an unknown length. Additionally, add bytes before
+ * and after the string to catch over/underflows if tests
+ * fail.
+ */
+struct fortify_padding {
+ unsigned long bytes_before;
+ char buf[32];
+ unsigned long bytes_after;
+};
+/* Force compiler into not being able to resolve size at compile-time. */
+static volatile int unconst;
+
+static void strlen_test(struct kunit *test)
+{
+ struct fortify_padding pad = { };
+ int i, end = sizeof(pad.buf) - 1;
+
+ /* Fill 31 bytes with valid characters. */
+ for (i = 0; i < sizeof(pad.buf) - 1; i++)
+ pad.buf[i] = i + '0';
+ /* Trailing bytes are still %NUL. */
+ KUNIT_EXPECT_EQ(test, pad.buf[end], '\0');
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* String is terminated, so strlen() is valid. */
+ KUNIT_EXPECT_EQ(test, strlen(pad.buf), end);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+
+ /* Make string unterminated, and recount. */
+ pad.buf[end] = 'A';
+ end = sizeof(pad.buf);
+ KUNIT_EXPECT_EQ(test, strlen(pad.buf), end);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
+}
+
+static void strnlen_test(struct kunit *test)
+{
+ struct fortify_padding pad = { };
+ int i, end = sizeof(pad.buf) - 1;
+
+ /* Fill 31 bytes with valid characters. */
+ for (i = 0; i < sizeof(pad.buf) - 1; i++)
+ pad.buf[i] = i + '0';
+ /* Trailing bytes are still %NUL. */
+ KUNIT_EXPECT_EQ(test, pad.buf[end], '\0');
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* String is terminated, so strnlen() is valid. */
+ KUNIT_EXPECT_EQ(test, strnlen(pad.buf, sizeof(pad.buf)), end);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ /* A truncated strnlen() will be safe, too. */
+ KUNIT_EXPECT_EQ(test, strnlen(pad.buf, sizeof(pad.buf) / 2),
+ sizeof(pad.buf) / 2);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+
+ /* Make string unterminated, and recount. */
+ pad.buf[end] = 'A';
+ end = sizeof(pad.buf);
+ /* Reading beyond with strnlen() will fail. */
+ KUNIT_EXPECT_EQ(test, strnlen(pad.buf, end + 1), end);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
+ KUNIT_EXPECT_EQ(test, strnlen(pad.buf, end + 2), end);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
+
+ /* Early-truncated is safe still, though. */
+ KUNIT_EXPECT_EQ(test, strnlen(pad.buf, end), end);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
+
+ end = sizeof(pad.buf) / 2;
+ KUNIT_EXPECT_EQ(test, strnlen(pad.buf, end), end);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
+}
+
+static void strcpy_test(struct kunit *test)
+{
+ struct fortify_padding pad = { };
+ char src[sizeof(pad.buf) + 1] = { };
+ int i;
+
+ /* Fill 31 bytes with valid characters. */
+ for (i = 0; i < sizeof(src) - 2; i++)
+ src[i] = i + '0';
+
+ /* Destination is %NUL-filled to start with. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* Legitimate strcpy() 1 less than max size. */
+ KUNIT_ASSERT_TRUE(test, strcpy(pad.buf, src)
+ == pad.buf);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
+ /* Only last byte should be %NUL */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+
+ src[sizeof(src) - 2] = 'A';
+ /* But now we trip the overflow checking. */
+ KUNIT_ASSERT_TRUE(test, strcpy(pad.buf, src)
+ == pad.buf);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
+ /* Trailing %NUL -- thanks to FORTIFY. */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ /* And we will not have gone beyond. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ src[sizeof(src) - 1] = 'A';
+ /* And for sure now, two bytes past. */
+ KUNIT_ASSERT_TRUE(test, strcpy(pad.buf, src)
+ == pad.buf);
+ /*
+ * Which trips both the strlen() on the unterminated src,
+ * and the resulting copy attempt.
+ */
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
+ /* Trailing %NUL -- thanks to FORTIFY. */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ /* And we will not have gone beyond. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+}
+
+static void strncpy_test(struct kunit *test)
+{
+ struct fortify_padding pad = { };
+ char src[] = "Copy me fully into a small buffer and I will overflow!";
+
+ /* Destination is %NUL-filled to start with. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* Legitimate strncpy() 1 less than max size. */
+ KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src,
+ sizeof(pad.buf) + unconst - 1)
+ == pad.buf);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
+ /* Only last byte should be %NUL */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+
+ /* Legitimate (though unterminated) max-size strncpy. */
+ KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src,
+ sizeof(pad.buf) + unconst)
+ == pad.buf);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
+ /* No trailing %NUL -- thanks strncpy API. */
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ /* But we will not have gone beyond. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* Now verify that FORTIFY is working... */
+ KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src,
+ sizeof(pad.buf) + unconst + 1)
+ == pad.buf);
+ /* Should catch the overflow. */
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ /* And we will not have gone beyond. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* And further... */
+ KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src,
+ sizeof(pad.buf) + unconst + 2)
+ == pad.buf);
+ /* Should catch the overflow. */
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ /* And we will not have gone beyond. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+}
+
+static void strscpy_test(struct kunit *test)
+{
+ struct fortify_padding pad = { };
+ char src[] = "Copy me fully into a small buffer and I will overflow!";
+
+ /* Destination is %NUL-filled to start with. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* Legitimate strscpy() 1 less than max size. */
+ KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src,
+ sizeof(pad.buf) + unconst - 1),
+ -E2BIG);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
+ /* Keeping space for %NUL, last two bytes should be %NUL */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+
+ /* Legitimate max-size strscpy. */
+ KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src,
+ sizeof(pad.buf) + unconst),
+ -E2BIG);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
+ /* A trailing %NUL will exist. */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+
+ /* Now verify that FORTIFY is working... */
+ KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src,
+ sizeof(pad.buf) + unconst + 1),
+ -E2BIG);
+ /* Should catch the overflow. */
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ /* And we will not have gone beyond. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* And much further... */
+ KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src,
+ sizeof(src) * 2 + unconst),
+ -E2BIG);
+ /* Should catch the overflow. */
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ /* And we will not have gone beyond. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+}
+
+static void strcat_test(struct kunit *test)
+{
+ struct fortify_padding pad = { };
+ char src[sizeof(pad.buf) / 2] = { };
+ char one[] = "A";
+ char two[] = "BC";
+ int i;
+
+ /* Fill 15 bytes with valid characters. */
+ for (i = 0; i < sizeof(src) - 1; i++)
+ src[i] = i + 'A';
+
+ /* Destination is %NUL-filled to start with. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* Legitimate strcat() using less than half max size. */
+ KUNIT_ASSERT_TRUE(test, strcat(pad.buf, src) == pad.buf);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
+ /* Legitimate strcat() now 2 bytes shy of end. */
+ KUNIT_ASSERT_TRUE(test, strcat(pad.buf, src) == pad.buf);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
+ /* Last two bytes should be %NUL */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+
+ /* Add one more character to the end. */
+ KUNIT_ASSERT_TRUE(test, strcat(pad.buf, one) == pad.buf);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
+ /* Last byte should be %NUL */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+
+ /* And this one char will overflow. */
+ KUNIT_ASSERT_TRUE(test, strcat(pad.buf, one) == pad.buf);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
+ /* Last byte should be %NUL thanks to FORTIFY. */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* And adding two will overflow more. */
+ KUNIT_ASSERT_TRUE(test, strcat(pad.buf, two) == pad.buf);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
+ /* Last byte should be %NUL thanks to FORTIFY. */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+}
+
+static void strncat_test(struct kunit *test)
+{
+ struct fortify_padding pad = { };
+ char src[sizeof(pad.buf)] = { };
+ int i, partial;
+
+ /* Fill 15 bytes with valid characters. */
+ partial = sizeof(src) / 2 - 1;
+ for (i = 0; i < partial; i++)
+ src[i] = i + 'A';
+
+ /* Destination is %NUL-filled to start with. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* Legitimate strncat() using less than half max size. */
+ KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, partial) == pad.buf);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
+ /* Legitimate strncat() now 2 bytes shy of end. */
+ KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, partial) == pad.buf);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
+ /* Last two bytes should be %NUL */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+
+ /* Add one more character to the end. */
+ KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, 1) == pad.buf);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
+ /* Last byte should be %NUL */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+
+ /* And this one char will overflow. */
+ KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, 1) == pad.buf);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
+ /* Last byte should be %NUL thanks to FORTIFY. */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* And adding two will overflow more. */
+ KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, 2) == pad.buf);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
+ /* Last byte should be %NUL thanks to FORTIFY. */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* Force an unterminated destination, and overflow. */
+ pad.buf[sizeof(pad.buf) - 1] = 'A';
+ KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, 1) == pad.buf);
+ /* This will have tripped both strlen() and strcat(). */
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 3);
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ /* But we should not go beyond the end. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+}
+
+static void strlcat_test(struct kunit *test)
+{
+ struct fortify_padding pad = { };
+ char src[sizeof(pad.buf)] = { };
+ int i, partial;
+ int len = sizeof(pad.buf) + unconst;
+
+ /* Fill 15 bytes with valid characters. */
+ partial = sizeof(src) / 2 - 1;
+ for (i = 0; i < partial; i++)
+ src[i] = i + 'A';
+
+ /* Destination is %NUL-filled to start with. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* Legitimate strlcat() using less than half max size. */
+ KUNIT_ASSERT_EQ(test, strlcat(pad.buf, src, len), partial);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
+ /* Legitimate strlcat() now 2 bytes shy of end. */
+ KUNIT_ASSERT_EQ(test, strlcat(pad.buf, src, len), partial * 2);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
+ /* Last two bytes should be %NUL */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+
+ /* Add one more character to the end. */
+ KUNIT_ASSERT_EQ(test, strlcat(pad.buf, "Q", len), partial * 2 + 1);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
+ /* Last byte should be %NUL */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+
+ /* And this one char will overflow. */
+ KUNIT_ASSERT_EQ(test, strlcat(pad.buf, "V", len * 2), len);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
+ /* Last byte should be %NUL thanks to FORTIFY. */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* And adding two will overflow more. */
+ KUNIT_ASSERT_EQ(test, strlcat(pad.buf, "QQ", len * 2), len + 1);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
+ /* Last byte should be %NUL thanks to FORTIFY. */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* Force an unterminated destination, and overflow. */
+ pad.buf[sizeof(pad.buf) - 1] = 'A';
+ KUNIT_ASSERT_EQ(test, strlcat(pad.buf, "TT", len * 2), len + 2);
+ /* This will have tripped both strlen() and strlcat(). */
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ /* But we should not go beyond the end. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* Force an unterminated source, and overflow. */
+ memset(src, 'B', sizeof(src));
+ pad.buf[sizeof(pad.buf) - 1] = '\0';
+ KUNIT_ASSERT_EQ(test, strlcat(pad.buf, src, len * 3), len - 1 + sizeof(src));
+ /* This will have tripped both strlen() and strlcat(). */
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 3);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 3);
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ /* But we should not go beyond the end. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+}
+
+static void memscan_test(struct kunit *test)
+{
+ char haystack[] = "Where oh where is my memory range?";
+ char *mem = haystack + strlen("Where oh where is ");
+ char needle = 'm';
+ size_t len = sizeof(haystack) + unconst;
+
+ KUNIT_ASSERT_PTR_EQ(test, memscan(haystack, needle, len),
+ mem);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ /* Catch too-large range. */
+ KUNIT_ASSERT_PTR_EQ(test, memscan(haystack, needle, len + 1),
+ NULL);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
+ KUNIT_ASSERT_PTR_EQ(test, memscan(haystack, needle, len * 2),
+ NULL);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
+}
+
+static void memchr_test(struct kunit *test)
+{
+ char haystack[] = "Where oh where is my memory range?";
+ char *mem = haystack + strlen("Where oh where is ");
+ char needle = 'm';
+ size_t len = sizeof(haystack) + unconst;
+
+ KUNIT_ASSERT_PTR_EQ(test, memchr(haystack, needle, len),
+ mem);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ /* Catch too-large range. */
+ KUNIT_ASSERT_PTR_EQ(test, memchr(haystack, needle, len + 1),
+ NULL);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
+ KUNIT_ASSERT_PTR_EQ(test, memchr(haystack, needle, len * 2),
+ NULL);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
+}
+
+static void memchr_inv_test(struct kunit *test)
+{
+ char haystack[] = "Where oh where is my memory range?";
+ char *mem = haystack + 1;
+ char needle = 'W';
+ size_t len = sizeof(haystack) + unconst;
+
+ /* Normal search is okay. */
+ KUNIT_ASSERT_PTR_EQ(test, memchr_inv(haystack, needle, len),
+ mem);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ /* Catch too-large range. */
+ KUNIT_ASSERT_PTR_EQ(test, memchr_inv(haystack, needle, len + 1),
+ NULL);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
+ KUNIT_ASSERT_PTR_EQ(test, memchr_inv(haystack, needle, len * 2),
+ NULL);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
+}
+
+static void memcmp_test(struct kunit *test)
+{
+ char one[] = "My mind is going ...";
+ char two[] = "My mind is going ... I can feel it.";
+ size_t one_len = sizeof(one) + unconst - 1;
+ size_t two_len = sizeof(two) + unconst - 1;
+
+ /* We match the first string (ignoring the %NUL). */
+ KUNIT_ASSERT_EQ(test, memcmp(one, two, one_len), 0);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ /* Still in bounds, but no longer matching. */
+ KUNIT_ASSERT_EQ(test, memcmp(one, two, one_len + 1), -32);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+
+ /* Catch too-large ranges. */
+ KUNIT_ASSERT_EQ(test, memcmp(one, two, one_len + 2), INT_MIN);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
+
+ KUNIT_ASSERT_EQ(test, memcmp(two, one, two_len + 2), INT_MIN);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
+}
+
+static void kmemdup_test(struct kunit *test)
+{
+ char src[] = "I got Doom running on it!";
+ char *copy;
+ size_t len = sizeof(src) + unconst;
+
+ /* Copy is within bounds. */
+ copy = kmemdup(src, len, GFP_KERNEL);
+ KUNIT_EXPECT_NOT_NULL(test, copy);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ kfree(copy);
+
+ /* Without %NUL. */
+ copy = kmemdup(src, len - 1, GFP_KERNEL);
+ KUNIT_EXPECT_NOT_NULL(test, copy);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ kfree(copy);
+
+ /* Tiny bounds. */
+ copy = kmemdup(src, 1, GFP_KERNEL);
+ KUNIT_EXPECT_NOT_NULL(test, copy);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ kfree(copy);
+
+ /* Out of bounds by 1 byte. */
+ copy = kmemdup(src, len + 1, GFP_KERNEL);
+ KUNIT_EXPECT_NULL(test, copy);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
+ kfree(copy);
+
+ /* Way out of bounds. */
+ copy = kmemdup(src, len * 2, GFP_KERNEL);
+ KUNIT_EXPECT_NULL(test, copy);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
+ kfree(copy);
+
+ /* Starting offset causing out of bounds. */
+ copy = kmemdup(src + 1, len, GFP_KERNEL);
+ KUNIT_EXPECT_NULL(test, copy);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 3);
+ kfree(copy);
+}
+
+static int fortify_test_init(struct kunit *test)
+{
+ if (!IS_ENABLED(CONFIG_FORTIFY_SOURCE))
+ kunit_skip(test, "Not built with CONFIG_FORTIFY_SOURCE=y");
+
+ fortify_read_overflows = 0;
+ kunit_add_named_resource(test, NULL, NULL, &read_resource,
+ "fortify_read_overflows",
+ &fortify_read_overflows);
+ fortify_write_overflows = 0;
+ kunit_add_named_resource(test, NULL, NULL, &write_resource,
+ "fortify_write_overflows",
+ &fortify_write_overflows);
+ return 0;
+}
+
static struct kunit_case fortify_test_cases[] = {
KUNIT_CASE(known_sizes_test),
KUNIT_CASE(control_flow_split_test),
@@ -319,11 +961,27 @@ static struct kunit_case fortify_test_cases[] = {
KUNIT_CASE(alloc_size_kvmalloc_dynamic_test),
KUNIT_CASE(alloc_size_devm_kmalloc_const_test),
KUNIT_CASE(alloc_size_devm_kmalloc_dynamic_test),
+ KUNIT_CASE(strlen_test),
+ KUNIT_CASE(strnlen_test),
+ KUNIT_CASE(strcpy_test),
+ KUNIT_CASE(strncpy_test),
+ KUNIT_CASE(strscpy_test),
+ KUNIT_CASE(strcat_test),
+ KUNIT_CASE(strncat_test),
+ KUNIT_CASE(strlcat_test),
+ /* skip memset: performs bounds checking on whole structs */
+ /* skip memcpy: still using warn-and-overwrite instead of hard-fail */
+ KUNIT_CASE(memscan_test),
+ KUNIT_CASE(memchr_test),
+ KUNIT_CASE(memchr_inv_test),
+ KUNIT_CASE(memcmp_test),
+ KUNIT_CASE(kmemdup_test),
{}
};
static struct kunit_suite fortify_test_suite = {
.name = "fortify",
+ .init = fortify_test_init,
.test_cases = fortify_test_cases,
};
diff --git a/lib/fw_table.c b/lib/fw_table.c
index c3569d2ba503..16291814450e 100644
--- a/lib/fw_table.c
+++ b/lib/fw_table.c
@@ -127,6 +127,7 @@ static __init_or_fwtbl_lib int call_handler(struct acpi_subtable_proc *proc,
*
* @id: table id (for debugging purposes)
* @table_size: size of the root table
+ * @max_length: maximum size of the table (ignored if 0)
* @table_header: where does the table start?
* @proc: array of acpi_subtable_proc struct containing entry id
* and associated handler with it
@@ -148,18 +149,21 @@ static __init_or_fwtbl_lib int call_handler(struct acpi_subtable_proc *proc,
int __init_or_fwtbl_lib
acpi_parse_entries_array(char *id, unsigned long table_size,
union fw_table_header *table_header,
+ unsigned long max_length,
struct acpi_subtable_proc *proc,
int proc_num, unsigned int max_entries)
{
- unsigned long table_end, subtable_len, entry_len;
+ unsigned long table_len, table_end, subtable_len, entry_len;
struct acpi_subtable_entry entry;
enum acpi_subtable_type type;
int count = 0;
int i;
type = acpi_get_subtable_type(id);
- table_end = (unsigned long)table_header +
- acpi_table_get_length(type, table_header);
+ table_len = acpi_table_get_length(type, table_header);
+ if (max_length && max_length < table_len)
+ table_len = max_length;
+ table_end = (unsigned long)table_header + table_len;
/* Parse all entries looking for a match. */
@@ -208,7 +212,8 @@ int __init_or_fwtbl_lib
cdat_table_parse(enum acpi_cdat_type type,
acpi_tbl_entry_handler_arg handler_arg,
void *arg,
- struct acpi_table_cdat *table_header)
+ struct acpi_table_cdat *table_header,
+ unsigned long length)
{
struct acpi_subtable_proc proc = {
.id = type,
@@ -222,6 +227,6 @@ cdat_table_parse(enum acpi_cdat_type type,
return acpi_parse_entries_array(ACPI_SIG_CDAT,
sizeof(struct acpi_table_cdat),
(union fw_table_header *)table_header,
- &proc, 1, 0);
+ length, &proc, 1, 0);
}
EXPORT_SYMBOL_FWTBL_LIB(cdat_table_parse);
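
The new max_length argument lets a caller clamp parsing to the number of bytes it actually mapped or read, instead of trusting the length field inside a possibly device-provided table header. For illustration, a hypothetical CDAT caller passing its buffer size (buf, handler, arg and mapped_len are placeholder names):

	int ret;

	ret = cdat_table_parse(ACPI_CDAT_TYPE_DSMAS, handler, arg,
			       (struct acpi_table_cdat *)buf, mapped_len);
	if (ret < 0)
		return ret;

With mapped_len == 0 the behaviour is unchanged and the header's own length is used.
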
diff --git a/lib/generic-radix-tree.c b/lib/generic-radix-tree.c
index 41f1bcdc4488..aaefb9b678c8 100644
--- a/lib/generic-radix-tree.c
+++ b/lib/generic-radix-tree.c
@@ -5,7 +5,7 @@
#include <linux/gfp.h>
#include <linux/kmemleak.h>
-#define GENRADIX_ARY (PAGE_SIZE / sizeof(struct genradix_node *))
+#define GENRADIX_ARY (GENRADIX_NODE_SIZE / sizeof(struct genradix_node *))
#define GENRADIX_ARY_SHIFT ilog2(GENRADIX_ARY)
struct genradix_node {
@@ -14,13 +14,13 @@ struct genradix_node {
struct genradix_node *children[GENRADIX_ARY];
/* Leaf: */
- u8 data[PAGE_SIZE];
+ u8 data[GENRADIX_NODE_SIZE];
};
};
static inline int genradix_depth_shift(unsigned depth)
{
- return PAGE_SHIFT + GENRADIX_ARY_SHIFT * depth;
+ return GENRADIX_NODE_SHIFT + GENRADIX_ARY_SHIFT * depth;
}
/*
@@ -33,7 +33,7 @@ static inline size_t genradix_depth_size(unsigned depth)
/* depth that's needed for a genradix that can address up to ULONG_MAX: */
#define GENRADIX_MAX_DEPTH \
- DIV_ROUND_UP(BITS_PER_LONG - PAGE_SHIFT, GENRADIX_ARY_SHIFT)
+ DIV_ROUND_UP(BITS_PER_LONG - GENRADIX_NODE_SHIFT, GENRADIX_ARY_SHIFT)
#define GENRADIX_DEPTH_MASK \
((unsigned long) (roundup_pow_of_two(GENRADIX_MAX_DEPTH + 1) - 1))
@@ -79,23 +79,12 @@ EXPORT_SYMBOL(__genradix_ptr);
static inline struct genradix_node *genradix_alloc_node(gfp_t gfp_mask)
{
- struct genradix_node *node;
-
- node = (struct genradix_node *)__get_free_page(gfp_mask|__GFP_ZERO);
-
- /*
- * We're using pages (not slab allocations) directly for kernel data
- * structures, so we need to explicitly inform kmemleak of them in order
- * to avoid false positive memory leak reports.
- */
- kmemleak_alloc(node, PAGE_SIZE, 1, gfp_mask);
- return node;
+ return kzalloc(GENRADIX_NODE_SIZE, gfp_mask);
}
static inline void genradix_free_node(struct genradix_node *node)
{
- kmemleak_free(node);
- free_page((unsigned long)node);
+ kfree(node);
}
/*
@@ -200,7 +189,7 @@ restart:
i++;
iter->offset = round_down(iter->offset + objs_per_ptr,
objs_per_ptr);
- iter->pos = (iter->offset >> PAGE_SHIFT) *
+ iter->pos = (iter->offset >> GENRADIX_NODE_SHIFT) *
objs_per_page;
if (i == GENRADIX_ARY)
goto restart;
@@ -209,7 +198,7 @@ restart:
n = n->children[i];
}
- return &n->data[iter->offset & (PAGE_SIZE - 1)];
+ return &n->data[iter->offset & (GENRADIX_NODE_SIZE - 1)];
}
EXPORT_SYMBOL(__genradix_iter_peek);
@@ -235,7 +224,7 @@ restart:
if (ilog2(iter->offset) >= genradix_depth_shift(level)) {
iter->offset = genradix_depth_size(level);
- iter->pos = (iter->offset >> PAGE_SHIFT) * objs_per_page;
+ iter->pos = (iter->offset >> GENRADIX_NODE_SHIFT) * objs_per_page;
iter->offset -= obj_size_plus_page_remainder;
iter->pos--;
@@ -251,7 +240,7 @@ restart:
size_t objs_per_ptr = genradix_depth_size(level);
iter->offset = round_down(iter->offset, objs_per_ptr);
- iter->pos = (iter->offset >> PAGE_SHIFT) * objs_per_page;
+ iter->pos = (iter->offset >> GENRADIX_NODE_SHIFT) * objs_per_page;
if (!iter->offset)
return NULL;
@@ -267,7 +256,7 @@ restart:
n = n->children[i];
}
- return &n->data[iter->offset & (PAGE_SIZE - 1)];
+ return &n->data[iter->offset & (GENRADIX_NODE_SIZE - 1)];
}
EXPORT_SYMBOL(__genradix_iter_peek_prev);
@@ -289,7 +278,7 @@ int __genradix_prealloc(struct __genradix *radix, size_t size,
{
size_t offset;
- for (offset = 0; offset < size; offset += PAGE_SIZE)
+ for (offset = 0; offset < size; offset += GENRADIX_NODE_SIZE)
if (!__genradix_ptr_alloc(radix, offset, gfp_mask))
return -ENOMEM;
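
Two independent cleanups land here: node size is expressed via GENRADIX_NODE_SIZE/GENRADIX_NODE_SHIFT instead of hard-coding PAGE_SIZE, and nodes come from kzalloc() rather than raw pages, which makes the manual kmemleak_alloc()/kmemleak_free() annotations unnecessary because slab objects are tracked automatically. Roughly, the lookup arithmetic under the assumption GENRADIX_NODE_SIZE == 4096 with 8-byte pointers (so GENRADIX_ARY == 512):

	/* Byte offset within the leaf node: */
	size_t leaf_off = offset & (GENRADIX_NODE_SIZE - 1);
	/* Child slot one level up; each extra level multiplies reach by 512: */
	size_t slot = (offset >> GENRADIX_NODE_SHIFT) & (GENRADIX_ARY - 1);
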
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index e0aa6b440ca5..4a6a9f419bd7 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -166,7 +166,6 @@ void iov_iter_init(struct iov_iter *i, unsigned int direction,
WARN_ON(direction & ~(READ | WRITE));
*i = (struct iov_iter) {
.iter_type = ITER_IOVEC,
- .copy_mc = false,
.nofault = false,
.data_source = direction,
.__iov = iov,
@@ -245,26 +244,8 @@ EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
#endif /* CONFIG_ARCH_HAS_COPY_MC */
static __always_inline
-size_t memcpy_from_iter_mc(void *iter_from, size_t progress,
- size_t len, void *to, void *priv2)
-{
- return copy_mc_to_kernel(to + progress, iter_from, len);
-}
-
-static size_t __copy_from_iter_mc(void *addr, size_t bytes, struct iov_iter *i)
-{
- if (unlikely(i->count < bytes))
- bytes = i->count;
- if (unlikely(!bytes))
- return 0;
- return iterate_bvec(i, bytes, addr, NULL, memcpy_from_iter_mc);
-}
-
-static __always_inline
size_t __copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
- if (unlikely(iov_iter_is_copy_mc(i)))
- return __copy_from_iter_mc(addr, bytes, i);
return iterate_and_advance(i, bytes, addr,
copy_from_user_iter, memcpy_from_iter);
}
@@ -633,7 +614,6 @@ void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
WARN_ON(direction & ~(READ | WRITE));
*i = (struct iov_iter){
.iter_type = ITER_KVEC,
- .copy_mc = false,
.data_source = direction,
.kvec = kvec,
.nr_segs = nr_segs,
@@ -650,7 +630,6 @@ void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
WARN_ON(direction & ~(READ | WRITE));
*i = (struct iov_iter){
.iter_type = ITER_BVEC,
- .copy_mc = false,
.data_source = direction,
.bvec = bvec,
.nr_segs = nr_segs,
@@ -679,7 +658,6 @@ void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
BUG_ON(direction & ~1);
*i = (struct iov_iter) {
.iter_type = ITER_XARRAY,
- .copy_mc = false,
.data_source = direction,
.xarray = xarray,
.xarray_start = start,
@@ -703,7 +681,6 @@ void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
BUG_ON(direction != READ);
*i = (struct iov_iter){
.iter_type = ITER_DISCARD,
- .copy_mc = false,
.data_source = false,
.count = count,
.iov_offset = 0
@@ -714,12 +691,11 @@ EXPORT_SYMBOL(iov_iter_discard);
static bool iov_iter_aligned_iovec(const struct iov_iter *i, unsigned addr_mask,
unsigned len_mask)
{
+ const struct iovec *iov = iter_iov(i);
size_t size = i->count;
size_t skip = i->iov_offset;
- unsigned k;
- for (k = 0; k < i->nr_segs; k++, skip = 0) {
- const struct iovec *iov = iter_iov(i) + k;
+ do {
size_t len = iov->iov_len - skip;
if (len > size)
@@ -729,34 +705,36 @@ static bool iov_iter_aligned_iovec(const struct iov_iter *i, unsigned addr_mask,
if ((unsigned long)(iov->iov_base + skip) & addr_mask)
return false;
+ iov++;
size -= len;
- if (!size)
- break;
- }
+ skip = 0;
+ } while (size);
+
return true;
}
static bool iov_iter_aligned_bvec(const struct iov_iter *i, unsigned addr_mask,
unsigned len_mask)
{
- size_t size = i->count;
+ const struct bio_vec *bvec = i->bvec;
unsigned skip = i->iov_offset;
- unsigned k;
+ size_t size = i->count;
- for (k = 0; k < i->nr_segs; k++, skip = 0) {
- size_t len = i->bvec[k].bv_len - skip;
+ do {
+ size_t len = bvec->bv_len;
if (len > size)
len = size;
if (len & len_mask)
return false;
- if ((unsigned long)(i->bvec[k].bv_offset + skip) & addr_mask)
+ if ((unsigned long)(bvec->bv_offset + skip) & addr_mask)
return false;
+ bvec++;
size -= len;
- if (!size)
- break;
- }
+ skip = 0;
+ } while (size);
+
return true;
}
@@ -800,13 +778,12 @@ EXPORT_SYMBOL_GPL(iov_iter_is_aligned);
static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
{
+ const struct iovec *iov = iter_iov(i);
unsigned long res = 0;
size_t size = i->count;
size_t skip = i->iov_offset;
- unsigned k;
- for (k = 0; k < i->nr_segs; k++, skip = 0) {
- const struct iovec *iov = iter_iov(i) + k;
+ do {
size_t len = iov->iov_len - skip;
if (len) {
res |= (unsigned long)iov->iov_base + skip;
@@ -814,30 +791,31 @@ static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
len = size;
res |= len;
size -= len;
- if (!size)
- break;
}
- }
+ iov++;
+ skip = 0;
+ } while (size);
return res;
}
static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i)
{
+ const struct bio_vec *bvec = i->bvec;
unsigned res = 0;
size_t size = i->count;
unsigned skip = i->iov_offset;
- unsigned k;
- for (k = 0; k < i->nr_segs; k++, skip = 0) {
- size_t len = i->bvec[k].bv_len - skip;
- res |= (unsigned long)i->bvec[k].bv_offset + skip;
+ do {
+ size_t len = bvec->bv_len - skip;
+ res |= (unsigned long)bvec->bv_offset + skip;
if (len > size)
len = size;
res |= len;
+ bvec++;
size -= len;
- if (!size)
- break;
- }
+ skip = 0;
+ } while (size);
+
return res;
}
@@ -1166,11 +1144,12 @@ const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
EXPORT_SYMBOL(dup_iter);
static __noclone int copy_compat_iovec_from_user(struct iovec *iov,
- const struct iovec __user *uvec, unsigned long nr_segs)
+ const struct iovec __user *uvec, u32 nr_segs)
{
const struct compat_iovec __user *uiov =
(const struct compat_iovec __user *)uvec;
- int ret = -EFAULT, i;
+ int ret = -EFAULT;
+ u32 i;
if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
return -EFAULT;
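
The alignment helpers switch from an indexed for loop over nr_segs to a pointer-chasing do/while, relying on these helpers never being called with an empty iterator (otherwise the first dereference would be bogus): at least one segment is always examined, and iov_offset only applies to the first one. Assembled from the hunks above, the final iovec alignment loop reads:

	const struct iovec *iov = iter_iov(i);
	unsigned long res = 0;
	size_t size = i->count;		/* assumed non-zero on entry */
	size_t skip = i->iov_offset;

	do {
		size_t len = iov->iov_len - skip;

		if (len) {
			res |= (unsigned long)iov->iov_base + skip;
			if (len > size)
				len = size;
			res |= len;
			size -= len;
		}
		iov++;
		skip = 0;
	} while (size);
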
diff --git a/lib/kunit/device.c b/lib/kunit/device.c
index 644a38a1f5b1..abc603730b8e 100644
--- a/lib/kunit/device.c
+++ b/lib/kunit/device.c
@@ -10,6 +10,7 @@
*/
#include <linux/device.h>
+#include <linux/dma-mapping.h>
#include <kunit/test.h>
#include <kunit/device.h>
@@ -35,7 +36,7 @@ struct kunit_device {
#define to_kunit_device(d) container_of_const(d, struct kunit_device, dev)
-static struct bus_type kunit_bus_type = {
+static const struct bus_type kunit_bus_type = {
.name = "kunit",
};
@@ -133,6 +134,9 @@ static struct kunit_device *kunit_device_register_internal(struct kunit *test,
return ERR_PTR(err);
}
+ kunit_dev->dev.dma_mask = &kunit_dev->dev.coherent_dma_mask;
+ kunit_dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+
kunit_add_action(test, device_unregister_wrapper, &kunit_dev->dev);
return kunit_dev;
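
Giving the KUnit bus device a 32-bit DMA mask means code under test can run the dma_map_*()/dma_alloc_*() APIs against it. The fields are assigned directly here because the kunit bus constructs the device itself; a normal driver would request the same thing in probe, sketch:

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (err)
		return err;
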
diff --git a/lib/kunit/executor.c b/lib/kunit/executor.c
index 689fff2b2b10..70b9a43cd257 100644
--- a/lib/kunit/executor.c
+++ b/lib/kunit/executor.c
@@ -33,13 +33,13 @@ static char *filter_glob_param;
static char *filter_param;
static char *filter_action_param;
-module_param_named(filter_glob, filter_glob_param, charp, 0400);
+module_param_named(filter_glob, filter_glob_param, charp, 0600);
MODULE_PARM_DESC(filter_glob,
"Filter which KUnit test suites/tests run at boot-time, e.g. list* or list*.*del_test");
-module_param_named(filter, filter_param, charp, 0400);
+module_param_named(filter, filter_param, charp, 0600);
MODULE_PARM_DESC(filter,
"Filter which KUnit test suites/tests run at boot-time using attributes, e.g. speed>slow");
-module_param_named(filter_action, filter_action_param, charp, 0400);
+module_param_named(filter_action, filter_action_param, charp, 0600);
MODULE_PARM_DESC(filter_action,
"Changes behavior of filtered tests using attributes, valid values are:\n"
"<none>: do not run filtered tests as normal\n"
diff --git a/lib/kunit/executor_test.c b/lib/kunit/executor_test.c
index 22d4ee86dbed..3f7f967e3688 100644
--- a/lib/kunit/executor_test.c
+++ b/lib/kunit/executor_test.c
@@ -129,7 +129,7 @@ static void parse_filter_attr_test(struct kunit *test)
GFP_KERNEL);
for (j = 0; j < filter_count; j++) {
parsed_filters[j] = kunit_next_attr_filter(&filter, &err);
- KUNIT_ASSERT_EQ_MSG(test, err, 0, "failed to parse filter '%s'", filters[j]);
+ KUNIT_ASSERT_EQ_MSG(test, err, 0, "failed to parse filter from '%s'", filters);
}
KUNIT_EXPECT_STREQ(test, kunit_attr_filter_name(parsed_filters[0]), "speed");
diff --git a/lib/livepatch/Makefile b/lib/livepatch/Makefile
deleted file mode 100644
index dcc912b3478f..000000000000
--- a/lib/livepatch/Makefile
+++ /dev/null
@@ -1,14 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for livepatch test code.
-
-obj-$(CONFIG_TEST_LIVEPATCH) += test_klp_atomic_replace.o \
- test_klp_callbacks_demo.o \
- test_klp_callbacks_demo2.o \
- test_klp_callbacks_busy.o \
- test_klp_callbacks_mod.o \
- test_klp_livepatch.o \
- test_klp_shadow_vars.o \
- test_klp_state.o \
- test_klp_state2.o \
- test_klp_state3.o
diff --git a/lib/livepatch/test_klp_atomic_replace.c b/lib/livepatch/test_klp_atomic_replace.c
deleted file mode 100644
index 5af7093ca00c..000000000000
--- a/lib/livepatch/test_klp_atomic_replace.c
+++ /dev/null
@@ -1,57 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com>
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/livepatch.h>
-
-static int replace;
-module_param(replace, int, 0644);
-MODULE_PARM_DESC(replace, "replace (default=0)");
-
-#include <linux/seq_file.h>
-static int livepatch_meminfo_proc_show(struct seq_file *m, void *v)
-{
- seq_printf(m, "%s: %s\n", THIS_MODULE->name,
- "this has been live patched");
- return 0;
-}
-
-static struct klp_func funcs[] = {
- {
- .old_name = "meminfo_proc_show",
- .new_func = livepatch_meminfo_proc_show,
- }, {}
-};
-
-static struct klp_object objs[] = {
- {
- /* name being NULL means vmlinux */
- .funcs = funcs,
- }, {}
-};
-
-static struct klp_patch patch = {
- .mod = THIS_MODULE,
- .objs = objs,
- /* set .replace in the init function below for demo purposes */
-};
-
-static int test_klp_atomic_replace_init(void)
-{
- patch.replace = replace;
- return klp_enable_patch(&patch);
-}
-
-static void test_klp_atomic_replace_exit(void)
-{
-}
-
-module_init(test_klp_atomic_replace_init);
-module_exit(test_klp_atomic_replace_exit);
-MODULE_LICENSE("GPL");
-MODULE_INFO(livepatch, "Y");
-MODULE_AUTHOR("Joe Lawrence <joe.lawrence@redhat.com>");
-MODULE_DESCRIPTION("Livepatch test: atomic replace");
diff --git a/lib/livepatch/test_klp_callbacks_busy.c b/lib/livepatch/test_klp_callbacks_busy.c
deleted file mode 100644
index 133929e0ce8f..000000000000
--- a/lib/livepatch/test_klp_callbacks_busy.c
+++ /dev/null
@@ -1,70 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com>
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/workqueue.h>
-#include <linux/delay.h>
-
-/* load/run-time control from sysfs writer */
-static bool block_transition;
-module_param(block_transition, bool, 0644);
-MODULE_PARM_DESC(block_transition, "block_transition (default=false)");
-
-static void busymod_work_func(struct work_struct *work);
-static DECLARE_WORK(work, busymod_work_func);
-static DECLARE_COMPLETION(busymod_work_started);
-
-static void busymod_work_func(struct work_struct *work)
-{
- pr_info("%s enter\n", __func__);
- complete(&busymod_work_started);
-
- while (READ_ONCE(block_transition)) {
- /*
- * Busy-wait until the sysfs writer has acknowledged a
- * blocked transition and clears the flag.
- */
- msleep(20);
- }
-
- pr_info("%s exit\n", __func__);
-}
-
-static int test_klp_callbacks_busy_init(void)
-{
- pr_info("%s\n", __func__);
- schedule_work(&work);
-
- /*
- * To synchronize kernel messages, hold the init function from
- * exiting until the work function's entry message has printed.
- */
- wait_for_completion(&busymod_work_started);
-
- if (!block_transition) {
- /*
- * Serialize output: print all messages from the work
- * function before returning from init().
- */
- flush_work(&work);
- }
-
- return 0;
-}
-
-static void test_klp_callbacks_busy_exit(void)
-{
- WRITE_ONCE(block_transition, false);
- flush_work(&work);
- pr_info("%s\n", __func__);
-}
-
-module_init(test_klp_callbacks_busy_init);
-module_exit(test_klp_callbacks_busy_exit);
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Joe Lawrence <joe.lawrence@redhat.com>");
-MODULE_DESCRIPTION("Livepatch test: busy target module");
diff --git a/lib/livepatch/test_klp_callbacks_demo.c b/lib/livepatch/test_klp_callbacks_demo.c
deleted file mode 100644
index 3fd8fe1cd1cc..000000000000
--- a/lib/livepatch/test_klp_callbacks_demo.c
+++ /dev/null
@@ -1,121 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com>
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/livepatch.h>
-
-static int pre_patch_ret;
-module_param(pre_patch_ret, int, 0644);
-MODULE_PARM_DESC(pre_patch_ret, "pre_patch_ret (default=0)");
-
-static const char *const module_state[] = {
- [MODULE_STATE_LIVE] = "[MODULE_STATE_LIVE] Normal state",
- [MODULE_STATE_COMING] = "[MODULE_STATE_COMING] Full formed, running module_init",
- [MODULE_STATE_GOING] = "[MODULE_STATE_GOING] Going away",
- [MODULE_STATE_UNFORMED] = "[MODULE_STATE_UNFORMED] Still setting it up",
-};
-
-static void callback_info(const char *callback, struct klp_object *obj)
-{
- if (obj->mod)
- pr_info("%s: %s -> %s\n", callback, obj->mod->name,
- module_state[obj->mod->state]);
- else
- pr_info("%s: vmlinux\n", callback);
-}
-
-/* Executed on object patching (ie, patch enablement) */
-static int pre_patch_callback(struct klp_object *obj)
-{
- callback_info(__func__, obj);
- return pre_patch_ret;
-}
-
-/* Executed on object unpatching (ie, patch disablement) */
-static void post_patch_callback(struct klp_object *obj)
-{
- callback_info(__func__, obj);
-}
-
-/* Executed on object unpatching (ie, patch disablement) */
-static void pre_unpatch_callback(struct klp_object *obj)
-{
- callback_info(__func__, obj);
-}
-
-/* Executed on object unpatching (ie, patch disablement) */
-static void post_unpatch_callback(struct klp_object *obj)
-{
- callback_info(__func__, obj);
-}
-
-static void patched_work_func(struct work_struct *work)
-{
- pr_info("%s\n", __func__);
-}
-
-static struct klp_func no_funcs[] = {
- {}
-};
-
-static struct klp_func busymod_funcs[] = {
- {
- .old_name = "busymod_work_func",
- .new_func = patched_work_func,
- }, {}
-};
-
-static struct klp_object objs[] = {
- {
- .name = NULL, /* vmlinux */
- .funcs = no_funcs,
- .callbacks = {
- .pre_patch = pre_patch_callback,
- .post_patch = post_patch_callback,
- .pre_unpatch = pre_unpatch_callback,
- .post_unpatch = post_unpatch_callback,
- },
- }, {
- .name = "test_klp_callbacks_mod",
- .funcs = no_funcs,
- .callbacks = {
- .pre_patch = pre_patch_callback,
- .post_patch = post_patch_callback,
- .pre_unpatch = pre_unpatch_callback,
- .post_unpatch = post_unpatch_callback,
- },
- }, {
- .name = "test_klp_callbacks_busy",
- .funcs = busymod_funcs,
- .callbacks = {
- .pre_patch = pre_patch_callback,
- .post_patch = post_patch_callback,
- .pre_unpatch = pre_unpatch_callback,
- .post_unpatch = post_unpatch_callback,
- },
- }, { }
-};
-
-static struct klp_patch patch = {
- .mod = THIS_MODULE,
- .objs = objs,
-};
-
-static int test_klp_callbacks_demo_init(void)
-{
- return klp_enable_patch(&patch);
-}
-
-static void test_klp_callbacks_demo_exit(void)
-{
-}
-
-module_init(test_klp_callbacks_demo_init);
-module_exit(test_klp_callbacks_demo_exit);
-MODULE_LICENSE("GPL");
-MODULE_INFO(livepatch, "Y");
-MODULE_AUTHOR("Joe Lawrence <joe.lawrence@redhat.com>");
-MODULE_DESCRIPTION("Livepatch test: livepatch demo");
diff --git a/lib/livepatch/test_klp_callbacks_demo2.c b/lib/livepatch/test_klp_callbacks_demo2.c
deleted file mode 100644
index 5417573e80af..000000000000
--- a/lib/livepatch/test_klp_callbacks_demo2.c
+++ /dev/null
@@ -1,93 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com>
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/livepatch.h>
-
-static int replace;
-module_param(replace, int, 0644);
-MODULE_PARM_DESC(replace, "replace (default=0)");
-
-static const char *const module_state[] = {
- [MODULE_STATE_LIVE] = "[MODULE_STATE_LIVE] Normal state",
- [MODULE_STATE_COMING] = "[MODULE_STATE_COMING] Full formed, running module_init",
- [MODULE_STATE_GOING] = "[MODULE_STATE_GOING] Going away",
- [MODULE_STATE_UNFORMED] = "[MODULE_STATE_UNFORMED] Still setting it up",
-};
-
-static void callback_info(const char *callback, struct klp_object *obj)
-{
- if (obj->mod)
- pr_info("%s: %s -> %s\n", callback, obj->mod->name,
- module_state[obj->mod->state]);
- else
- pr_info("%s: vmlinux\n", callback);
-}
-
-/* Executed on object patching (ie, patch enablement) */
-static int pre_patch_callback(struct klp_object *obj)
-{
- callback_info(__func__, obj);
- return 0;
-}
-
-/* Executed on object unpatching (ie, patch disablement) */
-static void post_patch_callback(struct klp_object *obj)
-{
- callback_info(__func__, obj);
-}
-
-/* Executed on object unpatching (ie, patch disablement) */
-static void pre_unpatch_callback(struct klp_object *obj)
-{
- callback_info(__func__, obj);
-}
-
-/* Executed on object unpatching (ie, patch disablement) */
-static void post_unpatch_callback(struct klp_object *obj)
-{
- callback_info(__func__, obj);
-}
-
-static struct klp_func no_funcs[] = {
- { }
-};
-
-static struct klp_object objs[] = {
- {
- .name = NULL, /* vmlinux */
- .funcs = no_funcs,
- .callbacks = {
- .pre_patch = pre_patch_callback,
- .post_patch = post_patch_callback,
- .pre_unpatch = pre_unpatch_callback,
- .post_unpatch = post_unpatch_callback,
- },
- }, { }
-};
-
-static struct klp_patch patch = {
- .mod = THIS_MODULE,
- .objs = objs,
- /* set .replace in the init function below for demo purposes */
-};
-
-static int test_klp_callbacks_demo2_init(void)
-{
- patch.replace = replace;
- return klp_enable_patch(&patch);
-}
-
-static void test_klp_callbacks_demo2_exit(void)
-{
-}
-
-module_init(test_klp_callbacks_demo2_init);
-module_exit(test_klp_callbacks_demo2_exit);
-MODULE_LICENSE("GPL");
-MODULE_INFO(livepatch, "Y");
-MODULE_AUTHOR("Joe Lawrence <joe.lawrence@redhat.com>");
-MODULE_DESCRIPTION("Livepatch test: livepatch demo2");
diff --git a/lib/livepatch/test_klp_callbacks_mod.c b/lib/livepatch/test_klp_callbacks_mod.c
deleted file mode 100644
index 8fbe645b1c2c..000000000000
--- a/lib/livepatch/test_klp_callbacks_mod.c
+++ /dev/null
@@ -1,24 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com>
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-
-static int test_klp_callbacks_mod_init(void)
-{
- pr_info("%s\n", __func__);
- return 0;
-}
-
-static void test_klp_callbacks_mod_exit(void)
-{
- pr_info("%s\n", __func__);
-}
-
-module_init(test_klp_callbacks_mod_init);
-module_exit(test_klp_callbacks_mod_exit);
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Joe Lawrence <joe.lawrence@redhat.com>");
-MODULE_DESCRIPTION("Livepatch test: target module");
diff --git a/lib/livepatch/test_klp_livepatch.c b/lib/livepatch/test_klp_livepatch.c
deleted file mode 100644
index aff08199de71..000000000000
--- a/lib/livepatch/test_klp_livepatch.c
+++ /dev/null
@@ -1,51 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/livepatch.h>
-
-#include <linux/seq_file.h>
-static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
-{
- seq_printf(m, "%s: %s\n", THIS_MODULE->name,
- "this has been live patched");
- return 0;
-}
-
-static struct klp_func funcs[] = {
- {
- .old_name = "cmdline_proc_show",
- .new_func = livepatch_cmdline_proc_show,
- }, { }
-};
-
-static struct klp_object objs[] = {
- {
- /* name being NULL means vmlinux */
- .funcs = funcs,
- }, { }
-};
-
-static struct klp_patch patch = {
- .mod = THIS_MODULE,
- .objs = objs,
-};
-
-static int test_klp_livepatch_init(void)
-{
- return klp_enable_patch(&patch);
-}
-
-static void test_klp_livepatch_exit(void)
-{
-}
-
-module_init(test_klp_livepatch_init);
-module_exit(test_klp_livepatch_exit);
-MODULE_LICENSE("GPL");
-MODULE_INFO(livepatch, "Y");
-MODULE_AUTHOR("Seth Jennings <sjenning@redhat.com>");
-MODULE_DESCRIPTION("Livepatch test: livepatch module");
diff --git a/lib/livepatch/test_klp_shadow_vars.c b/lib/livepatch/test_klp_shadow_vars.c
deleted file mode 100644
index b99116490858..000000000000
--- a/lib/livepatch/test_klp_shadow_vars.c
+++ /dev/null
@@ -1,301 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com>
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/livepatch.h>
-#include <linux/slab.h>
-
-/*
- * Keep a small list of pointers so that we can print address-agnostic
- * pointer values. Use a rolling integer count to differentiate the values.
- * Ironically we could have used the shadow variable API to do this, but
- * let's not lean too heavily on the very code we're testing.
- */
-static LIST_HEAD(ptr_list);
-struct shadow_ptr {
- void *ptr;
- int id;
- struct list_head list;
-};
-
-static void free_ptr_list(void)
-{
- struct shadow_ptr *sp, *tmp_sp;
-
- list_for_each_entry_safe(sp, tmp_sp, &ptr_list, list) {
- list_del(&sp->list);
- kfree(sp);
- }
-}
-
-static int ptr_id(void *ptr)
-{
- struct shadow_ptr *sp;
- static int count;
-
- list_for_each_entry(sp, &ptr_list, list) {
- if (sp->ptr == ptr)
- return sp->id;
- }
-
- sp = kmalloc(sizeof(*sp), GFP_ATOMIC);
- if (!sp)
- return -ENOMEM;
- sp->ptr = ptr;
- sp->id = count++;
-
- list_add(&sp->list, &ptr_list);
-
- return sp->id;
-}
-
-/*
- * Shadow variable wrapper functions that echo the function and arguments
- * to the kernel log for testing verification. Don't display raw pointers,
- * but use the ptr_id() value instead.
- */
-static void *shadow_get(void *obj, unsigned long id)
-{
- int **sv;
-
- sv = klp_shadow_get(obj, id);
- pr_info("klp_%s(obj=PTR%d, id=0x%lx) = PTR%d\n",
- __func__, ptr_id(obj), id, ptr_id(sv));
-
- return sv;
-}
-
-static void *shadow_alloc(void *obj, unsigned long id, size_t size,
- gfp_t gfp_flags, klp_shadow_ctor_t ctor,
- void *ctor_data)
-{
- int **var = ctor_data;
- int **sv;
-
- sv = klp_shadow_alloc(obj, id, size, gfp_flags, ctor, var);
- pr_info("klp_%s(obj=PTR%d, id=0x%lx, size=%zx, gfp_flags=%pGg), ctor=PTR%d, ctor_data=PTR%d = PTR%d\n",
- __func__, ptr_id(obj), id, size, &gfp_flags, ptr_id(ctor),
- ptr_id(*var), ptr_id(sv));
-
- return sv;
-}
-
-static void *shadow_get_or_alloc(void *obj, unsigned long id, size_t size,
- gfp_t gfp_flags, klp_shadow_ctor_t ctor,
- void *ctor_data)
-{
- int **var = ctor_data;
- int **sv;
-
- sv = klp_shadow_get_or_alloc(obj, id, size, gfp_flags, ctor, var);
- pr_info("klp_%s(obj=PTR%d, id=0x%lx, size=%zx, gfp_flags=%pGg), ctor=PTR%d, ctor_data=PTR%d = PTR%d\n",
- __func__, ptr_id(obj), id, size, &gfp_flags, ptr_id(ctor),
- ptr_id(*var), ptr_id(sv));
-
- return sv;
-}
-
-static void shadow_free(void *obj, unsigned long id, klp_shadow_dtor_t dtor)
-{
- klp_shadow_free(obj, id, dtor);
- pr_info("klp_%s(obj=PTR%d, id=0x%lx, dtor=PTR%d)\n",
- __func__, ptr_id(obj), id, ptr_id(dtor));
-}
-
-static void shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor)
-{
- klp_shadow_free_all(id, dtor);
- pr_info("klp_%s(id=0x%lx, dtor=PTR%d)\n", __func__, id, ptr_id(dtor));
-}
-
-
-/* Shadow variable constructor - remember simple pointer data */
-static int shadow_ctor(void *obj, void *shadow_data, void *ctor_data)
-{
- int **sv = shadow_data;
- int **var = ctor_data;
-
- if (!var)
- return -EINVAL;
-
- *sv = *var;
- pr_info("%s: PTR%d -> PTR%d\n", __func__, ptr_id(sv), ptr_id(*var));
-
- return 0;
-}
-
-/*
- * With more than one item to free in the list, order is not determined and
- * shadow_dtor will not be passed to shadow_free_all() which would make the
- * test fail. (see pass 6)
- */
-static void shadow_dtor(void *obj, void *shadow_data)
-{
- int **sv = shadow_data;
-
- pr_info("%s(obj=PTR%d, shadow_data=PTR%d)\n",
- __func__, ptr_id(obj), ptr_id(sv));
-}
-
-/* number of objects we simulate that need shadow vars */
-#define NUM_OBJS 3
-
-/* dynamically created obj fields have the following shadow var id values */
-#define SV_ID1 0x1234
-#define SV_ID2 0x1235
-
-/*
- * The main test case adds/removes new fields (shadow var) to each of these
- * test structure instances. The last group of fields in the struct represent
- * the idea that shadow variables may be added and removed to and from the
- * struct during execution.
- */
-struct test_object {
- /* add anything here below and avoid to define an empty struct */
- struct shadow_ptr sp;
-
- /* these represent shadow vars added and removed with SV_ID{1,2} */
- /* char nfield1; */
- /* int nfield2; */
-};
-
-static int test_klp_shadow_vars_init(void)
-{
- struct test_object objs[NUM_OBJS];
- char nfields1[NUM_OBJS], *pnfields1[NUM_OBJS], **sv1[NUM_OBJS];
- char *pndup[NUM_OBJS];
- int nfields2[NUM_OBJS], *pnfields2[NUM_OBJS], **sv2[NUM_OBJS];
- void **sv;
- int ret;
- int i;
-
- ptr_id(NULL);
-
- /*
- * With an empty shadow variable hash table, expect not to find
- * any matches.
- */
- sv = shadow_get(&objs[0], SV_ID1);
- if (!sv)
- pr_info(" got expected NULL result\n");
-
- /* pass 1: init & alloc a char+int pair of svars for each objs */
- for (i = 0; i < NUM_OBJS; i++) {
- pnfields1[i] = &nfields1[i];
- ptr_id(pnfields1[i]);
-
- if (i % 2) {
- sv1[i] = shadow_alloc(&objs[i], SV_ID1,
- sizeof(pnfields1[i]), GFP_KERNEL,
- shadow_ctor, &pnfields1[i]);
- } else {
- sv1[i] = shadow_get_or_alloc(&objs[i], SV_ID1,
- sizeof(pnfields1[i]), GFP_KERNEL,
- shadow_ctor, &pnfields1[i]);
- }
- if (!sv1[i]) {
- ret = -ENOMEM;
- goto out;
- }
-
- pnfields2[i] = &nfields2[i];
- ptr_id(pnfields2[i]);
- sv2[i] = shadow_alloc(&objs[i], SV_ID2, sizeof(pnfields2[i]),
- GFP_KERNEL, shadow_ctor, &pnfields2[i]);
- if (!sv2[i]) {
- ret = -ENOMEM;
- goto out;
- }
- }
-
- /* pass 2: verify we find allocated svars and where they point to */
- for (i = 0; i < NUM_OBJS; i++) {
- /* check the "char" svar for all objects */
- sv = shadow_get(&objs[i], SV_ID1);
- if (!sv) {
- ret = -EINVAL;
- goto out;
- }
- if ((char **)sv == sv1[i] && *sv1[i] == pnfields1[i])
- pr_info(" got expected PTR%d -> PTR%d result\n",
- ptr_id(sv1[i]), ptr_id(*sv1[i]));
-
- /* check the "int" svar for all objects */
- sv = shadow_get(&objs[i], SV_ID2);
- if (!sv) {
- ret = -EINVAL;
- goto out;
- }
- if ((int **)sv == sv2[i] && *sv2[i] == pnfields2[i])
- pr_info(" got expected PTR%d -> PTR%d result\n",
- ptr_id(sv2[i]), ptr_id(*sv2[i]));
- }
-
- /* pass 3: verify that 'get_or_alloc' returns already allocated svars */
- for (i = 0; i < NUM_OBJS; i++) {
- pndup[i] = &nfields1[i];
- ptr_id(pndup[i]);
-
- sv = shadow_get_or_alloc(&objs[i], SV_ID1, sizeof(pndup[i]),
- GFP_KERNEL, shadow_ctor, &pndup[i]);
- if (!sv) {
- ret = -EINVAL;
- goto out;
- }
- if ((char **)sv == sv1[i] && *sv1[i] == pnfields1[i])
- pr_info(" got expected PTR%d -> PTR%d result\n",
- ptr_id(sv1[i]), ptr_id(*sv1[i]));
- }
-
- /* pass 4: free <objs[*], SV_ID1> pairs of svars, verify removal */
- for (i = 0; i < NUM_OBJS; i++) {
- shadow_free(&objs[i], SV_ID1, shadow_dtor); /* 'char' pairs */
- sv = shadow_get(&objs[i], SV_ID1);
- if (!sv)
- pr_info(" got expected NULL result\n");
- }
-
- /* pass 5: check we still find <objs[*], SV_ID2> svar pairs */
- for (i = 0; i < NUM_OBJS; i++) {
- sv = shadow_get(&objs[i], SV_ID2); /* 'int' pairs */
- if (!sv) {
- ret = -EINVAL;
- goto out;
- }
- if ((int **)sv == sv2[i] && *sv2[i] == pnfields2[i])
- pr_info(" got expected PTR%d -> PTR%d result\n",
- ptr_id(sv2[i]), ptr_id(*sv2[i]));
- }
-
- /* pass 6: free all the <objs[*], SV_ID2> svar pairs too. */
- shadow_free_all(SV_ID2, NULL); /* 'int' pairs */
- for (i = 0; i < NUM_OBJS; i++) {
- sv = shadow_get(&objs[i], SV_ID2);
- if (!sv)
- pr_info(" got expected NULL result\n");
- }
-
- free_ptr_list();
-
- return 0;
-out:
- shadow_free_all(SV_ID1, NULL); /* 'char' pairs */
- shadow_free_all(SV_ID2, NULL); /* 'int' pairs */
- free_ptr_list();
-
- return ret;
-}
-
-static void test_klp_shadow_vars_exit(void)
-{
-}
-
-module_init(test_klp_shadow_vars_init);
-module_exit(test_klp_shadow_vars_exit);
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Joe Lawrence <joe.lawrence@redhat.com>");
-MODULE_DESCRIPTION("Livepatch test: shadow variables");
diff --git a/lib/livepatch/test_klp_state.c b/lib/livepatch/test_klp_state.c
deleted file mode 100644
index 57a4253acb01..000000000000
--- a/lib/livepatch/test_klp_state.c
+++ /dev/null
@@ -1,162 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2019 SUSE
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/printk.h>
-#include <linux/livepatch.h>
-
-#define CONSOLE_LOGLEVEL_STATE 1
-/* Version 1 does not support migration. */
-#define CONSOLE_LOGLEVEL_STATE_VERSION 1
-
-static const char *const module_state[] = {
- [MODULE_STATE_LIVE] = "[MODULE_STATE_LIVE] Normal state",
- [MODULE_STATE_COMING] = "[MODULE_STATE_COMING] Full formed, running module_init",
- [MODULE_STATE_GOING] = "[MODULE_STATE_GOING] Going away",
- [MODULE_STATE_UNFORMED] = "[MODULE_STATE_UNFORMED] Still setting it up",
-};
-
-static void callback_info(const char *callback, struct klp_object *obj)
-{
- if (obj->mod)
- pr_info("%s: %s -> %s\n", callback, obj->mod->name,
- module_state[obj->mod->state]);
- else
- pr_info("%s: vmlinux\n", callback);
-}
-
-static struct klp_patch patch;
-
-static int allocate_loglevel_state(void)
-{
- struct klp_state *loglevel_state;
-
- loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE);
- if (!loglevel_state)
- return -EINVAL;
-
- loglevel_state->data = kzalloc(sizeof(console_loglevel), GFP_KERNEL);
- if (!loglevel_state->data)
- return -ENOMEM;
-
- pr_info("%s: allocating space to store console_loglevel\n",
- __func__);
- return 0;
-}
-
-static void fix_console_loglevel(void)
-{
- struct klp_state *loglevel_state;
-
- loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE);
- if (!loglevel_state)
- return;
-
- pr_info("%s: fixing console_loglevel\n", __func__);
- *(int *)loglevel_state->data = console_loglevel;
- console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH;
-}
-
-static void restore_console_loglevel(void)
-{
- struct klp_state *loglevel_state;
-
- loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE);
- if (!loglevel_state)
- return;
-
- pr_info("%s: restoring console_loglevel\n", __func__);
- console_loglevel = *(int *)loglevel_state->data;
-}
-
-static void free_loglevel_state(void)
-{
- struct klp_state *loglevel_state;
-
- loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE);
- if (!loglevel_state)
- return;
-
- pr_info("%s: freeing space for the stored console_loglevel\n",
- __func__);
- kfree(loglevel_state->data);
-}
-
-/* Executed on object patching (ie, patch enablement) */
-static int pre_patch_callback(struct klp_object *obj)
-{
- callback_info(__func__, obj);
- return allocate_loglevel_state();
-}
-
-/* Executed on object unpatching (ie, patch disablement) */
-static void post_patch_callback(struct klp_object *obj)
-{
- callback_info(__func__, obj);
- fix_console_loglevel();
-}
-
-/* Executed on object unpatching (ie, patch disablement) */
-static void pre_unpatch_callback(struct klp_object *obj)
-{
- callback_info(__func__, obj);
- restore_console_loglevel();
-}
-
-/* Executed on object unpatching (ie, patch disablement) */
-static void post_unpatch_callback(struct klp_object *obj)
-{
- callback_info(__func__, obj);
- free_loglevel_state();
-}
-
-static struct klp_func no_funcs[] = {
- {}
-};
-
-static struct klp_object objs[] = {
- {
- .name = NULL, /* vmlinux */
- .funcs = no_funcs,
- .callbacks = {
- .pre_patch = pre_patch_callback,
- .post_patch = post_patch_callback,
- .pre_unpatch = pre_unpatch_callback,
- .post_unpatch = post_unpatch_callback,
- },
- }, { }
-};
-
-static struct klp_state states[] = {
- {
- .id = CONSOLE_LOGLEVEL_STATE,
- .version = CONSOLE_LOGLEVEL_STATE_VERSION,
- }, { }
-};
-
-static struct klp_patch patch = {
- .mod = THIS_MODULE,
- .objs = objs,
- .states = states,
- .replace = true,
-};
-
-static int test_klp_callbacks_demo_init(void)
-{
- return klp_enable_patch(&patch);
-}
-
-static void test_klp_callbacks_demo_exit(void)
-{
-}
-
-module_init(test_klp_callbacks_demo_init);
-module_exit(test_klp_callbacks_demo_exit);
-MODULE_LICENSE("GPL");
-MODULE_INFO(livepatch, "Y");
-MODULE_AUTHOR("Petr Mladek <pmladek@suse.com>");
-MODULE_DESCRIPTION("Livepatch test: system state modification");
diff --git a/lib/livepatch/test_klp_state2.c b/lib/livepatch/test_klp_state2.c
deleted file mode 100644
index c978ea4d5e67..000000000000
--- a/lib/livepatch/test_klp_state2.c
+++ /dev/null
@@ -1,191 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2019 SUSE
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/printk.h>
-#include <linux/livepatch.h>
-
-#define CONSOLE_LOGLEVEL_STATE 1
-/* Version 2 supports migration. */
-#define CONSOLE_LOGLEVEL_STATE_VERSION 2
-
-static const char *const module_state[] = {
- [MODULE_STATE_LIVE] = "[MODULE_STATE_LIVE] Normal state",
- [MODULE_STATE_COMING] = "[MODULE_STATE_COMING] Full formed, running module_init",
- [MODULE_STATE_GOING] = "[MODULE_STATE_GOING] Going away",
- [MODULE_STATE_UNFORMED] = "[MODULE_STATE_UNFORMED] Still setting it up",
-};
-
-static void callback_info(const char *callback, struct klp_object *obj)
-{
- if (obj->mod)
- pr_info("%s: %s -> %s\n", callback, obj->mod->name,
- module_state[obj->mod->state]);
- else
- pr_info("%s: vmlinux\n", callback);
-}
-
-static struct klp_patch patch;
-
-static int allocate_loglevel_state(void)
-{
- struct klp_state *loglevel_state, *prev_loglevel_state;
-
- prev_loglevel_state = klp_get_prev_state(CONSOLE_LOGLEVEL_STATE);
- if (prev_loglevel_state) {
- pr_info("%s: space to store console_loglevel already allocated\n",
- __func__);
- return 0;
- }
-
- loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE);
- if (!loglevel_state)
- return -EINVAL;
-
- loglevel_state->data = kzalloc(sizeof(console_loglevel), GFP_KERNEL);
- if (!loglevel_state->data)
- return -ENOMEM;
-
- pr_info("%s: allocating space to store console_loglevel\n",
- __func__);
- return 0;
-}
-
-static void fix_console_loglevel(void)
-{
- struct klp_state *loglevel_state, *prev_loglevel_state;
-
- loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE);
- if (!loglevel_state)
- return;
-
- prev_loglevel_state = klp_get_prev_state(CONSOLE_LOGLEVEL_STATE);
- if (prev_loglevel_state) {
- pr_info("%s: taking over the console_loglevel change\n",
- __func__);
- loglevel_state->data = prev_loglevel_state->data;
- return;
- }
-
- pr_info("%s: fixing console_loglevel\n", __func__);
- *(int *)loglevel_state->data = console_loglevel;
- console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH;
-}
-
-static void restore_console_loglevel(void)
-{
- struct klp_state *loglevel_state, *prev_loglevel_state;
-
- prev_loglevel_state = klp_get_prev_state(CONSOLE_LOGLEVEL_STATE);
- if (prev_loglevel_state) {
- pr_info("%s: passing the console_loglevel change back to the old livepatch\n",
- __func__);
- return;
- }
-
- loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE);
- if (!loglevel_state)
- return;
-
- pr_info("%s: restoring console_loglevel\n", __func__);
- console_loglevel = *(int *)loglevel_state->data;
-}
-
-static void free_loglevel_state(void)
-{
- struct klp_state *loglevel_state, *prev_loglevel_state;
-
- prev_loglevel_state = klp_get_prev_state(CONSOLE_LOGLEVEL_STATE);
- if (prev_loglevel_state) {
- pr_info("%s: keeping space to store console_loglevel\n",
- __func__);
- return;
- }
-
- loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE);
- if (!loglevel_state)
- return;
-
- pr_info("%s: freeing space for the stored console_loglevel\n",
- __func__);
- kfree(loglevel_state->data);
-}
-
-/* Executed on object patching (ie, patch enablement) */
-static int pre_patch_callback(struct klp_object *obj)
-{
- callback_info(__func__, obj);
- return allocate_loglevel_state();
-}
-
-/* Executed on object unpatching (ie, patch disablement) */
-static void post_patch_callback(struct klp_object *obj)
-{
- callback_info(__func__, obj);
- fix_console_loglevel();
-}
-
-/* Executed on object unpatching (ie, patch disablement) */
-static void pre_unpatch_callback(struct klp_object *obj)
-{
- callback_info(__func__, obj);
- restore_console_loglevel();
-}
-
-/* Executed on object unpatching (ie, patch disablement) */
-static void post_unpatch_callback(struct klp_object *obj)
-{
- callback_info(__func__, obj);
- free_loglevel_state();
-}
-
-static struct klp_func no_funcs[] = {
- {}
-};
-
-static struct klp_object objs[] = {
- {
- .name = NULL, /* vmlinux */
- .funcs = no_funcs,
- .callbacks = {
- .pre_patch = pre_patch_callback,
- .post_patch = post_patch_callback,
- .pre_unpatch = pre_unpatch_callback,
- .post_unpatch = post_unpatch_callback,
- },
- }, { }
-};
-
-static struct klp_state states[] = {
- {
- .id = CONSOLE_LOGLEVEL_STATE,
- .version = CONSOLE_LOGLEVEL_STATE_VERSION,
- }, { }
-};
-
-static struct klp_patch patch = {
- .mod = THIS_MODULE,
- .objs = objs,
- .states = states,
- .replace = true,
-};
-
-static int test_klp_callbacks_demo_init(void)
-{
- return klp_enable_patch(&patch);
-}
-
-static void test_klp_callbacks_demo_exit(void)
-{
-}
-
-module_init(test_klp_callbacks_demo_init);
-module_exit(test_klp_callbacks_demo_exit);
-MODULE_LICENSE("GPL");
-MODULE_INFO(livepatch, "Y");
-MODULE_AUTHOR("Petr Mladek <pmladek@suse.com>");
-MODULE_DESCRIPTION("Livepatch test: system state modification");
diff --git a/lib/livepatch/test_klp_state3.c b/lib/livepatch/test_klp_state3.c
deleted file mode 100644
index 9226579d10c5..000000000000
--- a/lib/livepatch/test_klp_state3.c
+++ /dev/null
@@ -1,5 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2019 SUSE
-
-/* The console loglevel fix is the same in the next cumulative patch. */
-#include "test_klp_state2.c"
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 6f241bb38799..55e1b35bf877 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -1307,8 +1307,8 @@ static inline void mas_free(struct ma_state *mas, struct maple_enode *used)
}
/*
- * mas_node_count() - Check if enough nodes are allocated and request more if
- * there is not enough nodes.
+ * mas_node_count_gfp() - Check if enough nodes are allocated and request more
+ * if there are not enough nodes.
* @mas: The maple state
* @count: The number of nodes needed
* @gfp: the gfp flags
@@ -2271,8 +2271,6 @@ bool mast_spanning_rebalance(struct maple_subtree_state *mast)
struct ma_state l_tmp = *mast->orig_l;
unsigned char depth = 0;
- r_tmp = *mast->orig_r;
- l_tmp = *mast->orig_l;
do {
mas_ascend(mast->orig_r);
mas_ascend(mast->orig_l);
@@ -4290,6 +4288,56 @@ exists:
}
+/**
+ * mas_alloc_cyclic() - Internal call to find somewhere to store an entry
+ * @mas: The maple state.
+ * @startp: Pointer to ID.
+ * @entry: The entry to store.
+ * @range_lo: Lower bound of range to search.
+ * @range_hi: Upper bound of range to search.
+ * @next: Pointer to next ID to allocate.
+ * @gfp: The GFP_FLAGS to use for allocations.
+ *
+ * Return: 0 if the allocation succeeded without wrapping, 1 if the
+ * allocation succeeded after wrapping, or -EBUSY if there are no
+ * free entries.
+ */
+int mas_alloc_cyclic(struct ma_state *mas, unsigned long *startp,
+ void *entry, unsigned long range_lo, unsigned long range_hi,
+ unsigned long *next, gfp_t gfp)
+{
+ unsigned long min = range_lo;
+ int ret = 0;
+
+ range_lo = max(min, *next);
+ ret = mas_empty_area(mas, range_lo, range_hi, 1);
+ if ((mas->tree->ma_flags & MT_FLAGS_ALLOC_WRAPPED) && ret == 0) {
+ mas->tree->ma_flags &= ~MT_FLAGS_ALLOC_WRAPPED;
+ ret = 1;
+ }
+ if (ret < 0 && range_lo > min) {
+ ret = mas_empty_area(mas, min, range_hi, 1);
+ if (ret == 0)
+ ret = 1;
+ }
+ if (ret < 0)
+ return ret;
+
+ do {
+ mas_insert(mas, entry);
+ } while (mas_nomem(mas, gfp));
+ if (mas_is_err(mas))
+ return xa_err(mas->node);
+
+ *startp = mas->index;
+ *next = *startp + 1;
+ if (*next == 0)
+ mas->tree->ma_flags |= MT_FLAGS_ALLOC_WRAPPED;
+
+ return ret;
+}
+EXPORT_SYMBOL(mas_alloc_cyclic);
+
static __always_inline void mas_rewalk(struct ma_state *mas, unsigned long index)
{
retry:
@@ -6443,6 +6491,49 @@ unlock:
}
EXPORT_SYMBOL(mtree_alloc_range);
+/**
+ * mtree_alloc_cyclic() - Find somewhere to store this entry in the tree.
+ * @mt: The maple tree.
+ * @startp: Pointer to ID.
+ * @range_lo: Lower bound of range to search.
+ * @range_hi: Upper bound of range to search.
+ * @entry: The entry to store.
+ * @next: Pointer to next ID to allocate.
+ * @gfp: The GFP_FLAGS to use for allocations.
+ *
+ * Finds an empty entry in @mt after @next, stores the new index into
+ * the @startp pointer, stores the entry at that index, then updates @next.
+ *
+ * @mt must be initialized with the MT_FLAGS_ALLOC_RANGE flag.
+ *
+ * Context: Any context. Takes and releases the mt.lock. May sleep if
+ * the @gfp flags permit.
+ *
+ * Return: 0 if the allocation succeeded without wrapping, 1 if the
+ * allocation succeeded after wrapping, -ENOMEM if memory could not be
+ * allocated, -EINVAL if @mt cannot be used, or -EBUSY if there are no
+ * free entries.
+ */
+int mtree_alloc_cyclic(struct maple_tree *mt, unsigned long *startp,
+ void *entry, unsigned long range_lo, unsigned long range_hi,
+ unsigned long *next, gfp_t gfp)
+{
+ int ret;
+
+ MA_STATE(mas, mt, 0, 0);
+
+ if (!mt_is_alloc(mt))
+ return -EINVAL;
+ if (WARN_ON_ONCE(mt_is_reserved(entry)))
+ return -EINVAL;
+ mtree_lock(mt);
+ ret = mas_alloc_cyclic(&mas, startp, entry, range_lo, range_hi,
+ next, gfp);
+ mtree_unlock(mt);
+ return ret;
+}
+EXPORT_SYMBOL(mtree_alloc_cyclic);
+
int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp,
void *entry, unsigned long size, unsigned long min,
unsigned long max, gfp_t gfp)
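
mas_alloc_cyclic()/mtree_alloc_cyclic() give the maple tree an idr_alloc_cyclic()-style allocator: search upward from *next, fall back to range_lo on failure, and report a wrap with a return value of 1 (tracked across calls via MT_FLAGS_ALLOC_WRAPPED). A usage sketch under the documented constraints (tree initialized with MT_FLAGS_ALLOC_RANGE; names are illustrative):

	static struct maple_tree tree = MTREE_INIT(tree, MT_FLAGS_ALLOC_RANGE);
	static unsigned long next_id;
	unsigned long id;
	int ret;

	ret = mtree_alloc_cyclic(&tree, &id, entry, 0, ULONG_MAX,
				 &next_id, GFP_KERNEL);
	if (ret < 0)
		return ret;	/* -EBUSY: no free index at all */
	/* ret == 1: the search wrapped past next_id back to range_lo. */
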
diff --git a/lib/math/div64.c b/lib/math/div64.c
index 55a81782e271..191761b1b623 100644
--- a/lib/math/div64.c
+++ b/lib/math/div64.c
@@ -22,6 +22,7 @@
#include <linux/export.h>
#include <linux/math.h>
#include <linux/math64.h>
+#include <linux/minmax.h>
#include <linux/log2.h>
/* Not needed on 64bit architectures */
@@ -191,6 +192,20 @@ u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 c)
/* can a * b overflow ? */
if (ilog2(a) + ilog2(b) > 62) {
/*
+ * Note that the algorithm after the if block below might lose
+ * some precision and the result is more exact for b > a. So
+ * exchange a and b if a is bigger than b.
+ *
+ * For example with a = 43980465100800, b = 100000000, c = 1000000000
+ * the below calculation doesn't modify b at all because div == 0
+ * and then shift becomes 45 + 26 - 62 = 9 and so the result
+ * becomes 4398035251080. However with a and b swapped the exact
+ * result is calculated (i.e. 4398046510080).
+ */
+ if (a > b)
+ swap(a, b);
+
+ /*
* (b * a) / c is equal to
*
* (b / c) * a +
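
The numbers in the new comment check out: 100000000/1000000000 reduces to 1/10 and 43980465100800 is a multiple of 10, so the exact quotient is 4398046510080; without the swap, the fallback path shifts away 9 low-order bits and yields 4398035251080. With the swap in place:

	/* now exact regardless of argument order */
	u64 r = mul_u64_u64_div_u64(43980465100800ULL, 100000000ULL,
				    1000000000ULL);
	/* r == 4398046510080 */
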
diff --git a/lib/memcpy_kunit.c b/lib/memcpy_kunit.c
index 440aee705ccc..30e00ef0bf2e 100644
--- a/lib/memcpy_kunit.c
+++ b/lib/memcpy_kunit.c
@@ -32,7 +32,7 @@ struct some_bytes {
BUILD_BUG_ON(sizeof(instance.data) != 32); \
for (size_t i = 0; i < sizeof(instance.data); i++) { \
KUNIT_ASSERT_EQ_MSG(test, instance.data[i], v, \
- "line %d: '%s' not initialized to 0x%02x @ %d (saw 0x%02x)\n", \
+ "line %d: '%s' not initialized to 0x%02x @ %zu (saw 0x%02x)\n", \
__LINE__, #instance, v, i, instance.data[i]); \
} \
} while (0)
@@ -41,7 +41,7 @@ struct some_bytes {
BUILD_BUG_ON(sizeof(one) != sizeof(two)); \
for (size_t i = 0; i < sizeof(one); i++) { \
KUNIT_EXPECT_EQ_MSG(test, one.data[i], two.data[i], \
- "line %d: %s.data[%d] (0x%02x) != %s.data[%d] (0x%02x)\n", \
+ "line %d: %s.data[%zu] (0x%02x) != %s.data[%zu] (0x%02x)\n", \
__LINE__, #one, i, one.data[i], #two, i, two.data[i]); \
} \
kunit_info(test, "ok: " TEST_OP "() " name "\n"); \
diff --git a/lib/nlattr.c b/lib/nlattr.c
index ed2ab43e1b22..be9c576b6e2d 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -30,6 +30,8 @@ static const u8 nla_attr_len[NLA_TYPE_MAX+1] = {
[NLA_S16] = sizeof(s16),
[NLA_S32] = sizeof(s32),
[NLA_S64] = sizeof(s64),
+ [NLA_BE16] = sizeof(__be16),
+ [NLA_BE32] = sizeof(__be32),
};
static const u8 nla_attr_minlen[NLA_TYPE_MAX+1] = {
@@ -43,6 +45,8 @@ static const u8 nla_attr_minlen[NLA_TYPE_MAX+1] = {
[NLA_S16] = sizeof(s16),
[NLA_S32] = sizeof(s32),
[NLA_S64] = sizeof(s64),
+ [NLA_BE16] = sizeof(__be16),
+ [NLA_BE32] = sizeof(__be32),
};
/*
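
Adding NLA_BE16/NLA_BE32 to the length tables gives big-endian attributes the same automatic payload-length validation as the native-endian integer types. A policy using them might look like this (attribute IDs are hypothetical):

	static const struct nla_policy example_policy[] = {
		[EXAMPLE_ATTR_PORT]  = { .type = NLA_BE16 },	/* __be16 payload */
		[EXAMPLE_ATTR_LABEL] = { .type = NLA_BE32 },	/* __be32 payload */
	};
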
diff --git a/lib/overflow_kunit.c b/lib/overflow_kunit.c
index c527f6b75789..65e8a72a83bf 100644
--- a/lib/overflow_kunit.c
+++ b/lib/overflow_kunit.c
@@ -258,25 +258,84 @@ DEFINE_TEST_ARRAY(s64) = {
\
_of = check_ ## op ## _overflow(a, b, &_r); \
KUNIT_EXPECT_EQ_MSG(test, _of, of, \
- "expected "fmt" "sym" "fmt" to%s overflow (type %s)\n", \
+ "expected check "fmt" "sym" "fmt" to%s overflow (type %s)\n", \
a, b, of ? "" : " not", #t); \
KUNIT_EXPECT_EQ_MSG(test, _r, r, \
- "expected "fmt" "sym" "fmt" == "fmt", got "fmt" (type %s)\n", \
+ "expected check "fmt" "sym" "fmt" == "fmt", got "fmt" (type %s)\n", \
a, b, r, _r, #t); \
/* Check for internal macro side-effects. */ \
_of = check_ ## op ## _overflow(_a_orig++, _b_orig++, &_r); \
- KUNIT_EXPECT_EQ_MSG(test, _a_orig, _a_bump, "Unexpected " #op " macro side-effect!\n"); \
- KUNIT_EXPECT_EQ_MSG(test, _b_orig, _b_bump, "Unexpected " #op " macro side-effect!\n"); \
+ KUNIT_EXPECT_EQ_MSG(test, _a_orig, _a_bump, \
+ "Unexpected check " #op " macro side-effect!\n"); \
+ KUNIT_EXPECT_EQ_MSG(test, _b_orig, _b_bump, \
+ "Unexpected check " #op " macro side-effect!\n"); \
+ \
+ _r = wrapping_ ## op(t, a, b); \
+ KUNIT_EXPECT_TRUE_MSG(test, _r == r, \
+ "expected wrap "fmt" "sym" "fmt" == "fmt", got "fmt" (type %s)\n", \
+ a, b, r, _r, #t); \
+ /* Check for internal macro side-effects. */ \
+ _a_orig = a; \
+ _b_orig = b; \
+ _r = wrapping_ ## op(t, _a_orig++, _b_orig++); \
+ KUNIT_EXPECT_EQ_MSG(test, _a_orig, _a_bump, \
+ "Unexpected wrap " #op " macro side-effect!\n"); \
+ KUNIT_EXPECT_EQ_MSG(test, _b_orig, _b_bump, \
+ "Unexpected wrap " #op " macro side-effect!\n"); \
+} while (0)
+
+static int global_counter;
+static void bump_counter(void)
+{
+ global_counter++;
+}
+
+static int get_index(void)
+{
+ volatile int index = 0;
+ bump_counter();
+ return index;
+}
+
+#define check_self_op(fmt, op, sym, a, b) do { \
+ typeof(a + 0) _a = a; \
+ typeof(b + 0) _b = b; \
+ typeof(a + 0) _a_sym = a; \
+ typeof(a + 0) _a_orig[1] = { a }; \
+ typeof(b + 0) _b_orig = b; \
+ typeof(b + 0) _b_bump = b + 1; \
+ typeof(a + 0) _r; \
+ \
+ _a_sym sym _b; \
+ _r = wrapping_ ## op(_a, _b); \
+ KUNIT_EXPECT_TRUE_MSG(test, _r == _a_sym, \
+ "expected "fmt" "#op" "fmt" == "fmt", got "fmt"\n", \
+ a, b, _a_sym, _r); \
+ KUNIT_EXPECT_TRUE_MSG(test, _a == _a_sym, \
+ "expected "fmt" "#op" "fmt" == "fmt", got "fmt"\n", \
+ a, b, _a_sym, _a); \
+ /* Check for internal macro side-effects. */ \
+ global_counter = 0; \
+ wrapping_ ## op(_a_orig[get_index()], _b_orig++); \
+ KUNIT_EXPECT_EQ_MSG(test, global_counter, 1, \
+ "Unexpected wrapping_" #op " macro side-effect on arg1!\n"); \
+ KUNIT_EXPECT_EQ_MSG(test, _b_orig, _b_bump, \
+ "Unexpected wrapping_" #op " macro side-effect on arg2!\n"); \
} while (0)
#define DEFINE_TEST_FUNC_TYPED(n, t, fmt) \
static void do_test_ ## n(struct kunit *test, const struct test_ ## n *p) \
{ \
+ /* check_{add,sub,mul}_overflow() and wrapping_{add,sub,mul} */ \
check_one_op(t, fmt, add, "+", p->a, p->b, p->sum, p->s_of); \
check_one_op(t, fmt, add, "+", p->b, p->a, p->sum, p->s_of); \
check_one_op(t, fmt, sub, "-", p->a, p->b, p->diff, p->d_of); \
check_one_op(t, fmt, mul, "*", p->a, p->b, p->prod, p->p_of); \
check_one_op(t, fmt, mul, "*", p->b, p->a, p->prod, p->p_of); \
+ /* wrapping_assign_{add,sub}() */ \
+ check_self_op(fmt, assign_add, +=, p->a, p->b); \
+ check_self_op(fmt, assign_add, +=, p->b, p->a); \
+ check_self_op(fmt, assign_sub, -=, p->a, p->b); \
} \
\
static void n ## _overflow_test(struct kunit *test) { \
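
The extended macros now exercise the wrapping_*() family alongside check_*_overflow(): the same arithmetic, but with well-defined wraparound instead of an overflow flag, and with single evaluation of each argument (which is what the get_index()/global_counter machinery verifies). Brief usage, as the tests exercise it:

	u8 sum = wrapping_add(u8, 255, 1);	/* sum == 0, no UB */
	int budget = 10;

	wrapping_assign_sub(budget, 3);		/* budget == 7 */
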
diff --git a/lib/pci_iomap.c b/lib/pci_iomap.c
deleted file mode 100644
index ce39ce9f3526..000000000000
--- a/lib/pci_iomap.c
+++ /dev/null
@@ -1,180 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Implement the default iomap interfaces
- *
- * (C) Copyright 2004 Linus Torvalds
- */
-#include <linux/pci.h>
-#include <linux/io.h>
-
-#include <linux/export.h>
-
-#ifdef CONFIG_PCI
-/**
- * pci_iomap_range - create a virtual mapping cookie for a PCI BAR
- * @dev: PCI device that owns the BAR
- * @bar: BAR number
- * @offset: map memory at the given offset in BAR
- * @maxlen: max length of the memory to map
- *
- * Using this function you will get a __iomem address to your device BAR.
- * You can access it using ioread*() and iowrite*(). These functions hide
- * the details if this is a MMIO or PIO address space and will just do what
- * you expect from them in the correct way.
- *
- * @maxlen specifies the maximum length to map. If you want to get access to
- * the complete BAR from offset to the end, pass %0 here.
- * */
-void __iomem *pci_iomap_range(struct pci_dev *dev,
- int bar,
- unsigned long offset,
- unsigned long maxlen)
-{
- resource_size_t start = pci_resource_start(dev, bar);
- resource_size_t len = pci_resource_len(dev, bar);
- unsigned long flags = pci_resource_flags(dev, bar);
-
- if (len <= offset || !start)
- return NULL;
- len -= offset;
- start += offset;
- if (maxlen && len > maxlen)
- len = maxlen;
- if (flags & IORESOURCE_IO)
- return __pci_ioport_map(dev, start, len);
- if (flags & IORESOURCE_MEM)
- return ioremap(start, len);
- /* What? */
- return NULL;
-}
-EXPORT_SYMBOL(pci_iomap_range);
-
-/**
- * pci_iomap_wc_range - create a virtual WC mapping cookie for a PCI BAR
- * @dev: PCI device that owns the BAR
- * @bar: BAR number
- * @offset: map memory at the given offset in BAR
- * @maxlen: max length of the memory to map
- *
- * Using this function you will get a __iomem address to your device BAR.
- * You can access it using ioread*() and iowrite*(). These functions hide
- * the details if this is a MMIO or PIO address space and will just do what
- * you expect from them in the correct way. When possible write combining
- * is used.
- *
- * @maxlen specifies the maximum length to map. If you want to get access to
- * the complete BAR from offset to the end, pass %0 here.
- * */
-void __iomem *pci_iomap_wc_range(struct pci_dev *dev,
- int bar,
- unsigned long offset,
- unsigned long maxlen)
-{
- resource_size_t start = pci_resource_start(dev, bar);
- resource_size_t len = pci_resource_len(dev, bar);
- unsigned long flags = pci_resource_flags(dev, bar);
-
-
- if (flags & IORESOURCE_IO)
- return NULL;
-
- if (len <= offset || !start)
- return NULL;
-
- len -= offset;
- start += offset;
- if (maxlen && len > maxlen)
- len = maxlen;
-
- if (flags & IORESOURCE_MEM)
- return ioremap_wc(start, len);
-
- /* What? */
- return NULL;
-}
-EXPORT_SYMBOL_GPL(pci_iomap_wc_range);
-
-/**
- * pci_iomap - create a virtual mapping cookie for a PCI BAR
- * @dev: PCI device that owns the BAR
- * @bar: BAR number
- * @maxlen: length of the memory to map
- *
- * Using this function you will get a __iomem address to your device BAR.
- * You can access it using ioread*() and iowrite*(). These functions hide
- * the details if this is a MMIO or PIO address space and will just do what
- * you expect from them in the correct way.
- *
- * @maxlen specifies the maximum length to map. If you want to get access to
- * the complete BAR without checking for its length first, pass %0 here.
- * */
-void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
-{
- return pci_iomap_range(dev, bar, 0, maxlen);
-}
-EXPORT_SYMBOL(pci_iomap);
-
-/**
- * pci_iomap_wc - create a virtual WC mapping cookie for a PCI BAR
- * @dev: PCI device that owns the BAR
- * @bar: BAR number
- * @maxlen: length of the memory to map
- *
- * Using this function you will get a __iomem address to your device BAR.
- * You can access it using ioread*() and iowrite*(). These functions hide
- * the details if this is a MMIO or PIO address space and will just do what
- * you expect from them in the correct way. When possible write combining
- * is used.
- *
- * @maxlen specifies the maximum length to map. If you want to get access to
- * the complete BAR without checking for its length first, pass %0 here.
- * */
-void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
-{
- return pci_iomap_wc_range(dev, bar, 0, maxlen);
-}
-EXPORT_SYMBOL_GPL(pci_iomap_wc);
-
-/*
- * pci_iounmap() somewhat illogically comes from lib/iomap.c for the
- * CONFIG_GENERIC_IOMAP case, because that's the code that knows about
- * the different IOMAP ranges.
- *
- * But if the architecture does not use the generic iomap code, and if
- * it has _not_ defined its own private pci_iounmap function, we define
- * it here.
- *
- * NOTE! This default implementation assumes that if the architecture
- * supports ioport mapping (HAS_IOPORT_MAP), the ioport mapping will
- * be fixed to the range [ PCI_IOBASE, PCI_IOBASE+IO_SPACE_LIMIT [,
- * and does not need unmapping with 'ioport_unmap()'.
- *
- * If you have different rules for your architecture, you need to
- * implement your own pci_iounmap() that knows the rules for where
- * and how IO vs MEM get mapped.
- *
- * This code is odd, and the ARCH_HAS/ARCH_WANTS #define logic comes
- * from legacy <asm-generic/io.h> header file behavior. In particular,
- * it would seem to make sense to do the iounmap(p) for the non-IO-space
- * case here regardless, but that's not what the old header file code
- * did. Probably incorrectly, but this is meant to be bug-for-bug
- * compatible.
- */
-#if defined(ARCH_WANTS_GENERIC_PCI_IOUNMAP)
-
-void pci_iounmap(struct pci_dev *dev, void __iomem *p)
-{
-#ifdef ARCH_HAS_GENERIC_IOPORT_MAP
- uintptr_t start = (uintptr_t) PCI_IOBASE;
- uintptr_t addr = (uintptr_t) p;
-
- if (addr >= start && addr < start + IO_SPACE_LIMIT)
- return;
- iounmap(p);
-#endif
-}
-EXPORT_SYMBOL(pci_iounmap);
-
-#endif /* ARCH_WANTS_GENERIC_PCI_IOUNMAP */
-
-#endif /* CONFIG_PCI */
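With all of lib/pci_iomap.c removed above, a minimal sketch of the driver-side API it documented may help readers of the diff. CTRL_REG and STATUS_REG are hypothetical register offsets, not part of any real device:

    void __iomem *regs;
    u32 status;

    regs = pci_iomap(pdev, 0, 0);   /* map all of BAR 0, MMIO and PIO alike */
    if (!regs)
            return -ENOMEM;

    iowrite32(1, regs + CTRL_REG);          /* hypothetical control register */
    status = ioread32(regs + STATUS_REG);   /* hypothetical status register */

    pci_iounmap(pdev, regs);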
diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile
index 1c5420ff254e..385a94aa0b99 100644
--- a/lib/raid6/Makefile
+++ b/lib/raid6/Makefile
@@ -21,7 +21,7 @@ altivec_flags += -isystem $(shell $(CC) -print-file-name=include)
ifdef CONFIG_CC_IS_CLANG
# clang ppc port does not yet support -maltivec when -msoft-float is
# enabled. A future release of clang will resolve this
-# https://bugs.llvm.org/show_bug.cgi?id=31177
+# https://llvm.org/pr31177
CFLAGS_REMOVE_altivec1.o += -msoft-float
CFLAGS_REMOVE_altivec2.o += -msoft-float
CFLAGS_REMOVE_altivec4.o += -msoft-float
diff --git a/lib/raid6/s390vx.uc b/lib/raid6/s390vx.uc
index cd0b9e95f499..863e2d320938 100644
--- a/lib/raid6/s390vx.uc
+++ b/lib/raid6/s390vx.uc
@@ -12,15 +12,14 @@
*/
#include <linux/raid/pq.h>
-#include <asm/fpu/api.h>
-#include <asm/vx-insn.h>
+#include <asm/fpu.h>
#define NSIZE 16
-static inline void LOAD_CONST(void)
+static __always_inline void LOAD_CONST(void)
{
- asm volatile("VREPIB %v24,7");
- asm volatile("VREPIB %v25,0x1d");
+ fpu_vrepib(24, 0x07);
+ fpu_vrepib(25, 0x1d);
}
/*
@@ -28,10 +27,7 @@ static inline void LOAD_CONST(void)
* vector register y left by 1 bit and stores the result in
* vector register x.
*/
-static inline void SHLBYTE(int x, int y)
-{
- asm volatile ("VAB %0,%1,%1" : : "i" (x), "i" (y));
-}
+#define SHLBYTE(x, y) fpu_vab(x, y, y)
/*
* For each of the 16 bytes in the vector register y the MASK()
@@ -39,49 +35,17 @@ static inline void SHLBYTE(int x, int y)
* or 0x00 if the high bit is 0. The result is stored in vector
* register x.
*/
-static inline void MASK(int x, int y)
-{
- asm volatile ("VESRAVB %0,%1,24" : : "i" (x), "i" (y));
-}
-
-static inline void AND(int x, int y, int z)
-{
- asm volatile ("VN %0,%1,%2" : : "i" (x), "i" (y), "i" (z));
-}
-
-static inline void XOR(int x, int y, int z)
-{
- asm volatile ("VX %0,%1,%2" : : "i" (x), "i" (y), "i" (z));
-}
+#define MASK(x, y) fpu_vesravb(x, y, 24)
-static inline void LOAD_DATA(int x, u8 *ptr)
-{
- typedef struct { u8 _[16 * $#]; } addrtype;
- register addrtype *__ptr asm("1") = (addrtype *) ptr;
-
- asm volatile ("VLM %2,%3,0,%1"
- : : "m" (*__ptr), "a" (__ptr), "i" (x),
- "i" (x + $# - 1));
-}
-
-static inline void STORE_DATA(int x, u8 *ptr)
-{
- typedef struct { u8 _[16 * $#]; } addrtype;
- register addrtype *__ptr asm("1") = (addrtype *) ptr;
-
- asm volatile ("VSTM %2,%3,0,1"
- : "=m" (*__ptr) : "a" (__ptr), "i" (x),
- "i" (x + $# - 1));
-}
-
-static inline void COPY_VEC(int x, int y)
-{
- asm volatile ("VLR %0,%1" : : "i" (x), "i" (y));
-}
+#define AND(x, y, z) fpu_vn(x, y, z)
+#define XOR(x, y, z) fpu_vx(x, y, z)
+#define LOAD_DATA(x, ptr) fpu_vlm(x, x + $# - 1, ptr)
+#define STORE_DATA(x, ptr) fpu_vstm(x, x + $# - 1, ptr)
+#define COPY_VEC(x, y) fpu_vlr(x, y)
static void raid6_s390vx$#_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
- struct kernel_fpu vxstate;
+ DECLARE_KERNEL_FPU_ONSTACK32(vxstate);
u8 **dptr, *p, *q;
int d, z, z0;
@@ -114,7 +78,7 @@ static void raid6_s390vx$#_gen_syndrome(int disks, size_t bytes, void **ptrs)
static void raid6_s390vx$#_xor_syndrome(int disks, int start, int stop,
size_t bytes, void **ptrs)
{
- struct kernel_fpu vxstate;
+ DECLARE_KERNEL_FPU_ONSTACK32(vxstate);
u8 **dptr, *p, *q;
int d, z, z0;
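The conversion above swaps hand-rolled inline assembly for the fpu_*() vector helpers, but the Q-syndrome math is unchanged. As a reading aid, here is a scalar C sketch of the per-byte step the SHLBYTE()/MASK()/AND()/XOR() sequence performs, assuming the standard RAID6 reduction constant 0x1d that LOAD_CONST() replicates into v25:

    #include <stdint.h>

    /* One Q step per byte: q = (2 * q) XOR data, in GF(2^8). */
    static uint8_t q_step(uint8_t q, uint8_t data)
    {
            /* MASK() + AND(): 0x1d where the top bit of q is set, else 0 */
            uint8_t fold = (q & 0x80) ? 0x1d : 0x00;

            q = (uint8_t)(q << 1) ^ fold;   /* SHLBYTE() + XOR() */
            return q ^ data;                /* fold in the next disk's byte */
    }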
diff --git a/lib/sort.c b/lib/sort.c
index b399bf10d675..a0509088f82a 100644
--- a/lib/sort.c
+++ b/lib/sort.c
@@ -215,6 +215,7 @@ void sort_r(void *base, size_t num, size_t size,
/* pre-scale counters for performance */
size_t n = num * size, a = (num/2) * size;
const unsigned int lsbit = size & -size; /* Used to find parent */
+ size_t shift = 0;
if (!a) /* num < 2 || size == 0 */
return;
@@ -242,12 +243,21 @@ void sort_r(void *base, size_t num, size_t size,
for (;;) {
size_t b, c, d;
- if (a) /* Building heap: sift down --a */
- a -= size;
- else if (n -= size) /* Sorting: Extract root to --n */
+ if (a) /* Building heap: sift down a */
+ a -= size << shift;
+ else if (n > 3 * size) { /* Sorting: Extract two largest elements */
+ n -= size;
do_swap(base, base + n, size, swap_func, priv);
- else /* Sort complete */
+ shift = do_cmp(base + size, base + 2 * size, cmp_func, priv) <= 0;
+ a = size << shift;
+ n -= size;
+ do_swap(base + a, base + n, size, swap_func, priv);
+ } else if (n > size) { /* Sorting: Extract root */
+ n -= size;
+ do_swap(base, base + n, size, swap_func, priv);
+ } else { /* Sort complete */
break;
+ }
/*
* Sift element at "a" down into heap. This is the
@@ -262,7 +272,7 @@ void sort_r(void *base, size_t num, size_t size,
* average, 3/4 worst-case.)
*/
for (b = a; c = 2*b + size, (d = c + size) < n;)
- b = do_cmp(base + c, base + d, cmp_func, priv) >= 0 ? c : d;
+ b = do_cmp(base + c, base + d, cmp_func, priv) > 0 ? c : d;
if (d == n) /* Special case last leaf with no sibling */
b = c;
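The double-pop rework above is internal; callers of sort_r() are unaffected. For reference, a small illustrative usage (the struct and comparator are invented for the example):

    struct item {
            int key;
            void *payload;
    };

    static int cmp_items(const void *a, const void *b, const void *priv)
    {
            const struct item *ia = a, *ib = b;

            /* Overflow-safe ascending comparison. */
            return (ia->key > ib->key) - (ia->key < ib->key);
    }

    /* A NULL swap_func lets sort_r() use its built-in swap. */
    sort_r(items, nitems, sizeof(*items), cmp_items, NULL, NULL);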
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 5caa1f566553..af6cc19a2003 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -22,6 +22,7 @@
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
+#include <linux/poison.h>
#include <linux/printk.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
@@ -35,66 +36,11 @@
#include <linux/memblock.h>
#include <linux/kasan-enabled.h>
-#define DEPOT_HANDLE_BITS (sizeof(depot_stack_handle_t) * 8)
-
-#define DEPOT_POOL_ORDER 2 /* Pool size order, 4 pages */
-#define DEPOT_POOL_SIZE (1LL << (PAGE_SHIFT + DEPOT_POOL_ORDER))
-#define DEPOT_STACK_ALIGN 4
-#define DEPOT_OFFSET_BITS (DEPOT_POOL_ORDER + PAGE_SHIFT - DEPOT_STACK_ALIGN)
-#define DEPOT_POOL_INDEX_BITS (DEPOT_HANDLE_BITS - DEPOT_OFFSET_BITS - \
- STACK_DEPOT_EXTRA_BITS)
-#if IS_ENABLED(CONFIG_KMSAN) && CONFIG_STACKDEPOT_MAX_FRAMES >= 32
-/*
- * KMSAN is frequently used in fuzzing scenarios and thus saves a lot of stack
- * traces. As KMSAN does not support evicting stack traces from the stack
- * depot, the stack depot capacity might be reached quickly with large stack
- * records. Adjust the maximum number of stack depot pools for this case.
- */
-#define DEPOT_POOLS_CAP (8192 * (CONFIG_STACKDEPOT_MAX_FRAMES / 16))
-#else
#define DEPOT_POOLS_CAP 8192
-#endif
+/* The pool_index is offset by 1 so the first record does not have a 0 handle. */
#define DEPOT_MAX_POOLS \
- (((1LL << (DEPOT_POOL_INDEX_BITS)) < DEPOT_POOLS_CAP) ? \
- (1LL << (DEPOT_POOL_INDEX_BITS)) : DEPOT_POOLS_CAP)
-
-/* Compact structure that stores a reference to a stack. */
-union handle_parts {
- depot_stack_handle_t handle;
- struct {
- u32 pool_index : DEPOT_POOL_INDEX_BITS;
- u32 offset : DEPOT_OFFSET_BITS;
- u32 extra : STACK_DEPOT_EXTRA_BITS;
- };
-};
-
-struct stack_record {
- struct list_head hash_list; /* Links in the hash table */
- u32 hash; /* Hash in hash table */
- u32 size; /* Number of stored frames */
- union handle_parts handle; /* Constant after initialization */
- refcount_t count;
- union {
- unsigned long entries[CONFIG_STACKDEPOT_MAX_FRAMES]; /* Frames */
- struct {
- /*
- * An important invariant of the implementation is to
- * only place a stack record onto the freelist iff its
- * refcount is zero. Because stack records with a zero
- * refcount are never considered as valid, it is safe to
- * union @entries and freelist management state below.
- * Conversely, as soon as an entry is off the freelist
- * and its refcount becomes non-zero, the below must not
- * be accessed until being placed back on the freelist.
- */
- struct list_head free_list; /* Links in the freelist */
- unsigned long rcu_state; /* RCU cookie */
- };
- };
-};
-
-#define DEPOT_STACK_RECORD_SIZE \
- ALIGN(sizeof(struct stack_record), 1 << DEPOT_STACK_ALIGN)
+ (((1LL << (DEPOT_POOL_INDEX_BITS)) - 1 < DEPOT_POOLS_CAP) ? \
+ (1LL << (DEPOT_POOL_INDEX_BITS)) - 1 : DEPOT_POOLS_CAP)
static bool stack_depot_disabled;
static bool __stack_depot_early_init_requested __initdata = IS_ENABLED(CONFIG_STACKDEPOT_ALWAYS_INIT);
@@ -121,32 +67,31 @@ static void *stack_pools[DEPOT_MAX_POOLS];
static void *new_pool;
/* Number of pools in stack_pools. */
static int pools_num;
+/* Offset to the unused space in the currently used pool. */
+static size_t pool_offset = DEPOT_POOL_SIZE;
/* Freelist of stack records within stack_pools. */
static LIST_HEAD(free_stacks);
-/*
- * Stack depot tries to keep an extra pool allocated even before it runs out
- * of space in the currently used pool. This flag marks whether this extra pool
- * needs to be allocated. It has the value 0 when either an extra pool is not
- * yet allocated or if the limit on the number of pools is reached.
- */
-static bool new_pool_required = true;
/* The lock must be held when performing pool or freelist modifications. */
static DEFINE_RAW_SPINLOCK(pool_lock);
/* Statistics counters for debugfs. */
enum depot_counter_id {
- DEPOT_COUNTER_ALLOCS,
- DEPOT_COUNTER_FREES,
- DEPOT_COUNTER_INUSE,
+ DEPOT_COUNTER_REFD_ALLOCS,
+ DEPOT_COUNTER_REFD_FREES,
+ DEPOT_COUNTER_REFD_INUSE,
DEPOT_COUNTER_FREELIST_SIZE,
+ DEPOT_COUNTER_PERSIST_COUNT,
+ DEPOT_COUNTER_PERSIST_BYTES,
DEPOT_COUNTER_COUNT,
};
static long counters[DEPOT_COUNTER_COUNT];
static const char *const counter_names[] = {
- [DEPOT_COUNTER_ALLOCS] = "allocations",
- [DEPOT_COUNTER_FREES] = "frees",
- [DEPOT_COUNTER_INUSE] = "in_use",
+ [DEPOT_COUNTER_REFD_ALLOCS] = "refcounted_allocations",
+ [DEPOT_COUNTER_REFD_FREES] = "refcounted_frees",
+ [DEPOT_COUNTER_REFD_INUSE] = "refcounted_in_use",
[DEPOT_COUNTER_FREELIST_SIZE] = "freelist_size",
+ [DEPOT_COUNTER_PERSIST_COUNT] = "persistent_count",
+ [DEPOT_COUNTER_PERSIST_BYTES] = "persistent_bytes",
};
static_assert(ARRAY_SIZE(counter_names) == DEPOT_COUNTER_COUNT);
@@ -294,48 +239,52 @@ out_unlock:
EXPORT_SYMBOL_GPL(stack_depot_init);
/*
- * Initializes new stack depot @pool, release all its entries to the freelist,
- * and update the list of pools.
+ * Initializes new stack pool, and updates the list of pools.
*/
-static void depot_init_pool(void *pool)
+static bool depot_init_pool(void **prealloc)
{
- int offset;
-
lockdep_assert_held(&pool_lock);
- /* Initialize handles and link stack records into the freelist. */
- for (offset = 0; offset <= DEPOT_POOL_SIZE - DEPOT_STACK_RECORD_SIZE;
- offset += DEPOT_STACK_RECORD_SIZE) {
- struct stack_record *stack = pool + offset;
-
- stack->handle.pool_index = pools_num;
- stack->handle.offset = offset >> DEPOT_STACK_ALIGN;
- stack->handle.extra = 0;
-
- /*
- * Stack traces of size 0 are never saved, and we can simply use
- * the size field as an indicator if this is a new unused stack
- * record in the freelist.
- */
- stack->size = 0;
+ if (unlikely(pools_num >= DEPOT_MAX_POOLS)) {
+ /* Bail out if we reached the pool limit. */
+ WARN_ON_ONCE(pools_num > DEPOT_MAX_POOLS); /* should never happen */
+ WARN_ON_ONCE(!new_pool); /* to avoid unnecessary pre-allocation */
+ WARN_ONCE(1, "Stack depot reached limit capacity");
+ return false;
+ }
- INIT_LIST_HEAD(&stack->hash_list);
- /*
- * Add to the freelist front to prioritize never-used entries:
- * required in case there are entries in the freelist, but their
- * RCU cookie still belongs to the current RCU grace period
- * (there can still be concurrent readers).
- */
- list_add(&stack->free_list, &free_stacks);
- counters[DEPOT_COUNTER_FREELIST_SIZE]++;
+ if (!new_pool && *prealloc) {
+ /* We have preallocated memory, use it. */
+ WRITE_ONCE(new_pool, *prealloc);
+ *prealloc = NULL;
}
+ if (!new_pool)
+ return false; /* new_pool and *prealloc are NULL */
+
/* Save reference to the pool to be used by depot_fetch_stack(). */
- stack_pools[pools_num] = pool;
+ stack_pools[pools_num] = new_pool;
+
+ /*
+ * Stack depot tries to keep an extra pool allocated even before it runs
+ * out of space in the currently used pool.
+ *
+ * To indicate that a new preallocation is needed new_pool is reset to
+ * NULL; do not reset to NULL if we have reached the maximum number of
+ * pools.
+ */
+ if (pools_num < DEPOT_MAX_POOLS)
+ WRITE_ONCE(new_pool, NULL);
+ else
+ WRITE_ONCE(new_pool, STACK_DEPOT_POISON);
/* Pairs with concurrent READ_ONCE() in depot_fetch_stack(). */
WRITE_ONCE(pools_num, pools_num + 1);
ASSERT_EXCLUSIVE_WRITER(pools_num);
+
+ pool_offset = 0;
+
+ return true;
}
/* Keeps the preallocated memory to be used for a new stack depot pool. */
@@ -347,63 +296,51 @@ static void depot_keep_new_pool(void **prealloc)
* If a new pool is already saved or the maximum number of
* pools is reached, do not use the preallocated memory.
*/
- if (!new_pool_required)
+ if (new_pool)
return;
- /*
- * Use the preallocated memory for the new pool
- * as long as we do not exceed the maximum number of pools.
- */
- if (pools_num < DEPOT_MAX_POOLS) {
- new_pool = *prealloc;
- *prealloc = NULL;
- }
-
- /*
- * At this point, either a new pool is kept or the maximum
- * number of pools is reached. In either case, take note that
- * keeping another pool is not required.
- */
- WRITE_ONCE(new_pool_required, false);
+ WRITE_ONCE(new_pool, *prealloc);
+ *prealloc = NULL;
}
/*
- * Try to initialize a new stack depot pool from either a previous or the
- * current pre-allocation, and release all its entries to the freelist.
+ * Try to initialize a new stack record from the current pool, a cached pool, or
+ * the current pre-allocation.
*/
-static bool depot_try_init_pool(void **prealloc)
+static struct stack_record *depot_pop_free_pool(void **prealloc, size_t size)
{
+ struct stack_record *stack;
+ void *current_pool;
+ u32 pool_index;
+
lockdep_assert_held(&pool_lock);
- /* Check if we have a new pool saved and use it. */
- if (new_pool) {
- depot_init_pool(new_pool);
- new_pool = NULL;
+ if (pool_offset + size > DEPOT_POOL_SIZE) {
+ if (!depot_init_pool(prealloc))
+ return NULL;
+ }
- /* Take note that we might need a new new_pool. */
- if (pools_num < DEPOT_MAX_POOLS)
- WRITE_ONCE(new_pool_required, true);
+ if (WARN_ON_ONCE(pools_num < 1))
+ return NULL;
+ pool_index = pools_num - 1;
+ current_pool = stack_pools[pool_index];
+ if (WARN_ON_ONCE(!current_pool))
+ return NULL;
- return true;
- }
+ stack = current_pool + pool_offset;
- /* Bail out if we reached the pool limit. */
- if (unlikely(pools_num >= DEPOT_MAX_POOLS)) {
- WARN_ONCE(1, "Stack depot reached limit capacity");
- return false;
- }
+ /* Pre-initialize handle once. */
+ stack->handle.pool_index = pool_index + 1;
+ stack->handle.offset = pool_offset >> DEPOT_STACK_ALIGN;
+ stack->handle.extra = 0;
+ INIT_LIST_HEAD(&stack->hash_list);
- /* Check if we have preallocated memory and use it. */
- if (*prealloc) {
- depot_init_pool(*prealloc);
- *prealloc = NULL;
- return true;
- }
+ pool_offset += size;
- return false;
+ return stack;
}
-/* Try to find next free usable entry. */
+/* Try to find next free usable entry from the freelist. */
static struct stack_record *depot_pop_free(void)
{
struct stack_record *stack;
@@ -420,7 +357,7 @@ static struct stack_record *depot_pop_free(void)
* check the first entry.
*/
stack = list_first_entry(&free_stacks, struct stack_record, free_list);
- if (stack->size && !poll_state_synchronize_rcu(stack->rcu_state))
+ if (!poll_state_synchronize_rcu(stack->rcu_state))
return NULL;
list_del(&stack->free_list);
@@ -429,48 +366,73 @@ static struct stack_record *depot_pop_free(void)
return stack;
}
+static inline size_t depot_stack_record_size(struct stack_record *s, unsigned int nr_entries)
+{
+ const size_t used = flex_array_size(s, entries, nr_entries);
+ const size_t unused = sizeof(s->entries) - used;
+
+ WARN_ON_ONCE(sizeof(s->entries) < used);
+
+ return ALIGN(sizeof(struct stack_record) - unused, 1 << DEPOT_STACK_ALIGN);
+}
+
/* Allocates a new stack in a stack depot pool. */
static struct stack_record *
-depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
+depot_alloc_stack(unsigned long *entries, unsigned int nr_entries, u32 hash, depot_flags_t flags, void **prealloc)
{
- struct stack_record *stack;
+ struct stack_record *stack = NULL;
+ size_t record_size;
lockdep_assert_held(&pool_lock);
/* This should already be checked by public API entry points. */
- if (WARN_ON_ONCE(!size))
+ if (WARN_ON_ONCE(!nr_entries))
return NULL;
- /* Check if we have a stack record to save the stack trace. */
- stack = depot_pop_free();
- if (!stack) {
- /* No usable entries on the freelist - try to refill the freelist. */
- if (!depot_try_init_pool(prealloc))
- return NULL;
+ /* Limit number of saved frames to CONFIG_STACKDEPOT_MAX_FRAMES. */
+ if (nr_entries > CONFIG_STACKDEPOT_MAX_FRAMES)
+ nr_entries = CONFIG_STACKDEPOT_MAX_FRAMES;
+
+ if (flags & STACK_DEPOT_FLAG_GET) {
+ /*
+ * Evictable entries have to allocate the max. size so they may
+ * safely be re-used by differently sized allocations.
+ */
+ record_size = depot_stack_record_size(stack, CONFIG_STACKDEPOT_MAX_FRAMES);
stack = depot_pop_free();
- if (WARN_ON(!stack))
- return NULL;
+ } else {
+ record_size = depot_stack_record_size(stack, nr_entries);
}
- /* Limit number of saved frames to CONFIG_STACKDEPOT_MAX_FRAMES. */
- if (size > CONFIG_STACKDEPOT_MAX_FRAMES)
- size = CONFIG_STACKDEPOT_MAX_FRAMES;
+ if (!stack) {
+ stack = depot_pop_free_pool(prealloc, record_size);
+ if (!stack)
+ return NULL;
+ }
/* Save the stack trace. */
stack->hash = hash;
- stack->size = size;
- /* stack->handle is already filled in by depot_init_pool(). */
- refcount_set(&stack->count, 1);
- memcpy(stack->entries, entries, flex_array_size(stack, entries, size));
+ stack->size = nr_entries;
+ /* stack->handle is already filled in by depot_pop_free_pool(). */
+ memcpy(stack->entries, entries, flex_array_size(stack, entries, nr_entries));
+
+ if (flags & STACK_DEPOT_FLAG_GET) {
+ refcount_set(&stack->count, 1);
+ counters[DEPOT_COUNTER_REFD_ALLOCS]++;
+ counters[DEPOT_COUNTER_REFD_INUSE]++;
+ } else {
+ /* Warn on attempts to switch to refcounting this entry. */
+ refcount_set(&stack->count, REFCOUNT_SATURATED);
+ counters[DEPOT_COUNTER_PERSIST_COUNT]++;
+ counters[DEPOT_COUNTER_PERSIST_BYTES] += record_size;
+ }
/*
* Let KMSAN know the stored stack record is initialized. This shall
* prevent false positive reports if instrumented code accesses it.
*/
- kmsan_unpoison_memory(stack, DEPOT_STACK_RECORD_SIZE);
+ kmsan_unpoison_memory(stack, record_size);
- counters[DEPOT_COUNTER_ALLOCS]++;
- counters[DEPOT_COUNTER_INUSE]++;
return stack;
}
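A worked example of the new variable-length sizing, under illustrative assumptions (64-bit longs, CONFIG_STACKDEPOT_MAX_FRAMES = 64):

    /*
     * depot_stack_record_size() for a 4-frame trace:
     *   used   = 4 * 8           =  32 bytes of entries[] actually needed
     *   unused = (64 * 8) - 32   = 480 bytes of the max-size array saved
     *   size   = ALIGN(sizeof(struct stack_record) - 480,
     *                  1 << DEPOT_STACK_ALIGN)
     *
     * Only STACK_DEPOT_FLAG_GET (evictable) records still reserve the full
     * entries[] array, so freelist reuse remains safe across trace sizes.
     */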
@@ -479,18 +441,19 @@ static struct stack_record *depot_fetch_stack(depot_stack_handle_t handle)
const int pools_num_cached = READ_ONCE(pools_num);
union handle_parts parts = { .handle = handle };
void *pool;
+ u32 pool_index = parts.pool_index - 1;
size_t offset = parts.offset << DEPOT_STACK_ALIGN;
struct stack_record *stack;
lockdep_assert_not_held(&pool_lock);
- if (parts.pool_index > pools_num_cached) {
+ if (pool_index >= pools_num_cached) {
WARN(1, "pool index %d out of bounds (%d) for stack id %08x\n",
- parts.pool_index, pools_num_cached, handle);
+ pool_index, pools_num_cached, handle);
return NULL;
}
- pool = stack_pools[parts.pool_index];
+ pool = stack_pools[pool_index];
if (WARN_ON(!pool))
return NULL;
@@ -538,8 +501,8 @@ static void depot_free_stack(struct stack_record *stack)
list_add_tail(&stack->free_list, &free_stacks);
counters[DEPOT_COUNTER_FREELIST_SIZE]++;
- counters[DEPOT_COUNTER_FREES]++;
- counters[DEPOT_COUNTER_INUSE]--;
+ counters[DEPOT_COUNTER_REFD_FREES]++;
+ counters[DEPOT_COUNTER_REFD_INUSE]--;
printk_deferred_exit();
raw_spin_unlock_irqrestore(&pool_lock, flags);
@@ -660,7 +623,7 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
* Allocate memory for a new pool if required now:
* we won't be able to do that under the lock.
*/
- if (unlikely(can_alloc && READ_ONCE(new_pool_required))) {
+ if (unlikely(can_alloc && !READ_ONCE(new_pool))) {
/*
* Zero out zone modifiers, as we don't have specific zone
* requirements. Keep the flags related to allocation in atomic
@@ -681,7 +644,7 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
found = find_stack(bucket, entries, nr_entries, hash, depot_flags);
if (!found) {
struct stack_record *new =
- depot_alloc_stack(entries, nr_entries, hash, &prealloc);
+ depot_alloc_stack(entries, nr_entries, hash, depot_flags, &prealloc);
if (new) {
/*
@@ -724,6 +687,14 @@ depot_stack_handle_t stack_depot_save(unsigned long *entries,
}
EXPORT_SYMBOL_GPL(stack_depot_save);
+struct stack_record *__stack_depot_get_stack_record(depot_stack_handle_t handle)
+{
+ if (!handle)
+ return NULL;
+
+ return depot_fetch_stack(handle);
+}
+
unsigned int stack_depot_fetch(depot_stack_handle_t handle,
unsigned long **entries)
{
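Taken together, the caller-visible contract now looks roughly like this (a hedged sketch, error handling elided):

    unsigned long entries[16];
    unsigned int nr = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
    depot_stack_handle_t persist, evictable;

    /* Default: a persistent record, sized to nr frames, never refcounted. */
    persist = stack_depot_save(entries, nr, GFP_KERNEL);

    /* Evictable: refcounted, always max-sized, freed via stack_depot_put(). */
    evictable = stack_depot_save_flags(entries, nr, GFP_KERNEL,
                                       STACK_DEPOT_FLAG_CAN_ALLOC |
                                       STACK_DEPOT_FLAG_GET);
    /* ... */
    stack_depot_put(evictable);  /* may return the record to the freelist */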
diff --git a/lib/stackinit_kunit.c b/lib/stackinit_kunit.c
index 05947a2feb93..3bc14d1ee816 100644
--- a/lib/stackinit_kunit.c
+++ b/lib/stackinit_kunit.c
@@ -63,7 +63,16 @@ static bool stackinit_range_contains(char *haystack_start, size_t haystack_size,
#define FETCH_ARG_STRING(var) var
#define FETCH_ARG_STRUCT(var) &var
+/*
+ * On m68k, if the leaf function test variable is longer than 8 bytes,
+ * the start of the stack frame moves. 8 is sufficiently large to
+ * test m68k char arrays, but leave it at 16 for other architectures.
+ */
+#ifdef CONFIG_M68K
+#define FILL_SIZE_STRING 8
+#else
#define FILL_SIZE_STRING 16
+#endif
#define INIT_CLONE_SCALAR /**/
#define INIT_CLONE_STRING [FILL_SIZE_STRING]
@@ -165,19 +174,23 @@ static noinline void test_ ## name (struct kunit *test) \
/* Verify all bytes overwritten with 0xFF. */ \
for (sum = 0, i = 0; i < target_size; i++) \
sum += (check_buf[i] != 0xFF); \
- KUNIT_ASSERT_EQ_MSG(test, sum, 0, \
- "leaf fill was not 0xFF!?\n"); \
/* Clear entire check buffer for later bit tests. */ \
memset(check_buf, 0x00, sizeof(check_buf)); \
/* Extract stack-defined variable contents. */ \
ignored = leaf_ ##name((unsigned long)&ignored, 0, \
FETCH_ARG_ ## which(zero)); \
+ /* \
+ * Delay the sum test to here to do as little as \
+ * possible between the two leaf function calls. \
+ */ \
+ KUNIT_ASSERT_EQ_MSG(test, sum, 0, \
+ "leaf fill was not 0xFF!?\n"); \
\
/* Validate that compiler lined up fill and target. */ \
KUNIT_ASSERT_TRUE_MSG(test, \
stackinit_range_contains(fill_start, fill_size, \
target_start, target_size), \
- "stack fill missed target!? " \
+ "stackframe was not the same between calls!? " \
"(fill %zu wide, target offset by %d)\n", \
fill_size, \
(int)((ssize_t)(uintptr_t)fill_start - \
@@ -404,7 +417,7 @@ static noinline int leaf_switch_2_none(unsigned long sp, bool fill,
* These are expected to fail for most configurations because neither
* GCC nor Clang have a way to perform initialization of variables in
* non-code areas (i.e. in a switch statement before the first "case").
- * https://bugs.llvm.org/show_bug.cgi?id=44916
+ * https://llvm.org/pr44916
*/
DEFINE_TEST_DRIVER(switch_1_none, uint64_t, SCALAR, ALWAYS_FAIL);
DEFINE_TEST_DRIVER(switch_2_none, uint64_t, SCALAR, ALWAYS_FAIL);
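For orientation, the shape of the test these tweaks adjust, paraphrased rather than the literal macro expansion:

    /*
     * Call 1: a leaf function fills its stack frame with 0xFF.
     * Call 2: a second leaf declares the variable under test in the
     *         same frame, so initialized bytes overwrite the 0xFF fill.
     * Checking the 0xFF sum between the two calls would disturb the
     * frame layout, hence the assertion is now delayed until after the
     * second call, and m68k gets a smaller FILL_SIZE_STRING so its
     * frame start does not move.
     */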
diff --git a/lib/string.c b/lib/string.c
index 6891d15ce991..966da44bfc86 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -15,19 +15,20 @@
*/
#define __NO_FORTIFY
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/ctype.h>
-#include <linux/kernel.h>
-#include <linux/export.h>
+#include <linux/bits.h>
#include <linux/bug.h>
+#include <linux/ctype.h>
#include <linux/errno.h>
-#include <linux/slab.h>
+#include <linux/limits.h>
+#include <linux/linkage.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <asm/page.h>
+#include <asm/rwonce.h>
#include <asm/unaligned.h>
-#include <asm/byteorder.h>
#include <asm/word-at-a-time.h>
-#include <asm/page.h>
#ifndef __HAVE_ARCH_STRNCASECMP
/**
@@ -103,8 +104,7 @@ char *strncpy(char *dest, const char *src, size_t count)
EXPORT_SYMBOL(strncpy);
#endif
-#ifndef __HAVE_ARCH_STRSCPY
-ssize_t strscpy(char *dest, const char *src, size_t count)
+ssize_t sized_strscpy(char *dest, const char *src, size_t count)
{
const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
size_t max = count;
@@ -170,8 +170,7 @@ ssize_t strscpy(char *dest, const char *src, size_t count)
return -E2BIG;
}
-EXPORT_SYMBOL(strscpy);
-#endif
+EXPORT_SYMBOL(sized_strscpy);
/**
* stpcpy - copy a string from src to dest returning a pointer to the new end
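The rename is groundwork for turning strscpy() itself into a macro over sized_strscpy(); the header side is not in this hunk, so the two-argument form below is an assumption about the companion change:

    char buf[16];
    ssize_t len;

    /* The classic three-argument form keeps working: */
    len = strscpy(buf, src, sizeof(buf));

    /* With the macro wrapper, the size can be inferred from the array: */
    len = strscpy(buf, src);

    if (len == -E2BIG)
            ; /* src was truncated; buf is still NUL-terminated */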
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
index 7713f73e66b0..69ba49b853c7 100644
--- a/lib/string_helpers.c
+++ b/lib/string_helpers.c
@@ -18,12 +18,14 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/string_helpers.h>
+#include <kunit/test.h>
+#include <kunit/test-bug.h>
/**
* string_get_size - get the size in the specified units
* @size: The size to be converted in blocks
* @blk_size: Size of the block (use 1 for size in bytes)
- * @units: units to use (powers of 1000 or 1024)
+ * @units: Units to use (powers of 1000 or 1024), plus flags selecting the space separator and "B" suffix
* @buf: buffer to format to
* @len: length of buffer
*
@@ -37,11 +39,12 @@
int string_get_size(u64 size, u64 blk_size, const enum string_size_units units,
char *buf, int len)
{
+ enum string_size_units units_base = units & STRING_UNITS_MASK;
static const char *const units_10[] = {
- "B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"
+ "", "k", "M", "G", "T", "P", "E", "Z", "Y",
};
static const char *const units_2[] = {
- "B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"
+ "", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi", "Yi",
};
static const char *const *const units_str[] = {
[STRING_UNITS_10] = units_10,
@@ -66,7 +69,7 @@ int string_get_size(u64 size, u64 blk_size, const enum string_size_units units,
/* This is Napier's algorithm. Reduce the original block size to
*
- * coefficient * divisor[units]^i
+ * coefficient * divisor[units_base]^i
*
* we do the reduction so both coefficients are just under 32 bits so
* that multiplying them together won't overflow 64 bits and we keep
@@ -76,12 +79,12 @@ int string_get_size(u64 size, u64 blk_size, const enum string_size_units units,
* precision is in the coefficients.
*/
while (blk_size >> 32) {
- do_div(blk_size, divisor[units]);
+ do_div(blk_size, divisor[units_base]);
i++;
}
while (size >> 32) {
- do_div(size, divisor[units]);
+ do_div(size, divisor[units_base]);
i++;
}
@@ -90,8 +93,8 @@ int string_get_size(u64 size, u64 blk_size, const enum string_size_units units,
size *= blk_size;
/* and logarithmically reduce it until it's just under the divisor */
- while (size >= divisor[units]) {
- remainder = do_div(size, divisor[units]);
+ while (size >= divisor[units_base]) {
+ remainder = do_div(size, divisor[units_base]);
i++;
}
@@ -101,10 +104,10 @@ int string_get_size(u64 size, u64 blk_size, const enum string_size_units units,
for (j = 0; sf_cap*10 < 1000; j++)
sf_cap *= 10;
- if (units == STRING_UNITS_2) {
+ if (units_base == STRING_UNITS_2) {
/* express the remainder as a decimal. It's currently the
* numerator of a fraction whose denominator is
- * divisor[units], which is 1 << 10 for STRING_UNITS_2 */
+ * divisor[units_base], which is 1 << 10 for STRING_UNITS_2 */
remainder *= 1000;
remainder >>= 10;
}
@@ -126,10 +129,12 @@ int string_get_size(u64 size, u64 blk_size, const enum string_size_units units,
if (i >= ARRAY_SIZE(units_2))
unit = "UNK";
else
- unit = units_str[units][i];
+ unit = units_str[units_base][i];
- return snprintf(buf, len, "%u%s %s", (u32)size,
- tmp, unit);
+ return snprintf(buf, len, "%u%s%s%s%s", (u32)size, tmp,
+ (units & STRING_UNITS_NO_SPACE) ? "" : " ",
+ unit,
+ (units & STRING_UNITS_NO_BYTES) ? "" : "B");
}
EXPORT_SYMBOL(string_get_size);
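With flags now travelling in the units argument, one value renders in several shapes; illustrative output for 8192 blocks of 512 bytes (4 MiB):

    char buf[16];

    string_get_size(8192, 512, STRING_UNITS_10, buf, sizeof(buf));
    /* "4.19 MB" */
    string_get_size(8192, 512, STRING_UNITS_2, buf, sizeof(buf));
    /* "4.00 MiB" */
    string_get_size(8192, 512, STRING_UNITS_2 | STRING_UNITS_NO_SPACE,
                    buf, sizeof(buf));
    /* "4.00MiB" */
    string_get_size(8192, 512, STRING_UNITS_10 | STRING_UNITS_NO_BYTES,
                    buf, sizeof(buf));
    /* "4.19 M" */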
@@ -826,40 +831,6 @@ char **devm_kasprintf_strarray(struct device *dev, const char *prefix, size_t n)
EXPORT_SYMBOL_GPL(devm_kasprintf_strarray);
/**
- * strscpy_pad() - Copy a C-string into a sized buffer
- * @dest: Where to copy the string to
- * @src: Where to copy the string from
- * @count: Size of destination buffer
- *
- * Copy the string, or as much of it as fits, into the dest buffer. The
- * behavior is undefined if the string buffers overlap. The destination
- * buffer is always %NUL terminated, unless it's zero-sized.
- *
- * If the source string is shorter than the destination buffer, zeros
- * the tail of the destination buffer.
- *
- * For full explanation of why you may want to consider using the
- * 'strscpy' functions please see the function docstring for strscpy().
- *
- * Returns:
- * * The number of characters copied (not including the trailing %NUL)
- * * -E2BIG if count is 0 or @src was truncated.
- */
-ssize_t strscpy_pad(char *dest, const char *src, size_t count)
-{
- ssize_t written;
-
- written = strscpy(dest, src, count);
- if (written < 0 || written == count - 1)
- return written;
-
- memset(dest + written + 1, 0, count - written - 1);
-
- return written;
-}
-EXPORT_SYMBOL(strscpy_pad);
-
-/**
* skip_spaces - Removes leading whitespace from @str.
* @str: The string to be stripped.
*
@@ -1042,10 +1013,28 @@ EXPORT_SYMBOL(__read_overflow2_field);
void __write_overflow_field(size_t avail, size_t wanted) { }
EXPORT_SYMBOL(__write_overflow_field);
-void fortify_panic(const char *name)
+static const char * const fortify_func_name[] = {
+#define MAKE_FORTIFY_FUNC_NAME(func) [MAKE_FORTIFY_FUNC(func)] = #func
+ EACH_FORTIFY_FUNC(MAKE_FORTIFY_FUNC_NAME)
+#undef MAKE_FORTIFY_FUNC_NAME
+};
+
+void __fortify_report(const u8 reason, const size_t avail, const size_t size)
+{
+ const u8 func = FORTIFY_REASON_FUNC(reason);
+ const bool write = FORTIFY_REASON_DIR(reason);
+ const char *name;
+
+ name = fortify_func_name[umin(func, FORTIFY_FUNC_UNKNOWN)];
+ WARN(1, "%s: detected buffer overflow: %zu byte %s of buffer size %zu\n",
+ name, size, str_read_write(!write), avail);
+}
+EXPORT_SYMBOL(__fortify_report);
+
+void __fortify_panic(const u8 reason, const size_t avail, const size_t size)
{
- pr_emerg("detected buffer overflow in %s\n", name);
+ __fortify_report(reason, avail, size);
BUG();
}
-EXPORT_SYMBOL(fortify_panic);
+EXPORT_SYMBOL(__fortify_panic);
#endif /* CONFIG_FORTIFY_SOURCE */
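The practical effect of the fortify rework, sketched; the message wording follows directly from the __fortify_report() format string above:

    char dst[8];

    memcpy(dst, src, 16);   /* caught by CONFIG_FORTIFY_SOURCE */
    /*
     * Old report: "detected buffer overflow in memcpy"
     * New report: "memcpy: detected buffer overflow: 16 byte write of
     *              buffer size 8"
     * __fortify_report() only WARNs; __fortify_panic() still BUG()s.
     */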
diff --git a/lib/test-string_helpers.c b/lib/string_helpers_kunit.c
index 9a68849a5d55..f88e39fd68d6 100644
--- a/lib/test-string_helpers.c
+++ b/lib/string_helpers_kunit.c
@@ -1,34 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Test cases for lib/string_helpers.c module.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/init.h>
+#include <kunit/test.h>
+#include <linux/array_size.h>
#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/module.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/string_helpers.h>
-static __init bool test_string_check_buf(const char *name, unsigned int flags,
- char *in, size_t p,
- char *out_real, size_t q_real,
- char *out_test, size_t q_test)
+static void test_string_check_buf(struct kunit *test,
+ const char *name, unsigned int flags,
+ char *in, size_t p,
+ char *out_real, size_t q_real,
+ char *out_test, size_t q_test)
{
- if (q_real == q_test && !memcmp(out_test, out_real, q_test))
- return true;
-
- pr_warn("Test '%s' failed: flags = %#x\n", name, flags);
-
- print_hex_dump(KERN_WARNING, "Input: ", DUMP_PREFIX_NONE, 16, 1,
- in, p, true);
- print_hex_dump(KERN_WARNING, "Expected: ", DUMP_PREFIX_NONE, 16, 1,
- out_test, q_test, true);
- print_hex_dump(KERN_WARNING, "Got: ", DUMP_PREFIX_NONE, 16, 1,
- out_real, q_real, true);
-
- return false;
+ KUNIT_ASSERT_EQ_MSG(test, q_real, q_test, "name:%s", name);
+ KUNIT_EXPECT_MEMEQ_MSG(test, out_test, out_real, q_test,
+ "name:%s", name);
}
struct test_string {
@@ -37,7 +28,7 @@ struct test_string {
unsigned int flags;
};
-static const struct test_string strings[] __initconst = {
+static const struct test_string strings[] = {
{
.in = "\\f\\ \\n\\r\\t\\v",
.out = "\f\\ \n\r\t\v",
@@ -60,17 +51,19 @@ static const struct test_string strings[] __initconst = {
},
};
-static void __init test_string_unescape(const char *name, unsigned int flags,
- bool inplace)
+static void test_string_unescape(struct kunit *test,
+ const char *name, unsigned int flags,
+ bool inplace)
{
int q_real = 256;
- char *in = kmalloc(q_real, GFP_KERNEL);
- char *out_test = kmalloc(q_real, GFP_KERNEL);
- char *out_real = kmalloc(q_real, GFP_KERNEL);
+ char *in = kunit_kzalloc(test, q_real, GFP_KERNEL);
+ char *out_test = kunit_kzalloc(test, q_real, GFP_KERNEL);
+ char *out_real = kunit_kzalloc(test, q_real, GFP_KERNEL);
int i, p = 0, q_test = 0;
- if (!in || !out_test || !out_real)
- goto out;
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, in);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, out_test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, out_real);
for (i = 0; i < ARRAY_SIZE(strings); i++) {
const char *s = strings[i].in;
@@ -103,12 +96,8 @@ static void __init test_string_unescape(const char *name, unsigned int flags,
q_real = string_unescape(in, out_real, q_real, flags);
}
- test_string_check_buf(name, flags, in, p - 1, out_real, q_real,
+ test_string_check_buf(test, name, flags, in, p - 1, out_real, q_real,
out_test, q_test);
-out:
- kfree(out_real);
- kfree(out_test);
- kfree(in);
}
struct test_string_1 {
@@ -123,7 +112,7 @@ struct test_string_2 {
};
#define TEST_STRING_2_DICT_0 NULL
-static const struct test_string_2 escape0[] __initconst = {{
+static const struct test_string_2 escape0[] = {{
.in = "\f\\ \n\r\t\v",
.s1 = {{
.out = "\\f\\ \\n\\r\\t\\v",
@@ -221,7 +210,7 @@ static const struct test_string_2 escape0[] __initconst = {{
}};
#define TEST_STRING_2_DICT_1 "b\\ \t\r\xCF"
-static const struct test_string_2 escape1[] __initconst = {{
+static const struct test_string_2 escape1[] = {{
.in = "\f\\ \n\r\t\v",
.s1 = {{
.out = "\f\\134\\040\n\\015\\011\v",
@@ -358,7 +347,7 @@ static const struct test_string_2 escape1[] __initconst = {{
/* terminator */
}};
-static const struct test_string strings_upper[] __initconst = {
+static const struct test_string strings_upper[] = {
{
.in = "abcdefgh1234567890test",
.out = "ABCDEFGH1234567890TEST",
@@ -369,7 +358,7 @@ static const struct test_string strings_upper[] __initconst = {
},
};
-static const struct test_string strings_lower[] __initconst = {
+static const struct test_string strings_lower[] = {
{
.in = "ABCDEFGH1234567890TEST",
.out = "abcdefgh1234567890test",
@@ -380,8 +369,8 @@ static const struct test_string strings_lower[] __initconst = {
},
};
-static __init const char *test_string_find_match(const struct test_string_2 *s2,
- unsigned int flags)
+static const char *test_string_find_match(const struct test_string_2 *s2,
+ unsigned int flags)
{
const struct test_string_1 *s1 = s2->s1;
unsigned int i;
@@ -402,31 +391,31 @@ static __init const char *test_string_find_match(const struct test_string_2 *s2,
return NULL;
}
-static __init void
-test_string_escape_overflow(const char *in, int p, unsigned int flags, const char *esc,
+static void
+test_string_escape_overflow(struct kunit *test,
+ const char *in, int p, unsigned int flags, const char *esc,
int q_test, const char *name)
{
int q_real;
q_real = string_escape_mem(in, p, NULL, 0, flags, esc);
- if (q_real != q_test)
- pr_warn("Test '%s' failed: flags = %#x, osz = 0, expected %d, got %d\n",
- name, flags, q_test, q_real);
+ KUNIT_EXPECT_EQ_MSG(test, q_real, q_test, "name:%s: flags:%#x", name, flags);
}
-static __init void test_string_escape(const char *name,
- const struct test_string_2 *s2,
- unsigned int flags, const char *esc)
+static void test_string_escape(struct kunit *test, const char *name,
+ const struct test_string_2 *s2,
+ unsigned int flags, const char *esc)
{
size_t out_size = 512;
- char *out_test = kmalloc(out_size, GFP_KERNEL);
- char *out_real = kmalloc(out_size, GFP_KERNEL);
- char *in = kmalloc(256, GFP_KERNEL);
+ char *out_test = kunit_kzalloc(test, out_size, GFP_KERNEL);
+ char *out_real = kunit_kzalloc(test, out_size, GFP_KERNEL);
+ char *in = kunit_kzalloc(test, 256, GFP_KERNEL);
int p = 0, q_test = 0;
int q_real;
- if (!out_test || !out_real || !in)
- goto out;
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, out_test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, out_real);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, in);
for (; s2->in; s2++) {
const char *out;
@@ -462,62 +451,99 @@ static __init void test_string_escape(const char *name,
q_real = string_escape_mem(in, p, out_real, out_size, flags, esc);
- test_string_check_buf(name, flags, in, p, out_real, q_real, out_test,
+ test_string_check_buf(test, name, flags, in, p, out_real, q_real, out_test,
q_test);
- test_string_escape_overflow(in, p, flags, esc, q_test, name);
-
-out:
- kfree(in);
- kfree(out_real);
- kfree(out_test);
+ test_string_escape_overflow(test, in, p, flags, esc, q_test, name);
}
#define string_get_size_maxbuf 16
-#define test_string_get_size_one(size, blk_size, exp_result10, exp_result2) \
- do { \
- BUILD_BUG_ON(sizeof(exp_result10) >= string_get_size_maxbuf); \
- BUILD_BUG_ON(sizeof(exp_result2) >= string_get_size_maxbuf); \
- __test_string_get_size((size), (blk_size), (exp_result10), \
- (exp_result2)); \
+#define test_string_get_size_one(size, blk_size, exp_result10, exp_result2) \
+ do { \
+ BUILD_BUG_ON(sizeof(exp_result10) >= string_get_size_maxbuf); \
+ BUILD_BUG_ON(sizeof(exp_result2) >= string_get_size_maxbuf); \
+ __test_string_get_size(test, (size), (blk_size), (exp_result10), \
+ (exp_result2)); \
} while (0)
-static __init void test_string_get_size_check(const char *units,
- const char *exp,
- char *res,
- const u64 size,
- const u64 blk_size)
+static void test_string_get_size_check(struct kunit *test,
+ const char *units,
+ const char *exp,
+ char *res,
+ const u64 size,
+ const u64 blk_size)
{
- if (!memcmp(res, exp, strlen(exp) + 1))
- return;
-
- res[string_get_size_maxbuf - 1] = '\0';
-
- pr_warn("Test 'test_string_get_size' failed!\n");
- pr_warn("string_get_size(size = %llu, blk_size = %llu, units = %s)\n",
+ KUNIT_EXPECT_MEMEQ_MSG(test, res, exp, strlen(exp) + 1,
+ "string_get_size(size = %llu, blk_size = %llu, units = %s)",
size, blk_size, units);
- pr_warn("expected: '%s', got '%s'\n", exp, res);
}
-static __init void __test_string_get_size(const u64 size, const u64 blk_size,
- const char *exp_result10,
- const char *exp_result2)
+static void __strchrcut(char *dst, const char *src, const char *cut)
+{
+ const char *from = src;
+ size_t len;
+
+ do {
+ len = strcspn(from, cut);
+ memcpy(dst, from, len);
+ dst += len;
+ from += len;
+ } while (*from++);
+ *dst = '\0';
+}
+
+static void __test_string_get_size_one(struct kunit *test,
+ const u64 size, const u64 blk_size,
+ const char *exp_result10,
+ const char *exp_result2,
+ enum string_size_units units,
+ const char *cut)
{
char buf10[string_get_size_maxbuf];
char buf2[string_get_size_maxbuf];
+ char exp10[string_get_size_maxbuf];
+ char exp2[string_get_size_maxbuf];
+ char prefix10[64];
+ char prefix2[64];
+
+ sprintf(prefix10, "STRING_UNITS_10 [%s]", cut);
+ sprintf(prefix2, "STRING_UNITS_2 [%s]", cut);
+
+ __strchrcut(exp10, exp_result10, cut);
+ __strchrcut(exp2, exp_result2, cut);
- string_get_size(size, blk_size, STRING_UNITS_10, buf10, sizeof(buf10));
- string_get_size(size, blk_size, STRING_UNITS_2, buf2, sizeof(buf2));
+ string_get_size(size, blk_size, STRING_UNITS_10 | units, buf10, sizeof(buf10));
+ string_get_size(size, blk_size, STRING_UNITS_2 | units, buf2, sizeof(buf2));
- test_string_get_size_check("STRING_UNITS_10", exp_result10, buf10,
- size, blk_size);
+ test_string_get_size_check(test, prefix10, exp10, buf10, size, blk_size);
+ test_string_get_size_check(test, prefix2, exp2, buf2, size, blk_size);
+}
+
+static void __test_string_get_size(struct kunit *test,
+ const u64 size, const u64 blk_size,
+ const char *exp_result10,
+ const char *exp_result2)
+{
+ struct {
+ enum string_size_units units;
+ const char *cut;
+ } get_size_test_cases[] = {
+ { 0, "" },
+ { STRING_UNITS_NO_SPACE, " " },
+ { STRING_UNITS_NO_SPACE | STRING_UNITS_NO_BYTES, " B" },
+ { STRING_UNITS_NO_BYTES, "B" },
+ };
+ int i;
- test_string_get_size_check("STRING_UNITS_2", exp_result2, buf2,
- size, blk_size);
+ for (i = 0; i < ARRAY_SIZE(get_size_test_cases); i++)
+ __test_string_get_size_one(test, size, blk_size,
+ exp_result10, exp_result2,
+ get_size_test_cases[i].units,
+ get_size_test_cases[i].cut);
}
-static __init void test_string_get_size(void)
+static void test_get_size(struct kunit *test)
{
/* small values */
test_string_get_size_one(0, 512, "0 B", "0 B");
@@ -537,7 +563,7 @@ static __init void test_string_get_size(void)
test_string_get_size_one(4096, U64_MAX, "75.6 ZB", "64.0 ZiB");
}
-static void __init test_string_upper_lower(void)
+static void test_upper_lower(struct kunit *test)
{
char *dst;
int i;
@@ -547,16 +573,10 @@ static void __init test_string_upper_lower(void)
int len = strlen(strings_upper[i].in) + 1;
dst = kmalloc(len, GFP_KERNEL);
- if (!dst)
- return;
+ KUNIT_ASSERT_NOT_NULL(test, dst);
string_upper(dst, s);
- if (memcmp(dst, strings_upper[i].out, len)) {
- pr_warn("Test 'string_upper' failed : expected %s, got %s!\n",
- strings_upper[i].out, dst);
- kfree(dst);
- return;
- }
+ KUNIT_EXPECT_STREQ(test, dst, strings_upper[i].out);
kfree(dst);
}
@@ -565,45 +585,44 @@ static void __init test_string_upper_lower(void)
int len = strlen(strings_lower[i].in) + 1;
dst = kmalloc(len, GFP_KERNEL);
- if (!dst)
- return;
+ KUNIT_ASSERT_NOT_NULL(test, dst);
string_lower(dst, s);
- if (memcmp(dst, strings_lower[i].out, len)) {
- pr_warn("Test 'string_lower failed : : expected %s, got %s!\n",
- strings_lower[i].out, dst);
- kfree(dst);
- return;
- }
+ KUNIT_EXPECT_STREQ(test, dst, strings_lower[i].out);
kfree(dst);
}
}
-static int __init test_string_helpers_init(void)
+static void test_unescape(struct kunit *test)
{
unsigned int i;
- pr_info("Running tests...\n");
for (i = 0; i < UNESCAPE_ALL_MASK + 1; i++)
- test_string_unescape("unescape", i, false);
- test_string_unescape("unescape inplace",
+ test_string_unescape(test, "unescape", i, false);
+ test_string_unescape(test, "unescape inplace",
get_random_u32_below(UNESCAPE_ALL_MASK + 1), true);
/* Without dictionary */
for (i = 0; i < ESCAPE_ALL_MASK + 1; i++)
- test_string_escape("escape 0", escape0, i, TEST_STRING_2_DICT_0);
+ test_string_escape(test, "escape 0", escape0, i, TEST_STRING_2_DICT_0);
/* With dictionary */
for (i = 0; i < ESCAPE_ALL_MASK + 1; i++)
- test_string_escape("escape 1", escape1, i, TEST_STRING_2_DICT_1);
+ test_string_escape(test, "escape 1", escape1, i, TEST_STRING_2_DICT_1);
+}
- /* Test string_get_size() */
- test_string_get_size();
+static struct kunit_case string_helpers_test_cases[] = {
+ KUNIT_CASE(test_get_size),
+ KUNIT_CASE(test_upper_lower),
+ KUNIT_CASE(test_unescape),
+ {}
+};
- /* Test string upper(), string_lower() */
- test_string_upper_lower();
+static struct kunit_suite string_helpers_test_suite = {
+ .name = "string_helpers",
+ .test_cases = string_helpers_test_cases,
+};
+
+kunit_test_suites(&string_helpers_test_suite);
- return -EINVAL;
-}
-module_init(test_string_helpers_init);
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/lib/string_kunit.c b/lib/string_kunit.c
new file mode 100644
index 000000000000..eabf025cf77c
--- /dev/null
+++ b/lib/string_kunit.c
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Test cases for string functions.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <kunit/test.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+static void test_memset16(struct kunit *test)
+{
+ unsigned i, j, k;
+ u16 v, *p;
+
+ p = kunit_kzalloc(test, 256 * 2 * 2, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p);
+
+ for (i = 0; i < 256; i++) {
+ for (j = 0; j < 256; j++) {
+ memset(p, 0xa1, 256 * 2 * sizeof(v));
+ memset16(p + i, 0xb1b2, j);
+ for (k = 0; k < 512; k++) {
+ v = p[k];
+ if (k < i) {
+ KUNIT_ASSERT_EQ_MSG(test, v, 0xa1a1,
+ "i:%d j:%d k:%d", i, j, k);
+ } else if (k < i + j) {
+ KUNIT_ASSERT_EQ_MSG(test, v, 0xb1b2,
+ "i:%d j:%d k:%d", i, j, k);
+ } else {
+ KUNIT_ASSERT_EQ_MSG(test, v, 0xa1a1,
+ "i:%d j:%d k:%d", i, j, k);
+ }
+ }
+ }
+ }
+}
+
+static void test_memset32(struct kunit *test)
+{
+ unsigned i, j, k;
+ u32 v, *p;
+
+ p = kunit_kzalloc(test, 256 * 2 * 4, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p);
+
+ for (i = 0; i < 256; i++) {
+ for (j = 0; j < 256; j++) {
+ memset(p, 0xa1, 256 * 2 * sizeof(v));
+ memset32(p + i, 0xb1b2b3b4, j);
+ for (k = 0; k < 512; k++) {
+ v = p[k];
+ if (k < i) {
+ KUNIT_ASSERT_EQ_MSG(test, v, 0xa1a1a1a1,
+ "i:%d j:%d k:%d", i, j, k);
+ } else if (k < i + j) {
+ KUNIT_ASSERT_EQ_MSG(test, v, 0xb1b2b3b4,
+ "i:%d j:%d k:%d", i, j, k);
+ } else {
+ KUNIT_ASSERT_EQ_MSG(test, v, 0xa1a1a1a1,
+ "i:%d j:%d k:%d", i, j, k);
+ }
+ }
+ }
+ }
+}
+
+static void test_memset64(struct kunit *test)
+{
+ unsigned i, j, k;
+ u64 v, *p;
+
+ p = kunit_kzalloc(test, 256 * 2 * 8, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p);
+
+ for (i = 0; i < 256; i++) {
+ for (j = 0; j < 256; j++) {
+ memset(p, 0xa1, 256 * 2 * sizeof(v));
+ memset64(p + i, 0xb1b2b3b4b5b6b7b8ULL, j);
+ for (k = 0; k < 512; k++) {
+ v = p[k];
+ if (k < i) {
+ KUNIT_ASSERT_EQ_MSG(test, v, 0xa1a1a1a1a1a1a1a1ULL,
+ "i:%d j:%d k:%d", i, j, k);
+ } else if (k < i + j) {
+ KUNIT_ASSERT_EQ_MSG(test, v, 0xb1b2b3b4b5b6b7b8ULL,
+ "i:%d j:%d k:%d", i, j, k);
+ } else {
+ KUNIT_ASSERT_EQ_MSG(test, v, 0xa1a1a1a1a1a1a1a1ULL,
+ "i:%d j:%d k:%d", i, j, k);
+ }
+ }
+ }
+ }
+}
+
+static void test_strchr(struct kunit *test)
+{
+ const char *test_string = "abcdefghijkl";
+ const char *empty_string = "";
+ char *result;
+ int i;
+
+ for (i = 0; i < strlen(test_string) + 1; i++) {
+ result = strchr(test_string, test_string[i]);
+ KUNIT_ASSERT_EQ_MSG(test, result - test_string, i,
+ "char:%c", 'a' + i);
+ }
+
+ result = strchr(empty_string, '\0');
+ KUNIT_ASSERT_PTR_EQ(test, result, empty_string);
+
+ result = strchr(empty_string, 'a');
+ KUNIT_ASSERT_NULL(test, result);
+
+ result = strchr(test_string, 'z');
+ KUNIT_ASSERT_NULL(test, result);
+}
+
+static void test_strnchr(struct kunit *test)
+{
+ const char *test_string = "abcdefghijkl";
+ const char *empty_string = "";
+ char *result;
+ int i, j;
+
+ for (i = 0; i < strlen(test_string) + 1; i++) {
+ for (j = 0; j < strlen(test_string) + 2; j++) {
+ result = strnchr(test_string, j, test_string[i]);
+ if (j <= i) {
+ KUNIT_ASSERT_NULL_MSG(test, result,
+ "char:%c i:%d j:%d", 'a' + i, i, j);
+ } else {
+ KUNIT_ASSERT_EQ_MSG(test, result - test_string, i,
+ "char:%c i:%d j:%d", 'a' + i, i, j);
+ }
+ }
+ }
+
+ result = strnchr(empty_string, 0, '\0');
+ KUNIT_ASSERT_NULL(test, result);
+
+ result = strnchr(empty_string, 1, '\0');
+ KUNIT_ASSERT_PTR_EQ(test, result, empty_string);
+
+ result = strnchr(empty_string, 1, 'a');
+ KUNIT_ASSERT_NULL(test, result);
+
+ result = strnchr(NULL, 0, '\0');
+ KUNIT_ASSERT_NULL(test, result);
+}
+
+static void test_strspn(struct kunit *test)
+{
+ static const struct strspn_test {
+ const char str[16];
+ const char accept[16];
+ const char reject[16];
+ unsigned a;
+ unsigned r;
+ } tests[] = {
+ { "foobar", "", "", 0, 6 },
+ { "abba", "abc", "ABBA", 4, 4 },
+ { "abba", "a", "b", 1, 1 },
+ { "", "abc", "abc", 0, 0},
+ };
+ const struct strspn_test *s = tests;
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(tests); ++i, ++s) {
+ KUNIT_ASSERT_EQ_MSG(test, s->a, strspn(s->str, s->accept),
+ "i:%zu", i);
+ KUNIT_ASSERT_EQ_MSG(test, s->r, strcspn(s->str, s->reject),
+ "i:%zu", i);
+ }
+}
+
+static struct kunit_case string_test_cases[] = {
+ KUNIT_CASE(test_memset16),
+ KUNIT_CASE(test_memset32),
+ KUNIT_CASE(test_memset64),
+ KUNIT_CASE(test_strchr),
+ KUNIT_CASE(test_strnchr),
+ KUNIT_CASE(test_strspn),
+ {}
+};
+
+static struct kunit_suite string_test_suite = {
+ .name = "string",
+ .test_cases = string_test_cases,
+};
+
+kunit_test_suites(&string_test_suite);
+
+MODULE_LICENSE("GPL v2");
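All three memsetNN tests above follow one pattern, summarized:

    /*
     * For every start offset i and count j in a 512-element buffer:
     *   poison the whole buffer with 0xa1 bytes,
     *   memsetNN(p + i, pattern, j),
     *   then verify [0, i) and [i + j, 512) still hold the poison and
     *   [i, i + j) holds the pattern.
     */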
diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
index 65f22c2578b0..6b2b33579f56 100644
--- a/lib/test_bitmap.c
+++ b/lib/test_bitmap.c
@@ -380,6 +380,47 @@ static void __init test_replace(void)
expect_eq_bitmap(bmap, exp3_1_0, nbits);
}
+static const unsigned long sg_mask[] __initconst = {
+ BITMAP_FROM_U64(0x000000000000035aULL),
+};
+
+static const unsigned long sg_src[] __initconst = {
+ BITMAP_FROM_U64(0x0000000000000667ULL),
+};
+
+static const unsigned long sg_gather_exp[] __initconst = {
+ BITMAP_FROM_U64(0x0000000000000029ULL),
+};
+
+static const unsigned long sg_scatter_exp[] __initconst = {
+ BITMAP_FROM_U64(0x000000000000021aULL),
+};
+
+static void __init test_bitmap_sg(void)
+{
+ unsigned int nbits = 64;
+ DECLARE_BITMAP(bmap_gather, 100);
+ DECLARE_BITMAP(bmap_scatter, 100);
+ DECLARE_BITMAP(bmap_tmp, 100);
+ DECLARE_BITMAP(bmap_res, 100);
+
+ /* Simple gather call */
+ bitmap_zero(bmap_gather, 100);
+ bitmap_gather(bmap_gather, sg_src, sg_mask, nbits);
+ expect_eq_bitmap(sg_gather_exp, bmap_gather, nbits);
+
+ /* Simple scatter call */
+ bitmap_zero(bmap_scatter, 100);
+ bitmap_scatter(bmap_scatter, sg_src, sg_mask, nbits);
+ expect_eq_bitmap(sg_scatter_exp, bmap_scatter, nbits);
+
+ /* Scatter/gather relationship */
+ bitmap_zero(bmap_tmp, 100);
+ bitmap_gather(bmap_tmp, bmap_scatter, sg_mask, nbits);
+ bitmap_scatter(bmap_res, bmap_tmp, sg_mask, nbits);
+ expect_eq_bitmap(bmap_scatter, bmap_res, nbits);
+}
+
#define PARSE_TIME 0x1
#define NO_LEN 0x2
@@ -1252,6 +1293,7 @@ static void __init selftest(void)
test_copy();
test_bitmap_region();
test_replace();
+ test_bitmap_sg();
test_bitmap_arr32();
test_bitmap_arr64();
test_bitmap_parse();
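The expected values in the new scatter/gather test follow directly from the definitions; worked through by hand:

    /*
     * mask = 0x35a -> set bits {1, 3, 4, 6, 8, 9}
     * src  = 0x667 -> set bits {0, 1, 2, 5, 6, 9, 10}
     *
     * bitmap_gather() packs the src bits found at the mask positions:
     *   src[1,3,4,6,8,9] = 1,0,0,1,0,1  ->  dst = 0x29
     *
     * bitmap_scatter() spreads the low src bits onto the mask positions:
     *   src[0..5] = 1,1,1,0,0,1  ->  sets bits {1, 3, 4, 9}  ->  dst = 0x21a
     *
     * Gather after scatter with the same mask is the identity on the
     * scattered bits, which the round-trip check relies on.
     */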
diff --git a/lib/test_blackhole_dev.c b/lib/test_blackhole_dev.c
index 4c40580a99a3..f247089d63c0 100644
--- a/lib/test_blackhole_dev.c
+++ b/lib/test_blackhole_dev.c
@@ -29,7 +29,6 @@ static int __init test_blackholedev_init(void)
{
struct ipv6hdr *ip6h;
struct sk_buff *skb;
- struct ethhdr *ethh;
struct udphdr *uh;
int data_len;
int ret;
@@ -61,7 +60,7 @@ static int __init test_blackholedev_init(void)
ip6h->saddr = in6addr_loopback;
ip6h->daddr = in6addr_loopback;
/* Ether */
- ethh = (struct ethhdr *)skb_push(skb, sizeof(struct ethhdr));
+ skb_push(skb, sizeof(struct ethhdr));
skb_set_mac_header(skb, 0);
skb->protocol = htons(ETH_P_IPV6);
diff --git a/lib/test_kmod.c b/lib/test_kmod.c
index 43d9dfd57ab7..1eec3b7ac67c 100644
--- a/lib/test_kmod.c
+++ b/lib/test_kmod.c
@@ -58,11 +58,14 @@ static int num_test_devs;
* @need_mod_put for your tests case.
*/
enum kmod_test_case {
+ /* private: */
__TEST_KMOD_INVALID = 0,
+ /* public: */
TEST_KMOD_DRIVER,
TEST_KMOD_FS_TYPE,
+ /* private: */
__TEST_KMOD_MAX,
};
@@ -82,6 +85,7 @@ struct kmod_test_device;
* @ret_sync: return value if request_module() is used, sync request for
* @TEST_KMOD_DRIVER
* @fs_sync: return value of get_fs_type() for @TEST_KMOD_FS_TYPE
+ * @task_sync: kthread's task_struct or %NULL if not running
* @thread_idx: thread ID
* @test_dev: test device test is being performed under
* @need_mod_put: Some tests (get_fs_type() is one) requires putting the module
@@ -108,7 +112,7 @@ struct kmod_test_device_info {
* @dev: pointer to misc_dev's own struct device
* @config_mutex: protects configuration of test
* @trigger_mutex: the test trigger can only be fired once at a time
- * @thread_lock: protects @done count, and the @info per each thread
+ * @thread_mutex: protects the @done count and the per-thread @info
* @done: number of threads which have completed or failed
* @test_is_oom: when we run out of memory, use this to halt moving forward
* @kthreads_done: completion used to signal when all work is done
diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
index 29185ac5c727..399380db449c 100644
--- a/lib/test_maple_tree.c
+++ b/lib/test_maple_tree.c
@@ -3599,6 +3599,45 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
mas_unlock(&mas);
}
+static noinline void __init alloc_cyclic_testing(struct maple_tree *mt)
+{
+ unsigned long location;
+ unsigned long next;
+ int ret = 0;
+ MA_STATE(mas, mt, 0, 0);
+
+ next = 0;
+ mtree_lock(mt);
+ for (int i = 0; i < 100; i++) {
+ mas_alloc_cyclic(&mas, &location, mt, 2, ULONG_MAX, &next, GFP_KERNEL);
+ MAS_BUG_ON(&mas, i != location - 2);
+ MAS_BUG_ON(&mas, mas.index != location);
+ MAS_BUG_ON(&mas, mas.last != location);
+ MAS_BUG_ON(&mas, i != next - 3);
+ }
+
+ mtree_unlock(mt);
+ mtree_destroy(mt);
+ next = 0;
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ for (int i = 0; i < 100; i++) {
+ mtree_alloc_cyclic(mt, &location, mt, 2, ULONG_MAX, &next, GFP_KERNEL);
+ MT_BUG_ON(mt, i != location - 2);
+ MT_BUG_ON(mt, i != next - 3);
+ MT_BUG_ON(mt, mtree_load(mt, location) != mt);
+ }
+
+ mtree_destroy(mt);
+ /* Overflow test */
+ next = ULONG_MAX - 1;
+ ret = mtree_alloc_cyclic(mt, &location, mt, 2, ULONG_MAX, &next, GFP_KERNEL);
+ MT_BUG_ON(mt, ret != 0);
+ ret = mtree_alloc_cyclic(mt, &location, mt, 2, ULONG_MAX, &next, GFP_KERNEL);
+ MT_BUG_ON(mt, ret != 0);
+ ret = mtree_alloc_cyclic(mt, &location, mt, 2, ULONG_MAX, &next, GFP_KERNEL);
+ MT_BUG_ON(mt, ret != 1);
+}
+
static DEFINE_MTREE(tree);
static int __init maple_tree_seed(void)
{
@@ -3880,6 +3919,11 @@ static int __init maple_tree_seed(void)
check_state_handling(&tree);
mtree_destroy(&tree);
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ alloc_cyclic_testing(&tree);
+ mtree_destroy(&tree);
+
+
#if defined(BENCH)
skip:
#endif
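A small usage sketch of the interface the new test exercises; my_entry is an arbitrary pointer for illustration:

    unsigned long id, next = 0;
    int ret;

    /* Allocate the next free index in [2, ULONG_MAX], starting the
     * search at *next and wrapping around past the upper bound. */
    ret = mtree_alloc_cyclic(&tree, &id, my_entry, 2, ULONG_MAX, &next,
                             GFP_KERNEL);
    if (ret < 0)
            return ret;     /* e.g. -EBUSY when the range is full */
    if (ret == 1)
            ; /* success, but the search wrapped back to the lower bound */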
diff --git a/lib/test_string.c b/lib/test_string.c
deleted file mode 100644
index c5cb92fb710e..000000000000
--- a/lib/test_string.c
+++ /dev/null
@@ -1,257 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-#include <linux/module.h>
-#include <linux/printk.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-
-static __init int memset16_selftest(void)
-{
- unsigned i, j, k;
- u16 v, *p;
-
- p = kmalloc(256 * 2 * 2, GFP_KERNEL);
- if (!p)
- return -1;
-
- for (i = 0; i < 256; i++) {
- for (j = 0; j < 256; j++) {
- memset(p, 0xa1, 256 * 2 * sizeof(v));
- memset16(p + i, 0xb1b2, j);
- for (k = 0; k < 512; k++) {
- v = p[k];
- if (k < i) {
- if (v != 0xa1a1)
- goto fail;
- } else if (k < i + j) {
- if (v != 0xb1b2)
- goto fail;
- } else {
- if (v != 0xa1a1)
- goto fail;
- }
- }
- }
- }
-
-fail:
- kfree(p);
- if (i < 256)
- return (i << 24) | (j << 16) | k | 0x8000;
- return 0;
-}
-
-static __init int memset32_selftest(void)
-{
- unsigned i, j, k;
- u32 v, *p;
-
- p = kmalloc(256 * 2 * 4, GFP_KERNEL);
- if (!p)
- return -1;
-
- for (i = 0; i < 256; i++) {
- for (j = 0; j < 256; j++) {
- memset(p, 0xa1, 256 * 2 * sizeof(v));
- memset32(p + i, 0xb1b2b3b4, j);
- for (k = 0; k < 512; k++) {
- v = p[k];
- if (k < i) {
- if (v != 0xa1a1a1a1)
- goto fail;
- } else if (k < i + j) {
- if (v != 0xb1b2b3b4)
- goto fail;
- } else {
- if (v != 0xa1a1a1a1)
- goto fail;
- }
- }
- }
- }
-
-fail:
- kfree(p);
- if (i < 256)
- return (i << 24) | (j << 16) | k | 0x8000;
- return 0;
-}
-
-static __init int memset64_selftest(void)
-{
- unsigned i, j, k;
- u64 v, *p;
-
- p = kmalloc(256 * 2 * 8, GFP_KERNEL);
- if (!p)
- return -1;
-
- for (i = 0; i < 256; i++) {
- for (j = 0; j < 256; j++) {
- memset(p, 0xa1, 256 * 2 * sizeof(v));
- memset64(p + i, 0xb1b2b3b4b5b6b7b8ULL, j);
- for (k = 0; k < 512; k++) {
- v = p[k];
- if (k < i) {
- if (v != 0xa1a1a1a1a1a1a1a1ULL)
- goto fail;
- } else if (k < i + j) {
- if (v != 0xb1b2b3b4b5b6b7b8ULL)
- goto fail;
- } else {
- if (v != 0xa1a1a1a1a1a1a1a1ULL)
- goto fail;
- }
- }
- }
- }
-
-fail:
- kfree(p);
- if (i < 256)
- return (i << 24) | (j << 16) | k | 0x8000;
- return 0;
-}
-
-static __init int strchr_selftest(void)
-{
- const char *test_string = "abcdefghijkl";
- const char *empty_string = "";
- char *result;
- int i;
-
- for (i = 0; i < strlen(test_string) + 1; i++) {
- result = strchr(test_string, test_string[i]);
- if (result - test_string != i)
- return i + 'a';
- }
-
- result = strchr(empty_string, '\0');
- if (result != empty_string)
- return 0x101;
-
- result = strchr(empty_string, 'a');
- if (result)
- return 0x102;
-
- result = strchr(test_string, 'z');
- if (result)
- return 0x103;
-
- return 0;
-}
-
-static __init int strnchr_selftest(void)
-{
- const char *test_string = "abcdefghijkl";
- const char *empty_string = "";
- char *result;
- int i, j;
-
- for (i = 0; i < strlen(test_string) + 1; i++) {
- for (j = 0; j < strlen(test_string) + 2; j++) {
- result = strnchr(test_string, j, test_string[i]);
- if (j <= i) {
- if (!result)
- continue;
- return ((i + 'a') << 8) | j;
- }
- if (result - test_string != i)
- return ((i + 'a') << 8) | j;
- }
- }
-
- result = strnchr(empty_string, 0, '\0');
- if (result)
- return 0x10001;
-
- result = strnchr(empty_string, 1, '\0');
- if (result != empty_string)
- return 0x10002;
-
- result = strnchr(empty_string, 1, 'a');
- if (result)
- return 0x10003;
-
- result = strnchr(NULL, 0, '\0');
- if (result)
- return 0x10004;
-
- return 0;
-}
-
-static __init int strspn_selftest(void)
-{
- static const struct strspn_test {
- const char str[16];
- const char accept[16];
- const char reject[16];
- unsigned a;
- unsigned r;
- } tests[] __initconst = {
- { "foobar", "", "", 0, 6 },
- { "abba", "abc", "ABBA", 4, 4 },
- { "abba", "a", "b", 1, 1 },
- { "", "abc", "abc", 0, 0},
- };
- const struct strspn_test *s = tests;
- size_t i, res;
-
- for (i = 0; i < ARRAY_SIZE(tests); ++i, ++s) {
- res = strspn(s->str, s->accept);
- if (res != s->a)
- return 0x100 + 2*i;
- res = strcspn(s->str, s->reject);
- if (res != s->r)
- return 0x100 + 2*i + 1;
- }
- return 0;
-}
-
-static __exit void string_selftest_remove(void)
-{
-}
-
-static __init int string_selftest_init(void)
-{
- int test, subtest;
-
- test = 1;
- subtest = memset16_selftest();
- if (subtest)
- goto fail;
-
- test = 2;
- subtest = memset32_selftest();
- if (subtest)
- goto fail;
-
- test = 3;
- subtest = memset64_selftest();
- if (subtest)
- goto fail;
-
- test = 4;
- subtest = strchr_selftest();
- if (subtest)
- goto fail;
-
- test = 5;
- subtest = strnchr_selftest();
- if (subtest)
- goto fail;
-
- test = 6;
- subtest = strspn_selftest();
- if (subtest)
- goto fail;
-
- pr_info("String selftests succeeded\n");
- return 0;
-fail:
- pr_crit("String selftest failure %d.%08x\n", test, subtest);
- return 0;
-}
-
-module_init(string_selftest_init);
-module_exit(string_selftest_remove);
-MODULE_LICENSE("GPL v2");
diff --git a/lib/test_ubsan.c b/lib/test_ubsan.c
index 2062be1f2e80..276c12140ee2 100644
--- a/lib/test_ubsan.c
+++ b/lib/test_ubsan.c
@@ -11,6 +11,39 @@ typedef void(*test_ubsan_fp)(void);
#config, IS_ENABLED(config) ? "y" : "n"); \
} while (0)
+static void test_ubsan_add_overflow(void)
+{
+ volatile int val = INT_MAX;
+
+ UBSAN_TEST(CONFIG_UBSAN_SIGNED_WRAP);
+ val += 2;
+}
+
+static void test_ubsan_sub_overflow(void)
+{
+ volatile int val = INT_MIN;
+ volatile int val2 = 2;
+
+ UBSAN_TEST(CONFIG_UBSAN_SIGNED_WRAP);
+ val -= val2;
+}
+
+static void test_ubsan_mul_overflow(void)
+{
+ volatile int val = INT_MAX / 2;
+
+ UBSAN_TEST(CONFIG_UBSAN_SIGNED_WRAP);
+ val *= 3;
+}
+
+static void test_ubsan_negate_overflow(void)
+{
+ volatile int val = INT_MIN;
+
+ UBSAN_TEST(CONFIG_UBSAN_SIGNED_WRAP);
+ val = -val;
+}
+
static void test_ubsan_divrem_overflow(void)
{
volatile int val = 16;
@@ -23,8 +56,8 @@ static void test_ubsan_divrem_overflow(void)
static void test_ubsan_shift_out_of_bounds(void)
{
volatile int neg = -1, wrap = 4;
- int val1 = 10;
- int val2 = INT_MAX;
+ volatile int val1 = 10;
+ volatile int val2 = INT_MAX;
UBSAN_TEST(CONFIG_UBSAN_SHIFT, "negative exponent");
val1 <<= neg;
@@ -90,6 +123,10 @@ static void test_ubsan_misaligned_access(void)
}
static const test_ubsan_fp test_ubsan_array[] = {
+ test_ubsan_add_overflow,
+ test_ubsan_sub_overflow,
+ test_ubsan_mul_overflow,
+ test_ubsan_negate_overflow,
test_ubsan_shift_out_of_bounds,
test_ubsan_out_of_bounds,
test_ubsan_load_invalid_value,
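
One editorial note on the pattern in the new tests above: the operands are declared
volatile so the compiler cannot constant-fold the arithmetic away. A non-volatile
version may be evaluated entirely at build time, leaving no runtime overflow for the
sanitizer to catch. A sketch of that failure mode (hypothetical function name):

/* Editor's sketch: without volatile, the compiler may fold the whole
 * expression to a constant and emit no UBSAN check at all.
 */
static void test_overflow_not_instrumented(void)
{
	int val = INT_MAX;	/* known constant to the optimizer... */

	val += 2;		/* ...so this may never reach a runtime handler */
}
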
diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c
index 3718d9886407..4ddf769861ff 100644
--- a/lib/test_vmalloc.c
+++ b/lib/test_vmalloc.c
@@ -117,7 +117,7 @@ static int align_shift_alloc_test(void)
int i;
for (i = 0; i < BITS_PER_LONG; i++) {
- align = ((unsigned long) 1) << i;
+ align = 1UL << i;
ptr = __vmalloc_node(PAGE_SIZE, align, GFP_KERNEL|__GFP_ZERO, 0,
__builtin_return_address(0));
@@ -501,7 +501,7 @@ static int test_func(void *private)
}
static int
-init_test_configurtion(void)
+init_test_configuration(void)
{
/*
* A maximum number of workers is defined as hard-coded
@@ -531,7 +531,7 @@ static void do_concurrent_test(void)
/*
* Set some basic configurations plus sanity check.
*/
- ret = init_test_configurtion();
+ ret = init_test_configuration();
if (ret < 0)
return;
@@ -600,12 +600,7 @@ static int vmalloc_test_init(void)
return -EAGAIN; /* Fail will directly unload the module */
}
-static void vmalloc_test_exit(void)
-{
-}
-
module_init(vmalloc_test_init)
-module_exit(vmalloc_test_exit)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Uladzislau Rezki");
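
The module_exit() removal above is safe because vmalloc_test_init() always returns
-EAGAIN (see the retained context), so the module is unloaded straight from its failing
init and the empty exit handler was dead code. The remaining boilerplate, sketched by
the editor for illustration:

/* Editor's sketch of the pattern left after the cleanup. */
static int vmalloc_test_init(void)
{
	/* ...kick off the test workers... */
	return -EAGAIN;	/* a failing init unloads the module immediately */
}
module_init(vmalloc_test_init)
MODULE_LICENSE("GPL");
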
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index e77d4856442c..ebe2af2e072d 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -423,6 +423,59 @@ static noinline void check_cmpxchg(struct xarray *xa)
XA_BUG_ON(xa, !xa_empty(xa));
}
+static noinline void check_cmpxchg_order(struct xarray *xa)
+{
+#ifdef CONFIG_XARRAY_MULTI
+ void *FIVE = xa_mk_value(5);
+ unsigned int i, order = 3;
+
+ XA_BUG_ON(xa, xa_store_order(xa, 0, order, FIVE, GFP_KERNEL));
+
+ /* Check entry FIVE has the order saved */
+ XA_BUG_ON(xa, xa_get_order(xa, xa_to_value(FIVE)) != order);
+
+ /* Check all the tied indexes have the same entry and order */
+ for (i = 0; i < (1 << order); i++) {
+ XA_BUG_ON(xa, xa_load(xa, i) != FIVE);
+ XA_BUG_ON(xa, xa_get_order(xa, i) != order);
+ }
+
+ /* Ensure that nothing is stored at index '1 << order' */
+ XA_BUG_ON(xa, xa_load(xa, 1 << order) != NULL);
+
+	/*
+	 * Additionally, store FIVE at '1 << order' so that the node
+	 * information and the order are kept there
+	 */
+ XA_BUG_ON(xa, xa_store_order(xa, 1 << order, order, FIVE, GFP_KERNEL));
+ for (i = (1 << order); i < (1 << order) + (1 << order) - 1; i++) {
+ XA_BUG_ON(xa, xa_load(xa, i) != FIVE);
+ XA_BUG_ON(xa, xa_get_order(xa, i) != order);
+ }
+
+ /* Conditionally replace FIVE entry at index '0' with NULL */
+ XA_BUG_ON(xa, xa_cmpxchg(xa, 0, FIVE, NULL, GFP_KERNEL) != FIVE);
+
+ /* Verify the order is lost at FIVE (and old) entries */
+	/* Verify the order is lost at the old FIVE entry's index */
+
+ /* Verify the order and entries are lost in all the tied indexes */
+ for (i = 0; i < (1 << order); i++) {
+ XA_BUG_ON(xa, xa_load(xa, i) != NULL);
+ XA_BUG_ON(xa, xa_get_order(xa, i) != 0);
+ }
+
+ /* Verify node and order are kept at '1 << order' */
+ for (i = (1 << order); i < (1 << order) + (1 << order) - 1; i++) {
+ XA_BUG_ON(xa, xa_load(xa, i) != FIVE);
+ XA_BUG_ON(xa, xa_get_order(xa, i) != order);
+ }
+
+ xa_store_order(xa, 0, BITS_PER_LONG - 1, NULL, GFP_KERNEL);
+ XA_BUG_ON(xa, !xa_empty(xa));
+#endif
+}
+
static noinline void check_reserve(struct xarray *xa)
{
void *entry;
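
A short illustration of the multi-index behaviour that check_cmpxchg_order() verifies
(editor's sketch, assuming CONFIG_XARRAY_MULTI; the function name is hypothetical): an
entry stored at order N ties 2^N consecutive indices to the same pointer, and they all
report that order until the entry is replaced.

/* Editor's sketch: store one entry covering indices 0..7 (order 3). */
static void store_order3(struct xarray *xa, void *entry)
{
	XA_STATE(xas, xa, 0);

	xas_set_order(&xas, 0, 3);		/* index 0, order 3 */
	do {
		xas_lock(&xas);
		xas_store(&xas, entry);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	/* xa_load(xa, 0) through xa_load(xa, 7) now all return entry,
	 * and xa_get_order() reports 3 for each of those indices.
	 */
}
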
@@ -674,6 +727,181 @@ static noinline void check_multi_store(struct xarray *xa)
#endif
}
+#ifdef CONFIG_XARRAY_MULTI
+/* mimics page cache __filemap_add_folio() */
+static noinline void check_xa_multi_store_adv_add(struct xarray *xa,
+ unsigned long index,
+ unsigned int order,
+ void *p)
+{
+ XA_STATE(xas, xa, index);
+ unsigned int nrpages = 1UL << order;
+
+	/* users are responsible for index alignment to the order when adding */
+ XA_BUG_ON(xa, index & (nrpages - 1));
+
+ xas_set_order(&xas, index, order);
+
+ do {
+ xas_lock_irq(&xas);
+
+ xas_store(&xas, p);
+ XA_BUG_ON(xa, xas_error(&xas));
+ XA_BUG_ON(xa, xa_load(xa, index) != p);
+
+ xas_unlock_irq(&xas);
+ } while (xas_nomem(&xas, GFP_KERNEL));
+
+ XA_BUG_ON(xa, xas_error(&xas));
+}
+
+/* mimics page_cache_delete() */
+static noinline void check_xa_multi_store_adv_del_entry(struct xarray *xa,
+ unsigned long index,
+ unsigned int order)
+{
+ XA_STATE(xas, xa, index);
+
+ xas_set_order(&xas, index, order);
+ xas_store(&xas, NULL);
+ xas_init_marks(&xas);
+}
+
+static noinline void check_xa_multi_store_adv_delete(struct xarray *xa,
+ unsigned long index,
+ unsigned int order)
+{
+ xa_lock_irq(xa);
+ check_xa_multi_store_adv_del_entry(xa, index, order);
+ xa_unlock_irq(xa);
+}
+
+/* mimics page cache filemap_get_entry() */
+static noinline void *test_get_entry(struct xarray *xa, unsigned long index)
+{
+ XA_STATE(xas, xa, index);
+ void *p;
+ static unsigned int loops = 0;
+
+ rcu_read_lock();
+repeat:
+ xas_reset(&xas);
+ p = xas_load(&xas);
+ if (xas_retry(&xas, p))
+ goto repeat;
+ rcu_read_unlock();
+
+	/*
+	 * This is not part of the page cache; this selftest is deliberately
+	 * aggressive and does not want to trust the xarray API but rather
+	 * test it, and for order 20 (4 GiB block size) we can loop over
+	 * a million entries, which can cause a soft lockup. Proper page
+	 * cache APIs loop over the proper order, so when using a larger
+	 * order the shared entries are skipped.
+	 */
+ if (++loops % XA_CHECK_SCHED == 0)
+ schedule();
+
+ return p;
+}
+
+static unsigned long some_val = 0xdeadbeef;
+static unsigned long some_val_2 = 0xdeaddead;
+
+/* mimics the page cache usage */
+static noinline void check_xa_multi_store_adv(struct xarray *xa,
+ unsigned long pos,
+ unsigned int order)
+{
+ unsigned int nrpages = 1UL << order;
+ unsigned long index, base, next_index, next_next_index;
+ unsigned int i;
+
+ index = pos >> PAGE_SHIFT;
+ base = round_down(index, nrpages);
+ next_index = round_down(base + nrpages, nrpages);
+ next_next_index = round_down(next_index + nrpages, nrpages);
+
+ check_xa_multi_store_adv_add(xa, base, order, &some_val);
+
+ for (i = 0; i < nrpages; i++)
+ XA_BUG_ON(xa, test_get_entry(xa, base + i) != &some_val);
+
+ XA_BUG_ON(xa, test_get_entry(xa, next_index) != NULL);
+
+ /* Use order 0 for the next item */
+ check_xa_multi_store_adv_add(xa, next_index, 0, &some_val_2);
+ XA_BUG_ON(xa, test_get_entry(xa, next_index) != &some_val_2);
+
+ /* Remove the next item */
+ check_xa_multi_store_adv_delete(xa, next_index, 0);
+
+ /* Now use order for a new pointer */
+ check_xa_multi_store_adv_add(xa, next_index, order, &some_val_2);
+
+ for (i = 0; i < nrpages; i++)
+ XA_BUG_ON(xa, test_get_entry(xa, next_index + i) != &some_val_2);
+
+ check_xa_multi_store_adv_delete(xa, next_index, order);
+ check_xa_multi_store_adv_delete(xa, base, order);
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ /* starting fresh again */
+
+ /* let's test some holes now */
+
+ /* hole at base and next_next */
+ check_xa_multi_store_adv_add(xa, next_index, order, &some_val_2);
+
+ for (i = 0; i < nrpages; i++)
+ XA_BUG_ON(xa, test_get_entry(xa, base + i) != NULL);
+
+ for (i = 0; i < nrpages; i++)
+ XA_BUG_ON(xa, test_get_entry(xa, next_index + i) != &some_val_2);
+
+ for (i = 0; i < nrpages; i++)
+ XA_BUG_ON(xa, test_get_entry(xa, next_next_index + i) != NULL);
+
+ check_xa_multi_store_adv_delete(xa, next_index, order);
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ /* hole at base and next */
+
+ check_xa_multi_store_adv_add(xa, next_next_index, order, &some_val_2);
+
+ for (i = 0; i < nrpages; i++)
+ XA_BUG_ON(xa, test_get_entry(xa, base + i) != NULL);
+
+ for (i = 0; i < nrpages; i++)
+ XA_BUG_ON(xa, test_get_entry(xa, next_index + i) != NULL);
+
+ for (i = 0; i < nrpages; i++)
+ XA_BUG_ON(xa, test_get_entry(xa, next_next_index + i) != &some_val_2);
+
+ check_xa_multi_store_adv_delete(xa, next_next_index, order);
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+#endif
+
+static noinline void check_multi_store_advanced(struct xarray *xa)
+{
+#ifdef CONFIG_XARRAY_MULTI
+ unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
+ unsigned long end = ULONG_MAX/2;
+ unsigned long pos, i;
+
+ /*
+ * About 117 million tests below.
+ */
+ for (pos = 7; pos < end; pos = (pos * pos) + 564) {
+ for (i = 0; i < max_order; i++) {
+ check_xa_multi_store_adv(xa, pos, i);
+ check_xa_multi_store_adv(xa, pos + 157, i);
+ }
+ }
+#endif
+}
+
static noinline void check_xa_alloc_1(struct xarray *xa, unsigned int base)
{
int i;
@@ -1801,9 +2029,11 @@ static int xarray_checks(void)
check_xas_erase(&array);
check_insert(&array);
check_cmpxchg(&array);
+ check_cmpxchg_order(&array);
check_reserve(&array);
check_reserve(&xa0);
check_multi_store(&array);
+ check_multi_store_advanced(&array);
check_get_order(&array);
check_xa_alloc();
check_find(&array);
diff --git a/lib/ubsan.c b/lib/ubsan.c
index df4f8d1354bb..5fc107f61934 100644
--- a/lib/ubsan.c
+++ b/lib/ubsan.c
@@ -222,6 +222,74 @@ static void ubsan_epilogue(void)
check_panic_on_warn("UBSAN");
}
+static void handle_overflow(struct overflow_data *data, void *lhs,
+ void *rhs, char op)
+{
+ struct type_descriptor *type = data->type;
+ char lhs_val_str[VALUE_LENGTH];
+ char rhs_val_str[VALUE_LENGTH];
+
+ if (suppress_report(&data->location))
+ return;
+
+ ubsan_prologue(&data->location, type_is_signed(type) ?
+ "signed-integer-overflow" :
+ "unsigned-integer-overflow");
+
+ val_to_string(lhs_val_str, sizeof(lhs_val_str), type, lhs);
+ val_to_string(rhs_val_str, sizeof(rhs_val_str), type, rhs);
+ pr_err("%s %c %s cannot be represented in type %s\n",
+ lhs_val_str,
+ op,
+ rhs_val_str,
+ type->type_name);
+
+ ubsan_epilogue();
+}
+
+void __ubsan_handle_add_overflow(void *data,
+ void *lhs, void *rhs)
+{
+ handle_overflow(data, lhs, rhs, '+');
+}
+EXPORT_SYMBOL(__ubsan_handle_add_overflow);
+
+void __ubsan_handle_sub_overflow(void *data,
+ void *lhs, void *rhs)
+{
+ handle_overflow(data, lhs, rhs, '-');
+}
+EXPORT_SYMBOL(__ubsan_handle_sub_overflow);
+
+void __ubsan_handle_mul_overflow(void *data,
+ void *lhs, void *rhs)
+{
+ handle_overflow(data, lhs, rhs, '*');
+}
+EXPORT_SYMBOL(__ubsan_handle_mul_overflow);
+
+void __ubsan_handle_negate_overflow(void *_data, void *old_val)
+{
+ struct overflow_data *data = _data;
+ char old_val_str[VALUE_LENGTH];
+
+ if (suppress_report(&data->location))
+ return;
+
+ ubsan_prologue(&data->location, "negation-overflow");
+
+ val_to_string(old_val_str, sizeof(old_val_str), data->type, old_val);
+
+ pr_err("negation of %s cannot be represented in type %s:\n",
+ old_val_str, data->type->type_name);
+
+ ubsan_epilogue();
+}
+EXPORT_SYMBOL(__ubsan_handle_negate_overflow);
+
void __ubsan_handle_divrem_overflow(void *_data, void *lhs, void *rhs)
{
struct overflow_data *data = _data;
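
To make the new entry points above concrete (editor's sketch, not part of the patch):
with CONFIG_UBSAN_SIGNED_WRAP the compiler lowers plain signed arithmetic into a checked
form that calls these handlers on wraparound, roughly as if the following had been
written by hand. The descriptor below is hypothetical; the real struct overflow_data is
emitted by the compiler with the source location and type filled in.

/* Editor's sketch of the instrumentation the compiler emits for "a + b". */
static struct overflow_data add_desc;	/* hypothetical; compiler-generated in reality */

static int checked_add(int a, int b)
{
	int sum;

	if (__builtin_add_overflow(a, b, &sum))
		/* Values are passed as opaque handles, as in the handlers above. */
		__ubsan_handle_add_overflow(&add_desc,
					    (void *)(unsigned long)a,
					    (void *)(unsigned long)b);
	return sum;
}
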
diff --git a/lib/ubsan.h b/lib/ubsan.h
index 5d99ab81913b..0abbbac8700d 100644
--- a/lib/ubsan.h
+++ b/lib/ubsan.h
@@ -124,6 +124,10 @@ typedef s64 s_max;
typedef u64 u_max;
#endif
+void __ubsan_handle_add_overflow(void *data, void *lhs, void *rhs);
+void __ubsan_handle_sub_overflow(void *data, void *lhs, void *rhs);
+void __ubsan_handle_mul_overflow(void *data, void *lhs, void *rhs);
+void __ubsan_handle_negate_overflow(void *_data, void *old_val);
void __ubsan_handle_divrem_overflow(void *_data, void *lhs, void *rhs);
void __ubsan_handle_type_mismatch(struct type_mismatch_data *data, void *ptr);
void __ubsan_handle_type_mismatch_v1(void *_data, void *ptr);