Diffstat (limited to 'lib')
-rw-r--r--   lib/Kconfig                  |   5
-rw-r--r--   lib/Kconfig.debug            |  15
-rw-r--r--   lib/Kconfig.kcsan            |  20
-rw-r--r--   lib/Makefile                 |   4
-rw-r--r--   lib/asn1_encoder.c           |   2
-rw-r--r--   lib/atomic64.c               |   2
-rw-r--r--   lib/crypto/Kconfig           |  23
-rw-r--r--   lib/crypto/Makefile          |   9
-rw-r--r--   lib/crypto/blake2s-generic.c |   6
-rw-r--r--   lib/crypto/blake2s.c         |   6
-rw-r--r--   lib/iov_iter.c               |  30
-rw-r--r--   lib/kobject.c                |   8
-rw-r--r--   lib/kobject_uevent.c         |   6
-rw-r--r--   lib/kunit/test.c             |  25
-rw-r--r--   lib/locking-selftest.c       | 172
-rw-r--r--   lib/logic_iomem.c            |  23
-rw-r--r--   lib/mpi/mpi-mod.c            |   2
-rw-r--r--   lib/objagg.c                 |   7
-rw-r--r--   lib/raid6/algos.c            |  78
-rw-r--r--   lib/raid6/avx2.c             |   8
-rw-r--r--   lib/raid6/avx512.c           |   6
-rw-r--r--   lib/ref_tracker.c            | 140
-rw-r--r--   lib/string_helpers.c         |  64
-rw-r--r--   lib/test_bpf.c               |   4
-rw-r--r--   lib/test_ref_tracker.c       | 115
-rw-r--r--   lib/vsprintf.c               |   4
-rw-r--r--   lib/xarray.c                 |   6
27 files changed, 598 insertions, 192 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 5e7165e6a346..655b0e43f260 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -680,6 +680,11 @@ config STACK_HASH_ORDER
 	 Select the hash size as a power of 2 for the stackdepot hash table.
 	 Choose a lower value to reduce the memory impact.
 
+config REF_TRACKER
+	bool
+	depends on STACKTRACE_SUPPORT
+	select STACKDEPOT
+
 config SBITMAP
 	bool
 
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 5e14e32056ad..c77fe36bb3d8 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -599,6 +599,11 @@ config DEBUG_MISC
 	  Say Y here if you need to enable miscellaneous debug code that should
 	  be under a more specific debug option but isn't.
 
+menu "Networking Debugging"
+
+source "net/Kconfig.debug"
+
+endmenu # "Networking Debugging"
 
 menu "Memory Debugging"
 
@@ -2107,6 +2112,16 @@ config BACKTRACE_SELF_TEST
 
 	  Say N if you are unsure.
 
+config TEST_REF_TRACKER
+	tristate "Self test for reference tracker"
+	depends on DEBUG_KERNEL && STACKTRACE_SUPPORT
+	select REF_TRACKER
+	help
+	  This option provides a kernel module performing tests
+	  using the reference tracker infrastructure.
+
+	  Say N if you are unsure.
+
 config RBTREE_TEST
 	tristate "Red-Black tree test"
 	depends on DEBUG_KERNEL
diff --git a/lib/Kconfig.kcsan b/lib/Kconfig.kcsan
index e0a93ffdef30..63b70b8c5551 100644
--- a/lib/Kconfig.kcsan
+++ b/lib/Kconfig.kcsan
@@ -191,6 +191,26 @@ config KCSAN_STRICT
 	  closely aligns with the rules defined by the Linux-kernel memory
 	  consistency model (LKMM).
 
+config KCSAN_WEAK_MEMORY
+	bool "Enable weak memory modeling to detect missing memory barriers"
+	default y
+	depends on KCSAN_STRICT
+	# We can either let objtool nop __tsan_func_{entry,exit}() and builtin
+	# atomics instrumentation in .noinstr.text, or use a compiler that can
+	# implement __no_kcsan to really remove all instrumentation.
+	depends on STACK_VALIDATION || CC_IS_GCC || CLANG_VERSION >= 140000
+	help
+	  Enable support for modeling a subset of weak memory, which allows
+	  detecting a subset of data races due to missing memory barriers.
+
+	  Depends on KCSAN_STRICT, because the options strengthening certain
+	  plain accesses by default (depending on !KCSAN_STRICT) reduce the
+	  ability to detect any data races involving reordered accesses, in
+	  particular reordered writes.
+
+	  Weak memory modeling relies on additional instrumentation and may
+	  affect performance.
+
 config KCSAN_REPORT_VALUE_CHANGE_ONLY
 	bool "Only report races where watcher observed a data value change"
 	default y
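[A sketch of the class of bug KCSAN_WEAK_MEMORY is meant to catch; this example is illustrative only and not part of the patch. Without the barrier pair, the plain store to shared_data may be reordered past the flag store, so the reader can see ready == 1 while shared_data is still stale:]

	#include <linux/compiler.h>
	#include <asm/barrier.h>

	static int shared_data;
	static int ready;

	static void producer(void)
	{
		shared_data = 42;
		smp_wmb();		/* order data before the flag */
		WRITE_ONCE(ready, 1);
	}

	static int consumer(void)
	{
		if (!READ_ONCE(ready))
			return -EAGAIN;
		smp_rmb();		/* pairs with producer's smp_wmb() */
		return shared_data;
	}
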
diff --git a/lib/Makefile b/lib/Makefile
index 364c23f15578..b213a7bbf3fd 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -101,7 +101,7 @@ obj-$(CONFIG_TEST_LOCKUP) += test_lockup.o
 obj-$(CONFIG_TEST_HMM) += test_hmm.o
 obj-$(CONFIG_TEST_FREE_PAGES) += test_free_pages.o
 obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o
-
+obj-$(CONFIG_TEST_REF_TRACKER) += test_ref_tracker.o
 #
 # CFLAGS for compiling floating point code inside the kernel. x86/Makefile turns
 # off the generation of FPU/SSE* instructions for kernel proper but FPU_FLAGS
@@ -270,6 +270,8 @@ obj-$(CONFIG_STACKDEPOT) += stackdepot.o
 KASAN_SANITIZE_stackdepot.o := n
 KCOV_INSTRUMENT_stackdepot.o := n
 
+obj-$(CONFIG_REF_TRACKER) += ref_tracker.o
+
 libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o \
 	       fdt_empty_tree.o fdt_addresses.o
 $(foreach file, $(libfdt_files), \
diff --git a/lib/asn1_encoder.c b/lib/asn1_encoder.c
index 27bbe891714f..0fd3c454a468 100644
--- a/lib/asn1_encoder.c
+++ b/lib/asn1_encoder.c
@@ -164,8 +164,6 @@ asn1_encode_oid(unsigned char *data, const unsigned char *end_data,
 
 	data_len -= 3;
 
-	ret = 0;
-
 	for (i = 2; i < oid_len; i++) {
 		ret = asn1_encode_oid_digit(&d, &data_len, oid[i]);
 		if (ret < 0)
diff --git a/lib/atomic64.c b/lib/atomic64.c
index 3df653994177..caf895789a1e 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -118,7 +118,6 @@ ATOMIC64_OPS(sub, -=)
 #undef ATOMIC64_OPS
 #define ATOMIC64_OPS(op, c_op)						\
 	ATOMIC64_OP(op, c_op)						\
-	ATOMIC64_OP_RETURN(op, c_op)					\
 	ATOMIC64_FETCH_OP(op, c_op)
 
 ATOMIC64_OPS(and, &=)
@@ -127,7 +126,6 @@ ATOMIC64_OPS(xor, ^=)
 
 #undef ATOMIC64_OPS
 #undef ATOMIC64_FETCH_OP
-#undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
 
 s64 generic_atomic64_dec_if_positive(atomic64_t *v)
diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig
index 545ccbddf6a1..8620f38e117c 100644
--- a/lib/crypto/Kconfig
+++ b/lib/crypto/Kconfig
@@ -1,7 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0
 
-comment "Crypto library routines"
-
 config CRYPTO_LIB_AES
 	tristate
 
@@ -9,14 +7,14 @@ config CRYPTO_LIB_ARC4
 	tristate
 
 config CRYPTO_ARCH_HAVE_LIB_BLAKE2S
-	tristate
+	bool
 	help
 	  Declares whether the architecture provides an arch-specific
 	  accelerated implementation of the Blake2s library interface,
 	  either builtin or as a module.
 
 config CRYPTO_LIB_BLAKE2S_GENERIC
-	tristate
+	def_bool !CRYPTO_ARCH_HAVE_LIB_BLAKE2S
 	help
 	  This symbol can be depended upon by arch implementations of the
 	  Blake2s library interface that require the generic code as a
@@ -24,15 +22,6 @@ config CRYPTO_LIB_BLAKE2S_GENERIC
 	  implementation is enabled, this implementation serves the users
 	  of CRYPTO_LIB_BLAKE2S.
 
-config CRYPTO_LIB_BLAKE2S
-	tristate "BLAKE2s hash function library"
-	depends on CRYPTO_ARCH_HAVE_LIB_BLAKE2S || !CRYPTO_ARCH_HAVE_LIB_BLAKE2S
-	select CRYPTO_LIB_BLAKE2S_GENERIC if CRYPTO_ARCH_HAVE_LIB_BLAKE2S=n
-	help
-	  Enable the Blake2s library interface. This interface may be fulfilled
-	  by either the generic implementation or an arch-specific one, if one
-	  is available and enabled.
-
 config CRYPTO_ARCH_HAVE_LIB_CHACHA
 	tristate
 	help
@@ -51,7 +40,7 @@ config CRYPTO_LIB_CHACHA_GENERIC
 	  of CRYPTO_LIB_CHACHA.
 
 config CRYPTO_LIB_CHACHA
-	tristate "ChaCha library interface"
+	tristate
 	depends on CRYPTO_ARCH_HAVE_LIB_CHACHA || !CRYPTO_ARCH_HAVE_LIB_CHACHA
 	select CRYPTO_LIB_CHACHA_GENERIC if CRYPTO_ARCH_HAVE_LIB_CHACHA=n
 	help
@@ -76,7 +65,7 @@ config CRYPTO_LIB_CURVE25519_GENERIC
 	  of CRYPTO_LIB_CURVE25519.
 
 config CRYPTO_LIB_CURVE25519
-	tristate "Curve25519 scalar multiplication library"
+	tristate
 	depends on CRYPTO_ARCH_HAVE_LIB_CURVE25519 || !CRYPTO_ARCH_HAVE_LIB_CURVE25519
 	select CRYPTO_LIB_CURVE25519_GENERIC if CRYPTO_ARCH_HAVE_LIB_CURVE25519=n
 	help
@@ -111,7 +100,7 @@ config CRYPTO_LIB_POLY1305_GENERIC
 	  of CRYPTO_LIB_POLY1305.
 
 config CRYPTO_LIB_POLY1305
-	tristate "Poly1305 library interface"
+	tristate
 	depends on CRYPTO_ARCH_HAVE_LIB_POLY1305 || !CRYPTO_ARCH_HAVE_LIB_POLY1305
 	select CRYPTO_LIB_POLY1305_GENERIC if CRYPTO_ARCH_HAVE_LIB_POLY1305=n
 	help
@@ -120,7 +109,7 @@ config CRYPTO_LIB_POLY1305
 	  is available and enabled.
 
 config CRYPTO_LIB_CHACHA20POLY1305
-	tristate "ChaCha20-Poly1305 AEAD support (8-byte nonce library version)"
+	tristate
 	depends on CRYPTO_ARCH_HAVE_LIB_CHACHA || !CRYPTO_ARCH_HAVE_LIB_CHACHA
 	depends on CRYPTO_ARCH_HAVE_LIB_POLY1305 || !CRYPTO_ARCH_HAVE_LIB_POLY1305
 	select CRYPTO_LIB_CHACHA
diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile
index 73205ed269ba..ed43a41f2dcc 100644
--- a/lib/crypto/Makefile
+++ b/lib/crypto/Makefile
@@ -10,11 +10,10 @@ libaes-y					:= aes.o
 obj-$(CONFIG_CRYPTO_LIB_ARC4)			+= libarc4.o
 libarc4-y					:= arc4.o
 
-obj-$(CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC)	+= libblake2s-generic.o
-libblake2s-generic-y				+= blake2s-generic.o
-
-obj-$(CONFIG_CRYPTO_LIB_BLAKE2S)		+= libblake2s.o
-libblake2s-y					+= blake2s.o
+# blake2s is used by the /dev/random driver which is always builtin
+obj-y						+= libblake2s.o
+libblake2s-y					:= blake2s.o
+libblake2s-$(CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC)	+= blake2s-generic.o
 
 obj-$(CONFIG_CRYPTO_LIB_CHACHA20POLY1305)	+= libchacha20poly1305.o
 libchacha20poly1305-y				+= chacha20poly1305.o
diff --git a/lib/crypto/blake2s-generic.c b/lib/crypto/blake2s-generic.c
index 04ff8df24513..75ccb3e633e6 100644
--- a/lib/crypto/blake2s-generic.c
+++ b/lib/crypto/blake2s-generic.c
@@ -37,7 +37,11 @@ static inline void blake2s_increment_counter(struct blake2s_state *state,
 	state->t[1] += (state->t[0] < inc);
 }
 
-void blake2s_compress_generic(struct blake2s_state *state,const u8 *block,
+void blake2s_compress(struct blake2s_state *state, const u8 *block,
+		      size_t nblocks, const u32 inc)
+	__weak __alias(blake2s_compress_generic);
+
+void blake2s_compress_generic(struct blake2s_state *state, const u8 *block,
 			      size_t nblocks, const u32 inc)
 {
 	u32 m[16];
diff --git a/lib/crypto/blake2s.c b/lib/crypto/blake2s.c
index 4055aa593ec4..93f2ae051370 100644
--- a/lib/crypto/blake2s.c
+++ b/lib/crypto/blake2s.c
@@ -16,12 +16,6 @@
 #include <linux/init.h>
 #include <linux/bug.h>
 
-#if IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S)
-# define blake2s_compress blake2s_compress_arch
-#else
-# define blake2s_compress blake2s_compress_generic
-#endif
-
 void blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen)
 {
 	__blake2s_update(state, in, inlen, blake2s_compress);
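[The blake2s hunks above replace #ifdef dispatch with the linker's weak-alias mechanism. A minimal standalone sketch of the pattern, with hypothetical names; the generic translation unit provides the default symbol, and an arch object overrides it simply by defining a strong symbol of the same name:]

	/* do_work() resolves to do_work_generic() unless another object
	 * file defines a strong do_work(), in which case the linker picks
	 * the strong (arch-optimized) definition; call sites need no #ifdef.
	 */
	void do_work(int *state, const unsigned char *block)
		__attribute__((weak, alias("do_work_generic")));

	void do_work_generic(int *state, const unsigned char *block)
	{
		/* portable fallback implementation */
		*state += block[0];
	}
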
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 66a740e6e153..b0e0acdf96c1 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -69,42 +69,40 @@
 #define iterate_xarray(i, n, base, len, __off, STEP) {		\
 	__label__ __out;					\
 	size_t __off = 0;					\
-	struct page *head = NULL;				\
+	struct folio *folio;					\
 	loff_t start = i->xarray_start + i->iov_offset;		\
-	unsigned offset = start % PAGE_SIZE;			\
 	pgoff_t index = start / PAGE_SIZE;			\
-	int j;							\
-								\
 	XA_STATE(xas, i->xarray, index);			\
 								\
+	len = PAGE_SIZE - offset_in_page(start);		\
 	rcu_read_lock();					\
-	xas_for_each(&xas, head, ULONG_MAX) {			\
+	xas_for_each(&xas, folio, ULONG_MAX) {			\
 		unsigned left;					\
-		if (xas_retry(&xas, head))			\
+		size_t offset;					\
+		if (xas_retry(&xas, folio))			\
 			continue;				\
-		if (WARN_ON(xa_is_value(head)))			\
+		if (WARN_ON(xa_is_value(folio)))		\
 			break;					\
-		if (WARN_ON(PageHuge(head)))			\
+		if (WARN_ON(folio_test_hugetlb(folio)))		\
 			break;					\
-		for (j = (head->index < index) ? index - head->index : 0; \
-		     j < thp_nr_pages(head); j++) {		\
-			void *kaddr = kmap_local_page(head + j);	\
-			base = kaddr + offset;			\
-			len = PAGE_SIZE - offset;		\
+		offset = offset_in_folio(folio, start + __off);	\
+		while (offset < folio_size(folio)) {		\
+			base = kmap_local_folio(folio, offset);	\
 			len = min(n, len);			\
 			left = (STEP);				\
-			kunmap_local(kaddr);			\
+			kunmap_local(base);			\
 			len -= left;				\
 			__off += len;				\
 			n -= len;				\
 			if (left || n == 0)			\
 				goto __out;			\
-			offset = 0;				\
+			offset += len;				\
+			len = PAGE_SIZE;			\
 		}						\
 	}							\
 __out:								\
 	rcu_read_unlock();					\
-	i->iov_offset += __off;						\
+	i->iov_offset += __off;					\
 	n = __off;						\
 }
diff --git a/lib/kobject.c b/lib/kobject.c
index 4a56f519139d..56fa037501b5 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -65,7 +65,7 @@ void kobject_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid)
  */
 static int populate_dir(struct kobject *kobj)
 {
-	struct kobj_type *t = get_ktype(kobj);
+	const struct kobj_type *t = get_ktype(kobj);
 	struct attribute *attr;
 	int error = 0;
 	int i;
@@ -346,7 +346,7 @@ EXPORT_SYMBOL(kobject_set_name);
  * to kobject_put(), not by a call to kfree directly to ensure that all of
  * the memory is cleaned up properly.
  */
-void kobject_init(struct kobject *kobj, struct kobj_type *ktype)
+void kobject_init(struct kobject *kobj, const struct kobj_type *ktype)
 {
 	char *err_str;
 
@@ -461,7 +461,7 @@ EXPORT_SYMBOL(kobject_add);
 * same type of error handling after a call to kobject_add() and kobject
 * lifetime rules are the same here.
 */
-int kobject_init_and_add(struct kobject *kobj, struct kobj_type *ktype,
+int kobject_init_and_add(struct kobject *kobj, const struct kobj_type *ktype,
 			 struct kobject *parent, const char *fmt, ...)
 {
 	va_list args;
@@ -679,7 +679,7 @@ EXPORT_SYMBOL(kobject_get_unless_zero);
 static void kobject_cleanup(struct kobject *kobj)
 {
 	struct kobject *parent = kobj->parent;
-	struct kobj_type *t = get_ktype(kobj);
+	const struct kobj_type *t = get_ktype(kobj);
 	const char *name = kobj->name;
 
 	pr_debug("kobject: '%s' (%p): %s, parent %p\n",
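[With kobject_init() and kobject_init_and_add() now taking a const pointer, a ktype can live in rodata. A minimal usage sketch against the constified API; the names are hypothetical:]

	static void example_release(struct kobject *kobj)
	{
		/* free the structure embedding kobj here */
	}

	/* can now be const thanks to the API change above */
	static const struct kobj_type example_ktype = {
		.release	= example_release,
		.sysfs_ops	= &kobj_sysfs_ops,
	};

	static int example_add(struct kobject *kobj, struct kobject *parent)
	{
		return kobject_init_and_add(kobj, &example_ktype, parent,
					    "example");
	}
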
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index c87d5b6a8a55..7c44b7ae4c5c 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -501,7 +501,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
 	}
 	/* skip the event, if the filter returns zero. */
 	if (uevent_ops && uevent_ops->filter)
-		if (!uevent_ops->filter(kset, kobj)) {
+		if (!uevent_ops->filter(kobj)) {
 			pr_debug("kobject: '%s' (%p): %s: filter function "
 				 "caused the event to drop!\n",
 				 kobject_name(kobj), kobj, __func__);
@@ -510,7 +510,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
 
 	/* originating subsystem */
 	if (uevent_ops && uevent_ops->name)
-		subsystem = uevent_ops->name(kset, kobj);
+		subsystem = uevent_ops->name(kobj);
 	else
 		subsystem = kobject_name(&kset->kobj);
 	if (!subsystem) {
@@ -554,7 +554,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
 
 	/* let the kset specific function add its stuff */
 	if (uevent_ops && uevent_ops->uevent) {
-		retval = uevent_ops->uevent(kset, kobj, env);
+		retval = uevent_ops->uevent(kobj, env);
 		if (retval) {
 			pr_debug("kobject: '%s' (%p): %s: uevent() returned "
 				 "%d\n", kobject_name(kobj), kobj,
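[These hunks track a kset_uevent_ops interface change: the callbacks no longer receive the kset, only the kobject (the kset is still reachable as kobj->kset). A sketch of an ops table against the new signatures; the subsystem names are hypothetical:]

	static int example_uevent_filter(struct kobject *kobj)
	{
		return 1;	/* emit events for every kobject in this kset */
	}

	static int example_uevent(struct kobject *kobj,
				  struct kobj_uevent_env *env)
	{
		return add_uevent_var(env, "EXAMPLE_NAME=%s",
				      kobject_name(kobj));
	}

	static const struct kset_uevent_ops example_uevent_ops = {
		.filter	= example_uevent_filter,
		.uevent	= example_uevent,
	};
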
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
index 3bd741e50a2d..c7ed4aabec04 100644
--- a/lib/kunit/test.c
+++ b/lib/kunit/test.c
@@ -504,25 +504,28 @@ int kunit_run_tests(struct kunit_suite *suite)
 		struct kunit_result_stats param_stats = { 0 };
 		test_case->status = KUNIT_SKIPPED;
 
-		if (test_case->generate_params) {
+		if (!test_case->generate_params) {
+			/* Non-parameterised test. */
+			kunit_run_case_catch_errors(suite, test_case, &test);
+			kunit_update_stats(&param_stats, test.status);
+		} else {
 			/* Get initial param. */
 			param_desc[0] = '\0';
 			test.param_value = test_case->generate_params(NULL, param_desc);
-		}
+			kunit_log(KERN_INFO, &test, KUNIT_SUBTEST_INDENT KUNIT_SUBTEST_INDENT
+				  "# Subtest: %s", test_case->name);
 
-		do {
-			kunit_run_case_catch_errors(suite, test_case, &test);
+			while (test.param_value) {
+				kunit_run_case_catch_errors(suite, test_case, &test);
 
-			if (test_case->generate_params) {
 				if (param_desc[0] == '\0') {
 					snprintf(param_desc, sizeof(param_desc),
 						 "param-%d", test.param_index);
 				}
 
 				kunit_log(KERN_INFO, &test,
-					  KUNIT_SUBTEST_INDENT
-					  "# %s: %s %d - %s",
-					  test_case->name,
+					  KUNIT_SUBTEST_INDENT KUNIT_SUBTEST_INDENT
+					  "%s %d - %s",
 					  kunit_status_to_ok_not_ok(test.status),
 					  test.param_index + 1, param_desc);
 
@@ -530,11 +533,11 @@ int kunit_run_tests(struct kunit_suite *suite)
 				param_desc[0] = '\0';
 				test.param_value = test_case->generate_params(test.param_value, param_desc);
 				test.param_index++;
-			}
 
-			kunit_update_stats(&param_stats, test.status);
+				kunit_update_stats(&param_stats, test.status);
+			}
+		}
 
-		} while (test.param_value);
 
 		kunit_print_test_stats(&test, param_stats);
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index 71652e1c397c..8d24279fad05 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -26,6 +26,12 @@
 #include <linux/rtmutex.h>
 #include <linux/local_lock.h>
 
+#ifdef CONFIG_PREEMPT_RT
+# define NON_RT(...)
+#else
+# define NON_RT(...)	__VA_ARGS__
+#endif
+
 /*
  * Change this to 1 if you want to see the failure printouts:
  */
@@ -139,7 +145,7 @@ static DEFINE_RT_MUTEX(rtmutex_Z2);
 
 #endif
 
-static local_lock_t local_A = INIT_LOCAL_LOCK(local_A);
+static DEFINE_PER_CPU(local_lock_t, local_A);
 
 /*
  * non-inlined runtime initializers, to let separate locks share
@@ -712,12 +718,18 @@ GENERATE_TESTCASE(ABCDBCDA_rtmutex);
 
 #undef E
 
+#ifdef CONFIG_PREEMPT_RT
+# define RT_PREPARE_DBL_UNLOCK()	{ migrate_disable(); rcu_read_lock(); }
+#else
+# define RT_PREPARE_DBL_UNLOCK()
+#endif
 /*
  * Double unlock:
  */
 #define E()					\
 						\
 	LOCK(A);				\
+	RT_PREPARE_DBL_UNLOCK();		\
 	UNLOCK(A);				\
 	UNLOCK(A); /* fail */
 
@@ -802,6 +814,7 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_rlock)
 #include "locking-selftest-wlock-hardirq.h"
 GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_wlock)
 
+#ifndef CONFIG_PREEMPT_RT
 #include "locking-selftest-spin-softirq.h"
 GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_spin)
 
@@ -810,10 +823,12 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_rlock)
 #include "locking-selftest-wlock-softirq.h"
 GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_wlock)
+#endif
 
 #undef E1
 #undef E2
 
+#ifndef CONFIG_PREEMPT_RT
 /*
  * Enabling hardirqs with a softirq-safe lock held:
  */
@@ -846,6 +861,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_rlock)
 #undef E1
 #undef E2
 
+#endif
+
 /*
  * Enabling irqs with an irq-safe lock held:
  */
@@ -875,6 +892,7 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_rlock)
 #include "locking-selftest-wlock-hardirq.h"
 GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_wlock)
 
+#ifndef CONFIG_PREEMPT_RT
 #include "locking-selftest-spin-softirq.h"
 GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_spin)
 
@@ -883,6 +901,7 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_rlock)
 #include "locking-selftest-wlock-softirq.h"
 GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_wlock)
+#endif
 
 #undef E1
 #undef E2
@@ -921,6 +940,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_rlock)
 #include "locking-selftest-wlock-hardirq.h"
 GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_wlock)
 
+#ifndef CONFIG_PREEMPT_RT
 #include "locking-selftest-spin-softirq.h"
 GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_spin)
 
@@ -929,6 +949,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_rlock)
 #include "locking-selftest-wlock-softirq.h"
 GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_wlock)
+#endif
 
 #undef E1
 #undef E2
@@ -969,6 +990,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_rlock)
 #include "locking-selftest-wlock-hardirq.h"
 GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_wlock)
 
+#ifndef CONFIG_PREEMPT_RT
 #include "locking-selftest-spin-softirq.h"
 GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_spin)
 
@@ -977,6 +999,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_rlock)
 #include "locking-selftest-wlock-softirq.h"
 GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_wlock)
+#endif
 
 #undef E1
 #undef E2
@@ -1031,6 +1054,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_hard_rlock)
 #include "locking-selftest-wlock-hardirq.h"
 GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_hard_wlock)
 
+#ifndef CONFIG_PREEMPT_RT
 #include "locking-selftest-spin-softirq.h"
 GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_spin)
 
@@ -1039,6 +1063,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_rlock)
 #include "locking-selftest-wlock-softirq.h"
 GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_wlock)
+#endif
 
 #undef E1
 #undef E2
@@ -1206,12 +1231,14 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_hard_rlock)
 #include "locking-selftest-wlock.h"
 GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_hard_wlock)
 
+#ifndef CONFIG_PREEMPT_RT
 #include "locking-selftest-softirq.h"
 #include "locking-selftest-rlock.h"
 GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft_rlock)
 
 #include "locking-selftest-wlock.h"
 GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft_wlock)
+#endif
 
 #undef E1
 #undef E2
@@ -1252,12 +1279,14 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_hard_rlock)
 #include "locking-selftest-wlock.h"
 GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_hard_wlock)
 
+#ifndef CONFIG_PREEMPT_RT
 #include "locking-selftest-softirq.h"
 #include "locking-selftest-rlock.h"
 GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_soft_rlock)
 
 #include "locking-selftest-wlock.h"
 GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_soft_wlock)
+#endif
 
 #undef E1
 #undef E2
@@ -1306,12 +1335,14 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_hard_rlock)
 #include "locking-selftest-wlock.h"
 GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_hard_wlock)
 
+#ifndef CONFIG_PREEMPT_RT
 #include "locking-selftest-softirq.h"
 #include "locking-selftest-rlock.h"
 GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_soft_rlock)
 
 #include "locking-selftest-wlock.h"
 GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_soft_wlock)
+#endif
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # define I_SPINLOCK(x)	lockdep_reset_lock(&lock_##x.dep_map)
@@ -1320,7 +1351,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_soft_wlock)
 # define I_MUTEX(x)	lockdep_reset_lock(&mutex_##x.dep_map)
 # define I_RWSEM(x)	lockdep_reset_lock(&rwsem_##x.dep_map)
 # define I_WW(x)	lockdep_reset_lock(&x.dep_map)
-# define I_LOCAL_LOCK(x) lockdep_reset_lock(&local_##x.dep_map)
+# define I_LOCAL_LOCK(x) lockdep_reset_lock(this_cpu_ptr(&local_##x.dep_map))
 #ifdef CONFIG_RT_MUTEXES
 # define I_RTMUTEX(x)	lockdep_reset_lock(&rtmutex_##x.dep_map)
 #endif
@@ -1380,7 +1411,7 @@ static void reset_locks(void)
 	init_shared_classes();
 	raw_spin_lock_init(&raw_lock_A);
 	raw_spin_lock_init(&raw_lock_B);
-	local_lock_init(&local_A);
+	local_lock_init(this_cpu_ptr(&local_A));
 
 	ww_mutex_init(&o, &ww_lockdep); ww_mutex_init(&o2, &ww_lockdep); ww_mutex_init(&o3, &ww_lockdep);
 	memset(&t, 0, sizeof(t)); memset(&t2, 0, sizeof(t2));
@@ -1398,7 +1429,13 @@ static int unexpected_testcase_failures;
 
 static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask)
 {
-	unsigned long saved_preempt_count = preempt_count();
+	int saved_preempt_count = preempt_count();
+#ifdef CONFIG_PREEMPT_RT
+#ifdef CONFIG_SMP
+	int saved_mgd_count = current->migration_disabled;
+#endif
+	int saved_rcu_count = current->rcu_read_lock_nesting;
+#endif
 
 	WARN_ON(irqs_disabled());
 
@@ -1432,6 +1469,18 @@ static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask)
 	 * count, so restore it:
 	 */
 	preempt_count_set(saved_preempt_count);
+
+#ifdef CONFIG_PREEMPT_RT
+#ifdef CONFIG_SMP
+	while (current->migration_disabled > saved_mgd_count)
+		migrate_enable();
+#endif
+
+	while (current->rcu_read_lock_nesting > saved_rcu_count)
+		rcu_read_unlock();
+	WARN_ON_ONCE(current->rcu_read_lock_nesting < saved_rcu_count);
+#endif
+
 #ifdef CONFIG_TRACE_IRQFLAGS
 	if (softirq_count())
 		current->softirqs_enabled = 0;
@@ -1499,7 +1548,7 @@ static inline void print_testname(const char *testname)
 
 #define DO_TESTCASE_2x2RW(desc, name, nr)			\
 	DO_TESTCASE_2RW("hard-"desc, name##_hard, nr)		\
-	DO_TESTCASE_2RW("soft-"desc, name##_soft, nr)		\
+	NON_RT(DO_TESTCASE_2RW("soft-"desc, name##_soft, nr))	\
 
 #define DO_TESTCASE_6x2x2RW(desc, name)				\
 	DO_TESTCASE_2x2RW(desc, name, 123);			\
@@ -1547,19 +1596,19 @@ static inline void print_testname(const char *testname)
 
 #define DO_TESTCASE_2I(desc, name, nr)				\
 	DO_TESTCASE_1("hard-"desc, name##_hard, nr);		\
-	DO_TESTCASE_1("soft-"desc, name##_soft, nr);
+	NON_RT(DO_TESTCASE_1("soft-"desc, name##_soft, nr));
 
 #define DO_TESTCASE_2IB(desc, name, nr)				\
 	DO_TESTCASE_1B("hard-"desc, name##_hard, nr);		\
-	DO_TESTCASE_1B("soft-"desc, name##_soft, nr);
+	NON_RT(DO_TESTCASE_1B("soft-"desc, name##_soft, nr));
 
 #define DO_TESTCASE_6I(desc, name, nr)				\
 	DO_TESTCASE_3("hard-"desc, name##_hard, nr);		\
-	DO_TESTCASE_3("soft-"desc, name##_soft, nr);
+	NON_RT(DO_TESTCASE_3("soft-"desc, name##_soft, nr));
 
 #define DO_TESTCASE_6IRW(desc, name, nr)			\
 	DO_TESTCASE_3RW("hard-"desc, name##_hard, nr);		\
-	DO_TESTCASE_3RW("soft-"desc, name##_soft, nr);
+	NON_RT(DO_TESTCASE_3RW("soft-"desc, name##_soft, nr));
 
 #define DO_TESTCASE_2x3(desc, name)				\
 	DO_TESTCASE_3(desc, name, 12);				\
@@ -1651,6 +1700,22 @@ static void ww_test_fail_acquire(void)
 #endif
 }
 
+#ifdef CONFIG_PREEMPT_RT
+#define ww_mutex_base_lock(b)			rt_mutex_lock(b)
+#define ww_mutex_base_trylock(b)		rt_mutex_trylock(b)
+#define ww_mutex_base_lock_nest_lock(b, b2)	rt_mutex_lock_nest_lock(b, b2)
+#define ww_mutex_base_lock_interruptible(b)	rt_mutex_lock_interruptible(b)
+#define ww_mutex_base_lock_killable(b)		rt_mutex_lock_killable(b)
+#define ww_mutex_base_unlock(b)			rt_mutex_unlock(b)
+#else
+#define ww_mutex_base_lock(b)			mutex_lock(b)
+#define ww_mutex_base_trylock(b)		mutex_trylock(b)
+#define ww_mutex_base_lock_nest_lock(b, b2)	mutex_lock_nest_lock(b, b2)
+#define ww_mutex_base_lock_interruptible(b)	mutex_lock_interruptible(b)
+#define ww_mutex_base_lock_killable(b)		mutex_lock_killable(b)
+#define ww_mutex_base_unlock(b)			mutex_unlock(b)
+#endif
+
 static void ww_test_normal(void)
 {
 	int ret;
@@ -1665,50 +1730,50 @@ static void ww_test_normal(void)
 
 	/* mutex_lock (and indirectly, mutex_lock_nested) */
 	o.ctx = (void *)~0UL;
-	mutex_lock(&o.base);
-	mutex_unlock(&o.base);
+	ww_mutex_base_lock(&o.base);
+	ww_mutex_base_unlock(&o.base);
 	WARN_ON(o.ctx != (void *)~0UL);
 
 	/* mutex_lock_interruptible (and *_nested) */
 	o.ctx = (void *)~0UL;
-	ret = mutex_lock_interruptible(&o.base);
+	ret = ww_mutex_base_lock_interruptible(&o.base);
 	if (!ret)
-		mutex_unlock(&o.base);
+		ww_mutex_base_unlock(&o.base);
 	else
 		WARN_ON(1);
 	WARN_ON(o.ctx != (void *)~0UL);
 
 	/* mutex_lock_killable (and *_nested) */
 	o.ctx = (void *)~0UL;
-	ret = mutex_lock_killable(&o.base);
+	ret = ww_mutex_base_lock_killable(&o.base);
 	if (!ret)
-		mutex_unlock(&o.base);
+		ww_mutex_base_unlock(&o.base);
 	else
 		WARN_ON(1);
 	WARN_ON(o.ctx != (void *)~0UL);
 
 	/* trylock, succeeding */
 	o.ctx = (void *)~0UL;
-	ret = mutex_trylock(&o.base);
+	ret = ww_mutex_base_trylock(&o.base);
 	WARN_ON(!ret);
 	if (ret)
-		mutex_unlock(&o.base);
+		ww_mutex_base_unlock(&o.base);
 	else
 		WARN_ON(1);
 	WARN_ON(o.ctx != (void *)~0UL);
 
 	/* trylock, failing */
 	o.ctx = (void *)~0UL;
-	mutex_lock(&o.base);
-	ret = mutex_trylock(&o.base);
+	ww_mutex_base_lock(&o.base);
+	ret = ww_mutex_base_trylock(&o.base);
 	WARN_ON(ret);
-	mutex_unlock(&o.base);
+	ww_mutex_base_unlock(&o.base);
 	WARN_ON(o.ctx != (void *)~0UL);
 
 	/* nest_lock */
 	o.ctx = (void *)~0UL;
-	mutex_lock_nest_lock(&o.base, &t);
-	mutex_unlock(&o.base);
+	ww_mutex_base_lock_nest_lock(&o.base, &t);
+	ww_mutex_base_unlock(&o.base);
 	WARN_ON(o.ctx != (void *)~0UL);
 }
 
@@ -1721,7 +1786,7 @@ static void ww_test_two_contexts(void)
 static void ww_test_diff_class(void)
 {
 	WWAI(&t);
-#ifdef CONFIG_DEBUG_MUTEXES
+#ifdef DEBUG_WW_MUTEXES
 	t.ww_class = NULL;
 #endif
 	WWL(&o, &t);
@@ -1785,7 +1850,7 @@ static void ww_test_edeadlk_normal(void)
 {
 	int ret;
 
-	mutex_lock(&o2.base);
+	ww_mutex_base_lock(&o2.base);
 	o2.ctx = &t2;
 	mutex_release(&o2.base.dep_map, _THIS_IP_);
 
@@ -1801,7 +1866,7 @@ static void ww_test_edeadlk_normal(void)
 	o2.ctx = NULL;
 	mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_);
-	mutex_unlock(&o2.base);
+	ww_mutex_base_unlock(&o2.base);
 	WWU(&o);
 
 	WWL(&o2, &t);
@@ -1811,7 +1876,7 @@ static void ww_test_edeadlk_normal_slow(void)
 {
 	int ret;
 
-	mutex_lock(&o2.base);
+	ww_mutex_base_lock(&o2.base);
 	mutex_release(&o2.base.dep_map, _THIS_IP_);
 	o2.ctx = &t2;
 
@@ -1827,7 +1892,7 @@ static void ww_test_edeadlk_normal_slow(void)
 	o2.ctx = NULL;
 	mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_);
-	mutex_unlock(&o2.base);
+	ww_mutex_base_unlock(&o2.base);
 	WWU(&o);
 
 	ww_mutex_lock_slow(&o2, &t);
@@ -1837,7 +1902,7 @@ static void ww_test_edeadlk_no_unlock(void)
 {
 	int ret;
 
-	mutex_lock(&o2.base);
+	ww_mutex_base_lock(&o2.base);
 	o2.ctx = &t2;
 	mutex_release(&o2.base.dep_map, _THIS_IP_);
 
@@ -1853,7 +1918,7 @@ static void ww_test_edeadlk_no_unlock(void)
 	o2.ctx = NULL;
 	mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_);
-	mutex_unlock(&o2.base);
+	ww_mutex_base_unlock(&o2.base);
 
 	WWL(&o2, &t);
 }
@@ -1862,7 +1927,7 @@ static void ww_test_edeadlk_no_unlock_slow(void)
 {
 	int ret;
 
-	mutex_lock(&o2.base);
+	ww_mutex_base_lock(&o2.base);
 	mutex_release(&o2.base.dep_map, _THIS_IP_);
 	o2.ctx = &t2;
 
@@ -1878,7 +1943,7 @@ static void ww_test_edeadlk_no_unlock_slow(void)
 	o2.ctx = NULL;
 	mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_);
-	mutex_unlock(&o2.base);
+	ww_mutex_base_unlock(&o2.base);
 
 	ww_mutex_lock_slow(&o2, &t);
 }
@@ -1887,7 +1952,7 @@ static void ww_test_edeadlk_acquire_more(void)
 {
 	int ret;
 
-	mutex_lock(&o2.base);
+	ww_mutex_base_lock(&o2.base);
 	mutex_release(&o2.base.dep_map, _THIS_IP_);
 	o2.ctx = &t2;
 
@@ -1908,7 +1973,7 @@ static void ww_test_edeadlk_acquire_more_slow(void)
 {
 	int ret;
 
-	mutex_lock(&o2.base);
+	ww_mutex_base_lock(&o2.base);
 	mutex_release(&o2.base.dep_map, _THIS_IP_);
 	o2.ctx = &t2;
 
@@ -1929,11 +1994,11 @@ static void ww_test_edeadlk_acquire_more_edeadlk(void)
 {
 	int ret;
 
-	mutex_lock(&o2.base);
+	ww_mutex_base_lock(&o2.base);
 	mutex_release(&o2.base.dep_map, _THIS_IP_);
 	o2.ctx = &t2;
 
-	mutex_lock(&o3.base);
+	ww_mutex_base_lock(&o3.base);
 	mutex_release(&o3.base.dep_map, _THIS_IP_);
 	o3.ctx = &t2;
 
@@ -1955,11 +2020,11 @@ static void ww_test_edeadlk_acquire_more_edeadlk_slow(void)
 {
 	int ret;
 
-	mutex_lock(&o2.base);
+	ww_mutex_base_lock(&o2.base);
 	mutex_release(&o2.base.dep_map, _THIS_IP_);
 	o2.ctx = &t2;
 
-	mutex_lock(&o3.base);
+	ww_mutex_base_lock(&o3.base);
 	mutex_release(&o3.base.dep_map, _THIS_IP_);
 	o3.ctx = &t2;
 
@@ -1980,7 +2045,7 @@ static void ww_test_edeadlk_acquire_wrong(void)
 {
 	int ret;
 
-	mutex_lock(&o2.base);
+	ww_mutex_base_lock(&o2.base);
 	mutex_release(&o2.base.dep_map, _THIS_IP_);
 	o2.ctx = &t2;
 
@@ -2005,7 +2070,7 @@ static void ww_test_edeadlk_acquire_wrong_slow(void)
 {
 	int ret;
 
-	mutex_lock(&o2.base);
+	ww_mutex_base_lock(&o2.base);
 	mutex_release(&o2.base.dep_map, _THIS_IP_);
 	o2.ctx = &t2;
 
@@ -2646,8 +2711,8 @@ static void wait_context_tests(void)
 
 static void local_lock_2(void)
 {
-	local_lock_acquire(&local_A);	/* IRQ-ON */
-	local_lock_release(&local_A);
+	local_lock(&local_A);	/* IRQ-ON */
+	local_unlock(&local_A);
 
 	HARDIRQ_ENTER();
 	spin_lock(&lock_A);	/* IN-IRQ */
@@ -2656,18 +2721,18 @@ static void local_lock_2(void)
 
 	HARDIRQ_DISABLE();
 	spin_lock(&lock_A);
-	local_lock_acquire(&local_A);	/* IN-IRQ <-> IRQ-ON cycle, false */
-	local_lock_release(&local_A);
+	local_lock(&local_A);	/* IN-IRQ <-> IRQ-ON cycle, false */
+	local_unlock(&local_A);
 	spin_unlock(&lock_A);
 	HARDIRQ_ENABLE();
 }
 
 static void local_lock_3A(void)
 {
-	local_lock_acquire(&local_A);	/* IRQ-ON */
+	local_lock(&local_A);	/* IRQ-ON */
 	spin_lock(&lock_B);		/* IRQ-ON */
 	spin_unlock(&lock_B);
-	local_lock_release(&local_A);
+	local_unlock(&local_A);
 
 	HARDIRQ_ENTER();
 	spin_lock(&lock_A);	/* IN-IRQ */
@@ -2676,18 +2741,18 @@ static void local_lock_3A(void)
 
 	HARDIRQ_DISABLE();
 	spin_lock(&lock_A);
-	local_lock_acquire(&local_A);	/* IN-IRQ <-> IRQ-ON cycle only if we count local_lock(), false */
-	local_lock_release(&local_A);
+	local_lock(&local_A);	/* IN-IRQ <-> IRQ-ON cycle only if we count local_lock(), false */
+	local_unlock(&local_A);
 	spin_unlock(&lock_A);
 	HARDIRQ_ENABLE();
 }
 
 static void local_lock_3B(void)
 {
-	local_lock_acquire(&local_A);	/* IRQ-ON */
+	local_lock(&local_A);	/* IRQ-ON */
 	spin_lock(&lock_B);		/* IRQ-ON */
 	spin_unlock(&lock_B);
-	local_lock_release(&local_A);
+	local_unlock(&local_A);
 
 	HARDIRQ_ENTER();
 	spin_lock(&lock_A);	/* IN-IRQ */
@@ -2696,8 +2761,8 @@ static void local_lock_3B(void)
 
 	HARDIRQ_DISABLE();
 	spin_lock(&lock_A);
-	local_lock_acquire(&local_A);	/* IN-IRQ <-> IRQ-ON cycle only if we count local_lock(), false */
-	local_lock_release(&local_A);
+	local_lock(&local_A);	/* IN-IRQ <-> IRQ-ON cycle only if we count local_lock(), false */
+	local_unlock(&local_A);
 	spin_unlock(&lock_A);
 
 	HARDIRQ_ENABLE();
@@ -2812,7 +2877,7 @@ void locking_selftest(void)
 	printk("------------------------\n");
 	printk("| Locking API testsuite:\n");
 	printk("----------------------------------------------------------------------------\n");
-	printk("                                 | spin |wlock |rlock |mutex | wsem | rsem |\n");
+	printk("                                 | spin |wlock |rlock |mutex | wsem | rsem |rtmutex\n");
 	printk("  --------------------------------------------------------------------------\n");
 
 	init_shared_classes();
@@ -2885,12 +2950,11 @@ void locking_selftest(void)
 	DO_TESTCASE_6x1RR("rlock W1R2/R2R3/W3W1", W1R2_R2R3_W3W1);
 
 	printk("  --------------------------------------------------------------------------\n");
-
 	/*
 	 * irq-context testcases:
 	 */
 	DO_TESTCASE_2x6("irqs-on + irq-safe-A", irqsafe1);
-	DO_TESTCASE_2x3("sirq-safe-A => hirqs-on", irqsafe2A);
+	NON_RT(DO_TESTCASE_2x3("sirq-safe-A => hirqs-on", irqsafe2A));
 	DO_TESTCASE_2x6("safe-A + irqs-on", irqsafe2B);
 	DO_TESTCASE_6x6("safe-A + unsafe-B #1", irqsafe3);
 	DO_TESTCASE_6x6("safe-A + unsafe-B #2", irqsafe4);
diff --git a/lib/logic_iomem.c b/lib/logic_iomem.c
index 9bdfde0c0f86..8c3365f26e51 100644
--- a/lib/logic_iomem.c
+++ b/lib/logic_iomem.c
@@ -21,15 +21,15 @@ struct logic_iomem_area {
 
 #define AREA_SHIFT	24
 #define MAX_AREA_SIZE	(1 << AREA_SHIFT)
-#define MAX_AREAS	((1ULL<<32) / MAX_AREA_SIZE)
+#define MAX_AREAS	((1U << 31) / MAX_AREA_SIZE)
 #define AREA_BITS	((MAX_AREAS - 1) << AREA_SHIFT)
 #define AREA_MASK	(MAX_AREA_SIZE - 1)
 #ifdef CONFIG_64BIT
 #define IOREMAP_BIAS	0xDEAD000000000000UL
 #define IOREMAP_MASK	0xFFFFFFFF00000000UL
 #else
-#define IOREMAP_BIAS	0
-#define IOREMAP_MASK	0
+#define IOREMAP_BIAS	0x80000000UL
+#define IOREMAP_MASK	0x80000000UL
 #endif
 
 static DEFINE_MUTEX(regions_mtx);
@@ -76,10 +76,10 @@ static void __iomem *real_ioremap(phys_addr_t offset, size_t size)
 	return NULL;
 }
 
-static void real_iounmap(void __iomem *addr)
+static void real_iounmap(volatile void __iomem *addr)
 {
 	WARN(1, "invalid iounmap for addr 0x%llx\n",
-	     (unsigned long long __force)addr);
+	     (unsigned long long)(uintptr_t __force)addr);
 }
 #endif /* CONFIG_LOGIC_IOMEM_FALLBACK */
 
@@ -149,7 +149,7 @@ get_area(const volatile void __iomem *addr)
 	return NULL;
 }
 
-void iounmap(void __iomem *addr)
+void iounmap(volatile void __iomem *addr)
 {
 	struct logic_iomem_area *area = get_area(addr);
 
@@ -173,7 +173,7 @@ EXPORT_SYMBOL(iounmap);
 static u##sz real_raw_read ## op(const volatile void __iomem *addr)	\
 {									\
 	WARN(1, "Invalid read" #op " at address %llx\n",		\
-	     (unsigned long long __force)addr);				\
+	     (unsigned long long)(uintptr_t __force)addr);		\
 	return (u ## sz)~0ULL;						\
 }									\
 									\
@@ -181,7 +181,8 @@ static void real_raw_write ## op(u ## sz val,			\
 				 volatile void __iomem *addr)		\
 {									\
 	WARN(1, "Invalid writeq" #op " of 0x%llx at address %llx\n",	\
-	     (unsigned long long)val, (unsigned long long __force)addr);\
+	     (unsigned long long)val,					\
+	     (unsigned long long)(uintptr_t __force)addr);\
 }									\
 
 MAKE_FALLBACK(b, 8);
@@ -194,14 +195,14 @@ MAKE_FALLBACK(q, 64);
 static void real_memset_io(volatile void __iomem *addr, int value, size_t size)
 {
 	WARN(1, "Invalid memset_io at address 0x%llx\n",
-	     (unsigned long long __force)addr);
+	     (unsigned long long)(uintptr_t __force)addr);
 }
 
 static void real_memcpy_fromio(void *buffer, const volatile void __iomem *addr,
 			       size_t size)
 {
 	WARN(1, "Invalid memcpy_fromio at address 0x%llx\n",
-	     (unsigned long long __force)addr);
+	     (unsigned long long)(uintptr_t __force)addr);
 
 	memset(buffer, 0xff, size);
 }
@@ -210,7 +211,7 @@ static void real_memcpy_toio(volatile void __iomem *addr, const void *buffer,
 			     size_t size)
 {
 	WARN(1, "Invalid memcpy_toio at address 0x%llx\n",
-	     (unsigned long long __force)addr);
+	     (unsigned long long)(uintptr_t __force)addr);
 }
 #endif /* CONFIG_LOGIC_IOMEM_FALLBACK */
 
diff --git a/lib/mpi/mpi-mod.c b/lib/mpi/mpi-mod.c
index 47bc59edd4ff..54fcc01564d9 100644
--- a/lib/mpi/mpi-mod.c
+++ b/lib/mpi/mpi-mod.c
@@ -40,6 +40,8 @@ mpi_barrett_t mpi_barrett_init(MPI m, int copy)
 	mpi_normalize(m);
 	ctx = kcalloc(1, sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return NULL;
 
 	if (copy) {
 		ctx->m = mpi_copy(m);
diff --git a/lib/objagg.c b/lib/objagg.c
index 5e1676ccdadd..1e248629ed64 100644
--- a/lib/objagg.c
+++ b/lib/objagg.c
@@ -781,7 +781,6 @@ static struct objagg_tmp_graph *objagg_tmp_graph_create(struct objagg *objagg)
 	struct objagg_tmp_node *node;
 	struct objagg_tmp_node *pnode;
 	struct objagg_obj *objagg_obj;
-	size_t alloc_size;
 	int i, j;
 
 	graph = kzalloc(sizeof(*graph), GFP_KERNEL);
@@ -793,9 +792,7 @@ static struct objagg_tmp_graph *objagg_tmp_graph_create(struct objagg *objagg)
 		goto err_nodes_alloc;
 	graph->nodes_count = nodes_count;
 
-	alloc_size = BITS_TO_LONGS(nodes_count * nodes_count) *
-		     sizeof(unsigned long);
-	graph->edges = kzalloc(alloc_size, GFP_KERNEL);
+	graph->edges = bitmap_zalloc(nodes_count * nodes_count, GFP_KERNEL);
 	if (!graph->edges)
 		goto err_edges_alloc;
 
@@ -833,7 +830,7 @@ err_nodes_alloc:
 
 static void objagg_tmp_graph_destroy(struct objagg_tmp_graph *graph)
 {
-	kfree(graph->edges);
+	bitmap_free(graph->edges);
 	kfree(graph->nodes);
 	kfree(graph);
 }
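[The objagg conversion above swaps a hand-computed kzalloc() for the dedicated bitmap allocator. bitmap_zalloc() takes a count of bits rather than bytes and must be paired with bitmap_free(); a small sketch of the idiom with a hypothetical function name:]

	static int example_edges_alloc(unsigned int nodes_count,
				       unsigned long **edgesp)
	{
		/* bitmap_zalloc() does the BITS_TO_LONGS() size arithmetic
		 * from the removed lines internally.
		 */
		unsigned long *edges = bitmap_zalloc(nodes_count * nodes_count,
						     GFP_KERNEL);

		if (!edges)
			return -ENOMEM;
		*edgesp = edges;	/* release later with bitmap_free() */
		return 0;
	}
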
diff --git a/lib/raid6/algos.c b/lib/raid6/algos.c
index 6d5e5000fdd7..39b74221f4a7 100644
--- a/lib/raid6/algos.c
+++ b/lib/raid6/algos.c
@@ -145,13 +145,13 @@ static inline const struct raid6_recov_calls *raid6_choose_recov(void)
 static inline const struct raid6_calls *raid6_choose_gen(
 	void *(*const dptrs)[RAID6_TEST_DISKS], const int disks)
 {
-	unsigned long perf, bestgenperf, bestxorperf, j0, j1;
+	unsigned long perf, bestgenperf, j0, j1;
 	int start = (disks>>1)-1, stop = disks-3;	/* work on the second half of the disks */
 	const struct raid6_calls *const *algo;
 	const struct raid6_calls *best;
 
-	for (bestgenperf = 0, bestxorperf = 0, best = NULL, algo = raid6_algos; *algo; algo++) {
-		if (!best || (*algo)->prefer >= best->prefer) {
+	for (bestgenperf = 0, best = NULL, algo = raid6_algos; *algo; algo++) {
+		if (!best || (*algo)->priority >= best->priority) {
 			if ((*algo)->valid && !(*algo)->valid())
 				continue;
 
@@ -180,50 +180,48 @@ static inline const struct raid6_calls *raid6_choose_gen(
 			pr_info("raid6: %-8s gen() %5ld MB/s\n", (*algo)->name,
 				(perf * HZ * (disks-2)) >>
 				(20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2));
+		}
+	}
 
-			if (!(*algo)->xor_syndrome)
-				continue;
+	if (!best) {
+		pr_err("raid6: Yikes! No algorithm found!\n");
+		goto out;
+	}
 
-			perf = 0;
+	raid6_call = *best;
 
-			preempt_disable();
-			j0 = jiffies;
-			while ((j1 = jiffies) == j0)
-				cpu_relax();
-			while (time_before(jiffies,
-					    j1 + (1<<RAID6_TIME_JIFFIES_LG2))) {
-				(*algo)->xor_syndrome(disks, start, stop,
-						      PAGE_SIZE, *dptrs);
-				perf++;
-			}
-			preempt_enable();
-
-			if (best == *algo)
-				bestxorperf = perf;
+	if (!IS_ENABLED(CONFIG_RAID6_PQ_BENCHMARK)) {
+		pr_info("raid6: skipped pq benchmark and selected %s\n",
+			best->name);
+		goto out;
+	}
 
-			pr_info("raid6: %-8s xor() %5ld MB/s\n", (*algo)->name,
-				(perf * HZ * (disks-2)) >>
-				(20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2 + 1));
+	pr_info("raid6: using algorithm %s gen() %ld MB/s\n",
+		best->name,
+		(bestgenperf * HZ * (disks - 2)) >>
+		(20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2));
+
+	if (best->xor_syndrome) {
+		perf = 0;
+
+		preempt_disable();
+		j0 = jiffies;
+		while ((j1 = jiffies) == j0)
+			cpu_relax();
+		while (time_before(jiffies,
+				   j1 + (1 << RAID6_TIME_JIFFIES_LG2))) {
+			best->xor_syndrome(disks, start, stop,
+					   PAGE_SIZE, *dptrs);
+			perf++;
 		}
-	}
+		preempt_enable();
 
-	if (best) {
-		if (IS_ENABLED(CONFIG_RAID6_PQ_BENCHMARK)) {
-			pr_info("raid6: using algorithm %s gen() %ld MB/s\n",
-				best->name,
-				(bestgenperf * HZ * (disks-2)) >>
-				(20 - PAGE_SHIFT+RAID6_TIME_JIFFIES_LG2));
-			if (best->xor_syndrome)
-				pr_info("raid6: .... xor() %ld MB/s, rmw enabled\n",
-					(bestxorperf * HZ * (disks-2)) >>
-					(20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2 + 1));
-		} else
-			pr_info("raid6: skip pq benchmark and using algorithm %s\n",
-				best->name);
-		raid6_call = *best;
-	} else
-		pr_err("raid6: Yikes! No algorithm found!\n");
+		pr_info("raid6: .... xor() %ld MB/s, rmw enabled\n",
+			(perf * HZ * (disks - 2)) >>
+			(20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2 + 1));
+	}
+
+out:
 	return best;
 }
 
diff --git a/lib/raid6/avx2.c b/lib/raid6/avx2.c
index f299476e1d76..059024234dce 100644
--- a/lib/raid6/avx2.c
+++ b/lib/raid6/avx2.c
@@ -132,7 +132,7 @@ const struct raid6_calls raid6_avx2x1 = {
 	raid6_avx21_xor_syndrome,
 	raid6_have_avx2,
 	"avx2x1",
-	1			/* Has cache hints */
+	.priority = 2		/* Prefer AVX2 over priority 1 (SSE2 and others) */
 };
 
 /*
@@ -262,7 +262,7 @@ const struct raid6_calls raid6_avx2x2 = {
 	raid6_avx22_xor_syndrome,
 	raid6_have_avx2,
 	"avx2x2",
-	1			/* Has cache hints */
+	.priority = 2		/* Prefer AVX2 over priority 1 (SSE2 and others) */
 };
 
 #ifdef CONFIG_X86_64
@@ -465,6 +465,6 @@ const struct raid6_calls raid6_avx2x4 = {
 	raid6_avx24_xor_syndrome,
 	raid6_have_avx2,
 	"avx2x4",
-	1			/* Has cache hints */
+	.priority = 2		/* Prefer AVX2 over priority 1 (SSE2 and others) */
 };
-#endif
+#endif /* CONFIG_X86_64 */
diff --git a/lib/raid6/avx512.c b/lib/raid6/avx512.c
index bb684d144ee2..9c3e822e1adf 100644
--- a/lib/raid6/avx512.c
+++ b/lib/raid6/avx512.c
@@ -162,7 +162,7 @@ const struct raid6_calls raid6_avx512x1 = {
 	raid6_avx5121_xor_syndrome,
 	raid6_have_avx512,
 	"avx512x1",
-	1			/* Has cache hints */
+	.priority = 2		/* Prefer AVX512 over priority 1 (SSE2 and others) */
 };
 
 /*
@@ -319,7 +319,7 @@ const struct raid6_calls raid6_avx512x2 = {
 	raid6_avx5122_xor_syndrome,
 	raid6_have_avx512,
 	"avx512x2",
-	1			/* Has cache hints */
+	.priority = 2		/* Prefer AVX512 over priority 1 (SSE2 and others) */
 };
 
 #ifdef CONFIG_X86_64
@@ -557,7 +557,7 @@ const struct raid6_calls raid6_avx512x4 = {
 	raid6_avx5124_xor_syndrome,
 	raid6_have_avx512,
 	"avx512x4",
-	1			/* Has cache hints */
+	.priority = 2		/* Prefer AVX512 over priority 1 (SSE2 and others) */
 };
 #endif
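[The hunks above replace the old positional "prefer" flag with a designated .priority initializer, which the reworked raid6_choose_gen() loop compares to pick wider SIMD first. For orientation, an illustrative table entry; fields follow struct raid6_calls, the names are hypothetical:]

	static void example_gen_syndrome(int disks, size_t bytes, void **ptrs);
	static void example_xor_syndrome(int disks, int start, int stop,
					 size_t bytes, void **ptrs);
	static int example_valid(void);

	const struct raid6_calls raid6_example = {
		.gen_syndrome	= example_gen_syndrome,
		.xor_syndrome	= example_xor_syndrome,	/* optional, enables rmw */
		.valid		= example_valid,	/* runtime CPU-feature check */
		.name		= "example",
		.priority	= 2,	/* beats priority-1 (e.g. SSE2) candidates */
	};
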
diff --git a/lib/ref_tracker.c b/lib/ref_tracker.c
new file mode 100644
index 000000000000..0ae2e66dcf0f
--- /dev/null
+++ b/lib/ref_tracker.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/export.h>
+#include <linux/ref_tracker.h>
+#include <linux/slab.h>
+#include <linux/stacktrace.h>
+#include <linux/stackdepot.h>
+
+#define REF_TRACKER_STACK_ENTRIES 16
+
+struct ref_tracker {
+	struct list_head	head;   /* anchor into dir->list or dir->quarantine */
+	bool			dead;
+	depot_stack_handle_t	alloc_stack_handle;
+	depot_stack_handle_t	free_stack_handle;
+};
+
+void ref_tracker_dir_exit(struct ref_tracker_dir *dir)
+{
+	struct ref_tracker *tracker, *n;
+	unsigned long flags;
+	bool leak = false;
+
+	spin_lock_irqsave(&dir->lock, flags);
+	list_for_each_entry_safe(tracker, n, &dir->quarantine, head) {
+		list_del(&tracker->head);
+		kfree(tracker);
+		dir->quarantine_avail++;
+	}
+	list_for_each_entry_safe(tracker, n, &dir->list, head) {
+		pr_err("leaked reference.\n");
+		if (tracker->alloc_stack_handle)
+			stack_depot_print(tracker->alloc_stack_handle);
+		leak = true;
+		list_del(&tracker->head);
+		kfree(tracker);
+	}
+	spin_unlock_irqrestore(&dir->lock, flags);
+	WARN_ON_ONCE(leak);
+	WARN_ON_ONCE(refcount_read(&dir->untracked) != 1);
+}
+EXPORT_SYMBOL(ref_tracker_dir_exit);
+
+void ref_tracker_dir_print(struct ref_tracker_dir *dir,
+			   unsigned int display_limit)
+{
+	struct ref_tracker *tracker;
+	unsigned long flags;
+	unsigned int i = 0;
+
+	spin_lock_irqsave(&dir->lock, flags);
+	list_for_each_entry(tracker, &dir->list, head) {
+		if (i < display_limit) {
+			pr_err("leaked reference.\n");
+			if (tracker->alloc_stack_handle)
+				stack_depot_print(tracker->alloc_stack_handle);
+			i++;
+		} else {
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&dir->lock, flags);
+}
+EXPORT_SYMBOL(ref_tracker_dir_print);
+
+int ref_tracker_alloc(struct ref_tracker_dir *dir,
+		      struct ref_tracker **trackerp,
+		      gfp_t gfp)
+{
+	unsigned long entries[REF_TRACKER_STACK_ENTRIES];
+	struct ref_tracker *tracker;
+	unsigned int nr_entries;
+	unsigned long flags;
+
+	*trackerp = tracker = kzalloc(sizeof(*tracker), gfp | __GFP_NOFAIL);
+	if (unlikely(!tracker)) {
+		pr_err_once("memory allocation failure, unreliable refcount tracker.\n");
+		refcount_inc(&dir->untracked);
+		return -ENOMEM;
+	}
+	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
+	nr_entries = filter_irq_stacks(entries, nr_entries);
+	tracker->alloc_stack_handle = stack_depot_save(entries, nr_entries, gfp);
+
+	spin_lock_irqsave(&dir->lock, flags);
+	list_add(&tracker->head, &dir->list);
+	spin_unlock_irqrestore(&dir->lock, flags);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ref_tracker_alloc);
+
+int ref_tracker_free(struct ref_tracker_dir *dir,
+		     struct ref_tracker **trackerp)
+{
+	unsigned long entries[REF_TRACKER_STACK_ENTRIES];
+	struct ref_tracker *tracker = *trackerp;
+	depot_stack_handle_t stack_handle;
+	unsigned int nr_entries;
+	unsigned long flags;
+
+	if (!tracker) {
+		refcount_dec(&dir->untracked);
+		return -EEXIST;
+	}
+	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
+	nr_entries = filter_irq_stacks(entries, nr_entries);
+	stack_handle = stack_depot_save(entries, nr_entries, GFP_ATOMIC);
+
+	spin_lock_irqsave(&dir->lock, flags);
+	if (tracker->dead) {
+		pr_err("reference already released.\n");
+		if (tracker->alloc_stack_handle) {
+			pr_err("allocated in:\n");
+			stack_depot_print(tracker->alloc_stack_handle);
+		}
+		if (tracker->free_stack_handle) {
+			pr_err("freed in:\n");
+			stack_depot_print(tracker->free_stack_handle);
+		}
+		spin_unlock_irqrestore(&dir->lock, flags);
+		WARN_ON_ONCE(1);
+		return -EINVAL;
+	}
+	tracker->dead = true;
+
+	tracker->free_stack_handle = stack_handle;
+
+	list_move_tail(&tracker->head, &dir->quarantine);
+	if (!dir->quarantine_avail) {
+		tracker = list_first_entry(&dir->quarantine, struct ref_tracker, head);
+		list_del(&tracker->head);
+	} else {
+		dir->quarantine_avail--;
+		tracker = NULL;
+	}
+	spin_unlock_irqrestore(&dir->lock, flags);
+
+	kfree(tracker);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ref_tracker_free);
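[For orientation, typical use of the new API pairs one ref_tracker_dir per tracked object with one struct ref_tracker per outstanding reference. A minimal sketch; the struct foo embedding is hypothetical, the API calls are the ones introduced above:]

	#include <linux/ref_tracker.h>

	struct foo {
		struct ref_tracker_dir dir;
	};

	static int foo_hold(struct foo *f, struct ref_tracker **trk)
	{
		/* records the caller's stack in stackdepot */
		return ref_tracker_alloc(&f->dir, trk, GFP_KERNEL);
	}

	static int foo_release(struct foo *f, struct ref_tracker **trk)
	{
		/* freeing the same tracker twice triggers the WARN above */
		return ref_tracker_free(&f->dir, trk);
	}

	/* setup/teardown:
	 *   ref_tracker_dir_init(&f->dir, 16);  - quarantine up to 16 freed trackers
	 *   ref_tracker_dir_exit(&f->dir);      - warns and dumps stacks on leaks
	 */
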
+ */ +char **kasprintf_strarray(gfp_t gfp, const char *prefix, size_t n) +{ + char **names; + size_t i; + + names = kcalloc(n + 1, sizeof(char *), gfp); + if (!names) + return NULL; + + for (i = 0; i < n; i++) { + names[i] = kasprintf(gfp, "%s-%zu", prefix, i); + if (!names[i]) { + kfree_strarray(names, i); + return NULL; + } + } + + return names; +} +EXPORT_SYMBOL_GPL(kasprintf_strarray); + +/** * kfree_strarray - free a number of dynamically allocated strings contained * in an array and the array itself * @@ -697,6 +731,36 @@ void kfree_strarray(char **array, size_t n) } EXPORT_SYMBOL_GPL(kfree_strarray); +struct strarray { + char **array; + size_t n; +}; + +static void devm_kfree_strarray(struct device *dev, void *res) +{ + struct strarray *array = res; + + kfree_strarray(array->array, array->n); +} + +char **devm_kasprintf_strarray(struct device *dev, const char *prefix, size_t n) +{ + struct strarray *ptr; + + ptr = devres_alloc(devm_kfree_strarray, sizeof(*ptr), GFP_KERNEL); + if (!ptr) + return ERR_PTR(-ENOMEM); + + ptr->array = kasprintf_strarray(GFP_KERNEL, prefix, n); + if (!ptr->array) { + devres_free(ptr); + return ERR_PTR(-ENOMEM); + } + + return ptr->array; +} +EXPORT_SYMBOL_GPL(devm_kasprintf_strarray); + /** * strscpy_pad() - Copy a C-string into a sized buffer * @dest: Where to copy the string to diff --git a/lib/test_bpf.c b/lib/test_bpf.c index adae39567264..0c5cb2d6436a 100644 --- a/lib/test_bpf.c +++ b/lib/test_bpf.c @@ -14683,7 +14683,7 @@ static struct tail_call_test tail_call_tests[] = { BPF_EXIT_INSN(), }, .flags = FLAG_NEED_STATE | FLAG_RESULT_IN_STATE, - .result = (MAX_TAIL_CALL_CNT + 1 + 1) * MAX_TESTRUNS, + .result = (MAX_TAIL_CALL_CNT + 1) * MAX_TESTRUNS, }, { "Tail call count preserved across function calls", @@ -14705,7 +14705,7 @@ static struct tail_call_test tail_call_tests[] = { }, .stack_depth = 8, .flags = FLAG_NEED_STATE | FLAG_RESULT_IN_STATE, - .result = (MAX_TAIL_CALL_CNT + 1 + 1) * MAX_TESTRUNS, + .result = (MAX_TAIL_CALL_CNT + 1) * MAX_TESTRUNS, }, { "Tail call error path, NULL target", diff --git a/lib/test_ref_tracker.c b/lib/test_ref_tracker.c new file mode 100644 index 000000000000..19d7dec70cc6 --- /dev/null +++ b/lib/test_ref_tracker.c @@ -0,0 +1,115 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Referrence tracker self test. 
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index adae39567264..0c5cb2d6436a 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -14683,7 +14683,7 @@ static struct tail_call_test tail_call_tests[] = {
 			BPF_EXIT_INSN(),
 		},
 		.flags = FLAG_NEED_STATE | FLAG_RESULT_IN_STATE,
-		.result = (MAX_TAIL_CALL_CNT + 1 + 1) * MAX_TESTRUNS,
+		.result = (MAX_TAIL_CALL_CNT + 1) * MAX_TESTRUNS,
 	},
 	{
 		"Tail call count preserved across function calls",
@@ -14705,7 +14705,7 @@ static struct tail_call_test tail_call_tests[] = {
 		},
 		.stack_depth = 8,
 		.flags = FLAG_NEED_STATE | FLAG_RESULT_IN_STATE,
-		.result = (MAX_TAIL_CALL_CNT + 1 + 1) * MAX_TESTRUNS,
+		.result = (MAX_TAIL_CALL_CNT + 1) * MAX_TESTRUNS,
 	},
 	{
 		"Tail call error path, NULL target",
diff --git a/lib/test_ref_tracker.c b/lib/test_ref_tracker.c
new file mode 100644
index 000000000000..19d7dec70cc6
--- /dev/null
+++ b/lib/test_ref_tracker.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Reference tracker self test.
+ *
+ * Copyright (c) 2021 Eric Dumazet <edumazet@google.com>
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/ref_tracker.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+
+static struct ref_tracker_dir ref_dir;
+static struct ref_tracker *tracker[20];
+
+#define TRT_ALLOC(X) static noinline void				\
+	alloctest_ref_tracker_alloc##X(struct ref_tracker_dir *dir,	\
+				       struct ref_tracker **trackerp)	\
+	{								\
+		ref_tracker_alloc(dir, trackerp, GFP_KERNEL);		\
+	}
+
+TRT_ALLOC(1)
+TRT_ALLOC(2)
+TRT_ALLOC(3)
+TRT_ALLOC(4)
+TRT_ALLOC(5)
+TRT_ALLOC(6)
+TRT_ALLOC(7)
+TRT_ALLOC(8)
+TRT_ALLOC(9)
+TRT_ALLOC(10)
+TRT_ALLOC(11)
+TRT_ALLOC(12)
+TRT_ALLOC(13)
+TRT_ALLOC(14)
+TRT_ALLOC(15)
+TRT_ALLOC(16)
+TRT_ALLOC(17)
+TRT_ALLOC(18)
+TRT_ALLOC(19)
+
+#undef TRT_ALLOC
+
+static noinline void
+alloctest_ref_tracker_free(struct ref_tracker_dir *dir,
+			   struct ref_tracker **trackerp)
+{
+	ref_tracker_free(dir, trackerp);
+}
+
+
+static struct timer_list test_ref_tracker_timer;
+static atomic_t test_ref_timer_done = ATOMIC_INIT(0);
+
+static void test_ref_tracker_timer_func(struct timer_list *t)
+{
+	ref_tracker_alloc(&ref_dir, &tracker[0], GFP_ATOMIC);
+	atomic_set(&test_ref_timer_done, 1);
+}
+
+static int __init test_ref_tracker_init(void)
+{
+	int i;
+
+	ref_tracker_dir_init(&ref_dir, 100);
+
+	timer_setup(&test_ref_tracker_timer, test_ref_tracker_timer_func, 0);
+	mod_timer(&test_ref_tracker_timer, jiffies + 1);
+
+	alloctest_ref_tracker_alloc1(&ref_dir, &tracker[1]);
+	alloctest_ref_tracker_alloc2(&ref_dir, &tracker[2]);
+	alloctest_ref_tracker_alloc3(&ref_dir, &tracker[3]);
+	alloctest_ref_tracker_alloc4(&ref_dir, &tracker[4]);
+	alloctest_ref_tracker_alloc5(&ref_dir, &tracker[5]);
+	alloctest_ref_tracker_alloc6(&ref_dir, &tracker[6]);
+	alloctest_ref_tracker_alloc7(&ref_dir, &tracker[7]);
+	alloctest_ref_tracker_alloc8(&ref_dir, &tracker[8]);
+	alloctest_ref_tracker_alloc9(&ref_dir, &tracker[9]);
+	alloctest_ref_tracker_alloc10(&ref_dir, &tracker[10]);
+	alloctest_ref_tracker_alloc11(&ref_dir, &tracker[11]);
+	alloctest_ref_tracker_alloc12(&ref_dir, &tracker[12]);
+	alloctest_ref_tracker_alloc13(&ref_dir, &tracker[13]);
+	alloctest_ref_tracker_alloc14(&ref_dir, &tracker[14]);
+	alloctest_ref_tracker_alloc15(&ref_dir, &tracker[15]);
+	alloctest_ref_tracker_alloc16(&ref_dir, &tracker[16]);
+	alloctest_ref_tracker_alloc17(&ref_dir, &tracker[17]);
+	alloctest_ref_tracker_alloc18(&ref_dir, &tracker[18]);
+	alloctest_ref_tracker_alloc19(&ref_dir, &tracker[19]);
+
+	/* free all trackers but first 0 and 1. */
+	for (i = 2; i < ARRAY_SIZE(tracker); i++)
+		alloctest_ref_tracker_free(&ref_dir, &tracker[i]);
+
+	/* Attempt to free an already freed tracker. */
+	alloctest_ref_tracker_free(&ref_dir, &tracker[2]);
+
+	while (!atomic_read(&test_ref_timer_done))
+		msleep(1);
+	/* This should warn about tracker[0] & tracker[1] not being freed. */
+	ref_tracker_dir_exit(&ref_dir);
+
+	return 0;
+}
+
+static void __exit test_ref_tracker_exit(void)
+{
+}
+
+module_init(test_ref_tracker_init);
+module_exit(test_ref_tracker_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 58d5e567f836..53d6081f9e8b 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -3564,7 +3564,7 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
 			++fmt;
 
 			for ( ; *fmt && *fmt != ']'; ++fmt, ++len)
-				set_bit((u8)*fmt, set);
+				__set_bit((u8)*fmt, set);
 
 			/* no ']' or no character set found */
 			if (!*fmt || !len)
@@ -3574,7 +3574,7 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
 			if (negate) {
 				bitmap_complement(set, set, 256);
 				/* exclude null '\0' byte */
-				clear_bit(0, set);
+				__clear_bit(0, set);
 			}
 
 			/* match must be non-empty */
diff --git a/lib/xarray.c b/lib/xarray.c
index f5d8f54907b4..6f47f6375808 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -157,7 +157,7 @@ static void xas_move_index(struct xa_state *xas, unsigned long offset)
 	xas->xa_index += offset << shift;
 }
 
-static void xas_advance(struct xa_state *xas)
+static void xas_next_offset(struct xa_state *xas)
 {
 	xas->xa_offset++;
 	xas_move_index(xas, xas->xa_offset);
@@ -1250,7 +1250,7 @@ void *xas_find(struct xa_state *xas, unsigned long max)
 		xas->xa_offset = ((xas->xa_index - 1) & XA_CHUNK_MASK) + 1;
 	}
 
-	xas_advance(xas);
+	xas_next_offset(xas);
 
 	while (xas->xa_node && (xas->xa_index <= max)) {
 		if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
@@ -1268,7 +1268,7 @@ void *xas_find(struct xa_state *xas, unsigned long max)
 		if (entry && !xa_is_sibling(entry))
 			return entry;
 
-		xas_advance(xas);
+		xas_next_offset(xas);
 	}
 
 	if (!xas->xa_node)