Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig                                      |  20
-rw-r--r--  lib/Kconfig.debug                                |  82
-rw-r--r--  lib/Makefile                                     |   4
-rw-r--r--  lib/bitmap.c                                     |  68
-rw-r--r--  lib/bust_spinlocks.c                             |   3
-rw-r--r--  lib/cpumask.c                                    |  40
-rw-r--r--  lib/cpumask_kunit.c                              |  19
-rw-r--r--  lib/crypto/Kconfig                               |   7
-rw-r--r--  lib/crypto/Makefile                              |   3
-rw-r--r--  lib/crypto/memneq.c (renamed from lib/memneq.c)  |   7
-rw-r--r--  lib/crypto/utils.c                               |  88
-rw-r--r--  lib/devres.c                                     |  15
-rw-r--r--  lib/dynamic_debug.c                              | 450
-rw-r--r--  lib/find_bit.c                                   | 233
-rw-r--r--  lib/find_bit_benchmark.c                         |  18
-rw-r--r--  lib/flex_proportions.c                           |   2
-rw-r--r--  lib/fortify_kunit.c                              |  76
-rw-r--r--  lib/is_signed_type_kunit.c                       |  53
-rw-r--r--  lib/kunit/Kconfig                                |  11
-rw-r--r--  lib/kunit/executor.c                             |   4
-rw-r--r--  lib/kunit/test.c                                 |  24
-rw-r--r--  lib/memcpy_kunit.c                               |  59
-rw-r--r--  lib/nlattr.c                                     |  31
-rw-r--r--  lib/once.c                                       |  30
-rw-r--r--  lib/overflow_kunit.c                             | 179
-rw-r--r--  lib/sbitmap.c                                    | 109
-rw-r--r--  lib/sg_pool.c                                    |  16
-rw-r--r--  lib/stackinit_kunit.c                            |   2
-rw-r--r--  lib/string_helpers.c                             |  44
-rw-r--r--  lib/test_bitmap.c                                | 291
-rw-r--r--  lib/test_dynamic_debug.c                         | 165
-rw-r--r--  lib/vsprintf.c                                   |  64
-rw-r--r--  lib/zstd/Makefile                                |  16
-rw-r--r--  lib/zstd/common/entropy_common.c                 |   5
-rw-r--r--  lib/zstd/common/zstd_common.c                    |  10
35 files changed, 1852 insertions(+), 396 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index dc1ab2ed1dc6..9bbf8a4b2108 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -127,9 +127,6 @@ config TRACE_MMIO_ACCESS
source "lib/crypto/Kconfig"
-config LIB_MEMNEQ
- bool
-
config CRC_CCITT
tristate "CRC-CCITT functions"
help
@@ -343,12 +340,16 @@ config LZ4HC_COMPRESS
config LZ4_DECOMPRESS
tristate
-config ZSTD_COMPRESS
+config ZSTD_COMMON
select XXHASH
tristate
+config ZSTD_COMPRESS
+ select ZSTD_COMMON
+ tristate
+
config ZSTD_DECOMPRESS
- select XXHASH
+ select ZSTD_COMMON
tristate
source "lib/xz/Kconfig"
@@ -527,6 +528,15 @@ config CPUMASK_OFFSTACK
them on the stack. This is a bit more expensive, but avoids
stack overflow.
+config FORCE_NR_CPUS
+ bool "NR_CPUS is set to an actual number of CPUs"
+ depends on SMP
+ help
+ Say Yes if you have NR_CPUS set to an actual number of possible
+ CPUs in your system, not to a default value. This forces the core
+ code to rely on the compile-time value and optimize kernel routines
+ better.
+
config CPU_RMAP
bool
depends on SMP
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index bcbe60d6c80c..3761118d1879 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -264,8 +264,10 @@ config DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT
config DEBUG_INFO_DWARF4
bool "Generate DWARF Version 4 debuginfo"
select DEBUG_INFO
+ depends on !CC_IS_CLANG || (CC_IS_CLANG && (AS_IS_LLVM || (AS_IS_GNU && AS_VERSION >= 23502)))
help
- Generate DWARF v4 debug info. This requires gcc 4.5+ and gdb 7.0+.
+ Generate DWARF v4 debug info. This requires gcc 4.5+, binutils 2.35.2
+ if using clang without clang's integrated assembler, and gdb 7.0+.
If you have consumers of DWARF debug info that are not ready for
newer revisions of DWARF, you may wish to choose this or have your
@@ -803,6 +805,9 @@ config ARCH_HAS_DEBUG_VM_PGTABLE
An architecture should select this when it can successfully
build and run DEBUG_VM_PGTABLE.
+config DEBUG_VM_IRQSOFF
+ def_bool DEBUG_VM && !PREEMPT_RT
+
config DEBUG_VM
bool "Debug VM"
depends on DEBUG_KERNEL
@@ -2509,6 +2514,18 @@ config MEMCPY_KUNIT_TEST
If unsure, say N.
+config IS_SIGNED_TYPE_KUNIT_TEST
+ tristate "Test is_signed_type() macro" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ Builds unit tests for the is_signed_type() macro.
+
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
config OVERFLOW_KUNIT_TEST
tristate "Test check_*_overflow() functions at runtime" if !KUNIT_ALL_TESTS
depends on KUNIT
@@ -2533,6 +2550,25 @@ config STACKINIT_KUNIT_TEST
CONFIG_GCC_PLUGIN_STRUCTLEAK, CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF,
or CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL.
+config FORTIFY_KUNIT_TEST
+ tristate "Test fortified str*() and mem*() function internals at runtime" if !KUNIT_ALL_TESTS
+ depends on KUNIT && FORTIFY_SOURCE
+ default KUNIT_ALL_TESTS
+ help
+ Builds unit tests for checking internals of FORTIFY_SOURCE as used
+ by the str*() and mem*() family of functions. For testing runtime
+ traps of FORTIFY_SOURCE, see LKDTM's "FORTIFY_*" tests.
+
+config HW_BREAKPOINT_KUNIT_TEST
+ bool "Test hw_breakpoint constraints accounting" if !KUNIT_ALL_TESTS
+ depends on HAVE_HW_BREAKPOINT
+ depends on KUNIT=y
+ default KUNIT_ALL_TESTS
+ help
+ Tests for hw_breakpoint constraints accounting.
+
+ If unsure, say N.
+
config TEST_UDELAY
tristate "udelay test driver"
help
@@ -2549,6 +2585,16 @@ config TEST_STATIC_KEYS
If unsure, say N.
+config TEST_DYNAMIC_DEBUG
+ tristate "Test DYNAMIC_DEBUG"
+ depends on DYNAMIC_DEBUG
+ help
+ This module registers a tracer callback to count enabled
+ pr_debugs in a 'do_debugging' function, then alters their
+ enablements, calls the function, and compares counts.
+
+ If unsure, say N.
+
config TEST_KMOD
tristate "kmod stress tester"
depends on m
@@ -2708,6 +2754,40 @@ config HYPERV_TESTING
endmenu # "Kernel Testing and Coverage"
+menu "Rust hacking"
+
+config RUST_DEBUG_ASSERTIONS
+ bool "Debug assertions"
+ depends on RUST
+ help
+ Enables rustc's `-Cdebug-assertions` codegen option.
+
+ This flag lets you turn `cfg(debug_assertions)` conditional
+ compilation on or off. This can be used to enable extra debugging
+ code in development but not in production. For example, it controls
+ the behavior of the standard library's `debug_assert!` macro.
+
+ Note that this will apply to all Rust code, including `core`.
+
+ If unsure, say N.
+
+config RUST_OVERFLOW_CHECKS
+ bool "Overflow checks"
+ default y
+ depends on RUST
+ help
+ Enables rustc's `-Coverflow-checks` codegen option.
+
+ This flag allows you to control the behavior of runtime integer
+ overflow. When overflow-checks are enabled, a Rust panic will occur
+ on overflow.
+
+ Note that this will apply to all Rust code, including `core`.
+
+ If unsure, say Y.
+
+endmenu # "Rust hacking"
+
source "Documentation/Kconfig"
endmenu # Kernel hacking
diff --git a/lib/Makefile b/lib/Makefile
index ffabc30a27d4..ad570b7699ba 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -83,6 +83,7 @@ obj-$(CONFIG_TEST_SORT) += test_sort.o
obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o
obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_keys.o
obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o
+obj-$(CONFIG_TEST_DYNAMIC_DEBUG) += test_dynamic_debug.o
obj-$(CONFIG_TEST_PRINTF) += test_printf.o
obj-$(CONFIG_TEST_SCANF) += test_scanf.o
obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o
@@ -254,7 +255,6 @@ obj-$(CONFIG_DIMLIB) += dim/
obj-$(CONFIG_SIGNATURE) += digsig.o
lib-$(CONFIG_CLZ_TAB) += clz_tab.o
-lib-$(CONFIG_LIB_MEMNEQ) += memneq.o
obj-$(CONFIG_GENERIC_STRNCPY_FROM_USER) += strncpy_from_user.o
obj-$(CONFIG_GENERIC_STRNLEN_USER) += strnlen_user.o
@@ -377,9 +377,11 @@ obj-$(CONFIG_BITS_TEST) += test_bits.o
obj-$(CONFIG_CMDLINE_KUNIT_TEST) += cmdline_kunit.o
obj-$(CONFIG_SLUB_KUNIT_TEST) += slub_kunit.o
obj-$(CONFIG_MEMCPY_KUNIT_TEST) += memcpy_kunit.o
+obj-$(CONFIG_IS_SIGNED_TYPE_KUNIT_TEST) += is_signed_type_kunit.o
obj-$(CONFIG_OVERFLOW_KUNIT_TEST) += overflow_kunit.o
CFLAGS_stackinit_kunit.o += $(call cc-disable-warning, switch-unreachable)
obj-$(CONFIG_STACKINIT_KUNIT_TEST) += stackinit_kunit.o
+obj-$(CONFIG_FORTIFY_KUNIT_TEST) += fortify_kunit.o
obj-$(CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED) += devmem_is_allowed.o
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 488e6c3e5acc..1c81413c51f8 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -333,20 +333,32 @@ bool __bitmap_subset(const unsigned long *bitmap1,
}
EXPORT_SYMBOL(__bitmap_subset);
+#define BITMAP_WEIGHT(FETCH, bits) \
+({ \
+ unsigned int __bits = (bits), idx, w = 0; \
+ \
+ for (idx = 0; idx < __bits / BITS_PER_LONG; idx++) \
+ w += hweight_long(FETCH); \
+ \
+ if (__bits % BITS_PER_LONG) \
+ w += hweight_long((FETCH) & BITMAP_LAST_WORD_MASK(__bits)); \
+ \
+ w; \
+})
+
unsigned int __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
{
- unsigned int k, lim = bits/BITS_PER_LONG, w = 0;
-
- for (k = 0; k < lim; k++)
- w += hweight_long(bitmap[k]);
-
- if (bits % BITS_PER_LONG)
- w += hweight_long(bitmap[k] & BITMAP_LAST_WORD_MASK(bits));
-
- return w;
+ return BITMAP_WEIGHT(bitmap[idx], bits);
}
EXPORT_SYMBOL(__bitmap_weight);
+unsigned int __bitmap_weight_and(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int bits)
+{
+ return BITMAP_WEIGHT(bitmap1[idx] & bitmap2[idx], bits);
+}
+EXPORT_SYMBOL(__bitmap_weight_and);
+
void __bitmap_set(unsigned long *map, unsigned int start, int len)
{
unsigned long *p = map + BIT_WORD(start);
@@ -953,37 +965,7 @@ static int bitmap_pos_to_ord(const unsigned long *buf, unsigned int pos, unsigne
if (pos >= nbits || !test_bit(pos, buf))
return -1;
- return __bitmap_weight(buf, pos);
-}
-
-/**
- * bitmap_ord_to_pos - find position of n-th set bit in bitmap
- * @buf: pointer to bitmap
- * @ord: ordinal bit position (n-th set bit, n >= 0)
- * @nbits: number of valid bit positions in @buf
- *
- * Map the ordinal offset of bit @ord in @buf to its position in @buf.
- * Value of @ord should be in range 0 <= @ord < weight(buf). If @ord
- * >= weight(buf), returns @nbits.
- *
- * If for example, just bits 4 through 7 are set in @buf, then @ord
- * values 0 through 3 will get mapped to 4 through 7, respectively,
- * and all other @ord values returns @nbits. When @ord value 3
- * gets mapped to (returns) @pos value 7 in this example, that means
- * that the 3rd set bit (starting with 0th) is at position 7 in @buf.
- *
- * The bit positions 0 through @nbits-1 are valid positions in @buf.
- */
-unsigned int bitmap_ord_to_pos(const unsigned long *buf, unsigned int ord, unsigned int nbits)
-{
- unsigned int pos;
-
- for (pos = find_first_bit(buf, nbits);
- pos < nbits && ord;
- pos = find_next_bit(buf, nbits, pos + 1))
- ord--;
-
- return pos;
+ return bitmap_weight(buf, pos);
}
/**
@@ -1035,7 +1017,7 @@ void bitmap_remap(unsigned long *dst, const unsigned long *src,
if (n < 0 || w == 0)
set_bit(oldbit, dst); /* identity map */
else
- set_bit(bitmap_ord_to_pos(new, n % w, nbits), dst);
+ set_bit(find_nth_bit(new, nbits, n % w), dst);
}
}
EXPORT_SYMBOL(bitmap_remap);
@@ -1074,7 +1056,7 @@ int bitmap_bitremap(int oldbit, const unsigned long *old,
if (n < 0 || w == 0)
return oldbit;
else
- return bitmap_ord_to_pos(new, n % w, bits);
+ return find_nth_bit(new, bits, n % w);
}
EXPORT_SYMBOL(bitmap_bitremap);
@@ -1198,7 +1180,7 @@ void bitmap_onto(unsigned long *dst, const unsigned long *orig,
* The following code is a more efficient, but less
* obvious, equivalent to the loop:
* for (m = 0; m < bitmap_weight(relmap, bits); m++) {
- * n = bitmap_ord_to_pos(orig, m, bits);
+ * n = find_nth_bit(orig, bits, m);
* if (test_bit(m, orig))
* set_bit(n, dst);
* }
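
As an illustrative sketch (not part of this patch), the new __bitmap_weight_and() is reached through the bitmap_weight_and() wrapper, which counts the bits set in the intersection of two bitmaps without materializing a temporary; cpumask_weight_and() in the cpumask.c hunks below builds on it:

	/* Sketch: count bits set in both nbits-sized bitmaps. */
	static unsigned int count_shared_bits(const unsigned long *a,
					      const unsigned long *b,
					      unsigned int nbits)
	{
		/* equivalent to bitmap_and() into scratch + bitmap_weight() */
		return bitmap_weight_and(a, b, nbits);
	}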
diff --git a/lib/bust_spinlocks.c b/lib/bust_spinlocks.c
index 8be59f84eaea..bfd53972a4d8 100644
--- a/lib/bust_spinlocks.c
+++ b/lib/bust_spinlocks.c
@@ -22,9 +22,6 @@ void bust_spinlocks(int yes)
if (yes) {
++oops_in_progress;
} else {
-#ifdef CONFIG_VT
- unblank_screen();
-#endif
console_unblank();
if (--oops_in_progress == 0)
wake_up_klogd();
diff --git a/lib/cpumask.c b/lib/cpumask.c
index f0ae119be8c4..c7c392514fd3 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -128,23 +128,21 @@ unsigned int cpumask_local_spread(unsigned int i, int node)
i %= num_online_cpus();
if (node == NUMA_NO_NODE) {
- for_each_cpu(cpu, cpu_online_mask)
- if (i-- == 0)
- return cpu;
+ cpu = cpumask_nth(i, cpu_online_mask);
+ if (cpu < nr_cpu_ids)
+ return cpu;
} else {
/* NUMA first. */
- for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask)
- if (i-- == 0)
- return cpu;
-
- for_each_cpu(cpu, cpu_online_mask) {
- /* Skip NUMA nodes, done above. */
- if (cpumask_test_cpu(cpu, cpumask_of_node(node)))
- continue;
-
- if (i-- == 0)
- return cpu;
- }
+ cpu = cpumask_nth_and(i, cpu_online_mask, cpumask_of_node(node));
+ if (cpu < nr_cpu_ids)
+ return cpu;
+
+ i -= cpumask_weight_and(cpu_online_mask, cpumask_of_node(node));
+
+ /* Skip NUMA nodes, done above. */
+ cpu = cpumask_nth_andnot(i, cpu_online_mask, cpumask_of_node(node));
+ if (cpu < nr_cpu_ids)
+ return cpu;
}
BUG();
}
@@ -168,10 +166,8 @@ unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
/* NOTE: our first selection will skip 0. */
prev = __this_cpu_read(distribute_cpu_mask_prev);
- next = cpumask_next_and(prev, src1p, src2p);
- if (next >= nr_cpu_ids)
- next = cpumask_first_and(src1p, src2p);
-
+ next = find_next_and_bit_wrap(cpumask_bits(src1p), cpumask_bits(src2p),
+ nr_cpumask_bits, prev + 1);
if (next < nr_cpu_ids)
__this_cpu_write(distribute_cpu_mask_prev, next);
@@ -185,11 +181,7 @@ unsigned int cpumask_any_distribute(const struct cpumask *srcp)
/* NOTE: our first selection will skip 0. */
prev = __this_cpu_read(distribute_cpu_mask_prev);
-
- next = cpumask_next(prev, srcp);
- if (next >= nr_cpu_ids)
- next = cpumask_first(srcp);
-
+ next = find_next_bit_wrap(cpumask_bits(srcp), nr_cpumask_bits, prev + 1);
if (next < nr_cpu_ids)
__this_cpu_write(distribute_cpu_mask_prev, next);
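
For context (an illustrative sketch, not part of this patch), the rewritten cpumask_local_spread() relies on the cpumask_nth() family: cpumask_nth(i, mask) returns the CPU number of the i-th (0-based) set bit in mask, or a value of at least nr_cpu_ids when the mask has fewer than i + 1 bits set:

	/* Sketch: pick the i-th online CPU, falling back to the first one. */
	static unsigned int pick_ith_online_cpu(unsigned int i)
	{
		unsigned int cpu = cpumask_nth(i, cpu_online_mask);

		return cpu < nr_cpu_ids ? cpu : cpumask_first(cpu_online_mask);
	}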
diff --git a/lib/cpumask_kunit.c b/lib/cpumask_kunit.c
index ecbeec72221e..d1fc6ece21f3 100644
--- a/lib/cpumask_kunit.c
+++ b/lib/cpumask_kunit.c
@@ -33,6 +33,19 @@
KUNIT_EXPECT_EQ_MSG((test), nr_cpu_ids - mask_weight, iter, MASK_MSG(mask)); \
} while (0)
+#define EXPECT_FOR_EACH_CPU_OP_EQ(test, op, mask1, mask2) \
+ do { \
+ const cpumask_t *m1 = (mask1); \
+ const cpumask_t *m2 = (mask2); \
+ int weight; \
+ int cpu, iter = 0; \
+ cpumask_##op(&mask_tmp, m1, m2); \
+ weight = cpumask_weight(&mask_tmp); \
+ for_each_cpu_##op(cpu, mask1, mask2) \
+ iter++; \
+ KUNIT_EXPECT_EQ((test), weight, iter); \
+ } while (0)
+
#define EXPECT_FOR_EACH_CPU_WRAP_EQ(test, mask) \
do { \
const cpumask_t *m = (mask); \
@@ -54,6 +67,7 @@
static cpumask_t mask_empty;
static cpumask_t mask_all;
+static cpumask_t mask_tmp;
static void test_cpumask_weight(struct kunit *test)
{
@@ -101,10 +115,15 @@ static void test_cpumask_iterators(struct kunit *test)
EXPECT_FOR_EACH_CPU_EQ(test, &mask_empty);
EXPECT_FOR_EACH_CPU_NOT_EQ(test, &mask_empty);
EXPECT_FOR_EACH_CPU_WRAP_EQ(test, &mask_empty);
+ EXPECT_FOR_EACH_CPU_OP_EQ(test, and, &mask_empty, &mask_empty);
+ EXPECT_FOR_EACH_CPU_OP_EQ(test, and, cpu_possible_mask, &mask_empty);
+ EXPECT_FOR_EACH_CPU_OP_EQ(test, andnot, &mask_empty, &mask_empty);
EXPECT_FOR_EACH_CPU_EQ(test, cpu_possible_mask);
EXPECT_FOR_EACH_CPU_NOT_EQ(test, cpu_possible_mask);
EXPECT_FOR_EACH_CPU_WRAP_EQ(test, cpu_possible_mask);
+ EXPECT_FOR_EACH_CPU_OP_EQ(test, and, cpu_possible_mask, cpu_possible_mask);
+ EXPECT_FOR_EACH_CPU_OP_EQ(test, andnot, cpu_possible_mask, &mask_empty);
}
static void test_cpumask_iterators_builtin(struct kunit *test)
diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig
index 47816af9a9d7..7e9683e9f5c6 100644
--- a/lib/crypto/Kconfig
+++ b/lib/crypto/Kconfig
@@ -2,6 +2,9 @@
menu "Crypto library routines"
+config CRYPTO_LIB_UTILS
+ tristate
+
config CRYPTO_LIB_AES
tristate
@@ -33,6 +36,7 @@ config CRYPTO_ARCH_HAVE_LIB_CHACHA
config CRYPTO_LIB_CHACHA_GENERIC
tristate
+ select CRYPTO_LIB_UTILS
help
This symbol can be depended upon by arch implementations of the
ChaCha library interface that require the generic code as a
@@ -42,7 +46,6 @@ config CRYPTO_LIB_CHACHA_GENERIC
config CRYPTO_LIB_CHACHA
tristate "ChaCha library interface"
- depends on CRYPTO
depends on CRYPTO_ARCH_HAVE_LIB_CHACHA || !CRYPTO_ARCH_HAVE_LIB_CHACHA
select CRYPTO_LIB_CHACHA_GENERIC if CRYPTO_ARCH_HAVE_LIB_CHACHA=n
help
@@ -70,7 +73,7 @@ config CRYPTO_LIB_CURVE25519
tristate "Curve25519 scalar multiplication library"
depends on CRYPTO_ARCH_HAVE_LIB_CURVE25519 || !CRYPTO_ARCH_HAVE_LIB_CURVE25519
select CRYPTO_LIB_CURVE25519_GENERIC if CRYPTO_ARCH_HAVE_LIB_CURVE25519=n
- select LIB_MEMNEQ
+ select CRYPTO_LIB_UTILS
help
Enable the Curve25519 library interface. This interface may be
fulfilled by either the generic implementation or an arch-specific
diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile
index 919cbb2c220d..c852f067ab06 100644
--- a/lib/crypto/Makefile
+++ b/lib/crypto/Makefile
@@ -1,5 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CRYPTO_LIB_UTILS) += libcryptoutils.o
+libcryptoutils-y := memneq.o utils.o
+
# chacha is used by the /dev/random driver which is always builtin
obj-y += chacha.o
obj-$(CONFIG_CRYPTO_LIB_CHACHA_GENERIC) += libchacha.o
diff --git a/lib/memneq.c b/lib/crypto/memneq.c
index fb11608b1ec1..243d8677cc51 100644
--- a/lib/memneq.c
+++ b/lib/crypto/memneq.c
@@ -59,10 +59,9 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include <crypto/algapi.h>
#include <asm/unaligned.h>
-
-#ifndef __HAVE_ARCH_CRYPTO_MEMNEQ
+#include <crypto/algapi.h>
+#include <linux/module.h>
/* Generic path for arbitrary size */
static inline unsigned long
@@ -172,5 +171,3 @@ noinline unsigned long __crypto_memneq(const void *a, const void *b,
}
}
EXPORT_SYMBOL(__crypto_memneq);
-
-#endif /* __HAVE_ARCH_CRYPTO_MEMNEQ */
diff --git a/lib/crypto/utils.c b/lib/crypto/utils.c
new file mode 100644
index 000000000000..53230ab1b195
--- /dev/null
+++ b/lib/crypto/utils.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Crypto library utility functions
+ *
+ * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+
+#include <asm/unaligned.h>
+#include <crypto/algapi.h>
+#include <linux/module.h>
+
+/*
+ * XOR @len bytes from @src1 and @src2 together, writing the result to @dst
+ * (which may alias one of the sources). Don't call this directly; call
+ * crypto_xor() or crypto_xor_cpy() instead.
+ */
+void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int len)
+{
+ int relalign = 0;
+
+ if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
+ int size = sizeof(unsigned long);
+ int d = (((unsigned long)dst ^ (unsigned long)src1) |
+ ((unsigned long)dst ^ (unsigned long)src2)) &
+ (size - 1);
+
+ relalign = d ? 1 << __ffs(d) : size;
+
+ /*
+ * If we care about alignment, process as many bytes as
+ * needed to advance dst and src to values whose alignments
+ * equal their relative alignment. This will allow us to
+ * process the remainder of the input using optimal strides.
+ */
+ while (((unsigned long)dst & (relalign - 1)) && len > 0) {
+ *dst++ = *src1++ ^ *src2++;
+ len--;
+ }
+ }
+
+ while (IS_ENABLED(CONFIG_64BIT) && len >= 8 && !(relalign & 7)) {
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
+ u64 l = get_unaligned((u64 *)src1) ^
+ get_unaligned((u64 *)src2);
+ put_unaligned(l, (u64 *)dst);
+ } else {
+ *(u64 *)dst = *(u64 *)src1 ^ *(u64 *)src2;
+ }
+ dst += 8;
+ src1 += 8;
+ src2 += 8;
+ len -= 8;
+ }
+
+ while (len >= 4 && !(relalign & 3)) {
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
+ u32 l = get_unaligned((u32 *)src1) ^
+ get_unaligned((u32 *)src2);
+ put_unaligned(l, (u32 *)dst);
+ } else {
+ *(u32 *)dst = *(u32 *)src1 ^ *(u32 *)src2;
+ }
+ dst += 4;
+ src1 += 4;
+ src2 += 4;
+ len -= 4;
+ }
+
+ while (len >= 2 && !(relalign & 1)) {
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
+ u16 l = get_unaligned((u16 *)src1) ^
+ get_unaligned((u16 *)src2);
+ put_unaligned(l, (u16 *)dst);
+ } else {
+ *(u16 *)dst = *(u16 *)src1 ^ *(u16 *)src2;
+ }
+ dst += 2;
+ src1 += 2;
+ src2 += 2;
+ len -= 2;
+ }
+
+ while (len--)
+ *dst++ = *src1++ ^ *src2++;
+}
+EXPORT_SYMBOL_GPL(__crypto_xor);
+
+MODULE_LICENSE("GPL");
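
Callers are not expected to use __crypto_xor() directly; they go through the crypto_xor() and crypto_xor_cpy() wrappers in <crypto/algapi.h>. An illustrative sketch (not part of this patch):

	#include <crypto/algapi.h>

	/* Sketch: XOR a keystream into a buffer in place, and into a copy. */
	static void xor_in_place(u8 *buf, const u8 *keystream, unsigned int len)
	{
		crypto_xor(buf, keystream, len);          /* buf ^= keystream */
	}

	static void xor_to_copy(u8 *dst, const u8 *src, const u8 *keystream,
				unsigned int len)
	{
		crypto_xor_cpy(dst, src, keystream, len); /* dst = src ^ keystream */
	}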
diff --git a/lib/devres.c b/lib/devres.c
index 55eb07e80cbb..6baf43902ead 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -104,21 +104,6 @@ void __iomem *devm_ioremap_wc(struct device *dev, resource_size_t offset,
EXPORT_SYMBOL(devm_ioremap_wc);
/**
- * devm_ioremap_np - Managed ioremap_np()
- * @dev: Generic device to remap IO address for
- * @offset: Resource address to map
- * @size: Size of map
- *
- * Managed ioremap_np(). Map is automatically unmapped on driver detach.
- */
-void __iomem *devm_ioremap_np(struct device *dev, resource_size_t offset,
- resource_size_t size)
-{
- return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_NP);
-}
-EXPORT_SYMBOL(devm_ioremap_np);
-
-/**
* devm_iounmap - Managed iounmap()
* @dev: Generic device to unmap for
* @addr: Address to unmap
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index dd7f56af9aed..009f2ead09c1 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -41,9 +41,11 @@
extern struct _ddebug __start___dyndbg[];
extern struct _ddebug __stop___dyndbg[];
+extern struct ddebug_class_map __start___dyndbg_classes[];
+extern struct ddebug_class_map __stop___dyndbg_classes[];
struct ddebug_table {
- struct list_head link;
+ struct list_head link, maps;
const char *mod_name;
unsigned int num_ddebugs;
struct _ddebug *ddebugs;
@@ -54,12 +56,13 @@ struct ddebug_query {
const char *module;
const char *function;
const char *format;
+ const char *class_string;
unsigned int first_lineno, last_lineno;
};
struct ddebug_iter {
struct ddebug_table *table;
- unsigned int idx;
+ int idx;
};
struct flag_settings {
@@ -134,15 +137,33 @@ static void vpr_info_dq(const struct ddebug_query *query, const char *msg)
fmtlen--;
}
- v3pr_info("%s: func=\"%s\" file=\"%s\" module=\"%s\" format=\"%.*s\" lineno=%u-%u\n",
- msg,
- query->function ?: "",
- query->filename ?: "",
- query->module ?: "",
- fmtlen, query->format ?: "",
- query->first_lineno, query->last_lineno);
+ v3pr_info("%s: func=\"%s\" file=\"%s\" module=\"%s\" format=\"%.*s\" lineno=%u-%u class=%s\n",
+ msg,
+ query->function ?: "",
+ query->filename ?: "",
+ query->module ?: "",
+ fmtlen, query->format ?: "",
+ query->first_lineno, query->last_lineno, query->class_string);
}
+static struct ddebug_class_map *ddebug_find_valid_class(struct ddebug_table const *dt,
+ const char *class_string, int *class_id)
+{
+ struct ddebug_class_map *map;
+ int idx;
+
+ list_for_each_entry(map, &dt->maps, link) {
+ idx = match_string(map->class_names, map->length, class_string);
+ if (idx >= 0) {
+ *class_id = idx + map->base;
+ return map;
+ }
+ }
+ *class_id = -ENOENT;
+ return NULL;
+}
+
+#define __outvar /* filled by callee */
/*
* Search the tables for _ddebug's which match the given `query' and
* apply the `flags' and `mask' to them. Returns number of matching
@@ -156,7 +177,9 @@ static int ddebug_change(const struct ddebug_query *query,
struct ddebug_table *dt;
unsigned int newflags;
unsigned int nfound = 0;
- struct flagsbuf fbuf;
+ struct flagsbuf fbuf, nbuf;
+ struct ddebug_class_map *map = NULL;
+ int __outvar valid_class;
/* search for matching ddebugs */
mutex_lock(&ddebug_lock);
@@ -167,9 +190,22 @@ static int ddebug_change(const struct ddebug_query *query,
!match_wildcard(query->module, dt->mod_name))
continue;
+ if (query->class_string) {
+ map = ddebug_find_valid_class(dt, query->class_string, &valid_class);
+ if (!map)
+ continue;
+ } else {
+ /* constrain query, do not touch class'd callsites */
+ valid_class = _DPRINTK_CLASS_DFLT;
+ }
+
for (i = 0; i < dt->num_ddebugs; i++) {
struct _ddebug *dp = &dt->ddebugs[i];
+ /* match site against query-class */
+ if (dp->class_id != valid_class)
+ continue;
+
/* match against the source filename */
if (query->filename &&
!match_wildcard(query->filename, dp->filename) &&
@@ -211,16 +247,18 @@ static int ddebug_change(const struct ddebug_query *query,
continue;
#ifdef CONFIG_JUMP_LABEL
if (dp->flags & _DPRINTK_FLAGS_PRINT) {
- if (!(modifiers->flags & _DPRINTK_FLAGS_PRINT))
+ if (!(newflags & _DPRINTK_FLAGS_PRINT))
static_branch_disable(&dp->key.dd_key_true);
- } else if (modifiers->flags & _DPRINTK_FLAGS_PRINT)
+ } else if (newflags & _DPRINTK_FLAGS_PRINT) {
static_branch_enable(&dp->key.dd_key_true);
+ }
#endif
+ v4pr_info("changed %s:%d [%s]%s %s => %s\n",
+ trim_prefix(dp->filename), dp->lineno,
+ dt->mod_name, dp->function,
+ ddebug_describe_flags(dp->flags, &fbuf),
+ ddebug_describe_flags(newflags, &nbuf));
dp->flags = newflags;
- v4pr_info("changed %s:%d [%s]%s =%s\n",
- trim_prefix(dp->filename), dp->lineno,
- dt->mod_name, dp->function,
- ddebug_describe_flags(dp->flags, &fbuf));
}
}
mutex_unlock(&ddebug_lock);
@@ -383,10 +421,6 @@ static int ddebug_parse_query(char *words[], int nwords,
return -EINVAL;
}
- if (modname)
- /* support $modname.dyndbg=<multiple queries> */
- query->module = modname;
-
for (i = 0; i < nwords; i += 2) {
char *keyword = words[i];
char *arg = words[i+1];
@@ -420,6 +454,8 @@ static int ddebug_parse_query(char *words[], int nwords,
} else if (!strcmp(keyword, "line")) {
if (parse_linerange(query, arg))
return -EINVAL;
+ } else if (!strcmp(keyword, "class")) {
+ rc = check_set(&query->class_string, arg, "class");
} else {
pr_err("unknown keyword \"%s\"\n", keyword);
return -EINVAL;
@@ -427,6 +463,13 @@ static int ddebug_parse_query(char *words[], int nwords,
if (rc)
return rc;
}
+ if (!query->module && modname)
+ /*
+ * support $modname.dyndbg=<multiple queries>, when
+ * not given in the query itself
+ */
+ query->module = modname;
+
vpr_info_dq(query, "parsed");
return 0;
}
@@ -553,34 +596,217 @@ static int ddebug_exec_queries(char *query, const char *modname)
return nfound;
}
+/* apply a new bitmap to the sys-knob's current bit-state */
+static int ddebug_apply_class_bitmap(const struct ddebug_class_param *dcp,
+ unsigned long *new_bits, unsigned long *old_bits)
+{
+#define QUERY_SIZE 128
+ char query[QUERY_SIZE];
+ const struct ddebug_class_map *map = dcp->map;
+ int matches = 0;
+ int bi, ct;
+
+ v2pr_info("apply: 0x%lx to: 0x%lx\n", *new_bits, *old_bits);
+
+ for (bi = 0; bi < map->length; bi++) {
+ if (test_bit(bi, new_bits) == test_bit(bi, old_bits))
+ continue;
+
+ snprintf(query, QUERY_SIZE, "class %s %c%s", map->class_names[bi],
+ test_bit(bi, new_bits) ? '+' : '-', dcp->flags);
+
+ ct = ddebug_exec_queries(query, NULL);
+ matches += ct;
+
+ v2pr_info("bit_%d: %d matches on class: %s -> 0x%lx\n", bi,
+ ct, map->class_names[bi], *new_bits);
+ }
+ return matches;
+}
+
+/* stub to later conditionally add "$module." prefix where not already done */
+#define KP_NAME(kp) kp->name
+
+#define CLASSMAP_BITMASK(width) ((1UL << (width)) - 1)
+
+/* accept comma-separated-list of [+-] classnames */
+static int param_set_dyndbg_classnames(const char *instr, const struct kernel_param *kp)
+{
+ const struct ddebug_class_param *dcp = kp->arg;
+ const struct ddebug_class_map *map = dcp->map;
+ unsigned long curr_bits, old_bits;
+ char *cl_str, *p, *tmp;
+ int cls_id, totct = 0;
+ bool wanted;
+
+ cl_str = tmp = kstrdup(instr, GFP_KERNEL);
+ p = strchr(cl_str, '\n');
+ if (p)
+ *p = '\0';
+
+ /* start with previously set state-bits, then modify */
+ curr_bits = old_bits = *dcp->bits;
+ vpr_info("\"%s\" > %s:0x%lx\n", cl_str, KP_NAME(kp), curr_bits);
+
+ for (; cl_str; cl_str = p) {
+ p = strchr(cl_str, ',');
+ if (p)
+ *p++ = '\0';
+
+ if (*cl_str == '-') {
+ wanted = false;
+ cl_str++;
+ } else {
+ wanted = true;
+ if (*cl_str == '+')
+ cl_str++;
+ }
+ cls_id = match_string(map->class_names, map->length, cl_str);
+ if (cls_id < 0) {
+ pr_err("%s unknown to %s\n", cl_str, KP_NAME(kp));
+ continue;
+ }
+
+ /* have one or more valid class_ids of one *_NAMES type */
+ switch (map->map_type) {
+ case DD_CLASS_TYPE_DISJOINT_NAMES:
+ /* the +/- pertains to a single bit */
+ if (test_bit(cls_id, &curr_bits) == wanted) {
+ v3pr_info("no change on %s\n", cl_str);
+ continue;
+ }
+ curr_bits ^= BIT(cls_id);
+ totct += ddebug_apply_class_bitmap(dcp, &curr_bits, dcp->bits);
+ *dcp->bits = curr_bits;
+ v2pr_info("%s: changed bit %d:%s\n", KP_NAME(kp), cls_id,
+ map->class_names[cls_id]);
+ break;
+ case DD_CLASS_TYPE_LEVEL_NAMES:
+ /* cls_id = N in 0..max. wanted +/- determines N or N-1 */
+ old_bits = CLASSMAP_BITMASK(*dcp->lvl);
+ curr_bits = CLASSMAP_BITMASK(cls_id + (wanted ? 1 : 0 ));
+
+ totct += ddebug_apply_class_bitmap(dcp, &curr_bits, &old_bits);
+ *dcp->lvl = (cls_id + (wanted ? 1 : 0));
+ v2pr_info("%s: changed bit-%d: \"%s\" %lx->%lx\n", KP_NAME(kp), cls_id,
+ map->class_names[cls_id], old_bits, curr_bits);
+ break;
+ default:
+ pr_err("illegal map-type value %d\n", map->map_type);
+ }
+ }
+ kfree(tmp);
+ vpr_info("total matches: %d\n", totct);
+ return 0;
+}
+
/**
- * dynamic_debug_exec_queries - select and change dynamic-debug prints
- * @query: query-string described in admin-guide/dynamic-debug-howto
- * @modname: string containing module name, usually &module.mod_name
+ * param_set_dyndbg_classes - class FOO >control
+ * @instr: string echo>d to sysfs, input depends on map_type
+ * @kp: kp->arg has state: bits/lvl, map, map_type
+ *
+ * Enable/disable prdbgs by their class, as given in the arguments to
+ * DECLARE_DYNDBG_CLASSMAP. For LEVEL map-types, enforce relative
+ * levels by bitpos.
*
- * This uses the >/proc/dynamic_debug/control reader, allowing module
- * authors to modify their dynamic-debug callsites. The modname is
- * canonically struct module.mod_name, but can also be null or a
- * module-wildcard, for example: "drm*".
+ * Returns: 0 or <0 if error.
*/
-int dynamic_debug_exec_queries(const char *query, const char *modname)
+int param_set_dyndbg_classes(const char *instr, const struct kernel_param *kp)
{
- int rc;
- char *qry; /* writable copy of query */
-
- if (!query) {
- pr_err("non-null query/command string expected\n");
+ const struct ddebug_class_param *dcp = kp->arg;
+ const struct ddebug_class_map *map = dcp->map;
+ unsigned long inrep, new_bits, old_bits;
+ int rc, totct = 0;
+
+ switch (map->map_type) {
+
+ case DD_CLASS_TYPE_DISJOINT_NAMES:
+ case DD_CLASS_TYPE_LEVEL_NAMES:
+ /* handle [+-]classnames list separately, we are done here */
+ return param_set_dyndbg_classnames(instr, kp);
+
+ case DD_CLASS_TYPE_DISJOINT_BITS:
+ case DD_CLASS_TYPE_LEVEL_NUM:
+ /* numeric input, accept and fall-thru */
+ rc = kstrtoul(instr, 0, &inrep);
+ if (rc) {
+ pr_err("expecting numeric input: %s > %s\n", instr, KP_NAME(kp));
+ return -EINVAL;
+ }
+ break;
+ default:
+ pr_err("%s: bad map type: %d\n", KP_NAME(kp), map->map_type);
return -EINVAL;
}
- qry = kstrndup(query, PAGE_SIZE, GFP_KERNEL);
- if (!qry)
- return -ENOMEM;
- rc = ddebug_exec_queries(qry, modname);
- kfree(qry);
- return rc;
+ /* only _BITS,_NUM (numeric) map-types get here */
+ switch (map->map_type) {
+ case DD_CLASS_TYPE_DISJOINT_BITS:
+ /* expect bits. mask and warn if too many */
+ if (inrep & ~CLASSMAP_BITMASK(map->length)) {
+ pr_warn("%s: input: 0x%lx exceeds mask: 0x%lx, masking\n",
+ KP_NAME(kp), inrep, CLASSMAP_BITMASK(map->length));
+ inrep &= CLASSMAP_BITMASK(map->length);
+ }
+ v2pr_info("bits:%lx > %s\n", inrep, KP_NAME(kp));
+ totct += ddebug_apply_class_bitmap(dcp, &inrep, dcp->bits);
+ *dcp->bits = inrep;
+ break;
+ case DD_CLASS_TYPE_LEVEL_NUM:
+ /* input is bitpos, of highest verbosity to be enabled */
+ if (inrep > map->length) {
+ pr_warn("%s: level:%ld exceeds max:%d, clamping\n",
+ KP_NAME(kp), inrep, map->length);
+ inrep = map->length;
+ }
+ old_bits = CLASSMAP_BITMASK(*dcp->lvl);
+ new_bits = CLASSMAP_BITMASK(inrep);
+ v2pr_info("lvl:%ld bits:0x%lx > %s\n", inrep, new_bits, KP_NAME(kp));
+ totct += ddebug_apply_class_bitmap(dcp, &new_bits, &old_bits);
+ *dcp->lvl = inrep;
+ break;
+ default:
+ pr_warn("%s: bad map type: %d\n", KP_NAME(kp), map->map_type);
+ }
+ vpr_info("%s: total matches: %d\n", KP_NAME(kp), totct);
+ return 0;
+}
+EXPORT_SYMBOL(param_set_dyndbg_classes);
+
+/**
+ * param_get_dyndbg_classes - classes reader
+ * @buffer: string description of controlled bits -> classes
+ * @kp: kp->arg has state: bits, map
+ *
+ * Reads last written state, underlying prdbg state may have been
+ * altered by direct >control. Displays 0x for DISJOINT, 0-N for
+ * LEVEL. Returns: #chars written or <0 on error
+ */
+int param_get_dyndbg_classes(char *buffer, const struct kernel_param *kp)
+{
+ const struct ddebug_class_param *dcp = kp->arg;
+ const struct ddebug_class_map *map = dcp->map;
+
+ switch (map->map_type) {
+
+ case DD_CLASS_TYPE_DISJOINT_NAMES:
+ case DD_CLASS_TYPE_DISJOINT_BITS:
+ return scnprintf(buffer, PAGE_SIZE, "0x%lx\n", *dcp->bits);
+
+ case DD_CLASS_TYPE_LEVEL_NAMES:
+ case DD_CLASS_TYPE_LEVEL_NUM:
+ return scnprintf(buffer, PAGE_SIZE, "%d\n", *dcp->lvl);
+ default:
+ return -1;
+ }
}
-EXPORT_SYMBOL_GPL(dynamic_debug_exec_queries);
+EXPORT_SYMBOL(param_get_dyndbg_classes);
+
+const struct kernel_param_ops param_ops_dyndbg_classes = {
+ .set = param_set_dyndbg_classes,
+ .get = param_get_dyndbg_classes,
+};
+EXPORT_SYMBOL(param_ops_dyndbg_classes);
#define PREFIX_SIZE 64
@@ -803,13 +1029,12 @@ static struct _ddebug *ddebug_iter_first(struct ddebug_iter *iter)
{
if (list_empty(&ddebug_tables)) {
iter->table = NULL;
- iter->idx = 0;
return NULL;
}
iter->table = list_entry(ddebug_tables.next,
struct ddebug_table, link);
- iter->idx = 0;
- return &iter->table->ddebugs[iter->idx];
+ iter->idx = iter->table->num_ddebugs;
+ return &iter->table->ddebugs[--iter->idx];
}
/*
@@ -822,15 +1047,16 @@ static struct _ddebug *ddebug_iter_next(struct ddebug_iter *iter)
{
if (iter->table == NULL)
return NULL;
- if (++iter->idx == iter->table->num_ddebugs) {
+ if (--iter->idx < 0) {
/* iterate to next table */
- iter->idx = 0;
if (list_is_last(&iter->table->link, &ddebug_tables)) {
iter->table = NULL;
return NULL;
}
iter->table = list_entry(iter->table->link.next,
struct ddebug_table, link);
+ iter->idx = iter->table->num_ddebugs;
+ --iter->idx;
}
return &iter->table->ddebugs[iter->idx];
}
@@ -876,6 +1102,20 @@ static void *ddebug_proc_next(struct seq_file *m, void *p, loff_t *pos)
return dp;
}
+#define class_in_range(class_id, map) \
+ (class_id >= map->base && class_id < map->base + map->length)
+
+static const char *ddebug_class_name(struct ddebug_iter *iter, struct _ddebug *dp)
+{
+ struct ddebug_class_map *map;
+
+ list_for_each_entry(map, &iter->table->maps, link)
+ if (class_in_range(dp->class_id, map))
+ return map->class_names[dp->class_id - map->base];
+
+ return NULL;
+}
+
/*
* Seq_ops show method. Called several times within a read()
* call from userspace, with ddebug_lock held. Formats the
@@ -887,6 +1127,7 @@ static int ddebug_proc_show(struct seq_file *m, void *p)
struct ddebug_iter *iter = m->private;
struct _ddebug *dp = p;
struct flagsbuf flags;
+ char const *class;
if (p == SEQ_START_TOKEN) {
seq_puts(m,
@@ -898,8 +1139,17 @@ static int ddebug_proc_show(struct seq_file *m, void *p)
trim_prefix(dp->filename), dp->lineno,
iter->table->mod_name, dp->function,
ddebug_describe_flags(dp->flags, &flags));
- seq_escape(m, dp->format, "\t\r\n\"");
- seq_puts(m, "\"\n");
+ seq_escape_str(m, dp->format, ESCAPE_SPACE, "\t\r\n\"");
+ seq_puts(m, "\"");
+
+ if (dp->class_id != _DPRINTK_CLASS_DFLT) {
+ class = ddebug_class_name(iter, dp);
+ if (class)
+ seq_printf(m, " class:%s", class);
+ else
+ seq_printf(m, " class unknown, _id:%d", dp->class_id);
+ }
+ seq_puts(m, "\n");
return 0;
}
@@ -943,18 +1193,50 @@ static const struct proc_ops proc_fops = {
.proc_write = ddebug_proc_write
};
+static void ddebug_attach_module_classes(struct ddebug_table *dt,
+ struct ddebug_class_map *classes,
+ int num_classes)
+{
+ struct ddebug_class_map *cm;
+ int i, j, ct = 0;
+
+ for (cm = classes, i = 0; i < num_classes; i++, cm++) {
+
+ if (!strcmp(cm->mod_name, dt->mod_name)) {
+
+ v2pr_info("class[%d]: module:%s base:%d len:%d ty:%d\n", i,
+ cm->mod_name, cm->base, cm->length, cm->map_type);
+
+ for (j = 0; j < cm->length; j++)
+ v3pr_info(" %d: %d %s\n", j + cm->base, j,
+ cm->class_names[j]);
+
+ list_add(&cm->link, &dt->maps);
+ ct++;
+ }
+ }
+ if (ct)
+ vpr_info("module:%s attached %d classes\n", dt->mod_name, ct);
+}
+
/*
* Allocate a new ddebug_table for the given module
* and add it to the global list.
*/
-int ddebug_add_module(struct _ddebug *tab, unsigned int n,
- const char *name)
+static int __ddebug_add_module(struct _ddebug_info *di, unsigned int base,
+ const char *modname)
{
struct ddebug_table *dt;
+ v3pr_info("add-module: %s.%d sites\n", modname, di->num_descs);
+ if (!di->num_descs) {
+ v3pr_info(" skip %s\n", modname);
+ return 0;
+ }
+
dt = kzalloc(sizeof(*dt), GFP_KERNEL);
if (dt == NULL) {
- pr_err("error adding module: %s\n", name);
+ pr_err("error adding module: %s\n", modname);
return -ENOMEM;
}
/*
@@ -963,18 +1245,29 @@ int ddebug_add_module(struct _ddebug *tab, unsigned int n,
* member of struct module, which lives at least as long as
* this struct ddebug_table.
*/
- dt->mod_name = name;
- dt->num_ddebugs = n;
- dt->ddebugs = tab;
+ dt->mod_name = modname;
+ dt->ddebugs = di->descs;
+ dt->num_ddebugs = di->num_descs;
+
+ INIT_LIST_HEAD(&dt->link);
+ INIT_LIST_HEAD(&dt->maps);
+
+ if (di->classes && di->num_classes)
+ ddebug_attach_module_classes(dt, di->classes, di->num_classes);
mutex_lock(&ddebug_lock);
- list_add(&dt->link, &ddebug_tables);
+ list_add_tail(&dt->link, &ddebug_tables);
mutex_unlock(&ddebug_lock);
- vpr_info("%3u debug prints in module %s\n", n, dt->mod_name);
+ vpr_info("%3u debug prints in module %s\n", di->num_descs, modname);
return 0;
}
+int ddebug_add_module(struct _ddebug_info *di, const char *modname)
+{
+ return __ddebug_add_module(di, 0, modname);
+}
+
/* helper for ddebug_dyndbg_(boot|module)_param_cb */
static int ddebug_dyndbg_param_cb(char *param, char *val,
const char *modname, int on_err)
@@ -1083,11 +1376,17 @@ static int __init dynamic_debug_init_control(void)
static int __init dynamic_debug_init(void)
{
- struct _ddebug *iter, *iter_start;
- const char *modname = NULL;
+ struct _ddebug *iter, *iter_mod_start;
+ int ret, i, mod_sites, mod_ct;
+ const char *modname;
char *cmdline;
- int ret = 0;
- int n = 0, entries = 0, modct = 0;
+
+ struct _ddebug_info di = {
+ .descs = __start___dyndbg,
+ .classes = __start___dyndbg_classes,
+ .num_descs = __stop___dyndbg - __start___dyndbg,
+ .num_classes = __stop___dyndbg_classes - __start___dyndbg_classes,
+ };
if (&__start___dyndbg == &__stop___dyndbg) {
if (IS_ENABLED(CONFIG_DYNAMIC_DEBUG)) {
@@ -1098,30 +1397,39 @@ static int __init dynamic_debug_init(void)
ddebug_init_success = 1;
return 0;
}
- iter = __start___dyndbg;
+
+ iter = iter_mod_start = __start___dyndbg;
modname = iter->modname;
- iter_start = iter;
- for (; iter < __stop___dyndbg; iter++) {
- entries++;
+ i = mod_sites = mod_ct = 0;
+
+ for (; iter < __stop___dyndbg; iter++, i++, mod_sites++) {
+
if (strcmp(modname, iter->modname)) {
- modct++;
- ret = ddebug_add_module(iter_start, n, modname);
+ mod_ct++;
+ di.num_descs = mod_sites;
+ di.descs = iter_mod_start;
+ ret = __ddebug_add_module(&di, i - mod_sites, modname);
if (ret)
goto out_err;
- n = 0;
+
+ mod_sites = 0;
modname = iter->modname;
- iter_start = iter;
+ iter_mod_start = iter;
}
- n++;
}
- ret = ddebug_add_module(iter_start, n, modname);
+ di.num_descs = mod_sites;
+ di.descs = iter_mod_start;
+ ret = __ddebug_add_module(&di, i - mod_sites, modname);
if (ret)
goto out_err;
ddebug_init_success = 1;
vpr_info("%d prdebugs in %d modules, %d KiB in ddebug tables, %d kiB in __dyndbg section\n",
- entries, modct, (int)((modct * sizeof(struct ddebug_table)) >> 10),
- (int)((entries * sizeof(struct _ddebug)) >> 10));
+ i, mod_ct, (int)((mod_ct * sizeof(struct ddebug_table)) >> 10),
+ (int)((i * sizeof(struct _ddebug)) >> 10));
+
+ if (di.num_classes)
+ v2pr_info(" %d builtin ddebug class-maps\n", di.num_classes);
/* now that ddebug tables are loaded, process all boot args
* again to find and activate queries given in dyndbg params.
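
For context, the class machinery above is driven from module code via DECLARE_DYNDBG_CLASSMAP() and a module parameter bound to param_ops_dyndbg_classes. An illustrative sketch with hypothetical names (the class names and the bits variable are invented for this example), roughly mirroring how test_dynamic_debug.c and DRM wire it up:

	/* Sketch: expose three disjoint debug classes via a "debug" modparam. */
	#include <linux/dynamic_debug.h>
	#include <linux/module.h>

	DECLARE_DYNDBG_CLASSMAP(my_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
				"MY_CORE", "MY_IO", "MY_IRQ");

	static unsigned long my_debug_bits;		/* last written bit-state */
	static struct ddebug_class_param my_debug_param = {
		.bits  = &my_debug_bits,
		.flags = "p",				/* toggle the print flag */
		.map   = &my_classes,
	};
	module_param_cb(debug, &param_ops_dyndbg_classes, &my_debug_param, 0600);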
diff --git a/lib/find_bit.c b/lib/find_bit.c
index 1b8e4b2a9cba..18bc0a7ac8ee 100644
--- a/lib/find_bit.c
+++ b/lib/find_bit.c
@@ -19,57 +19,78 @@
#include <linux/minmax.h>
#include <linux/swab.h>
-#if !defined(find_next_bit) || !defined(find_next_zero_bit) || \
- !defined(find_next_bit_le) || !defined(find_next_zero_bit_le) || \
- !defined(find_next_and_bit)
/*
- * This is a common helper function for find_next_bit, find_next_zero_bit, and
- * find_next_and_bit. The differences are:
- * - The "invert" argument, which is XORed with each fetched word before
- * searching it for one bits.
- * - The optional "addr2", which is anded with "addr1" if present.
+ * Common helper for find_bit() function family
+ * @FETCH: The expression that fetches and pre-processes each word of bitmap(s)
+ * @MUNGE: The expression that post-processes a word containing found bit (may be empty)
+ * @size: The bitmap size in bits
*/
-unsigned long _find_next_bit(const unsigned long *addr1,
- const unsigned long *addr2, unsigned long nbits,
- unsigned long start, unsigned long invert, unsigned long le)
-{
- unsigned long tmp, mask;
-
- if (unlikely(start >= nbits))
- return nbits;
-
- tmp = addr1[start / BITS_PER_LONG];
- if (addr2)
- tmp &= addr2[start / BITS_PER_LONG];
- tmp ^= invert;
-
- /* Handle 1st word. */
- mask = BITMAP_FIRST_WORD_MASK(start);
- if (le)
- mask = swab(mask);
-
- tmp &= mask;
-
- start = round_down(start, BITS_PER_LONG);
-
- while (!tmp) {
- start += BITS_PER_LONG;
- if (start >= nbits)
- return nbits;
-
- tmp = addr1[start / BITS_PER_LONG];
- if (addr2)
- tmp &= addr2[start / BITS_PER_LONG];
- tmp ^= invert;
- }
+#define FIND_FIRST_BIT(FETCH, MUNGE, size) \
+({ \
+ unsigned long idx, val, sz = (size); \
+ \
+ for (idx = 0; idx * BITS_PER_LONG < sz; idx++) { \
+ val = (FETCH); \
+ if (val) { \
+ sz = min(idx * BITS_PER_LONG + __ffs(MUNGE(val)), sz); \
+ break; \
+ } \
+ } \
+ \
+ sz; \
+})
- if (le)
- tmp = swab(tmp);
-
- return min(start + __ffs(tmp), nbits);
-}
-EXPORT_SYMBOL(_find_next_bit);
-#endif
+/*
+ * Common helper for find_next_bit() function family
+ * @FETCH: The expression that fetches and pre-processes each word of bitmap(s)
+ * @MUNGE: The expression that post-processes a word containing found bit (may be empty)
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ */
+#define FIND_NEXT_BIT(FETCH, MUNGE, size, start) \
+({ \
+ unsigned long mask, idx, tmp, sz = (size), __start = (start); \
+ \
+ if (unlikely(__start >= sz)) \
+ goto out; \
+ \
+ mask = MUNGE(BITMAP_FIRST_WORD_MASK(__start)); \
+ idx = __start / BITS_PER_LONG; \
+ \
+ for (tmp = (FETCH) & mask; !tmp; tmp = (FETCH)) { \
+ if ((idx + 1) * BITS_PER_LONG >= sz) \
+ goto out; \
+ idx++; \
+ } \
+ \
+ sz = min(idx * BITS_PER_LONG + __ffs(MUNGE(tmp)), sz); \
+out: \
+ sz; \
+})
+
+#define FIND_NTH_BIT(FETCH, size, num) \
+({ \
+ unsigned long sz = (size), nr = (num), idx, w, tmp; \
+ \
+ for (idx = 0; (idx + 1) * BITS_PER_LONG <= sz; idx++) { \
+ if (idx * BITS_PER_LONG + nr >= sz) \
+ goto out; \
+ \
+ tmp = (FETCH); \
+ w = hweight_long(tmp); \
+ if (w > nr) \
+ goto found; \
+ \
+ nr -= w; \
+ } \
+ \
+ if (sz % BITS_PER_LONG) \
+ tmp = (FETCH) & BITMAP_LAST_WORD_MASK(sz); \
+found: \
+ sz = min(idx * BITS_PER_LONG + fns(tmp, nr), sz); \
+out: \
+ sz; \
+})
#ifndef find_first_bit
/*
@@ -77,14 +98,7 @@ EXPORT_SYMBOL(_find_next_bit);
*/
unsigned long _find_first_bit(const unsigned long *addr, unsigned long size)
{
- unsigned long idx;
-
- for (idx = 0; idx * BITS_PER_LONG < size; idx++) {
- if (addr[idx])
- return min(idx * BITS_PER_LONG + __ffs(addr[idx]), size);
- }
-
- return size;
+ return FIND_FIRST_BIT(addr[idx], /* nop */, size);
}
EXPORT_SYMBOL(_find_first_bit);
#endif
@@ -97,15 +111,7 @@ unsigned long _find_first_and_bit(const unsigned long *addr1,
const unsigned long *addr2,
unsigned long size)
{
- unsigned long idx, val;
-
- for (idx = 0; idx * BITS_PER_LONG < size; idx++) {
- val = addr1[idx] & addr2[idx];
- if (val)
- return min(idx * BITS_PER_LONG + __ffs(val), size);
- }
-
- return size;
+ return FIND_FIRST_BIT(addr1[idx] & addr2[idx], /* nop */, size);
}
EXPORT_SYMBOL(_find_first_and_bit);
#endif
@@ -116,16 +122,64 @@ EXPORT_SYMBOL(_find_first_and_bit);
*/
unsigned long _find_first_zero_bit(const unsigned long *addr, unsigned long size)
{
- unsigned long idx;
+ return FIND_FIRST_BIT(~addr[idx], /* nop */, size);
+}
+EXPORT_SYMBOL(_find_first_zero_bit);
+#endif
- for (idx = 0; idx * BITS_PER_LONG < size; idx++) {
- if (addr[idx] != ~0UL)
- return min(idx * BITS_PER_LONG + ffz(addr[idx]), size);
- }
+#ifndef find_next_bit
+unsigned long _find_next_bit(const unsigned long *addr, unsigned long nbits, unsigned long start)
+{
+ return FIND_NEXT_BIT(addr[idx], /* nop */, nbits, start);
+}
+EXPORT_SYMBOL(_find_next_bit);
+#endif
- return size;
+unsigned long __find_nth_bit(const unsigned long *addr, unsigned long size, unsigned long n)
+{
+ return FIND_NTH_BIT(addr[idx], size, n);
}
-EXPORT_SYMBOL(_find_first_zero_bit);
+EXPORT_SYMBOL(__find_nth_bit);
+
+unsigned long __find_nth_and_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long size, unsigned long n)
+{
+ return FIND_NTH_BIT(addr1[idx] & addr2[idx], size, n);
+}
+EXPORT_SYMBOL(__find_nth_and_bit);
+
+unsigned long __find_nth_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long size, unsigned long n)
+{
+ return FIND_NTH_BIT(addr1[idx] & ~addr2[idx], size, n);
+}
+EXPORT_SYMBOL(__find_nth_andnot_bit);
+
+#ifndef find_next_and_bit
+unsigned long _find_next_and_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long nbits, unsigned long start)
+{
+ return FIND_NEXT_BIT(addr1[idx] & addr2[idx], /* nop */, nbits, start);
+}
+EXPORT_SYMBOL(_find_next_and_bit);
+#endif
+
+#ifndef find_next_andnot_bit
+unsigned long _find_next_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long nbits, unsigned long start)
+{
+ return FIND_NEXT_BIT(addr1[idx] & ~addr2[idx], /* nop */, nbits, start);
+}
+EXPORT_SYMBOL(_find_next_andnot_bit);
+#endif
+
+#ifndef find_next_zero_bit
+unsigned long _find_next_zero_bit(const unsigned long *addr, unsigned long nbits,
+ unsigned long start)
+{
+ return FIND_NEXT_BIT(~addr[idx], /* nop */, nbits, start);
+}
+EXPORT_SYMBOL(_find_next_zero_bit);
#endif
#ifndef find_last_bit
@@ -161,3 +215,38 @@ unsigned long find_next_clump8(unsigned long *clump, const unsigned long *addr,
return offset;
}
EXPORT_SYMBOL(find_next_clump8);
+
+#ifdef __BIG_ENDIAN
+
+#ifndef find_first_zero_bit_le
+/*
+ * Find the first cleared bit in an LE memory region.
+ */
+unsigned long _find_first_zero_bit_le(const unsigned long *addr, unsigned long size)
+{
+ return FIND_FIRST_BIT(~addr[idx], swab, size);
+}
+EXPORT_SYMBOL(_find_first_zero_bit_le);
+
+#endif
+
+#ifndef find_next_zero_bit_le
+unsigned long _find_next_zero_bit_le(const unsigned long *addr,
+ unsigned long size, unsigned long offset)
+{
+ return FIND_NEXT_BIT(~addr[idx], swab, size, offset);
+}
+EXPORT_SYMBOL(_find_next_zero_bit_le);
+#endif
+
+#ifndef find_next_bit_le
+unsigned long _find_next_bit_le(const unsigned long *addr,
+ unsigned long size, unsigned long offset)
+{
+ return FIND_NEXT_BIT(addr[idx], swab, size, offset);
+}
+EXPORT_SYMBOL(_find_next_bit_le);
+
+#endif
+
+#endif /* __BIG_ENDIAN */
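
An illustrative sketch (not part of this patch) of the new find_nth_bit() entry point, which wraps __find_nth_bit() above and returns the position of the n-th (0-based) set bit, or a value of at least size when fewer than n + 1 bits are set:

	/* Sketch: visit every set bit of a bitmap by ordinal position. */
	static void visit_set_bits(const unsigned long *map, unsigned long size)
	{
		unsigned long n, w = bitmap_weight(map, size);

		for (n = 0; n < w; n++) {
			unsigned long pos = find_nth_bit(map, size, n);

			if (WARN_ON(pos >= size))
				break;
			/* ... use pos ... */
		}
	}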
diff --git a/lib/find_bit_benchmark.c b/lib/find_bit_benchmark.c
index db904b57d4b8..10754586403b 100644
--- a/lib/find_bit_benchmark.c
+++ b/lib/find_bit_benchmark.c
@@ -115,6 +115,22 @@ static int __init test_find_last_bit(const void *bitmap, unsigned long len)
return 0;
}
+static int __init test_find_nth_bit(const unsigned long *bitmap, unsigned long len)
+{
+ unsigned long l, n, w = bitmap_weight(bitmap, len);
+ ktime_t time;
+
+ time = ktime_get();
+ for (n = 0; n < w; n++) {
+ l = find_nth_bit(bitmap, len, n);
+ WARN_ON(l >= len);
+ }
+ time = ktime_get() - time;
+ pr_err("find_nth_bit: %18llu ns, %6ld iterations\n", time, w);
+
+ return 0;
+}
+
static int __init test_find_next_and_bit(const void *bitmap,
const void *bitmap2, unsigned long len)
{
@@ -142,6 +158,7 @@ static int __init find_bit_test(void)
test_find_next_bit(bitmap, BITMAP_LEN);
test_find_next_zero_bit(bitmap, BITMAP_LEN);
test_find_last_bit(bitmap, BITMAP_LEN);
+ test_find_nth_bit(bitmap, BITMAP_LEN / 10);
/*
* test_find_first_bit() may take some time, so
@@ -164,6 +181,7 @@ static int __init find_bit_test(void)
test_find_next_bit(bitmap, BITMAP_LEN);
test_find_next_zero_bit(bitmap, BITMAP_LEN);
test_find_last_bit(bitmap, BITMAP_LEN);
+ test_find_nth_bit(bitmap, BITMAP_LEN);
test_find_first_bit(bitmap, BITMAP_LEN);
test_find_first_and_bit(bitmap, bitmap2, BITMAP_LEN);
test_find_next_and_bit(bitmap, bitmap2, BITMAP_LEN);
diff --git a/lib/flex_proportions.c b/lib/flex_proportions.c
index 05cccbcf1661..83332fefa6f4 100644
--- a/lib/flex_proportions.c
+++ b/lib/flex_proportions.c
@@ -70,6 +70,7 @@ bool fprop_new_period(struct fprop_global *p, int periods)
*/
if (events <= 1)
return false;
+ preempt_disable_nested();
write_seqcount_begin(&p->sequence);
if (periods < 64)
events -= events >> periods;
@@ -77,6 +78,7 @@ bool fprop_new_period(struct fprop_global *p, int periods)
percpu_counter_add(&p->events, -events);
p->period += periods;
write_seqcount_end(&p->sequence);
+ preempt_enable_nested();
return true;
}
diff --git a/lib/fortify_kunit.c b/lib/fortify_kunit.c
new file mode 100644
index 000000000000..409af07f340a
--- /dev/null
+++ b/lib/fortify_kunit.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Runtime test cases for CONFIG_FORTIFY_SOURCE that aren't expected to
+ * Oops the kernel on success. (For those, see drivers/misc/lkdtm/fortify.c)
+ *
+ * For corner cases with UBSAN, try testing with:
+ *
+ * ./tools/testing/kunit/kunit.py run --arch=x86_64 \
+ * --kconfig_add CONFIG_FORTIFY_SOURCE=y \
+ * --kconfig_add CONFIG_UBSAN=y \
+ * --kconfig_add CONFIG_UBSAN_TRAP=y \
+ * --kconfig_add CONFIG_UBSAN_BOUNDS=y \
+ * --kconfig_add CONFIG_UBSAN_LOCAL_BOUNDS=y \
+ * --make_options LLVM=1 fortify
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <kunit/test.h>
+#include <linux/string.h>
+
+static const char array_of_10[] = "this is 10";
+static const char *ptr_of_11 = "this is 11!";
+static char array_unknown[] = "compiler thinks I might change";
+
+static void known_sizes_test(struct kunit *test)
+{
+ KUNIT_EXPECT_EQ(test, __compiletime_strlen("88888888"), 8);
+ KUNIT_EXPECT_EQ(test, __compiletime_strlen(array_of_10), 10);
+ KUNIT_EXPECT_EQ(test, __compiletime_strlen(ptr_of_11), 11);
+
+ KUNIT_EXPECT_EQ(test, __compiletime_strlen(array_unknown), SIZE_MAX);
+ /* Externally defined and dynamically sized string pointer: */
+ KUNIT_EXPECT_EQ(test, __compiletime_strlen(test->name), SIZE_MAX);
+}
+
+/* This is volatile so the optimizer can't perform DCE below. */
+static volatile int pick;
+
+/* Not inline to keep optimizer from figuring out which string we want. */
+static noinline size_t want_minus_one(int pick)
+{
+ const char *str;
+
+ switch (pick) {
+ case 1:
+ str = "4444";
+ break;
+ case 2:
+ str = "333";
+ break;
+ default:
+ str = "1";
+ break;
+ }
+ return __compiletime_strlen(str);
+}
+
+static void control_flow_split_test(struct kunit *test)
+{
+ KUNIT_EXPECT_EQ(test, want_minus_one(pick), SIZE_MAX);
+}
+
+static struct kunit_case fortify_test_cases[] = {
+ KUNIT_CASE(known_sizes_test),
+ KUNIT_CASE(control_flow_split_test),
+ {}
+};
+
+static struct kunit_suite fortify_test_suite = {
+ .name = "fortify",
+ .test_cases = fortify_test_cases,
+};
+
+kunit_test_suite(fortify_test_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/is_signed_type_kunit.c b/lib/is_signed_type_kunit.c
new file mode 100644
index 000000000000..207207522925
--- /dev/null
+++ b/lib/is_signed_type_kunit.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * ./tools/testing/kunit/kunit.py run is_signed_type [--raw_output]
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <kunit/test.h>
+#include <linux/compiler.h>
+
+enum unsigned_enum {
+ constant_a = 3,
+};
+
+enum signed_enum {
+ constant_b = -1,
+ constant_c = 2,
+};
+
+static void is_signed_type_test(struct kunit *test)
+{
+ KUNIT_EXPECT_EQ(test, is_signed_type(bool), false);
+ KUNIT_EXPECT_EQ(test, is_signed_type(signed char), true);
+ KUNIT_EXPECT_EQ(test, is_signed_type(unsigned char), false);
+#ifdef __CHAR_UNSIGNED__
+ KUNIT_EXPECT_EQ(test, is_signed_type(char), false);
+#else
+ KUNIT_EXPECT_EQ(test, is_signed_type(char), true);
+#endif
+ KUNIT_EXPECT_EQ(test, is_signed_type(int), true);
+ KUNIT_EXPECT_EQ(test, is_signed_type(unsigned int), false);
+ KUNIT_EXPECT_EQ(test, is_signed_type(long), true);
+ KUNIT_EXPECT_EQ(test, is_signed_type(unsigned long), false);
+ KUNIT_EXPECT_EQ(test, is_signed_type(long long), true);
+ KUNIT_EXPECT_EQ(test, is_signed_type(unsigned long long), false);
+ KUNIT_EXPECT_EQ(test, is_signed_type(enum unsigned_enum), false);
+ KUNIT_EXPECT_EQ(test, is_signed_type(enum signed_enum), true);
+ KUNIT_EXPECT_EQ(test, is_signed_type(void *), false);
+ KUNIT_EXPECT_EQ(test, is_signed_type(const char *), false);
+}
+
+static struct kunit_case is_signed_type_test_cases[] = {
+ KUNIT_CASE(is_signed_type_test),
+ {}
+};
+
+static struct kunit_suite is_signed_type_test_suite = {
+ .name = "is_signed_type",
+ .test_cases = is_signed_type_test_cases,
+};
+
+kunit_test_suite(is_signed_type_test_suite);
+
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/lib/kunit/Kconfig b/lib/kunit/Kconfig
index 0b5dfb001bac..626719b95bad 100644
--- a/lib/kunit/Kconfig
+++ b/lib/kunit/Kconfig
@@ -59,4 +59,15 @@ config KUNIT_ALL_TESTS
If unsure, say N.
+config KUNIT_DEFAULT_ENABLED
+ bool "Default value of kunit.enable"
+ default y
+ help
+ Sets the default value of kunit.enable. If set to N then KUnit
+ tests will not execute unless kunit.enable=1 is passed to the
+ kernel command line.
+
+ In most cases this should be left as Y. Only if additional opt-in
+ behavior is needed should this be set to N.
+
endif # KUNIT
diff --git a/lib/kunit/executor.c b/lib/kunit/executor.c
index 5e223327196a..9bbc422c284b 100644
--- a/lib/kunit/executor.c
+++ b/lib/kunit/executor.c
@@ -190,6 +190,10 @@ int kunit_run_all_tests(void)
{
struct suite_set suite_set = {__kunit_suites_start, __kunit_suites_end};
int err = 0;
+ if (!kunit_enabled()) {
+ pr_info("kunit: disabled\n");
+ goto out;
+ }
if (filter_glob_param) {
suite_set = kunit_filter_suites(&suite_set, filter_glob_param, &err);
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
index b73d5bb5c473..1e54373309a4 100644
--- a/lib/kunit/test.c
+++ b/lib/kunit/test.c
@@ -55,6 +55,17 @@ EXPORT_SYMBOL_GPL(__kunit_fail_current_test);
#endif
/*
+ * Enable KUnit tests to run.
+ */
+#ifdef CONFIG_KUNIT_DEFAULT_ENABLED
+static bool enable_param = true;
+#else
+static bool enable_param;
+#endif
+module_param_named(enable, enable_param, bool, 0);
+MODULE_PARM_DESC(enable, "Enable KUnit tests");
+
+/*
* KUnit statistic mode:
* 0 - disabled
* 1 - only when there is more than one subtest
@@ -586,10 +597,20 @@ static void kunit_init_suite(struct kunit_suite *suite)
suite->suite_init_err = 0;
}
+bool kunit_enabled(void)
+{
+ return enable_param;
+}
+
int __kunit_test_suites_init(struct kunit_suite * const * const suites, int num_suites)
{
unsigned int i;
+ if (!kunit_enabled() && num_suites > 0) {
+ pr_info("kunit: disabled\n");
+ return 0;
+ }
+
for (i = 0; i < num_suites; i++) {
kunit_init_suite(suites[i]);
kunit_run_tests(suites[i]);
@@ -607,6 +628,9 @@ void __kunit_test_suites_exit(struct kunit_suite **suites, int num_suites)
{
unsigned int i;
+ if (!kunit_enabled())
+ return;
+
for (i = 0; i < num_suites; i++)
kunit_exit_suite(suites[i]);
diff --git a/lib/memcpy_kunit.c b/lib/memcpy_kunit.c
index 62f8ffcbbaa3..2b5cc70ac53f 100644
--- a/lib/memcpy_kunit.c
+++ b/lib/memcpy_kunit.c
@@ -29,9 +29,8 @@ struct some_bytes {
};
#define check(instance, v) do { \
- int i; \
BUILD_BUG_ON(sizeof(instance.data) != 32); \
- for (i = 0; i < sizeof(instance.data); i++) { \
+ for (size_t i = 0; i < sizeof(instance.data); i++) { \
KUNIT_ASSERT_EQ_MSG(test, instance.data[i], v, \
"line %d: '%s' not initialized to 0x%02x @ %d (saw 0x%02x)\n", \
__LINE__, #instance, v, i, instance.data[i]); \
@@ -39,9 +38,8 @@ struct some_bytes {
} while (0)
#define compare(name, one, two) do { \
- int i; \
BUILD_BUG_ON(sizeof(one) != sizeof(two)); \
- for (i = 0; i < sizeof(one); i++) { \
+ for (size_t i = 0; i < sizeof(one); i++) { \
KUNIT_EXPECT_EQ_MSG(test, one.data[i], two.data[i], \
"line %d: %s.data[%d] (0x%02x) != %s.data[%d] (0x%02x)\n", \
__LINE__, #one, i, one.data[i], #two, i, two.data[i]); \
@@ -272,10 +270,63 @@ static void memset_test(struct kunit *test)
#undef TEST_OP
}
+static void strtomem_test(struct kunit *test)
+{
+ static const char input[sizeof(unsigned long)] = "hi";
+ static const char truncate[] = "this is too long";
+ struct {
+ unsigned long canary1;
+ unsigned char output[sizeof(unsigned long)] __nonstring;
+ unsigned long canary2;
+ } wrap;
+
+ memset(&wrap, 0xFF, sizeof(wrap));
+ KUNIT_EXPECT_EQ_MSG(test, wrap.canary1, ULONG_MAX,
+ "bad initial canary value");
+ KUNIT_EXPECT_EQ_MSG(test, wrap.canary2, ULONG_MAX,
+ "bad initial canary value");
+
+ /* Check unpadded copy leaves surroundings untouched. */
+ strtomem(wrap.output, input);
+ KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX);
+ KUNIT_EXPECT_EQ(test, wrap.output[0], input[0]);
+ KUNIT_EXPECT_EQ(test, wrap.output[1], input[1]);
+ for (size_t i = 2; i < sizeof(wrap.output); i++)
+ KUNIT_EXPECT_EQ(test, wrap.output[i], 0xFF);
+ KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX);
+
+ /* Check truncated copy leaves surroundings untouched. */
+ memset(&wrap, 0xFF, sizeof(wrap));
+ strtomem(wrap.output, truncate);
+ KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX);
+ for (size_t i = 0; i < sizeof(wrap.output); i++)
+ KUNIT_EXPECT_EQ(test, wrap.output[i], truncate[i]);
+ KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX);
+
+ /* Check padded copy leaves only string padded. */
+ memset(&wrap, 0xFF, sizeof(wrap));
+ strtomem_pad(wrap.output, input, 0xAA);
+ KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX);
+ KUNIT_EXPECT_EQ(test, wrap.output[0], input[0]);
+ KUNIT_EXPECT_EQ(test, wrap.output[1], input[1]);
+ for (size_t i = 2; i < sizeof(wrap.output); i++)
+ KUNIT_EXPECT_EQ(test, wrap.output[i], 0xAA);
+ KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX);
+
+ /* Check truncated padded copy has no padding. */
+ memset(&wrap, 0xFF, sizeof(wrap));
+ strtomem_pad(wrap.output, truncate, 0xAA);
+ KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX);
+ for (size_t i = 0; i < sizeof(wrap.output); i++)
+ KUNIT_EXPECT_EQ(test, wrap.output[i], truncate[i]);
+ KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX);
+}
+
static struct kunit_case memcpy_test_cases[] = {
KUNIT_CASE(memset_test),
KUNIT_CASE(memcpy_test),
KUNIT_CASE(memmove_test),
+ KUNIT_CASE(strtomem_test),
{}
};
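
The strtomem_test above exercises strtomem() and strtomem_pad(), which copy a
NUL-terminated source into a fixed-width, non-NUL-terminated destination. A
minimal usage sketch, with hypothetical names that are not part of this patch:

	/* Assumes the declarations from <linux/string.h>. */
	static void fill_label_example(void)
	{
		struct {
			unsigned char name[8] __nonstring; /* fixed width, no trailing NUL */
		} label;

		/* Copy at most sizeof(label.name) bytes; trailing bytes are untouched. */
		strtomem(label.name, "eth0");

		/* Same copy, but space-pad whatever the source does not cover. */
		strtomem_pad(label.name, "eth0", ' ');
	}
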
diff --git a/lib/nlattr.c b/lib/nlattr.c
index 86029ad5ead4..40f22b177d69 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -159,6 +159,31 @@ void nla_get_range_unsigned(const struct nla_policy *pt,
}
}
+static u64 nla_get_attr_bo(const struct nla_policy *pt,
+ const struct nlattr *nla)
+{
+ switch (pt->type) {
+ case NLA_U16:
+ if (pt->network_byte_order)
+ return ntohs(nla_get_be16(nla));
+
+ return nla_get_u16(nla);
+ case NLA_U32:
+ if (pt->network_byte_order)
+ return ntohl(nla_get_be32(nla));
+
+ return nla_get_u32(nla);
+ case NLA_U64:
+ if (pt->network_byte_order)
+ return be64_to_cpu(nla_get_be64(nla));
+
+ return nla_get_u64(nla);
+ }
+
+ WARN_ON_ONCE(1);
+ return 0;
+}
+
static int nla_validate_range_unsigned(const struct nla_policy *pt,
const struct nlattr *nla,
struct netlink_ext_ack *extack,
@@ -172,12 +197,10 @@ static int nla_validate_range_unsigned(const struct nla_policy *pt,
value = nla_get_u8(nla);
break;
case NLA_U16:
- value = nla_get_u16(nla);
- break;
case NLA_U32:
- value = nla_get_u32(nla);
- break;
case NLA_U64:
+ value = nla_get_attr_bo(pt, nla);
+ break;
case NLA_MSECS:
value = nla_get_u64(nla);
break;
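
With the values now read through nla_get_attr_bo(), unsigned range validation
also covers attributes whose policy is marked as network byte order. A rough
sketch of such a policy entry (the attribute index is hypothetical, and the
initializer only assumes the fields used by nla_validate_range_unsigned()):

	/* Illustrative only: a big-endian u16 attribute, range-checked in
	 * host byte order. */
	static const struct nla_policy demo_policy[] = {
		[1] = { /* hypothetical DEMO_ATTR_PORT */
			.type			= NLA_U16,
			.network_byte_order	= 1,
			.validation_type	= NLA_VALIDATE_RANGE,
			.min			= 1,
			.max			= 1024,
		},
	};
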
diff --git a/lib/once.c b/lib/once.c
index 59149bf3bfb4..2c306f0e891e 100644
--- a/lib/once.c
+++ b/lib/once.c
@@ -66,3 +66,33 @@ void __do_once_done(bool *done, struct static_key_true *once_key,
once_disable_jump(once_key, mod);
}
EXPORT_SYMBOL(__do_once_done);
+
+static DEFINE_MUTEX(once_mutex);
+
+bool __do_once_sleepable_start(bool *done)
+ __acquires(once_mutex)
+{
+ mutex_lock(&once_mutex);
+ if (*done) {
+ mutex_unlock(&once_mutex);
+ /* Keep sparse happy by restoring an even lock count on
+ * this mutex. In case we return here, we don't call into
+ * __do_once_sleepable_done but return early in the DO_ONCE_SLEEPABLE() macro.
+ */
+ __acquire(once_mutex);
+ return false;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL(__do_once_sleepable_start);
+
+void __do_once_sleepable_done(bool *done, struct static_key_true *once_key,
+ struct module *mod)
+ __releases(once_mutex)
+{
+ *done = true;
+ mutex_unlock(&once_mutex);
+ once_disable_jump(once_key, mod);
+}
+EXPORT_SYMBOL(__do_once_sleepable_done);
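
These helpers back a sleepable variant of the DO_ONCE() machinery, serializing
on a mutex rather than requiring atomic context. A minimal caller sketch,
assuming the DO_ONCE_SLEEPABLE() wrapper in <linux/once.h> that pairs with the
helpers above (the functions below are hypothetical):

	static void expensive_setup(void)
	{
		/* One-time initialization that may sleep (allocations, waits). */
	}

	static void demo_path(void)
	{
		/* Runs expensive_setup() at most once; callers must be able to sleep. */
		DO_ONCE_SLEEPABLE(expensive_setup);
	}
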
diff --git a/lib/overflow_kunit.c b/lib/overflow_kunit.c
index 7e3e43679b73..5369634701fa 100644
--- a/lib/overflow_kunit.c
+++ b/lib/overflow_kunit.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Test cases for arithmetic overflow checks. See:
- * https://www.kernel.org/doc/html/latest/dev-tools/kunit/kunit-tool.html#configuring-building-and-running-tests
+ * "Running tests with kunit_tool" at Documentation/dev-tools/kunit/start.rst
* ./tools/testing/kunit/kunit.py run overflow [--raw_output]
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -16,12 +16,15 @@
#include <linux/types.h>
#include <linux/vmalloc.h>
-#define DEFINE_TEST_ARRAY(t) \
- static const struct test_ ## t { \
- t a, b; \
- t sum, diff, prod; \
- bool s_of, d_of, p_of; \
- } t ## _tests[]
+#define DEFINE_TEST_ARRAY_TYPED(t1, t2, t) \
+ static const struct test_ ## t1 ## _ ## t2 ## __ ## t { \
+ t1 a; \
+ t2 b; \
+ t sum, diff, prod; \
+ bool s_of, d_of, p_of; \
+ } t1 ## _ ## t2 ## __ ## t ## _tests[]
+
+#define DEFINE_TEST_ARRAY(t) DEFINE_TEST_ARRAY_TYPED(t, t, t)
DEFINE_TEST_ARRAY(u8) = {
{0, 0, 0, 0, 0, false, false, false},
@@ -222,21 +225,27 @@ DEFINE_TEST_ARRAY(s64) = {
};
#endif
-#define check_one_op(t, fmt, op, sym, a, b, r, of) do { \
- t _r; \
- bool _of; \
- \
- _of = check_ ## op ## _overflow(a, b, &_r); \
- KUNIT_EXPECT_EQ_MSG(test, _of, of, \
+#define check_one_op(t, fmt, op, sym, a, b, r, of) do { \
+ int _a_orig = a, _a_bump = a + 1; \
+ int _b_orig = b, _b_bump = b + 1; \
+ bool _of; \
+ t _r; \
+ \
+ _of = check_ ## op ## _overflow(a, b, &_r); \
+ KUNIT_EXPECT_EQ_MSG(test, _of, of, \
"expected "fmt" "sym" "fmt" to%s overflow (type %s)\n", \
- a, b, of ? "" : " not", #t); \
- KUNIT_EXPECT_EQ_MSG(test, _r, r, \
+ a, b, of ? "" : " not", #t); \
+ KUNIT_EXPECT_EQ_MSG(test, _r, r, \
"expected "fmt" "sym" "fmt" == "fmt", got "fmt" (type %s)\n", \
- a, b, r, _r, #t); \
+ a, b, r, _r, #t); \
+ /* Check for internal macro side-effects. */ \
+ _of = check_ ## op ## _overflow(_a_orig++, _b_orig++, &_r); \
+ KUNIT_EXPECT_EQ_MSG(test, _a_orig, _a_bump, "Unexpected " #op " macro side-effect!\n"); \
+ KUNIT_EXPECT_EQ_MSG(test, _b_orig, _b_bump, "Unexpected " #op " macro side-effect!\n"); \
} while (0)
-#define DEFINE_TEST_FUNC(t, fmt) \
-static void do_test_ ## t(struct kunit *test, const struct test_ ## t *p) \
+#define DEFINE_TEST_FUNC_TYPED(n, t, fmt) \
+static void do_test_ ## n(struct kunit *test, const struct test_ ## n *p) \
{ \
check_one_op(t, fmt, add, "+", p->a, p->b, p->sum, p->s_of); \
check_one_op(t, fmt, add, "+", p->b, p->a, p->sum, p->s_of); \
@@ -245,15 +254,18 @@ static void do_test_ ## t(struct kunit *test, const struct test_ ## t *p) \
check_one_op(t, fmt, mul, "*", p->b, p->a, p->prod, p->p_of); \
} \
\
-static void t ## _overflow_test(struct kunit *test) { \
+static void n ## _overflow_test(struct kunit *test) { \
unsigned i; \
\
- for (i = 0; i < ARRAY_SIZE(t ## _tests); ++i) \
- do_test_ ## t(test, &t ## _tests[i]); \
+ for (i = 0; i < ARRAY_SIZE(n ## _tests); ++i) \
+ do_test_ ## n(test, &n ## _tests[i]); \
kunit_info(test, "%zu %s arithmetic tests finished\n", \
- ARRAY_SIZE(t ## _tests), #t); \
+ ARRAY_SIZE(n ## _tests), #n); \
}
+#define DEFINE_TEST_FUNC(t, fmt) \
+ DEFINE_TEST_FUNC_TYPED(t ## _ ## t ## __ ## t, t, fmt)
+
DEFINE_TEST_FUNC(u8, "%d");
DEFINE_TEST_FUNC(s8, "%d");
DEFINE_TEST_FUNC(u16, "%d");
@@ -265,9 +277,32 @@ DEFINE_TEST_FUNC(u64, "%llu");
DEFINE_TEST_FUNC(s64, "%lld");
#endif
-static void overflow_shift_test(struct kunit *test)
-{
- int count = 0;
+DEFINE_TEST_ARRAY_TYPED(u32, u32, u8) = {
+ {0, 0, 0, 0, 0, false, false, false},
+ {U8_MAX, 2, 1, U8_MAX - 2, U8_MAX - 1, true, false, true},
+ {U8_MAX + 1, 0, 0, 0, 0, true, true, false},
+};
+DEFINE_TEST_FUNC_TYPED(u32_u32__u8, u8, "%d");
+
+DEFINE_TEST_ARRAY_TYPED(u32, u32, int) = {
+ {0, 0, 0, 0, 0, false, false, false},
+ {U32_MAX, 0, -1, -1, 0, true, true, false},
+};
+DEFINE_TEST_FUNC_TYPED(u32_u32__int, int, "%d");
+
+DEFINE_TEST_ARRAY_TYPED(u8, u8, int) = {
+ {0, 0, 0, 0, 0, false, false, false},
+ {U8_MAX, U8_MAX, 2 * U8_MAX, 0, U8_MAX * U8_MAX, false, false, false},
+ {1, 2, 3, -1, 2, false, false, false},
+};
+DEFINE_TEST_FUNC_TYPED(u8_u8__int, int, "%d");
+
+DEFINE_TEST_ARRAY_TYPED(int, int, u8) = {
+ {0, 0, 0, 0, 0, false, false, false},
+ {1, 2, 3, U8_MAX, 2, false, true, false},
+ {-1, 0, U8_MAX, U8_MAX, 0, true, true, false},
+};
+DEFINE_TEST_FUNC_TYPED(int_int__u8, u8, "%d");
/* Args are: value, shift, type, expected result, overflow expected */
#define TEST_ONE_SHIFT(a, s, t, expect, of) do { \
@@ -292,6 +327,10 @@ static void overflow_shift_test(struct kunit *test)
count++; \
} while (0)
+static void shift_sane_test(struct kunit *test)
+{
+ int count = 0;
+
/* Sane shifts. */
TEST_ONE_SHIFT(1, 0, u8, 1 << 0, false);
TEST_ONE_SHIFT(1, 4, u8, 1 << 4, false);
@@ -334,6 +373,13 @@ static void overflow_shift_test(struct kunit *test)
TEST_ONE_SHIFT(0, 30, s32, 0, false);
TEST_ONE_SHIFT(0, 62, s64, 0, false);
+ kunit_info(test, "%d sane shift tests finished\n", count);
+}
+
+static void shift_overflow_test(struct kunit *test)
+{
+ int count = 0;
+
/* Overflow: shifted the bit off the end. */
TEST_ONE_SHIFT(1, 8, u8, 0, true);
TEST_ONE_SHIFT(1, 16, u16, 0, true);
@@ -381,6 +427,13 @@ static void overflow_shift_test(struct kunit *test)
/* 0100000100001000001000000010000001000010000001000100010001001011 */
TEST_ONE_SHIFT(4686030735197619275LL, 2, s64, 0, true);
+ kunit_info(test, "%d overflow shift tests finished\n", count);
+}
+
+static void shift_truncate_test(struct kunit *test)
+{
+ int count = 0;
+
/* Overflow: values larger than destination type. */
TEST_ONE_SHIFT(0x100, 0, u8, 0, true);
TEST_ONE_SHIFT(0xFF, 0, s8, 0, true);
@@ -392,6 +445,33 @@ static void overflow_shift_test(struct kunit *test)
TEST_ONE_SHIFT(0xFFFFFFFFUL, 0, int, 0, true);
TEST_ONE_SHIFT(0xFFFFFFFFFFFFFFFFULL, 0, s64, 0, true);
+ /* Overflow: shifted at or beyond entire type's bit width. */
+ TEST_ONE_SHIFT(0, 8, u8, 0, true);
+ TEST_ONE_SHIFT(0, 9, u8, 0, true);
+ TEST_ONE_SHIFT(0, 8, s8, 0, true);
+ TEST_ONE_SHIFT(0, 9, s8, 0, true);
+ TEST_ONE_SHIFT(0, 16, u16, 0, true);
+ TEST_ONE_SHIFT(0, 17, u16, 0, true);
+ TEST_ONE_SHIFT(0, 16, s16, 0, true);
+ TEST_ONE_SHIFT(0, 17, s16, 0, true);
+ TEST_ONE_SHIFT(0, 32, u32, 0, true);
+ TEST_ONE_SHIFT(0, 33, u32, 0, true);
+ TEST_ONE_SHIFT(0, 32, int, 0, true);
+ TEST_ONE_SHIFT(0, 33, int, 0, true);
+ TEST_ONE_SHIFT(0, 32, s32, 0, true);
+ TEST_ONE_SHIFT(0, 33, s32, 0, true);
+ TEST_ONE_SHIFT(0, 64, u64, 0, true);
+ TEST_ONE_SHIFT(0, 65, u64, 0, true);
+ TEST_ONE_SHIFT(0, 64, s64, 0, true);
+ TEST_ONE_SHIFT(0, 65, s64, 0, true);
+
+ kunit_info(test, "%d truncate shift tests finished\n", count);
+}
+
+static void shift_nonsense_test(struct kunit *test)
+{
+ int count = 0;
+
/* Nonsense: negative initial value. */
TEST_ONE_SHIFT(-1, 0, s8, 0, true);
TEST_ONE_SHIFT(-1, 0, u8, 0, true);
@@ -416,26 +496,6 @@ static void overflow_shift_test(struct kunit *test)
TEST_ONE_SHIFT(0, -30, s64, 0, true);
TEST_ONE_SHIFT(0, -30, u64, 0, true);
- /* Overflow: shifted at or beyond entire type's bit width. */
- TEST_ONE_SHIFT(0, 8, u8, 0, true);
- TEST_ONE_SHIFT(0, 9, u8, 0, true);
- TEST_ONE_SHIFT(0, 8, s8, 0, true);
- TEST_ONE_SHIFT(0, 9, s8, 0, true);
- TEST_ONE_SHIFT(0, 16, u16, 0, true);
- TEST_ONE_SHIFT(0, 17, u16, 0, true);
- TEST_ONE_SHIFT(0, 16, s16, 0, true);
- TEST_ONE_SHIFT(0, 17, s16, 0, true);
- TEST_ONE_SHIFT(0, 32, u32, 0, true);
- TEST_ONE_SHIFT(0, 33, u32, 0, true);
- TEST_ONE_SHIFT(0, 32, int, 0, true);
- TEST_ONE_SHIFT(0, 33, int, 0, true);
- TEST_ONE_SHIFT(0, 32, s32, 0, true);
- TEST_ONE_SHIFT(0, 33, s32, 0, true);
- TEST_ONE_SHIFT(0, 64, u64, 0, true);
- TEST_ONE_SHIFT(0, 65, u64, 0, true);
- TEST_ONE_SHIFT(0, 64, s64, 0, true);
- TEST_ONE_SHIFT(0, 65, s64, 0, true);
-
/*
* Corner case: for unsigned types, we fail when we've shifted
* through the entire width of bits. For signed types, we might
@@ -451,9 +511,9 @@ static void overflow_shift_test(struct kunit *test)
TEST_ONE_SHIFT(0, 31, s32, 0, false);
TEST_ONE_SHIFT(0, 63, s64, 0, false);
- kunit_info(test, "%d shift tests finished\n", count);
-#undef TEST_ONE_SHIFT
+ kunit_info(test, "%d nonsense shift tests finished\n", count);
}
+#undef TEST_ONE_SHIFT
/*
* Deal with the various forms of allocator arguments. See comments above
@@ -649,18 +709,25 @@ static void overflow_size_helpers_test(struct kunit *test)
}
static struct kunit_case overflow_test_cases[] = {
- KUNIT_CASE(u8_overflow_test),
- KUNIT_CASE(s8_overflow_test),
- KUNIT_CASE(u16_overflow_test),
- KUNIT_CASE(s16_overflow_test),
- KUNIT_CASE(u32_overflow_test),
- KUNIT_CASE(s32_overflow_test),
+ KUNIT_CASE(u8_u8__u8_overflow_test),
+ KUNIT_CASE(s8_s8__s8_overflow_test),
+ KUNIT_CASE(u16_u16__u16_overflow_test),
+ KUNIT_CASE(s16_s16__s16_overflow_test),
+ KUNIT_CASE(u32_u32__u32_overflow_test),
+ KUNIT_CASE(s32_s32__s32_overflow_test),
/* Clang 13 and earlier generate unwanted libcalls on 32-bit. */
#if BITS_PER_LONG == 64
- KUNIT_CASE(u64_overflow_test),
- KUNIT_CASE(s64_overflow_test),
+ KUNIT_CASE(u64_u64__u64_overflow_test),
+ KUNIT_CASE(s64_s64__s64_overflow_test),
#endif
- KUNIT_CASE(overflow_shift_test),
+ KUNIT_CASE(u32_u32__u8_overflow_test),
+ KUNIT_CASE(u32_u32__int_overflow_test),
+ KUNIT_CASE(u8_u8__int_overflow_test),
+ KUNIT_CASE(int_int__u8_overflow_test),
+ KUNIT_CASE(shift_sane_test),
+ KUNIT_CASE(shift_overflow_test),
+ KUNIT_CASE(shift_truncate_test),
+ KUNIT_CASE(shift_nonsense_test),
KUNIT_CASE(overflow_allocation_test),
KUNIT_CASE(overflow_size_helpers_test),
{}
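
The typed arrays and split shift tests above all funnel into the generic
check_*_overflow() helpers, which now get explicit coverage for mixed operand
and destination types. A small illustrative sketch of what those cases assert
(the function name is hypothetical):

	#include <linux/overflow.h>
	#include <linux/printk.h>

	static void overflow_demo(void)
	{
		u32 a = 200, b = 100;
		u32 shifted;
		u8 sum;

		/* 200 + 100 == 300 does not fit in a u8, so overflow is reported. */
		if (check_add_overflow(a, b, &sum))
			pr_info("u32 + u32 overflowed the u8 destination\n");

		/* Shifting a bit past the destination width is also an overflow. */
		if (check_shl_overflow(1U, 33, &shifted))
			pr_info("1 << 33 does not fit in a u32\n");
	}
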
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 29eb0484215a..a8108a962dfd 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -533,21 +533,20 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
nr = find_first_zero_bit(&map->word, map_depth);
if (nr + nr_tags <= map_depth) {
atomic_long_t *ptr = (atomic_long_t *) &map->word;
- int map_tags = min_t(int, nr_tags, map_depth);
- unsigned long val, ret;
+ unsigned long val;
- get_mask = ((1UL << map_tags) - 1) << nr;
+ get_mask = ((1UL << nr_tags) - 1) << nr;
+ val = READ_ONCE(map->word);
do {
- val = READ_ONCE(map->word);
if ((val & ~get_mask) != val)
goto next;
- ret = atomic_long_cmpxchg(ptr, val, get_mask | val);
- } while (ret != val);
- get_mask = (get_mask & ~ret) >> nr;
+ } while (!atomic_long_try_cmpxchg(ptr, &val,
+ get_mask | val));
+ get_mask = (get_mask & ~val) >> nr;
if (get_mask) {
*offset = nr + (index << sb->shift);
update_alloc_hint_after_get(sb, depth, hint,
- *offset + map_tags - 1);
+ *offset + nr_tags - 1);
return get_mask;
}
}
@@ -588,7 +587,7 @@ static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
struct sbq_wait_state *ws = &sbq->ws[wake_index];
- if (waitqueue_active(&ws->wait)) {
+ if (waitqueue_active(&ws->wait) && atomic_read(&ws->wait_cnt)) {
if (wake_index != atomic_read(&sbq->wake_index))
atomic_set(&sbq->wake_index, wake_index);
return ws;
@@ -600,50 +599,82 @@ static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
return NULL;
}
-static bool __sbq_wake_up(struct sbitmap_queue *sbq)
+static bool __sbq_wake_up(struct sbitmap_queue *sbq, int *nr)
{
struct sbq_wait_state *ws;
unsigned int wake_batch;
- int wait_cnt;
+ int wait_cnt, cur, sub;
+ bool ret;
+
+ if (*nr <= 0)
+ return false;
ws = sbq_wake_ptr(sbq);
if (!ws)
return false;
- wait_cnt = atomic_dec_return(&ws->wait_cnt);
- if (wait_cnt <= 0) {
- int ret;
-
- wake_batch = READ_ONCE(sbq->wake_batch);
-
+ cur = atomic_read(&ws->wait_cnt);
+ do {
/*
- * Pairs with the memory barrier in sbitmap_queue_resize() to
- * ensure that we see the batch size update before the wait
- * count is reset.
+ * Concurrent callers should call this function again to wake up a
+ * new batch on a different 'ws'.
*/
- smp_mb__before_atomic();
+ if (cur == 0)
+ return true;
+ sub = min(*nr, cur);
+ wait_cnt = cur - sub;
+ } while (!atomic_try_cmpxchg(&ws->wait_cnt, &cur, wait_cnt));
- /*
- * For concurrent callers of this, the one that failed the
- * atomic_cmpxhcg() race should call this function again
- * to wakeup a new batch on a different 'ws'.
- */
- ret = atomic_cmpxchg(&ws->wait_cnt, wait_cnt, wake_batch);
- if (ret == wait_cnt) {
- sbq_index_atomic_inc(&sbq->wake_index);
- wake_up_nr(&ws->wait, wake_batch);
- return false;
- }
+ /*
+ * If we decremented the queue without waiters, retry to avoid lost
+ * wakeups.
+ */
+ if (wait_cnt > 0)
+ return !waitqueue_active(&ws->wait);
- return true;
- }
+ *nr -= sub;
- return false;
+ /*
+ * When wait_cnt == 0, we have to be particularly careful as we are
+ * responsible for resetting wait_cnt regardless of whether we've
+ * actually woken up anybody. But if we didn't wake up anybody, we
+ * still need to retry.
+ */
+ ret = !waitqueue_active(&ws->wait);
+ wake_batch = READ_ONCE(sbq->wake_batch);
+
+ /*
+ * Wake up first in case concurrent callers decrease wait_cnt while
+ * the waitqueue is empty.
+ */
+ wake_up_nr(&ws->wait, wake_batch);
+
+ /*
+ * Pairs with the memory barrier in sbitmap_queue_resize() to
+ * ensure that we see the batch size update before the wait
+ * count is reset.
+ *
+ * Also pairs with the implicit barrier between decrementing wait_cnt
+ * and checking for waitqueue_active() to make sure waitqueue_active()
+ * sees the result of the wakeup if atomic_dec_return() has seen the result
+ * of atomic_set().
+ */
+ smp_mb__before_atomic();
+
+ /*
+ * Increase wake_index before updating wait_cnt, otherwise concurrent
+ * callers can see valid wait_cnt in old waitqueue, which can cause
+ * invalid wakeup on the old waitqueue.
+ */
+ sbq_index_atomic_inc(&sbq->wake_index);
+ atomic_set(&ws->wait_cnt, wake_batch);
+
+ return ret || *nr;
}
-void sbitmap_queue_wake_up(struct sbitmap_queue *sbq)
+void sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr)
{
- while (__sbq_wake_up(sbq))
+ while (__sbq_wake_up(sbq, &nr))
;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);
@@ -683,7 +714,7 @@ void sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, int offset,
atomic_long_andnot(mask, (atomic_long_t *) addr);
smp_mb__after_atomic();
- sbitmap_queue_wake_up(sbq);
+ sbitmap_queue_wake_up(sbq, nr_tags);
sbitmap_update_cpu_hint(&sbq->sb, raw_smp_processor_id(),
tags[nr_tags - 1] - offset);
}
@@ -711,7 +742,7 @@ void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
* waiter. See the comment on waitqueue_active().
*/
smp_mb__after_atomic();
- sbitmap_queue_wake_up(sbq);
+ sbitmap_queue_wake_up(sbq, 1);
sbitmap_update_cpu_hint(&sbq->sb, cpu, nr);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_clear);
diff --git a/lib/sg_pool.c b/lib/sg_pool.c
index a0b1a52cd6f7..9bfe60ca3f37 100644
--- a/lib/sg_pool.c
+++ b/lib/sg_pool.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/mempool.h>
#include <linux/slab.h>
@@ -177,16 +177,4 @@ cleanup_sdb:
return -ENOMEM;
}
-static __exit void sg_pool_exit(void)
-{
- int i;
-
- for (i = 0; i < SG_MEMPOOL_NR; i++) {
- struct sg_pool *sgp = sg_pools + i;
- mempool_destroy(sgp->pool);
- kmem_cache_destroy(sgp->slab);
- }
-}
-
-module_init(sg_pool_init);
-module_exit(sg_pool_exit);
+subsys_initcall(sg_pool_init);
diff --git a/lib/stackinit_kunit.c b/lib/stackinit_kunit.c
index 35c69aa425b2..4591d6cf5e01 100644
--- a/lib/stackinit_kunit.c
+++ b/lib/stackinit_kunit.c
@@ -3,7 +3,7 @@
* Test cases for compiler-based stack variable zeroing via
* -ftrivial-auto-var-init={zero,pattern} or CONFIG_GCC_PLUGIN_STRUCTLEAK*.
* For example, see:
- * https://www.kernel.org/doc/html/latest/dev-tools/kunit/kunit-tool.html#configuring-building-and-running-tests
+ * "Running tests with kunit_tool" at Documentation/dev-tools/kunit/start.rst
* ./tools/testing/kunit/kunit.py run stackinit [--raw_output] \
* --make_option LLVM=1 \
* --kconfig_add CONFIG_INIT_STACK_ALL_ZERO=y
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
index 5ed3beb066e6..230020a2e076 100644
--- a/lib/string_helpers.c
+++ b/lib/string_helpers.c
@@ -131,6 +131,50 @@ void string_get_size(u64 size, u64 blk_size, const enum string_size_units units,
}
EXPORT_SYMBOL(string_get_size);
+/**
+ * parse_int_array_user - Split string into a sequence of integers
+ * @from: The user space buffer to read from
+ * @count: The maximum number of bytes to read
+ * @array: Returned pointer to sequence of integers
+ *
+ * On success @array is allocated and initialized with a sequence of
+ * integers extracted from @from, preceded by an additional leading
+ * element that holds the number of parsed integers.
+ *
+ * Caller takes responsibility for freeing @array when it is no longer
+ * needed.
+ */
+int parse_int_array_user(const char __user *from, size_t count, int **array)
+{
+ int *ints, nints;
+ char *buf;
+ int ret = 0;
+
+ buf = memdup_user_nul(from, count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ get_options(buf, 0, &nints);
+ if (!nints) {
+ ret = -ENOENT;
+ goto free_buf;
+ }
+
+ ints = kcalloc(nints + 1, sizeof(*ints), GFP_KERNEL);
+ if (!ints) {
+ ret = -ENOMEM;
+ goto free_buf;
+ }
+
+ get_options(buf, nints + 1, ints);
+ *array = ints;
+
+free_buf:
+ kfree(buf);
+ return ret;
+}
+EXPORT_SYMBOL(parse_int_array_user);
+
static bool unescape_space(char **src, char **dst)
{
char *p = *dst, *q = *src;
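
parse_int_array_user() is meant for write handlers that accept a
comma-separated integer list (as understood by get_options()) from user space.
A rough sketch of such a handler, with hypothetical names:

	/* Assumes the declarations from <linux/fs.h>, <linux/slab.h> and
	 * the header that exports parse_int_array_user(). */
	static ssize_t demo_write(struct file *file, const char __user *ubuf,
				  size_t count, loff_t *ppos)
	{
		int *vals, i, ret;

		ret = parse_int_array_user(ubuf, count, &vals);
		if (ret < 0)
			return ret;

		/* vals[0] holds how many integers follow it. */
		for (i = 1; i <= vals[0]; i++)
			pr_info("value[%d] = %d\n", i, vals[i]);

		kfree(vals);
		return count;
	}
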
diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
index 98754ff9fe68..a8005ad3bd58 100644
--- a/lib/test_bitmap.c
+++ b/lib/test_bitmap.c
@@ -16,6 +16,8 @@
#include "../tools/testing/selftests/kselftest_module.h"
+#define EXP1_IN_BITS (sizeof(exp1) * 8)
+
KSTM_MODULE_GLOBALS();
static char pbl_buffer[PAGE_SIZE] __initdata;
@@ -219,6 +221,47 @@ static void __init test_zero_clear(void)
expect_eq_pbl("", bmap, 1024);
}
+static void __init test_find_nth_bit(void)
+{
+ unsigned long b, bit, cnt = 0;
+ DECLARE_BITMAP(bmap, 64 * 3);
+
+ bitmap_zero(bmap, 64 * 3);
+ __set_bit(10, bmap);
+ __set_bit(20, bmap);
+ __set_bit(30, bmap);
+ __set_bit(40, bmap);
+ __set_bit(50, bmap);
+ __set_bit(60, bmap);
+ __set_bit(80, bmap);
+ __set_bit(123, bmap);
+
+ expect_eq_uint(10, find_nth_bit(bmap, 64 * 3, 0));
+ expect_eq_uint(20, find_nth_bit(bmap, 64 * 3, 1));
+ expect_eq_uint(30, find_nth_bit(bmap, 64 * 3, 2));
+ expect_eq_uint(40, find_nth_bit(bmap, 64 * 3, 3));
+ expect_eq_uint(50, find_nth_bit(bmap, 64 * 3, 4));
+ expect_eq_uint(60, find_nth_bit(bmap, 64 * 3, 5));
+ expect_eq_uint(80, find_nth_bit(bmap, 64 * 3, 6));
+ expect_eq_uint(123, find_nth_bit(bmap, 64 * 3, 7));
+ expect_eq_uint(64 * 3, find_nth_bit(bmap, 64 * 3, 8));
+
+ expect_eq_uint(10, find_nth_bit(bmap, 64 * 3 - 1, 0));
+ expect_eq_uint(20, find_nth_bit(bmap, 64 * 3 - 1, 1));
+ expect_eq_uint(30, find_nth_bit(bmap, 64 * 3 - 1, 2));
+ expect_eq_uint(40, find_nth_bit(bmap, 64 * 3 - 1, 3));
+ expect_eq_uint(50, find_nth_bit(bmap, 64 * 3 - 1, 4));
+ expect_eq_uint(60, find_nth_bit(bmap, 64 * 3 - 1, 5));
+ expect_eq_uint(80, find_nth_bit(bmap, 64 * 3 - 1, 6));
+ expect_eq_uint(123, find_nth_bit(bmap, 64 * 3 - 1, 7));
+ expect_eq_uint(64 * 3 - 1, find_nth_bit(bmap, 64 * 3 - 1, 8));
+
+ for_each_set_bit(bit, exp1, EXP1_IN_BITS) {
+ b = find_nth_bit(exp1, EXP1_IN_BITS, cnt++);
+ expect_eq_uint(b, bit);
+ }
+}
+
static void __init test_fill_set(void)
{
DECLARE_BITMAP(bmap, 1024);
@@ -557,8 +600,6 @@ static void __init test_bitmap_parse(void)
}
}
-#define EXP1_IN_BITS (sizeof(exp1) * 8)
-
static void __init test_bitmap_arr32(void)
{
unsigned int nbits, next_bit;
@@ -685,6 +726,239 @@ static void __init test_for_each_set_clump8(void)
expect_eq_clump8(start, CLUMP_EXP_NUMBITS, clump_exp, &clump);
}
+static void __init test_for_each_set_bit_wrap(void)
+{
+ DECLARE_BITMAP(orig, 500);
+ DECLARE_BITMAP(copy, 500);
+ unsigned int wr, bit;
+
+ bitmap_zero(orig, 500);
+
+ /* Set individual bits */
+ for (bit = 0; bit < 500; bit += 10)
+ bitmap_set(orig, bit, 1);
+
+ /* Set range of bits */
+ bitmap_set(orig, 100, 50);
+
+ for (wr = 0; wr < 500; wr++) {
+ bitmap_zero(copy, 500);
+
+ for_each_set_bit_wrap(bit, orig, 500, wr)
+ bitmap_set(copy, bit, 1);
+
+ expect_eq_bitmap(orig, copy, 500);
+ }
+}
+
+static void __init test_for_each_set_bit(void)
+{
+ DECLARE_BITMAP(orig, 500);
+ DECLARE_BITMAP(copy, 500);
+ unsigned int bit;
+
+ bitmap_zero(orig, 500);
+ bitmap_zero(copy, 500);
+
+ /* Set individual bits */
+ for (bit = 0; bit < 500; bit += 10)
+ bitmap_set(orig, bit, 1);
+
+ /* Set range of bits */
+ bitmap_set(orig, 100, 50);
+
+ for_each_set_bit(bit, orig, 500)
+ bitmap_set(copy, bit, 1);
+
+ expect_eq_bitmap(orig, copy, 500);
+}
+
+static void __init test_for_each_set_bit_from(void)
+{
+ DECLARE_BITMAP(orig, 500);
+ DECLARE_BITMAP(copy, 500);
+ unsigned int wr, bit;
+
+ bitmap_zero(orig, 500);
+
+ /* Set individual bits */
+ for (bit = 0; bit < 500; bit += 10)
+ bitmap_set(orig, bit, 1);
+
+ /* Set range of bits */
+ bitmap_set(orig, 100, 50);
+
+ for (wr = 0; wr < 500; wr++) {
+ DECLARE_BITMAP(tmp, 500);
+
+ bitmap_zero(copy, 500);
+ bit = wr;
+
+ for_each_set_bit_from(bit, orig, 500)
+ bitmap_set(copy, bit, 1);
+
+ bitmap_copy(tmp, orig, 500);
+ bitmap_clear(tmp, 0, wr);
+ expect_eq_bitmap(tmp, copy, 500);
+ }
+}
+
+static void __init test_for_each_clear_bit(void)
+{
+ DECLARE_BITMAP(orig, 500);
+ DECLARE_BITMAP(copy, 500);
+ unsigned int bit;
+
+ bitmap_fill(orig, 500);
+ bitmap_fill(copy, 500);
+
+ /* Clear individual bits */
+ for (bit = 0; bit < 500; bit += 10)
+ bitmap_clear(orig, bit, 1);
+
+ /* Clear range of bits */
+ bitmap_clear(orig, 100, 50);
+
+ for_each_clear_bit(bit, orig, 500)
+ bitmap_clear(copy, bit, 1);
+
+ expect_eq_bitmap(orig, copy, 500);
+}
+
+static void __init test_for_each_clear_bit_from(void)
+{
+ DECLARE_BITMAP(orig, 500);
+ DECLARE_BITMAP(copy, 500);
+ unsigned int wr, bit;
+
+ bitmap_fill(orig, 500);
+
+ /* Clear individual bits */
+ for (bit = 0; bit < 500; bit += 10)
+ bitmap_clear(orig, bit, 1);
+
+ /* Clear range of bits */
+ bitmap_clear(orig, 100, 50);
+
+ for (wr = 0; wr < 500; wr++) {
+ DECLARE_BITMAP(tmp, 500);
+
+ bitmap_fill(copy, 500);
+ bit = wr;
+
+ for_each_clear_bit_from(bit, orig, 500)
+ bitmap_clear(copy, bit, 1);
+
+ bitmap_copy(tmp, orig, 500);
+ bitmap_set(tmp, 0, wr);
+ expect_eq_bitmap(tmp, copy, 500);
+ }
+}
+
+static void __init test_for_each_set_bitrange(void)
+{
+ DECLARE_BITMAP(orig, 500);
+ DECLARE_BITMAP(copy, 500);
+ unsigned int s, e;
+
+ bitmap_zero(orig, 500);
+ bitmap_zero(copy, 500);
+
+ /* Set individual bits */
+ for (s = 0; s < 500; s += 10)
+ bitmap_set(orig, s, 1);
+
+ /* Set range of bits */
+ bitmap_set(orig, 100, 50);
+
+ for_each_set_bitrange(s, e, orig, 500)
+ bitmap_set(copy, s, e-s);
+
+ expect_eq_bitmap(orig, copy, 500);
+}
+
+static void __init test_for_each_clear_bitrange(void)
+{
+ DECLARE_BITMAP(orig, 500);
+ DECLARE_BITMAP(copy, 500);
+ unsigned int s, e;
+
+ bitmap_fill(orig, 500);
+ bitmap_fill(copy, 500);
+
+ /* Clear individual bits */
+ for (s = 0; s < 500; s += 10)
+ bitmap_clear(orig, s, 1);
+
+ /* Clear range of bits */
+ bitmap_clear(orig, 100, 50);
+
+ for_each_clear_bitrange(s, e, orig, 500)
+ bitmap_clear(copy, s, e-s);
+
+ expect_eq_bitmap(orig, copy, 500);
+}
+
+static void __init test_for_each_set_bitrange_from(void)
+{
+ DECLARE_BITMAP(orig, 500);
+ DECLARE_BITMAP(copy, 500);
+ unsigned int wr, s, e;
+
+ bitmap_zero(orig, 500);
+
+ /* Set individual bits */
+ for (s = 0; s < 500; s += 10)
+ bitmap_set(orig, s, 1);
+
+ /* Set range of bits */
+ bitmap_set(orig, 100, 50);
+
+ for (wr = 0; wr < 500; wr++) {
+ DECLARE_BITMAP(tmp, 500);
+
+ bitmap_zero(copy, 500);
+ s = wr;
+
+ for_each_set_bitrange_from(s, e, orig, 500)
+ bitmap_set(copy, s, e - s);
+
+ bitmap_copy(tmp, orig, 500);
+ bitmap_clear(tmp, 0, wr);
+ expect_eq_bitmap(tmp, copy, 500);
+ }
+}
+
+static void __init test_for_each_clear_bitrange_from(void)
+{
+ DECLARE_BITMAP(orig, 500);
+ DECLARE_BITMAP(copy, 500);
+ unsigned int wr, s, e;
+
+ bitmap_fill(orig, 500);
+
+ /* Clear individual bits */
+ for (s = 0; s < 500; s += 10)
+ bitmap_clear(orig, s, 1);
+
+ /* Set range of bits */
+ bitmap_set(orig, 100, 50);
+
+ for (wr = 0; wr < 500; wr++) {
+ DECLARE_BITMAP(tmp, 500);
+
+ bitmap_fill(copy, 500);
+ s = wr;
+
+ for_each_clear_bitrange_from(s, e, orig, 500)
+ bitmap_clear(copy, s, e - s);
+
+ bitmap_copy(tmp, orig, 500);
+ bitmap_set(tmp, 0, wr);
+ expect_eq_bitmap(tmp, copy, 500);
+ }
+}
+
struct test_bitmap_cut {
unsigned int first;
unsigned int cut;
@@ -948,10 +1222,21 @@ static void __init selftest(void)
test_bitmap_parselist();
test_bitmap_printlist();
test_mem_optimisations();
- test_for_each_set_clump8();
test_bitmap_cut();
test_bitmap_print_buf();
test_bitmap_const_eval();
+
+ test_find_nth_bit();
+ test_for_each_set_bit();
+ test_for_each_set_bit_from();
+ test_for_each_clear_bit();
+ test_for_each_clear_bit_from();
+ test_for_each_set_bitrange();
+ test_for_each_clear_bitrange();
+ test_for_each_set_bitrange_from();
+ test_for_each_clear_bitrange_from();
+ test_for_each_set_clump8();
+ test_for_each_set_bit_wrap();
}
KSTM_MODULE_LOADERS(test_bitmap);
diff --git a/lib/test_dynamic_debug.c b/lib/test_dynamic_debug.c
new file mode 100644
index 000000000000..8dd250ad022b
--- /dev/null
+++ b/lib/test_dynamic_debug.c
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Kernel module for testing dynamic_debug
+ *
+ * Authors:
+ * Jim Cromie <jim.cromie@gmail.com>
+ */
+
+#define pr_fmt(fmt) "test_dd: " fmt
+
+#include <linux/module.h>
+
+/* run tests by reading or writing sysfs node: do_prints */
+
+static void do_prints(void); /* device under test */
+static int param_set_do_prints(const char *instr, const struct kernel_param *kp)
+{
+ do_prints();
+ return 0;
+}
+static int param_get_do_prints(char *buffer, const struct kernel_param *kp)
+{
+ do_prints();
+ return scnprintf(buffer, PAGE_SIZE, "did do_prints\n");
+}
+static const struct kernel_param_ops param_ops_do_prints = {
+ .set = param_set_do_prints,
+ .get = param_get_do_prints,
+};
+module_param_cb(do_prints, &param_ops_do_prints, NULL, 0600);
+
+/*
+ * Using the CLASSMAP API:
+ * - classmaps must have a corresponding enum
+ * - enum symbols must match/correlate with the class-name strings in the map
+ * - base must equal the enum's first value
+ * - multiple maps must set their bases to share the 0-30 class_id space !!
+ *   (build-bug-on tips welcome)
+ * Additionally, here:
+ * - tie together sysname, mapname, bitsname, flagsname
+ */
+#define DD_SYS_WRAP(_model, _flags) \
+ static unsigned long bits_##_model; \
+ static struct ddebug_class_param _flags##_model = { \
+ .bits = &bits_##_model, \
+ .flags = #_flags, \
+ .map = &map_##_model, \
+ }; \
+ module_param_cb(_flags##_##_model, &param_ops_dyndbg_classes, &_flags##_model, 0600)
+
+/* numeric input, independent bits */
+enum cat_disjoint_bits {
+ D2_CORE = 0,
+ D2_DRIVER,
+ D2_KMS,
+ D2_PRIME,
+ D2_ATOMIC,
+ D2_VBL,
+ D2_STATE,
+ D2_LEASE,
+ D2_DP,
+ D2_DRMRES };
+DECLARE_DYNDBG_CLASSMAP(map_disjoint_bits, DD_CLASS_TYPE_DISJOINT_BITS, 0,
+ "D2_CORE",
+ "D2_DRIVER",
+ "D2_KMS",
+ "D2_PRIME",
+ "D2_ATOMIC",
+ "D2_VBL",
+ "D2_STATE",
+ "D2_LEASE",
+ "D2_DP",
+ "D2_DRMRES");
+DD_SYS_WRAP(disjoint_bits, p);
+DD_SYS_WRAP(disjoint_bits, T);
+
+/* symbolic input, independent bits */
+enum cat_disjoint_names { LOW = 11, MID, HI };
+DECLARE_DYNDBG_CLASSMAP(map_disjoint_names, DD_CLASS_TYPE_DISJOINT_NAMES, 10,
+ "LOW", "MID", "HI");
+DD_SYS_WRAP(disjoint_names, p);
+DD_SYS_WRAP(disjoint_names, T);
+
+/* numeric verbosity, V2 > V1 related */
+enum cat_level_num { V0 = 14, V1, V2, V3, V4, V5, V6, V7 };
+DECLARE_DYNDBG_CLASSMAP(map_level_num, DD_CLASS_TYPE_LEVEL_NUM, 14,
+ "V0", "V1", "V2", "V3", "V4", "V5", "V6", "V7");
+DD_SYS_WRAP(level_num, p);
+DD_SYS_WRAP(level_num, T);
+
+/* symbolic verbosity */
+enum cat_level_names { L0 = 22, L1, L2, L3, L4, L5, L6, L7 };
+DECLARE_DYNDBG_CLASSMAP(map_level_names, DD_CLASS_TYPE_LEVEL_NAMES, 22,
+ "L0", "L1", "L2", "L3", "L4", "L5", "L6", "L7");
+DD_SYS_WRAP(level_names, p);
+DD_SYS_WRAP(level_names, T);
+
+/* stand-in for all pr_debug etc */
+#define prdbg(SYM) __pr_debug_cls(SYM, #SYM " msg\n")
+
+static void do_cats(void)
+{
+ pr_debug("doing categories\n");
+
+ prdbg(LOW);
+ prdbg(MID);
+ prdbg(HI);
+
+ prdbg(D2_CORE);
+ prdbg(D2_DRIVER);
+ prdbg(D2_KMS);
+ prdbg(D2_PRIME);
+ prdbg(D2_ATOMIC);
+ prdbg(D2_VBL);
+ prdbg(D2_STATE);
+ prdbg(D2_LEASE);
+ prdbg(D2_DP);
+ prdbg(D2_DRMRES);
+}
+
+static void do_levels(void)
+{
+ pr_debug("doing levels\n");
+
+ prdbg(V1);
+ prdbg(V2);
+ prdbg(V3);
+ prdbg(V4);
+ prdbg(V5);
+ prdbg(V6);
+ prdbg(V7);
+
+ prdbg(L1);
+ prdbg(L2);
+ prdbg(L3);
+ prdbg(L4);
+ prdbg(L5);
+ prdbg(L6);
+ prdbg(L7);
+}
+
+static void do_prints(void)
+{
+ do_cats();
+ do_levels();
+}
+
+static int __init test_dynamic_debug_init(void)
+{
+ pr_debug("init start\n");
+ do_prints();
+ pr_debug("init done\n");
+ return 0;
+}
+
+static void __exit test_dynamic_debug_exit(void)
+{
+ pr_debug("exited\n");
+}
+
+module_init(test_dynamic_debug_init);
+module_exit(test_dynamic_debug_exit);
+
+MODULE_AUTHOR("Jim Cromie <jim.cromie@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 3c1853a9d1c0..24f37bab8bc1 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -750,37 +750,42 @@ static int __init debug_boot_weak_hash_enable(char *str)
}
early_param("debug_boot_weak_hash", debug_boot_weak_hash_enable);
-static DEFINE_STATIC_KEY_FALSE(filled_random_ptr_key);
+static bool filled_random_ptr_key __read_mostly;
+static siphash_key_t ptr_key __read_mostly;
+static void fill_ptr_key_workfn(struct work_struct *work);
+static DECLARE_DELAYED_WORK(fill_ptr_key_work, fill_ptr_key_workfn);
-static void enable_ptr_key_workfn(struct work_struct *work)
+static void fill_ptr_key_workfn(struct work_struct *work)
{
- static_branch_enable(&filled_random_ptr_key);
+ if (!rng_is_initialized()) {
+ queue_delayed_work(system_unbound_wq, &fill_ptr_key_work, HZ * 2);
+ return;
+ }
+
+ get_random_bytes(&ptr_key, sizeof(ptr_key));
+
+ /* Pairs with smp_rmb() before reading ptr_key. */
+ smp_wmb();
+ WRITE_ONCE(filled_random_ptr_key, true);
+}
+
+static int __init vsprintf_init_hashval(void)
+{
+ fill_ptr_key_workfn(NULL);
+ return 0;
}
+subsys_initcall(vsprintf_init_hashval);
/* Maps a pointer to a 32 bit unique identifier. */
static inline int __ptr_to_hashval(const void *ptr, unsigned long *hashval_out)
{
- static siphash_key_t ptr_key __read_mostly;
unsigned long hashval;
- if (!static_branch_likely(&filled_random_ptr_key)) {
- static bool filled = false;
- static DEFINE_SPINLOCK(filling);
- static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn);
- unsigned long flags;
-
- if (!system_unbound_wq || !rng_is_initialized() ||
- !spin_trylock_irqsave(&filling, flags))
- return -EAGAIN;
-
- if (!filled) {
- get_random_bytes(&ptr_key, sizeof(ptr_key));
- queue_work(system_unbound_wq, &enable_ptr_key_work);
- filled = true;
- }
- spin_unlock_irqrestore(&filling, flags);
- }
+ if (!READ_ONCE(filled_random_ptr_key))
+ return -EBUSY;
+ /* Pairs with smp_wmb() after writing ptr_key. */
+ smp_rmb();
#ifdef CONFIG_64BIT
hashval = (unsigned long)siphash_1u64((u64)ptr, &ptr_key);
@@ -1189,7 +1194,7 @@ char *hex_string(char *buf, char *end, u8 *addr, struct printf_spec spec,
}
static noinline_for_stack
-char *bitmap_string(char *buf, char *end, unsigned long *bitmap,
+char *bitmap_string(char *buf, char *end, const unsigned long *bitmap,
struct printf_spec spec, const char *fmt)
{
const int CHUNKSZ = 32;
@@ -1233,7 +1238,7 @@ char *bitmap_string(char *buf, char *end, unsigned long *bitmap,
}
static noinline_for_stack
-char *bitmap_list_string(char *buf, char *end, unsigned long *bitmap,
+char *bitmap_list_string(char *buf, char *end, const unsigned long *bitmap,
struct printf_spec spec, const char *fmt)
{
int nr_bits = max_t(int, spec.field_width, 0);
@@ -2246,6 +2251,9 @@ int __init no_hash_pointers_enable(char *str)
}
early_param("no_hash_pointers", no_hash_pointers_enable);
+/* Used for Rust formatting ('%pA'). */
+char *rust_fmt_argument(char *buf, char *end, void *ptr);
+
/*
* Show a '%p' thing. A kernel extension is that the '%p' is followed
* by an extra set of alphanumeric characters that are extended format
@@ -2372,6 +2380,10 @@ early_param("no_hash_pointers", no_hash_pointers_enable);
*
* Note: The default behaviour (unadorned %p) is to hash the address,
* rendering it useful as a unique identifier.
+ *
+ * There is also a '%pA' format specifier, but it is only intended to be used
+ * from Rust code to format core::fmt::Arguments. Do *not* use it from C.
+ * See rust/kernel/print.rs for details.
*/
static noinline_for_stack
char *pointer(const char *fmt, char *buf, char *end, void *ptr,
@@ -2444,6 +2456,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
return device_node_string(buf, end, ptr, spec, fmt + 1);
case 'f':
return fwnode_string(buf, end, ptr, spec, fmt + 1);
+ case 'A':
+ if (!IS_ENABLED(CONFIG_RUST)) {
+ WARN_ONCE(1, "Please remove %%pA from non-Rust code\n");
+ return error_string(buf, end, "(%pA?)", spec);
+ }
+ return rust_fmt_argument(buf, end, ptr);
case 'x':
return pointer_string(buf, end, ptr, spec);
case 'e':
diff --git a/lib/zstd/Makefile b/lib/zstd/Makefile
index fc45339fc3a3..440bd0007ae2 100644
--- a/lib/zstd/Makefile
+++ b/lib/zstd/Makefile
@@ -10,14 +10,10 @@
# ################################################################
obj-$(CONFIG_ZSTD_COMPRESS) += zstd_compress.o
obj-$(CONFIG_ZSTD_DECOMPRESS) += zstd_decompress.o
+obj-$(CONFIG_ZSTD_COMMON) += zstd_common.o
zstd_compress-y := \
zstd_compress_module.o \
- common/debug.o \
- common/entropy_common.o \
- common/error_private.o \
- common/fse_decompress.o \
- common/zstd_common.o \
compress/fse_compress.o \
compress/hist.o \
compress/huf_compress.o \
@@ -33,12 +29,14 @@ zstd_compress-y := \
zstd_decompress-y := \
zstd_decompress_module.o \
+ decompress/huf_decompress.o \
+ decompress/zstd_ddict.o \
+ decompress/zstd_decompress.o \
+ decompress/zstd_decompress_block.o \
+
+zstd_common-y := \
common/debug.o \
common/entropy_common.o \
common/error_private.o \
common/fse_decompress.o \
common/zstd_common.o \
- decompress/huf_decompress.o \
- decompress/zstd_ddict.o \
- decompress/zstd_decompress.o \
- decompress/zstd_decompress_block.o \
diff --git a/lib/zstd/common/entropy_common.c b/lib/zstd/common/entropy_common.c
index 53b47a2b52ff..a311808c0d56 100644
--- a/lib/zstd/common/entropy_common.c
+++ b/lib/zstd/common/entropy_common.c
@@ -15,6 +15,7 @@
/* *************************************
* Dependencies
***************************************/
+#include <linux/module.h>
#include "mem.h"
#include "error_private.h" /* ERR_*, ERROR */
#define FSE_STATIC_LINKING_ONLY /* FSE_MIN_TABLELOG */
@@ -239,7 +240,7 @@ size_t FSE_readNCount(
{
return FSE_readNCount_bmi2(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize, /* bmi2 */ 0);
}
-
+EXPORT_SYMBOL_GPL(FSE_readNCount);
/*! HUF_readStats() :
Read compact Huffman tree, saved by HUF_writeCTable().
@@ -255,6 +256,7 @@ size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
U32 wksp[HUF_READ_STATS_WORKSPACE_SIZE_U32];
return HUF_readStats_wksp(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, wksp, sizeof(wksp), /* bmi2 */ 0);
}
+EXPORT_SYMBOL_GPL(HUF_readStats);
FORCE_INLINE_TEMPLATE size_t
HUF_readStats_body(BYTE* huffWeight, size_t hwSize, U32* rankStats,
@@ -355,3 +357,4 @@ size_t HUF_readStats_wksp(BYTE* huffWeight, size_t hwSize, U32* rankStats,
(void)bmi2;
return HUF_readStats_body_default(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize);
}
+EXPORT_SYMBOL_GPL(HUF_readStats_wksp);
diff --git a/lib/zstd/common/zstd_common.c b/lib/zstd/common/zstd_common.c
index 3d7e35b309b5..0f1f63be25d9 100644
--- a/lib/zstd/common/zstd_common.c
+++ b/lib/zstd/common/zstd_common.c
@@ -13,6 +13,7 @@
/*-*************************************
* Dependencies
***************************************/
+#include <linux/module.h>
#define ZSTD_DEPS_NEED_MALLOC
#include "zstd_deps.h" /* ZSTD_malloc, ZSTD_calloc, ZSTD_free, ZSTD_memset */
#include "error_private.h"
@@ -35,14 +36,17 @@ const char* ZSTD_versionString(void) { return ZSTD_VERSION_STRING; }
* tells if a return value is an error code
* symbol is required for external callers */
unsigned ZSTD_isError(size_t code) { return ERR_isError(code); }
+EXPORT_SYMBOL_GPL(ZSTD_isError);
/*! ZSTD_getErrorName() :
* provides error code string from function result (useful for debugging) */
const char* ZSTD_getErrorName(size_t code) { return ERR_getErrorName(code); }
+EXPORT_SYMBOL_GPL(ZSTD_getErrorName);
/*! ZSTD_getError() :
* convert a `size_t` function result into a proper ZSTD_errorCode enum */
ZSTD_ErrorCode ZSTD_getErrorCode(size_t code) { return ERR_getErrorCode(code); }
+EXPORT_SYMBOL_GPL(ZSTD_getErrorCode);
/*! ZSTD_getErrorString() :
* provides error code string from enum */
@@ -59,6 +63,7 @@ void* ZSTD_customMalloc(size_t size, ZSTD_customMem customMem)
return customMem.customAlloc(customMem.opaque, size);
return ZSTD_malloc(size);
}
+EXPORT_SYMBOL_GPL(ZSTD_customMalloc);
void* ZSTD_customCalloc(size_t size, ZSTD_customMem customMem)
{
@@ -71,6 +76,7 @@ void* ZSTD_customCalloc(size_t size, ZSTD_customMem customMem)
}
return ZSTD_calloc(1, size);
}
+EXPORT_SYMBOL_GPL(ZSTD_customCalloc);
void ZSTD_customFree(void* ptr, ZSTD_customMem customMem)
{
@@ -81,3 +87,7 @@ void ZSTD_customFree(void* ptr, ZSTD_customMem customMem)
ZSTD_free(ptr);
}
}
+EXPORT_SYMBOL_GPL(ZSTD_customFree);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Zstd Common");