Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug               |   48
-rw-r--r--  lib/Kconfig.kasan               |   16
-rw-r--r--  lib/Makefile                    |    3
-rw-r--r--  lib/atomic64.c                  |   36
-rw-r--r--  lib/bitmap.c                    |    9
-rw-r--r--  lib/bootconfig.c                |   76
-rw-r--r--  lib/cmdline_kunit.c             |    2
-rw-r--r--  lib/crc64.c                     |    2
-rw-r--r--  lib/debug_locks.c               |    2
-rw-r--r--  lib/decompress_bunzip2.c        |    6
-rw-r--r--  lib/decompress_unlz4.c          |    8
-rw-r--r--  lib/decompress_unlzo.c          |    3
-rw-r--r--  lib/decompress_unxz.c           |    2
-rw-r--r--  lib/decompress_unzstd.c         |    4
-rw-r--r--  lib/dump_stack.c                |   48
-rw-r--r--  lib/dynamic_debug.c             |   20
-rw-r--r--  lib/iov_iter.c                  | 1230
-rw-r--r--  lib/kstrtox.c                   |   18
-rw-r--r--  lib/kstrtox.h                   |    2
-rw-r--r--  lib/kunit/debugfs.c             |    2
-rw-r--r--  lib/kunit/executor.c            |   53
-rw-r--r--  lib/kunit/executor_test.c       |  133
-rw-r--r--  lib/kunit/kunit-example-test.c  |   31
-rw-r--r--  lib/kunit/kunit-test.c          |   42
-rw-r--r--  lib/kunit/string-stream.h       |    6
-rw-r--r--  lib/kunit/test.c                |   95
-rw-r--r--  lib/locking-selftest.c          |   83
-rw-r--r--  lib/lz4/lz4_decompress.c        |    2
-rw-r--r--  lib/math/Makefile               |    1
-rw-r--r--  lib/math/rational-test.c        |   56
-rw-r--r--  lib/math/rational.c             |   16
-rw-r--r--  lib/mpi/longlong.h              |    4
-rw-r--r--  lib/mpi/mpicoder.c              |    6
-rw-r--r--  lib/mpi/mpiutil.c               |    2
-rw-r--r--  lib/parser.c                    |    1
-rw-r--r--  lib/percpu-refcount.c           |    6
-rw-r--r--  lib/seq_buf.c                   |    8
-rw-r--r--  lib/slub_kunit.c                |  152
-rw-r--r--  lib/smp_processor_id.c          |    6
-rw-r--r--  lib/string.c                    |    2
-rw-r--r--  lib/string_helpers.c            |  102
-rw-r--r--  lib/syscall.c                   |    4
-rw-r--r--  lib/test-string_helpers.c       |  157
-rw-r--r--  lib/test_bitmap.c               |    7
-rw-r--r--  lib/test_hmm.c                  |  132
-rw-r--r--  lib/test_hmm_uapi.h             |    2
-rw-r--r--  lib/test_kasan.c                |   52
-rw-r--r--  lib/test_list_sort.c            |  129
-rw-r--r--  lib/test_printf.c               |    5
-rw-r--r--  lib/test_scanf.c                |  750
-rw-r--r--  lib/test_string.c               |    5
-rw-r--r--  lib/vsprintf.c                  |  113
-rw-r--r--  lib/xz/xz_dec_bcj.c             |    2
-rw-r--r--  lib/xz/xz_dec_lzma2.c           |    8
-rw-r--r--  lib/zlib_inflate/inffast.c      |    2
-rw-r--r--  lib/zstd/huf.h                  |    2
56 files changed, 2615 insertions(+), 1099 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 678c13967580..8acc01d7d816 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -313,6 +313,9 @@ config DEBUG_INFO_BTF
config PAHOLE_HAS_SPLIT_BTF
def_bool $(success, test `$(PAHOLE) --version | sed -E 's/v([0-9]+)\.([0-9]+)/\1\2/'` -ge "119")
+config PAHOLE_HAS_ZEROSIZE_PERCPU_SUPPORT
+ def_bool $(success, test `$(PAHOLE) --version | sed -E 's/v([0-9]+)\.([0-9]+)/\1\2/'` -ge "122")
+
config DEBUG_INFO_BTF_MODULES
def_bool y
depends on DEBUG_INFO_BTF && MODULES && PAHOLE_HAS_SPLIT_BTF
@@ -1372,7 +1375,6 @@ config LOCKDEP
bool
depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
select STACKTRACE
- depends on FRAME_POINTER || MIPS || PPC || S390 || MICROBLAZE || ARM || ARC || X86
select KALLSYMS
select KALLSYMS_ALL
@@ -2047,8 +2049,9 @@ config LKDTM
Documentation/fault-injection/provoke-crashes.rst
config TEST_LIST_SORT
- tristate "Linked list sorting test"
- depends on DEBUG_KERNEL || m
+ tristate "Linked list sorting test" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
help
Enable this to turn on 'list_sort()' function test. This test is
executed only once during system boot (so affects only boot time),
@@ -2181,6 +2184,9 @@ config TEST_KSTRTOX
config TEST_PRINTF
tristate "Test printf() family of functions at runtime"
+config TEST_SCANF
+ tristate "Test scanf() family of functions at runtime"
+
config TEST_BITMAP
tristate "Test bitmap_*() family of functions at runtime"
help
@@ -2429,6 +2435,30 @@ config BITS_TEST
If unsure, say N.
+config SLUB_KUNIT_TEST
+ tristate "KUnit test for SLUB cache error detection" if !KUNIT_ALL_TESTS
+ depends on SLUB_DEBUG && KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ This builds SLUB allocator unit test.
+ Tests SLUB cache debugging functionality.
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
+config RATIONAL_KUNIT_TEST
+ tristate "KUnit test for rational.c" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ select RATIONAL
+ default KUNIT_ALL_TESTS
+ help
+ This builds the rational math unit test.
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
config TEST_UDELAY
tristate "udelay test driver"
help
@@ -2571,6 +2601,18 @@ config TEST_FPU
If unsure, say N.
+config TEST_CLOCKSOURCE_WATCHDOG
+ tristate "Test clocksource watchdog in kernel space"
+ depends on CLOCKSOURCE_WATCHDOG
+ help
+ Enable this option to create a kernel module that will trigger
+ a test of the clocksource watchdog. This module may be loaded
+ via modprobe or insmod in which case it will run upon being
+ loaded, or it may be built in, in which case it will run
+ shortly after boot.
+
+ If unsure, say N.
+
endif # RUNTIME_TESTING_MENU
config ARCH_USE_MEMTEST
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index cffc2ebbf185..1e2d10f86011 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -12,6 +12,13 @@ config HAVE_ARCH_KASAN_HW_TAGS
config HAVE_ARCH_KASAN_VMALLOC
bool
+config ARCH_DISABLE_KASAN_INLINE
+ bool
+ help
+ An architecture might not support inline instrumentation.
+ When this option is selected, inline and stack instrumentation are
+ disabled.
+
config CC_HAS_KASAN_GENERIC
def_bool $(cc-option, -fsanitize=kernel-address)
@@ -130,6 +137,7 @@ config KASAN_OUTLINE
config KASAN_INLINE
bool "Inline instrumentation"
+ depends on !ARCH_DISABLE_KASAN_INLINE
help
Compiler directly inserts code checking shadow memory before
memory accesses. This is faster than outline (in some workloads
@@ -141,6 +149,7 @@ endchoice
config KASAN_STACK
bool "Enable stack instrumentation (unsafe)" if CC_IS_CLANG && !COMPILE_TEST
depends on KASAN_GENERIC || KASAN_SW_TAGS
+ depends on !ARCH_DISABLE_KASAN_INLINE
default y if CC_IS_GCC
help
The LLVM stack address sanitizer has a know problem that
@@ -154,10 +163,13 @@ config KASAN_STACK
but clang users can still enable it for builds without
CONFIG_COMPILE_TEST. On gcc it is assumed to always be safe
to use and enabled by default.
+ If the architecture disables inline instrumentation, stack
+ instrumentation is also disabled as it adds inline-style
+ instrumentation that is run unconditionally.
-config KASAN_SW_TAGS_IDENTIFY
+config KASAN_TAGS_IDENTIFY
bool "Enable memory corruption identification"
- depends on KASAN_SW_TAGS
+ depends on KASAN_SW_TAGS || KASAN_HW_TAGS
help
This option enables best-effort identification of bug type
(use-after-free or out-of-bounds) at the cost of increased
diff --git a/lib/Makefile b/lib/Makefile
index e11cfc18b6c0..6d765d5fb8ac 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -83,6 +83,7 @@ obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o
obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_keys.o
obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o
obj-$(CONFIG_TEST_PRINTF) += test_printf.o
+obj-$(CONFIG_TEST_SCANF) += test_scanf.o
obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o
obj-$(CONFIG_TEST_STRSCPY) += test_strscpy.o
obj-$(CONFIG_TEST_UUID) += test_uuid.o
@@ -348,10 +349,12 @@ obj-$(CONFIG_OBJAGG) += objagg.o
obj-$(CONFIG_PLDMFW) += pldmfw/
# KUnit tests
+CFLAGS_bitfield_kunit.o := $(call cc-option,-Wframe-larger-than=10240)
obj-$(CONFIG_BITFIELD_KUNIT) += bitfield_kunit.o
obj-$(CONFIG_LIST_KUNIT_TEST) += list-test.o
obj-$(CONFIG_LINEAR_RANGES_TEST) += test_linear_ranges.o
obj-$(CONFIG_BITS_TEST) += test_bits.o
obj-$(CONFIG_CMDLINE_KUNIT_TEST) += cmdline_kunit.o
+obj-$(CONFIG_SLUB_KUNIT_TEST) += slub_kunit.o
obj-$(CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED) += devmem_is_allowed.o
diff --git a/lib/atomic64.c b/lib/atomic64.c
index e98c85a99787..3df653994177 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -42,7 +42,7 @@ static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}
-s64 atomic64_read(const atomic64_t *v)
+s64 generic_atomic64_read(const atomic64_t *v)
{
unsigned long flags;
raw_spinlock_t *lock = lock_addr(v);
@@ -53,9 +53,9 @@ s64 atomic64_read(const atomic64_t *v)
raw_spin_unlock_irqrestore(lock, flags);
return val;
}
-EXPORT_SYMBOL(atomic64_read);
+EXPORT_SYMBOL(generic_atomic64_read);
-void atomic64_set(atomic64_t *v, s64 i)
+void generic_atomic64_set(atomic64_t *v, s64 i)
{
unsigned long flags;
raw_spinlock_t *lock = lock_addr(v);
@@ -64,10 +64,10 @@ void atomic64_set(atomic64_t *v, s64 i)
v->counter = i;
raw_spin_unlock_irqrestore(lock, flags);
}
-EXPORT_SYMBOL(atomic64_set);
+EXPORT_SYMBOL(generic_atomic64_set);
#define ATOMIC64_OP(op, c_op) \
-void atomic64_##op(s64 a, atomic64_t *v) \
+void generic_atomic64_##op(s64 a, atomic64_t *v) \
{ \
unsigned long flags; \
raw_spinlock_t *lock = lock_addr(v); \
@@ -76,10 +76,10 @@ void atomic64_##op(s64 a, atomic64_t *v) \
v->counter c_op a; \
raw_spin_unlock_irqrestore(lock, flags); \
} \
-EXPORT_SYMBOL(atomic64_##op);
+EXPORT_SYMBOL(generic_atomic64_##op);
#define ATOMIC64_OP_RETURN(op, c_op) \
-s64 atomic64_##op##_return(s64 a, atomic64_t *v) \
+s64 generic_atomic64_##op##_return(s64 a, atomic64_t *v) \
{ \
unsigned long flags; \
raw_spinlock_t *lock = lock_addr(v); \
@@ -90,10 +90,10 @@ s64 atomic64_##op##_return(s64 a, atomic64_t *v) \
raw_spin_unlock_irqrestore(lock, flags); \
return val; \
} \
-EXPORT_SYMBOL(atomic64_##op##_return);
+EXPORT_SYMBOL(generic_atomic64_##op##_return);
#define ATOMIC64_FETCH_OP(op, c_op) \
-s64 atomic64_fetch_##op(s64 a, atomic64_t *v) \
+s64 generic_atomic64_fetch_##op(s64 a, atomic64_t *v) \
{ \
unsigned long flags; \
raw_spinlock_t *lock = lock_addr(v); \
@@ -105,7 +105,7 @@ s64 atomic64_fetch_##op(s64 a, atomic64_t *v) \
raw_spin_unlock_irqrestore(lock, flags); \
return val; \
} \
-EXPORT_SYMBOL(atomic64_fetch_##op);
+EXPORT_SYMBOL(generic_atomic64_fetch_##op);
#define ATOMIC64_OPS(op, c_op) \
ATOMIC64_OP(op, c_op) \
@@ -130,7 +130,7 @@ ATOMIC64_OPS(xor, ^=)
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
-s64 atomic64_dec_if_positive(atomic64_t *v)
+s64 generic_atomic64_dec_if_positive(atomic64_t *v)
{
unsigned long flags;
raw_spinlock_t *lock = lock_addr(v);
@@ -143,9 +143,9 @@ s64 atomic64_dec_if_positive(atomic64_t *v)
raw_spin_unlock_irqrestore(lock, flags);
return val;
}
-EXPORT_SYMBOL(atomic64_dec_if_positive);
+EXPORT_SYMBOL(generic_atomic64_dec_if_positive);
-s64 atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
+s64 generic_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
{
unsigned long flags;
raw_spinlock_t *lock = lock_addr(v);
@@ -158,9 +158,9 @@ s64 atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
raw_spin_unlock_irqrestore(lock, flags);
return val;
}
-EXPORT_SYMBOL(atomic64_cmpxchg);
+EXPORT_SYMBOL(generic_atomic64_cmpxchg);
-s64 atomic64_xchg(atomic64_t *v, s64 new)
+s64 generic_atomic64_xchg(atomic64_t *v, s64 new)
{
unsigned long flags;
raw_spinlock_t *lock = lock_addr(v);
@@ -172,9 +172,9 @@ s64 atomic64_xchg(atomic64_t *v, s64 new)
raw_spin_unlock_irqrestore(lock, flags);
return val;
}
-EXPORT_SYMBOL(atomic64_xchg);
+EXPORT_SYMBOL(generic_atomic64_xchg);
-s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+s64 generic_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
unsigned long flags;
raw_spinlock_t *lock = lock_addr(v);
@@ -188,4 +188,4 @@ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
return val;
}
-EXPORT_SYMBOL(atomic64_fetch_add_unless);
+EXPORT_SYMBOL(generic_atomic64_fetch_add_unless);
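
The renames above move lib/atomic64.c out of the atomic64_*() namespace so the
asm-generic wrappers can own it; on CONFIG_GENERIC_ATOMIC64 kernels those
wrappers forward to these generic_atomic64_*() spinlock-backed implementations.
Caller code is unchanged by the rename. A minimal sketch, where ex_counter and
ex_update are made-up names:

    static atomic64_t ex_counter = ATOMIC64_INIT(0);

    static void ex_update(void)
    {
            /* native op on most 64-bit arches; resolves to
             * generic_atomic64_add() under CONFIG_GENERIC_ATOMIC64 */
            atomic64_add(2, &ex_counter);
            pr_info("counter=%lld\n", (long long)atomic64_read(&ex_counter));
    }
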
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 7b6b2a67a6a6..9401d39e4722 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -581,6 +581,14 @@ static const char *bitmap_parse_region(const char *str, struct region *r)
{
unsigned int lastbit = r->nbits - 1;
+ if (!strncasecmp(str, "all", 3)) {
+ r->start = 0;
+ r->end = lastbit;
+ str += 3;
+
+ goto check_pattern;
+ }
+
str = bitmap_getnum(str, &r->start, lastbit);
if (IS_ERR(str))
return str;
@@ -595,6 +603,7 @@ static const char *bitmap_parse_region(const char *str, struct region *r)
if (IS_ERR(str))
return str;
+check_pattern:
if (end_of_region(*str))
goto no_pattern;
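
The new branch accepts the keyword "all" as a region covering bit 0 through
nbits - 1, and the check_pattern label lets it combine with the existing
used/group pattern syntax. A hedged sketch through bitmap_parselist(), which
is where region parsing is exercised (ex_parse is a made-up name):

    DECLARE_BITMAP(mask, 16);

    static int ex_parse(void)
    {
            /* whole range, 1 bit used per group of 2: sets bits 0,2,...,14 */
            return bitmap_parselist("all:1/2", mask, 16);
    }
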
diff --git a/lib/bootconfig.c b/lib/bootconfig.c
index 9f8c70a98fcf..927017431fb6 100644
--- a/lib/bootconfig.c
+++ b/lib/bootconfig.c
@@ -156,7 +156,7 @@ xbc_node_find_child(struct xbc_node *parent, const char *key)
struct xbc_node *node;
if (parent)
- node = xbc_node_get_child(parent);
+ node = xbc_node_get_subkey(parent);
else
node = xbc_root_node();
@@ -164,7 +164,7 @@ xbc_node_find_child(struct xbc_node *parent, const char *key)
if (!xbc_node_match_prefix(node, &key))
node = xbc_node_get_next(node);
else if (*key != '\0')
- node = xbc_node_get_child(node);
+ node = xbc_node_get_subkey(node);
else
break;
}
@@ -274,6 +274,8 @@ int __init xbc_node_compose_key_after(struct xbc_node *root,
struct xbc_node * __init xbc_node_find_next_leaf(struct xbc_node *root,
struct xbc_node *node)
{
+ struct xbc_node *next;
+
if (unlikely(!xbc_data))
return NULL;
@@ -282,6 +284,13 @@ struct xbc_node * __init xbc_node_find_next_leaf(struct xbc_node *root,
if (!node)
node = xbc_nodes;
} else {
+ /* Leaf node may have a subkey */
+ next = xbc_node_get_subkey(node);
+ if (next) {
+ node = next;
+ goto found;
+ }
+
if (node == root) /* @root was a leaf, no child node. */
return NULL;
@@ -296,6 +305,7 @@ struct xbc_node * __init xbc_node_find_next_leaf(struct xbc_node *root,
node = xbc_node_get_next(node);
}
+found:
while (node && !xbc_node_is_leaf(node))
node = xbc_node_get_child(node);
@@ -367,18 +377,28 @@ static inline __init struct xbc_node *xbc_last_sibling(struct xbc_node *node)
return node;
}
-static struct xbc_node * __init xbc_add_sibling(char *data, u32 flag)
+static inline __init struct xbc_node *xbc_last_child(struct xbc_node *node)
+{
+ while (node->child)
+ node = xbc_node_get_child(node);
+
+ return node;
+}
+
+static struct xbc_node * __init __xbc_add_sibling(char *data, u32 flag, bool head)
{
struct xbc_node *sib, *node = xbc_add_node(data, flag);
if (node) {
if (!last_parent) {
+ /* Ignore @head in this case */
node->parent = XBC_NODE_MAX;
sib = xbc_last_sibling(xbc_nodes);
sib->next = xbc_node_index(node);
} else {
node->parent = xbc_node_index(last_parent);
- if (!last_parent->child) {
+ if (!last_parent->child || head) {
+ node->next = last_parent->child;
last_parent->child = xbc_node_index(node);
} else {
sib = xbc_node_get_child(last_parent);
@@ -392,6 +412,16 @@ static struct xbc_node * __init xbc_add_sibling(char *data, u32 flag)
return node;
}
+static inline struct xbc_node * __init xbc_add_sibling(char *data, u32 flag)
+{
+ return __xbc_add_sibling(data, flag, false);
+}
+
+static inline struct xbc_node * __init xbc_add_head_sibling(char *data, u32 flag)
+{
+ return __xbc_add_sibling(data, flag, true);
+}
+
static inline __init struct xbc_node *xbc_add_child(char *data, u32 flag)
{
struct xbc_node *node = xbc_add_sibling(data, flag);
@@ -517,17 +547,20 @@ static int __init xbc_parse_array(char **__v)
char *next;
int c = 0;
+ if (last_parent->child)
+ last_parent = xbc_node_get_child(last_parent);
+
do {
c = __xbc_parse_value(__v, &next);
if (c < 0)
return c;
- node = xbc_add_sibling(*__v, XBC_VALUE);
+ node = xbc_add_child(*__v, XBC_VALUE);
if (!node)
return -ENOMEM;
*__v = next;
} while (c == ',');
- node->next = 0;
+ node->child = 0;
return c;
}
@@ -557,8 +590,9 @@ static int __init __xbc_add_key(char *k)
node = find_match_node(xbc_nodes, k);
else {
child = xbc_node_get_child(last_parent);
+ /* Since the value node is the first child, skip it. */
if (child && xbc_node_is_value(child))
- return xbc_parse_error("Subkey is mixed with value", k);
+ child = xbc_node_get_next(child);
node = find_match_node(child, k);
}
@@ -601,23 +635,29 @@ static int __init xbc_parse_kv(char **k, char *v, int op)
if (ret)
return ret;
- child = xbc_node_get_child(last_parent);
- if (child) {
- if (xbc_node_is_key(child))
- return xbc_parse_error("Value is mixed with subkey", v);
- else if (op == '=')
- return xbc_parse_error("Value is redefined", v);
- }
-
c = __xbc_parse_value(&v, &next);
if (c < 0)
return c;
- if (op == ':' && child) {
- xbc_init_node(child, v, XBC_VALUE);
- } else if (!xbc_add_sibling(v, XBC_VALUE))
+ child = xbc_node_get_child(last_parent);
+ if (child && xbc_node_is_value(child)) {
+ if (op == '=')
+ return xbc_parse_error("Value is redefined", v);
+ if (op == ':') {
+ unsigned short nidx = child->next;
+
+ xbc_init_node(child, v, XBC_VALUE);
+ child->next = nidx; /* keep subkeys */
+ goto array;
+ }
+ /* op must be '+' */
+ last_parent = xbc_last_child(child);
+ }
+ /* The value node should always be the first child */
+ if (!xbc_add_head_sibling(v, XBC_VALUE))
return -ENOMEM;
+array:
if (c == ',') { /* Array */
c = xbc_parse_array(&next);
if (c < 0)
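
Taken together, these bootconfig changes store a key's value node as the first
child so a key can carry both a value and subkeys; the old parser rejected that
with "Subkey is mixed with value". A sketch of bootconfig text the new parser
accepts (key and value names are illustrative):

    foo = value1
    foo.bar = value2   # subkey under a key that also holds a value
    foo += value3      # '+' appends; xbc_last_child() finds the tail
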
diff --git a/lib/cmdline_kunit.c b/lib/cmdline_kunit.c
index 018bfc8113c4..a72a2c16066e 100644
--- a/lib/cmdline_kunit.c
+++ b/lib/cmdline_kunit.c
@@ -124,7 +124,7 @@ static void cmdline_do_one_range_test(struct kunit *test, const char *in,
n, e[0], r[0]);
p = memchr_inv(&r[1], 0, sizeof(r) - sizeof(r[0]));
- KUNIT_EXPECT_PTR_EQ_MSG(test, p, (int *)0, "in test %u at %u out of bound", n, p - r);
+ KUNIT_EXPECT_PTR_EQ_MSG(test, p, NULL, "in test %u at %u out of bound", n, p - r);
}
static void cmdline_test_range(struct kunit *test)
diff --git a/lib/crc64.c b/lib/crc64.c
index 47cfa054827f..9f852a89ee2a 100644
--- a/lib/crc64.c
+++ b/lib/crc64.c
@@ -37,7 +37,7 @@ MODULE_LICENSE("GPL v2");
/**
* crc64_be - Calculate bitwise big-endian ECMA-182 CRC64
* @crc: seed value for computation. 0 or (u64)~0 for a new CRC calculation,
- or the previous crc64 value if computing incrementally.
+ * or the previous crc64 value if computing incrementally.
* @p: pointer to buffer over which CRC64 is run
* @len: length of buffer @p
*/
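
Per the corrected kernel-doc, crc64_be() supports incremental computation by
feeding the previous result back as the seed. A short sketch (buf1/buf2 and
their lengths are hypothetical):

    u64 crc;

    crc = crc64_be(0, buf1, len1);   /* new computation: seed 0 or ~0 */
    crc = crc64_be(crc, buf2, len2); /* continue over the next chunk */
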
diff --git a/lib/debug_locks.c b/lib/debug_locks.c
index 06d3135bd184..a75ee30b77cb 100644
--- a/lib/debug_locks.c
+++ b/lib/debug_locks.c
@@ -36,7 +36,7 @@ EXPORT_SYMBOL_GPL(debug_locks_silent);
/*
* Generic 'turn off all lock debugging' function:
*/
-noinstr int debug_locks_off(void)
+int debug_locks_off(void)
{
if (debug_locks && __debug_locks_off()) {
if (!debug_locks_silent) {
diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
index c72c865032fa..3518e7394eca 100644
--- a/lib/decompress_bunzip2.c
+++ b/lib/decompress_bunzip2.c
@@ -80,7 +80,7 @@
/* This is what we know about each Huffman coding group */
struct group_data {
- /* We have an extra slot at the end of limit[] for a sentinal value. */
+ /* We have an extra slot at the end of limit[] for a sentinel value. */
int limit[MAX_HUFCODE_BITS+1];
int base[MAX_HUFCODE_BITS];
int permute[MAX_SYMBOLS];
@@ -337,7 +337,7 @@ static int INIT get_next_block(struct bunzip_data *bd)
pp <<= 1;
base[i+1] = pp-(t += temp[i]);
}
- limit[maxLen+1] = INT_MAX; /* Sentinal value for
+ limit[maxLen+1] = INT_MAX; /* Sentinel value for
* reading next sym. */
limit[maxLen] = pp+temp[maxLen]-1;
base[minLen] = 0;
@@ -385,7 +385,7 @@ static int INIT get_next_block(struct bunzip_data *bd)
bd->inbufBits =
(bd->inbufBits << 8)|bd->inbuf[bd->inbufPos++];
bd->inbufBitCount += 8;
- };
+ }
bd->inbufBitCount -= hufGroup->maxLen;
j = (bd->inbufBits >> bd->inbufBitCount)&
((1 << hufGroup->maxLen)-1);
diff --git a/lib/decompress_unlz4.c b/lib/decompress_unlz4.c
index c0cfcfd486be..e6327391b6b6 100644
--- a/lib/decompress_unlz4.c
+++ b/lib/decompress_unlz4.c
@@ -112,6 +112,9 @@ STATIC inline int INIT unlz4(u8 *input, long in_len,
error("data corrupted");
goto exit_2;
}
+ } else if (size < 4) {
+ /* empty or end-of-file */
+ goto exit_3;
}
chunksize = get_unaligned_le32(inp);
@@ -125,6 +128,10 @@ STATIC inline int INIT unlz4(u8 *input, long in_len,
continue;
}
+ if (!fill && chunksize == 0) {
+ /* empty or end-of-file */
+ goto exit_3;
+ }
if (posp)
*posp += 4;
@@ -184,6 +191,7 @@ STATIC inline int INIT unlz4(u8 *input, long in_len,
}
}
+exit_3:
ret = 0;
exit_2:
if (!input)
diff --git a/lib/decompress_unlzo.c b/lib/decompress_unlzo.c
index 1f439a622076..64c1358500ce 100644
--- a/lib/decompress_unlzo.c
+++ b/lib/decompress_unlzo.c
@@ -43,7 +43,6 @@ STATIC inline long INIT parse_header(u8 *input, long *skip, long in_len)
int l;
u8 *parse = input;
u8 *end = input + in_len;
- u8 level = 0;
u16 version;
/*
@@ -65,7 +64,7 @@ STATIC inline long INIT parse_header(u8 *input, long *skip, long in_len)
version = get_unaligned_be16(parse);
parse += 7;
if (version >= 0x0940)
- level = *parse++;
+ parse++;
if (get_unaligned_be32(parse) & HEADER_HAS_FILTER)
parse += 8; /* flags + filter info */
else
diff --git a/lib/decompress_unxz.c b/lib/decompress_unxz.c
index 25d59a95bd66..a2f38e23004a 100644
--- a/lib/decompress_unxz.c
+++ b/lib/decompress_unxz.c
@@ -23,7 +23,7 @@
* uncompressible. Thus, we must look for worst-case expansion when the
* compressor is encoding uncompressible data.
*
- * The structure of the .xz file in case of a compresed kernel is as follows.
+ * The structure of the .xz file in case of a compressed kernel is as follows.
* Sizes (as bytes) of the fields are in parenthesis.
*
* Stream Header (12)
diff --git a/lib/decompress_unzstd.c b/lib/decompress_unzstd.c
index 790abc472f5b..6b629ab31c1e 100644
--- a/lib/decompress_unzstd.c
+++ b/lib/decompress_unzstd.c
@@ -16,7 +16,7 @@
* uncompressible. Thus, we must look for worst-case expansion when the
* compressor is encoding uncompressible data.
*
- * The structure of the .zst file in case of a compresed kernel is as follows.
+ * The structure of the .zst file in case of a compressed kernel is as follows.
* Maximum sizes (as bytes) of the fields are in parenthesis.
*
* Frame Header: (18)
@@ -56,7 +56,7 @@
/*
* Preboot environments #include "path/to/decompress_unzstd.c".
* All of the source files we depend on must be #included.
- * zstd's only source dependeny is xxhash, which has no source
+ * zstd's only source dependency is xxhash, which has no source
* dependencies.
*
* When UNZSTD_PREBOOT is defined we declare __decompress(), which is
diff --git a/lib/dump_stack.c b/lib/dump_stack.c
index f5a33b6f773f..27f16872320d 100644
--- a/lib/dump_stack.c
+++ b/lib/dump_stack.c
@@ -73,10 +73,10 @@ void show_regs_print_info(const char *log_lvl)
dump_stack_print_info(log_lvl);
}
-static void __dump_stack(void)
+static void __dump_stack(const char *log_lvl)
{
- dump_stack_print_info(KERN_DEFAULT);
- show_stack(NULL, NULL, KERN_DEFAULT);
+ dump_stack_print_info(log_lvl);
+ show_stack(NULL, NULL, log_lvl);
}
/**
@@ -84,50 +84,22 @@ static void __dump_stack(void)
*
* Architectures can override this implementation by implementing its own.
*/
-#ifdef CONFIG_SMP
-static atomic_t dump_lock = ATOMIC_INIT(-1);
-
-asmlinkage __visible void dump_stack(void)
+asmlinkage __visible void dump_stack_lvl(const char *log_lvl)
{
unsigned long flags;
- int was_locked;
- int old;
- int cpu;
/*
* Permit this cpu to perform nested stack dumps while serialising
* against other CPUs
*/
-retry:
- local_irq_save(flags);
- cpu = smp_processor_id();
- old = atomic_cmpxchg(&dump_lock, -1, cpu);
- if (old == -1) {
- was_locked = 0;
- } else if (old == cpu) {
- was_locked = 1;
- } else {
- local_irq_restore(flags);
- /*
- * Wait for the lock to release before jumping to
- * atomic_cmpxchg() in order to mitigate the thundering herd
- * problem.
- */
- do { cpu_relax(); } while (atomic_read(&dump_lock) != -1);
- goto retry;
- }
-
- __dump_stack();
-
- if (!was_locked)
- atomic_set(&dump_lock, -1);
-
- local_irq_restore(flags);
+ printk_cpu_lock_irqsave(flags);
+ __dump_stack(log_lvl);
+ printk_cpu_unlock_irqrestore(flags);
}
-#else
+EXPORT_SYMBOL(dump_stack_lvl);
+
asmlinkage __visible void dump_stack(void)
{
- __dump_stack();
+ dump_stack_lvl(KERN_DEFAULT);
}
-#endif
EXPORT_SYMBOL(dump_stack);
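
With serialisation moved into the printk CPU lock, dump_stack() becomes a thin
wrapper around the new dump_stack_lvl(), so callers can pick a loglevel. A
minimal sketch:

    dump_stack_lvl(KERN_INFO); /* stack trace printed at info level */
    dump_stack();              /* unchanged behaviour: KERN_DEFAULT */
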
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 921d0a654243..641767b0dce2 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -586,13 +586,11 @@ static int remaining(int wrote)
return 0;
}
-static char *dynamic_emit_prefix(const struct _ddebug *desc, char *buf)
+static char *__dynamic_emit_prefix(const struct _ddebug *desc, char *buf)
{
int pos_after_tid;
int pos = 0;
- *buf = '\0';
-
if (desc->flags & _DPRINTK_FLAGS_INCL_TID) {
if (in_interrupt())
pos += snprintf(buf + pos, remaining(pos), "<intr> ");
@@ -618,11 +616,18 @@ static char *dynamic_emit_prefix(const struct _ddebug *desc, char *buf)
return buf;
}
+static inline char *dynamic_emit_prefix(struct _ddebug *desc, char *buf)
+{
+ if (unlikely(desc->flags & _DPRINTK_FLAGS_INCL_ANY))
+ return __dynamic_emit_prefix(desc, buf);
+ return buf;
+}
+
void __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...)
{
va_list args;
struct va_format vaf;
- char buf[PREFIX_SIZE];
+ char buf[PREFIX_SIZE] = "";
BUG_ON(!descriptor);
BUG_ON(!fmt);
@@ -655,7 +660,7 @@ void __dynamic_dev_dbg(struct _ddebug *descriptor,
if (!dev) {
printk(KERN_DEBUG "(NULL device *): %pV", &vaf);
} else {
- char buf[PREFIX_SIZE];
+ char buf[PREFIX_SIZE] = "";
dev_printk_emit(LOGLEVEL_DEBUG, dev, "%s%s %s: %pV",
dynamic_emit_prefix(descriptor, buf),
@@ -684,7 +689,7 @@ void __dynamic_netdev_dbg(struct _ddebug *descriptor,
vaf.va = &args;
if (dev && dev->dev.parent) {
- char buf[PREFIX_SIZE];
+ char buf[PREFIX_SIZE] = "";
dev_printk_emit(LOGLEVEL_DEBUG, dev->dev.parent,
"%s%s %s %s%s: %pV",
@@ -720,7 +725,7 @@ void __dynamic_ibdev_dbg(struct _ddebug *descriptor,
vaf.va = &args;
if (ibdev && ibdev->dev.parent) {
- char buf[PREFIX_SIZE];
+ char buf[PREFIX_SIZE] = "";
dev_printk_emit(LOGLEVEL_DEBUG, ibdev->dev.parent,
"%s%s %s %s: %pV",
@@ -915,7 +920,6 @@ static const struct seq_operations ddebug_proc_seqops = {
static int ddebug_proc_open(struct inode *inode, struct file *file)
{
- vpr_info("called\n");
return seq_open_private(file, &ddebug_proc_seqops,
sizeof(struct ddebug_iter));
}
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index c701b7a187f2..e23123ae3a13 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -16,170 +16,137 @@
#define PIPE_PARANOIA /* for now */
-#define iterate_iovec(i, n, __v, __p, skip, STEP) { \
- size_t left; \
- size_t wanted = n; \
- __p = i->iov; \
- __v.iov_len = min(n, __p->iov_len - skip); \
- if (likely(__v.iov_len)) { \
- __v.iov_base = __p->iov_base + skip; \
- left = (STEP); \
- __v.iov_len -= left; \
- skip += __v.iov_len; \
- n -= __v.iov_len; \
- } else { \
- left = 0; \
- } \
- while (unlikely(!left && n)) { \
- __p++; \
- __v.iov_len = min(n, __p->iov_len); \
- if (unlikely(!__v.iov_len)) \
- continue; \
- __v.iov_base = __p->iov_base; \
- left = (STEP); \
- __v.iov_len -= left; \
- skip = __v.iov_len; \
- n -= __v.iov_len; \
- } \
- n = wanted - n; \
-}
-
-#define iterate_kvec(i, n, __v, __p, skip, STEP) { \
- size_t wanted = n; \
- __p = i->kvec; \
- __v.iov_len = min(n, __p->iov_len - skip); \
- if (likely(__v.iov_len)) { \
- __v.iov_base = __p->iov_base + skip; \
- (void)(STEP); \
- skip += __v.iov_len; \
- n -= __v.iov_len; \
- } \
- while (unlikely(n)) { \
- __p++; \
- __v.iov_len = min(n, __p->iov_len); \
- if (unlikely(!__v.iov_len)) \
- continue; \
- __v.iov_base = __p->iov_base; \
- (void)(STEP); \
- skip = __v.iov_len; \
- n -= __v.iov_len; \
- } \
- n = wanted; \
-}
-
-#define iterate_bvec(i, n, __v, __bi, skip, STEP) { \
- struct bvec_iter __start; \
- __start.bi_size = n; \
- __start.bi_bvec_done = skip; \
- __start.bi_idx = 0; \
- for_each_bvec(__v, i->bvec, __bi, __start) { \
- (void)(STEP); \
- } \
-}
-
-#define iterate_xarray(i, n, __v, skip, STEP) { \
+/* covers iovec and kvec alike */
+#define iterate_iovec(i, n, base, len, off, __p, STEP) { \
+ size_t off = 0; \
+ size_t skip = i->iov_offset; \
+ do { \
+ len = min(n, __p->iov_len - skip); \
+ if (likely(len)) { \
+ base = __p->iov_base + skip; \
+ len -= (STEP); \
+ off += len; \
+ skip += len; \
+ n -= len; \
+ if (skip < __p->iov_len) \
+ break; \
+ } \
+ __p++; \
+ skip = 0; \
+ } while (n); \
+ i->iov_offset = skip; \
+ n = off; \
+}
+
+#define iterate_bvec(i, n, base, len, off, p, STEP) { \
+ size_t off = 0; \
+ unsigned skip = i->iov_offset; \
+ while (n) { \
+ unsigned offset = p->bv_offset + skip; \
+ unsigned left; \
+ void *kaddr = kmap_local_page(p->bv_page + \
+ offset / PAGE_SIZE); \
+ base = kaddr + offset % PAGE_SIZE; \
+ len = min(min(n, (size_t)(p->bv_len - skip)), \
+ (size_t)(PAGE_SIZE - offset % PAGE_SIZE)); \
+ left = (STEP); \
+ kunmap_local(kaddr); \
+ len -= left; \
+ off += len; \
+ skip += len; \
+ if (skip == p->bv_len) { \
+ skip = 0; \
+ p++; \
+ } \
+ n -= len; \
+ if (left) \
+ break; \
+ } \
+ i->iov_offset = skip; \
+ n = off; \
+}
+
+#define iterate_xarray(i, n, base, len, __off, STEP) { \
+ __label__ __out; \
+ size_t __off = 0; \
struct page *head = NULL; \
- size_t wanted = n, seg, offset; \
- loff_t start = i->xarray_start + skip; \
- pgoff_t index = start >> PAGE_SHIFT; \
+ loff_t start = i->xarray_start + i->iov_offset; \
+ unsigned offset = start % PAGE_SIZE; \
+ pgoff_t index = start / PAGE_SIZE; \
int j; \
\
XA_STATE(xas, i->xarray, index); \
\
- rcu_read_lock(); \
- xas_for_each(&xas, head, ULONG_MAX) { \
- if (xas_retry(&xas, head)) \
- continue; \
- if (WARN_ON(xa_is_value(head))) \
- break; \
- if (WARN_ON(PageHuge(head))) \
- break; \
+ rcu_read_lock(); \
+ xas_for_each(&xas, head, ULONG_MAX) { \
+ unsigned left; \
+ if (xas_retry(&xas, head)) \
+ continue; \
+ if (WARN_ON(xa_is_value(head))) \
+ break; \
+ if (WARN_ON(PageHuge(head))) \
+ break; \
for (j = (head->index < index) ? index - head->index : 0; \
- j < thp_nr_pages(head); j++) { \
- __v.bv_page = head + j; \
- offset = (i->xarray_start + skip) & ~PAGE_MASK; \
- seg = PAGE_SIZE - offset; \
- __v.bv_offset = offset; \
- __v.bv_len = min(n, seg); \
- (void)(STEP); \
- n -= __v.bv_len; \
- skip += __v.bv_len; \
- if (n == 0) \
- break; \
- } \
- if (n == 0) \
- break; \
- } \
- rcu_read_unlock(); \
- n = wanted - n; \
-}
-
-#define iterate_all_kinds(i, n, v, I, B, K, X) { \
- if (likely(n)) { \
- size_t skip = i->iov_offset; \
- if (unlikely(i->type & ITER_BVEC)) { \
- struct bio_vec v; \
- struct bvec_iter __bi; \
- iterate_bvec(i, n, v, __bi, skip, (B)) \
- } else if (unlikely(i->type & ITER_KVEC)) { \
- const struct kvec *kvec; \
- struct kvec v; \
- iterate_kvec(i, n, v, kvec, skip, (K)) \
- } else if (unlikely(i->type & ITER_DISCARD)) { \
- } else if (unlikely(i->type & ITER_XARRAY)) { \
- struct bio_vec v; \
- iterate_xarray(i, n, v, skip, (X)); \
- } else { \
- const struct iovec *iov; \
- struct iovec v; \
- iterate_iovec(i, n, v, iov, skip, (I)) \
+ j < thp_nr_pages(head); j++) { \
+ void *kaddr = kmap_local_page(head + j); \
+ base = kaddr + offset; \
+ len = PAGE_SIZE - offset; \
+ len = min(n, len); \
+ left = (STEP); \
+ kunmap_local(kaddr); \
+ len -= left; \
+ __off += len; \
+ n -= len; \
+ if (left || n == 0) \
+ goto __out; \
+ offset = 0; \
} \
} \
+__out: \
+ rcu_read_unlock(); \
+ i->iov_offset += __off; \
+ n = __off; \
}
-#define iterate_and_advance(i, n, v, I, B, K, X) { \
+#define __iterate_and_advance(i, n, base, len, off, I, K) { \
if (unlikely(i->count < n)) \
n = i->count; \
- if (i->count) { \
- size_t skip = i->iov_offset; \
- if (unlikely(i->type & ITER_BVEC)) { \
+ if (likely(n)) { \
+ if (likely(iter_is_iovec(i))) { \
+ const struct iovec *iov = i->iov; \
+ void __user *base; \
+ size_t len; \
+ iterate_iovec(i, n, base, len, off, \
+ iov, (I)) \
+ i->nr_segs -= iov - i->iov; \
+ i->iov = iov; \
+ } else if (iov_iter_is_bvec(i)) { \
const struct bio_vec *bvec = i->bvec; \
- struct bio_vec v; \
- struct bvec_iter __bi; \
- iterate_bvec(i, n, v, __bi, skip, (B)) \
- i->bvec = __bvec_iter_bvec(i->bvec, __bi); \
- i->nr_segs -= i->bvec - bvec; \
- skip = __bi.bi_bvec_done; \
- } else if (unlikely(i->type & ITER_KVEC)) { \
- const struct kvec *kvec; \
- struct kvec v; \
- iterate_kvec(i, n, v, kvec, skip, (K)) \
- if (skip == kvec->iov_len) { \
- kvec++; \
- skip = 0; \
- } \
+ void *base; \
+ size_t len; \
+ iterate_bvec(i, n, base, len, off, \
+ bvec, (K)) \
+ i->nr_segs -= bvec - i->bvec; \
+ i->bvec = bvec; \
+ } else if (iov_iter_is_kvec(i)) { \
+ const struct kvec *kvec = i->kvec; \
+ void *base; \
+ size_t len; \
+ iterate_iovec(i, n, base, len, off, \
+ kvec, (K)) \
i->nr_segs -= kvec - i->kvec; \
i->kvec = kvec; \
- } else if (unlikely(i->type & ITER_DISCARD)) { \
- skip += n; \
- } else if (unlikely(i->type & ITER_XARRAY)) { \
- struct bio_vec v; \
- iterate_xarray(i, n, v, skip, (X)) \
- } else { \
- const struct iovec *iov; \
- struct iovec v; \
- iterate_iovec(i, n, v, iov, skip, (I)) \
- if (skip == iov->iov_len) { \
- iov++; \
- skip = 0; \
- } \
- i->nr_segs -= iov - i->iov; \
- i->iov = iov; \
+ } else if (iov_iter_is_xarray(i)) { \
+ void *base; \
+ size_t len; \
+ iterate_xarray(i, n, base, len, off, \
+ (K)) \
} \
i->count -= n; \
- i->iov_offset = skip; \
} \
}
+#define iterate_and_advance(i, n, base, len, off, I, K) \
+ __iterate_and_advance(i, n, base, len, off, I, ((void)(K),0))
static int copyout(void __user *to, const void *from, size_t n)
{
@@ -469,19 +436,25 @@ out:
* Return 0 on success, or non-zero if the memory could not be accessed (i.e.
* because it is an invalid address).
*/
-int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
+int iov_iter_fault_in_readable(const struct iov_iter *i, size_t bytes)
{
- size_t skip = i->iov_offset;
- const struct iovec *iov;
- int err;
- struct iovec v;
+ if (iter_is_iovec(i)) {
+ const struct iovec *p;
+ size_t skip;
- if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
- iterate_iovec(i, bytes, v, iov, skip, ({
- err = fault_in_pages_readable(v.iov_base, v.iov_len);
+ if (bytes > i->count)
+ bytes = i->count;
+ for (p = i->iov, skip = i->iov_offset; bytes; p++, skip = 0) {
+ size_t len = min(bytes, p->iov_len - skip);
+ int err;
+
+ if (unlikely(!len))
+ continue;
+ err = fault_in_pages_readable(p->iov_base + skip, len);
if (unlikely(err))
- return err;
- 0;}))
+ return err;
+ bytes -= len;
+ }
}
return 0;
}
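
The rewritten helper walks the iovec array directly rather than going through
iterate_iovec(), and now takes a const iterator. The usual caller pattern is
unchanged; a sketch of the pre-faulting done before taking locks under which
page faults are not allowed:

    if (iov_iter_fault_in_readable(i, bytes))
            return -EFAULT; /* user pages could not be faulted in */
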
@@ -492,19 +465,14 @@ void iov_iter_init(struct iov_iter *i, unsigned int direction,
size_t count)
{
WARN_ON(direction & ~(READ | WRITE));
- direction &= READ | WRITE;
-
- /* It will get better. Eventually... */
- if (uaccess_kernel()) {
- i->type = ITER_KVEC | direction;
- i->kvec = (struct kvec *)iov;
- } else {
- i->type = ITER_IOVEC | direction;
- i->iov = iov;
- }
- i->nr_segs = nr_segs;
- i->iov_offset = 0;
- i->count = count;
+ *i = (struct iov_iter) {
+ .iter_type = ITER_IOVEC,
+ .data_source = direction,
+ .iov = iov,
+ .nr_segs = nr_segs,
+ .iov_offset = 0,
+ .count = count
+ };
}
EXPORT_SYMBOL(iov_iter_init);
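
iov_iter_init() now fills the structure with a designated initializer and drops
the uaccess_kernel() special case, so it always builds an ITER_IOVEC iterator
(kernel-space callers must use iov_iter_kvec() instead). A usage sketch with
hypothetical ubuf/len:

    struct iovec iov = { .iov_base = ubuf, .iov_len = len };
    struct iov_iter iter;

    iov_iter_init(&iter, READ, &iov, 1, len); /* READ: data lands in ubuf */
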
@@ -613,55 +581,45 @@ static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
}
static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
- struct csum_state *csstate,
- struct iov_iter *i)
+ struct iov_iter *i, __wsum *sump)
{
struct pipe_inode_info *pipe = i->pipe;
unsigned int p_mask = pipe->ring_size - 1;
- __wsum sum = csstate->csum;
- size_t off = csstate->off;
+ __wsum sum = *sump;
+ size_t off = 0;
unsigned int i_head;
- size_t n, r;
+ size_t r;
if (!sanity(i))
return 0;
- bytes = n = push_pipe(i, bytes, &i_head, &r);
- if (unlikely(!n))
- return 0;
- do {
- size_t chunk = min_t(size_t, n, PAGE_SIZE - r);
- char *p = kmap_atomic(pipe->bufs[i_head & p_mask].page);
- sum = csum_and_memcpy(p + r, addr, chunk, sum, off);
- kunmap_atomic(p);
+ bytes = push_pipe(i, bytes, &i_head, &r);
+ while (bytes) {
+ size_t chunk = min_t(size_t, bytes, PAGE_SIZE - r);
+ char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page);
+ sum = csum_and_memcpy(p + r, addr + off, chunk, sum, off);
+ kunmap_local(p);
i->head = i_head;
i->iov_offset = r + chunk;
- n -= chunk;
+ bytes -= chunk;
off += chunk;
- addr += chunk;
r = 0;
i_head++;
- } while (n);
- i->count -= bytes;
- csstate->csum = sum;
- csstate->off = off;
- return bytes;
+ }
+ *sump = sum;
+ i->count -= off;
+ return off;
}
size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
- const char *from = addr;
if (unlikely(iov_iter_is_pipe(i)))
return copy_pipe_to_iter(addr, bytes, i);
if (iter_is_iovec(i))
might_fault();
- iterate_and_advance(i, bytes, v,
- copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
- memcpy_to_page(v.bv_page, v.bv_offset,
- (from += v.bv_len) - v.bv_len, v.bv_len),
- memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
- memcpy_to_page(v.bv_page, v.bv_offset,
- (from += v.bv_len) - v.bv_len, v.bv_len)
+ iterate_and_advance(i, bytes, base, len, off,
+ copyout(base, addr + off, len),
+ memcpy(base, addr + off, len)
)
return bytes;
@@ -678,19 +636,6 @@ static int copyout_mc(void __user *to, const void *from, size_t n)
return n;
}
-static unsigned long copy_mc_to_page(struct page *page, size_t offset,
- const char *from, size_t len)
-{
- unsigned long ret;
- char *to;
-
- to = kmap_atomic(page);
- ret = copy_mc_to_kernel(to + offset, from, len);
- kunmap_atomic(to);
-
- return ret;
-}
-
static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
struct iov_iter *i)
{
@@ -702,25 +647,23 @@ static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
if (!sanity(i))
return 0;
- bytes = n = push_pipe(i, bytes, &i_head, &off);
- if (unlikely(!n))
- return 0;
- do {
+ n = push_pipe(i, bytes, &i_head, &off);
+ while (n) {
size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
+ char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page);
unsigned long rem;
-
- rem = copy_mc_to_page(pipe->bufs[i_head & p_mask].page,
- off, addr, chunk);
+ rem = copy_mc_to_kernel(p + off, addr + xfer, chunk);
+ chunk -= rem;
+ kunmap_local(p);
i->head = i_head;
- i->iov_offset = off + chunk - rem;
- xfer += chunk - rem;
+ i->iov_offset = off + chunk;
+ xfer += chunk;
if (rem)
break;
n -= chunk;
- addr += chunk;
off = 0;
i_head++;
- } while (n);
+ }
i->count -= xfer;
return xfer;
}
@@ -750,46 +693,13 @@ static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
*/
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
- const char *from = addr;
- unsigned long rem, curr_addr, s_addr = (unsigned long) addr;
-
if (unlikely(iov_iter_is_pipe(i)))
return copy_mc_pipe_to_iter(addr, bytes, i);
if (iter_is_iovec(i))
might_fault();
- iterate_and_advance(i, bytes, v,
- copyout_mc(v.iov_base, (from += v.iov_len) - v.iov_len,
- v.iov_len),
- ({
- rem = copy_mc_to_page(v.bv_page, v.bv_offset,
- (from += v.bv_len) - v.bv_len, v.bv_len);
- if (rem) {
- curr_addr = (unsigned long) from;
- bytes = curr_addr - s_addr - rem;
- return bytes;
- }
- }),
- ({
- rem = copy_mc_to_kernel(v.iov_base, (from += v.iov_len)
- - v.iov_len, v.iov_len);
- if (rem) {
- curr_addr = (unsigned long) from;
- bytes = curr_addr - s_addr - rem;
- return bytes;
- }
- }),
- ({
- rem = copy_mc_to_page(v.bv_page, v.bv_offset,
- (from += v.bv_len) - v.bv_len, v.bv_len);
- if (rem) {
- curr_addr = (unsigned long) from;
- bytes = curr_addr - s_addr - rem;
- rcu_read_unlock();
- i->iov_offset += bytes;
- i->count -= bytes;
- return bytes;
- }
- })
+ __iterate_and_advance(i, bytes, base, len, off,
+ copyout_mc(base, addr + off, len),
+ copy_mc_to_kernel(base, addr + off, len)
)
return bytes;
@@ -799,70 +709,30 @@ EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
- char *to = addr;
if (unlikely(iov_iter_is_pipe(i))) {
WARN_ON(1);
return 0;
}
if (iter_is_iovec(i))
might_fault();
- iterate_and_advance(i, bytes, v,
- copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
- memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
- v.bv_offset, v.bv_len),
- memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
- memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
- v.bv_offset, v.bv_len)
+ iterate_and_advance(i, bytes, base, len, off,
+ copyin(addr + off, base, len),
+ memcpy(addr + off, base, len)
)
return bytes;
}
EXPORT_SYMBOL(_copy_from_iter);
-bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
-{
- char *to = addr;
- if (unlikely(iov_iter_is_pipe(i))) {
- WARN_ON(1);
- return false;
- }
- if (unlikely(i->count < bytes))
- return false;
-
- if (iter_is_iovec(i))
- might_fault();
- iterate_all_kinds(i, bytes, v, ({
- if (copyin((to += v.iov_len) - v.iov_len,
- v.iov_base, v.iov_len))
- return false;
- 0;}),
- memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
- v.bv_offset, v.bv_len),
- memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
- memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
- v.bv_offset, v.bv_len)
- )
-
- iov_iter_advance(i, bytes);
- return true;
-}
-EXPORT_SYMBOL(_copy_from_iter_full);
-
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
- char *to = addr;
if (unlikely(iov_iter_is_pipe(i))) {
WARN_ON(1);
return 0;
}
- iterate_and_advance(i, bytes, v,
- __copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
- v.iov_base, v.iov_len),
- memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
- v.bv_offset, v.bv_len),
- memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
- memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
- v.bv_offset, v.bv_len)
+ iterate_and_advance(i, bytes, base, len, off,
+ __copy_from_user_inatomic_nocache(addr + off, base, len),
+ memcpy(addr + off, base, len)
)
return bytes;
@@ -886,20 +756,13 @@ EXPORT_SYMBOL(_copy_from_iter_nocache);
*/
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
- char *to = addr;
if (unlikely(iov_iter_is_pipe(i))) {
WARN_ON(1);
return 0;
}
- iterate_and_advance(i, bytes, v,
- __copy_from_user_flushcache((to += v.iov_len) - v.iov_len,
- v.iov_base, v.iov_len),
- memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
- v.bv_offset, v.bv_len),
- memcpy_flushcache((to += v.iov_len) - v.iov_len, v.iov_base,
- v.iov_len),
- memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
- v.bv_offset, v.bv_len)
+ iterate_and_advance(i, bytes, base, len, off,
+ __copy_from_user_flushcache(addr + off, base, len),
+ memcpy_flushcache(addr + off, base, len)
)
return bytes;
@@ -907,32 +770,6 @@ size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
#endif
-bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
-{
- char *to = addr;
- if (unlikely(iov_iter_is_pipe(i))) {
- WARN_ON(1);
- return false;
- }
- if (unlikely(i->count < bytes))
- return false;
- iterate_all_kinds(i, bytes, v, ({
- if (__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
- v.iov_base, v.iov_len))
- return false;
- 0;}),
- memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
- v.bv_offset, v.bv_len),
- memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
- memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
- v.bv_offset, v.bv_len)
- )
-
- iov_iter_advance(i, bytes);
- return true;
-}
-EXPORT_SYMBOL(_copy_from_iter_full_nocache);
-
static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
{
struct page *head;
@@ -957,22 +794,51 @@ static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
return false;
}
+static size_t __copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
+ struct iov_iter *i)
+{
+ if (likely(iter_is_iovec(i)))
+ return copy_page_to_iter_iovec(page, offset, bytes, i);
+ if (iov_iter_is_bvec(i) || iov_iter_is_kvec(i) || iov_iter_is_xarray(i)) {
+ void *kaddr = kmap_local_page(page);
+ size_t wanted = _copy_to_iter(kaddr + offset, bytes, i);
+ kunmap_local(kaddr);
+ return wanted;
+ }
+ if (iov_iter_is_pipe(i))
+ return copy_page_to_iter_pipe(page, offset, bytes, i);
+ if (unlikely(iov_iter_is_discard(i))) {
+ if (unlikely(i->count < bytes))
+ bytes = i->count;
+ i->count -= bytes;
+ return bytes;
+ }
+ WARN_ON(1);
+ return 0;
+}
+
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i)
{
+ size_t res = 0;
if (unlikely(!page_copy_sane(page, offset, bytes)))
return 0;
- if (i->type & (ITER_BVEC | ITER_KVEC | ITER_XARRAY)) {
- void *kaddr = kmap_atomic(page);
- size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
- kunmap_atomic(kaddr);
- return wanted;
- } else if (unlikely(iov_iter_is_discard(i)))
- return bytes;
- else if (likely(!iov_iter_is_pipe(i)))
- return copy_page_to_iter_iovec(page, offset, bytes, i);
- else
- return copy_page_to_iter_pipe(page, offset, bytes, i);
+ page += offset / PAGE_SIZE; // first subpage
+ offset %= PAGE_SIZE;
+ while (1) {
+ size_t n = __copy_page_to_iter(page, offset,
+ min(bytes, (size_t)PAGE_SIZE - offset), i);
+ res += n;
+ bytes -= n;
+ if (!bytes || !n)
+ break;
+ offset += n;
+ if (offset == PAGE_SIZE) {
+ page++;
+ offset = 0;
+ }
+ }
+ return res;
}
EXPORT_SYMBOL(copy_page_to_iter);
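
copy_page_to_iter() now loops over subpages, so offset + bytes may legitimately
extend past PAGE_SIZE when the page belongs to a compound allocation. A hedged
sketch, assuming page and iter are already set up and page is compound:

    /* copies 64 bytes from this subpage, then 64 from the next one */
    copied = copy_page_to_iter(page, PAGE_SIZE - 64, 128, iter);
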
@@ -981,17 +847,16 @@ size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
{
if (unlikely(!page_copy_sane(page, offset, bytes)))
return 0;
- if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
- WARN_ON(1);
- return 0;
- }
- if (i->type & (ITER_BVEC | ITER_KVEC | ITER_XARRAY)) {
- void *kaddr = kmap_atomic(page);
+ if (likely(iter_is_iovec(i)))
+ return copy_page_from_iter_iovec(page, offset, bytes, i);
+ if (iov_iter_is_bvec(i) || iov_iter_is_kvec(i) || iov_iter_is_xarray(i)) {
+ void *kaddr = kmap_local_page(page);
size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
- kunmap_atomic(kaddr);
+ kunmap_local(kaddr);
return wanted;
- } else
- return copy_page_from_iter_iovec(page, offset, bytes, i);
+ }
+ WARN_ON(1);
+ return 0;
}
EXPORT_SYMBOL(copy_page_from_iter);
@@ -1011,7 +876,9 @@ static size_t pipe_zero(size_t bytes, struct iov_iter *i)
do {
size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
- memzero_page(pipe->bufs[i_head & p_mask].page, off, chunk);
+ char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page);
+ memset(p + off, 0, chunk);
+ kunmap_local(p);
i->head = i_head;
i->iov_offset = off + chunk;
n -= chunk;
@@ -1026,19 +893,17 @@ size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
if (unlikely(iov_iter_is_pipe(i)))
return pipe_zero(bytes, i);
- iterate_and_advance(i, bytes, v,
- clear_user(v.iov_base, v.iov_len),
- memzero_page(v.bv_page, v.bv_offset, v.bv_len),
- memset(v.iov_base, 0, v.iov_len),
- memzero_page(v.bv_page, v.bv_offset, v.bv_len)
+ iterate_and_advance(i, bytes, base, len, count,
+ clear_user(base, len),
+ memset(base, 0, len)
)
return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);
-size_t iov_iter_copy_from_user_atomic(struct page *page,
- struct iov_iter *i, unsigned long offset, size_t bytes)
+size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t bytes,
+ struct iov_iter *i)
{
char *kaddr = kmap_atomic(page), *p = kaddr + offset;
if (unlikely(!page_copy_sane(page, offset, bytes))) {
@@ -1050,18 +915,14 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
WARN_ON(1);
return 0;
}
- iterate_all_kinds(i, bytes, v,
- copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
- memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
- v.bv_offset, v.bv_len),
- memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
- memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
- v.bv_offset, v.bv_len)
+ iterate_and_advance(i, bytes, base, len, off,
+ copyin(p + off, base, len),
+ memcpy(p + off, base, len)
)
kunmap_atomic(kaddr);
return bytes;
}
-EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
+EXPORT_SYMBOL(copy_page_from_iter_atomic);
static inline void pipe_truncate(struct iov_iter *i)
{
@@ -1092,8 +953,6 @@ static inline void pipe_truncate(struct iov_iter *i)
static void pipe_advance(struct iov_iter *i, size_t size)
{
struct pipe_inode_info *pipe = i->pipe;
- if (unlikely(i->count < size))
- size = i->count;
if (size) {
struct pipe_buffer *buf;
unsigned int p_mask = pipe->ring_size - 1;
@@ -1132,27 +991,42 @@ static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
i->iov_offset = bi.bi_bvec_done;
}
-void iov_iter_advance(struct iov_iter *i, size_t size)
+static void iov_iter_iovec_advance(struct iov_iter *i, size_t size)
{
- if (unlikely(iov_iter_is_pipe(i))) {
- pipe_advance(i, size);
- return;
- }
- if (unlikely(iov_iter_is_discard(i))) {
- i->count -= size;
+ const struct iovec *iov, *end;
+
+ if (!i->count)
return;
+ i->count -= size;
+
+ size += i->iov_offset; // from beginning of current segment
+ for (iov = i->iov, end = iov + i->nr_segs; iov < end; iov++) {
+ if (likely(size < iov->iov_len))
+ break;
+ size -= iov->iov_len;
}
- if (unlikely(iov_iter_is_xarray(i))) {
- size = min(size, i->count);
+ i->iov_offset = size;
+ i->nr_segs -= iov - i->iov;
+ i->iov = iov;
+}
+
+void iov_iter_advance(struct iov_iter *i, size_t size)
+{
+ if (unlikely(i->count < size))
+ size = i->count;
+ if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) {
+ /* iovec and kvec have identical layouts */
+ iov_iter_iovec_advance(i, size);
+ } else if (iov_iter_is_bvec(i)) {
+ iov_iter_bvec_advance(i, size);
+ } else if (iov_iter_is_pipe(i)) {
+ pipe_advance(i, size);
+ } else if (unlikely(iov_iter_is_xarray(i))) {
i->iov_offset += size;
i->count -= size;
- return;
- }
- if (iov_iter_is_bvec(i)) {
- iov_iter_bvec_advance(i, size);
- return;
+ } else if (iov_iter_is_discard(i)) {
+ i->count -= size;
}
- iterate_and_advance(i, size, v, 0, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);
@@ -1234,16 +1108,13 @@ EXPORT_SYMBOL(iov_iter_revert);
*/
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
- if (unlikely(iov_iter_is_pipe(i)))
- return i->count; // it is a silly place, anyway
- if (i->nr_segs == 1)
- return i->count;
- if (unlikely(iov_iter_is_discard(i) || iov_iter_is_xarray(i)))
- return i->count;
- if (iov_iter_is_bvec(i))
- return min(i->count, i->bvec->bv_len - i->iov_offset);
- else
- return min(i->count, i->iov->iov_len - i->iov_offset);
+ if (i->nr_segs > 1) {
+ if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
+ return min(i->count, i->iov->iov_len - i->iov_offset);
+ if (iov_iter_is_bvec(i))
+ return min(i->count, i->bvec->bv_len - i->iov_offset);
+ }
+ return i->count;
}
EXPORT_SYMBOL(iov_iter_single_seg_count);
@@ -1252,11 +1123,14 @@ void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
size_t count)
{
WARN_ON(direction & ~(READ | WRITE));
- i->type = ITER_KVEC | (direction & (READ | WRITE));
- i->kvec = kvec;
- i->nr_segs = nr_segs;
- i->iov_offset = 0;
- i->count = count;
+ *i = (struct iov_iter){
+ .iter_type = ITER_KVEC,
+ .data_source = direction,
+ .kvec = kvec,
+ .nr_segs = nr_segs,
+ .iov_offset = 0,
+ .count = count
+ };
}
EXPORT_SYMBOL(iov_iter_kvec);
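
The kvec constructor gets the same designated-initializer treatment. A sketch
of building a kernel-space iterator that supplies data, with hypothetical
kbuf/len:

    struct kvec kv = { .iov_base = kbuf, .iov_len = len };
    struct iov_iter iter;

    iov_iter_kvec(&iter, WRITE, &kv, 1, len); /* WRITE: kv is the source */
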
@@ -1265,11 +1139,14 @@ void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
size_t count)
{
WARN_ON(direction & ~(READ | WRITE));
- i->type = ITER_BVEC | (direction & (READ | WRITE));
- i->bvec = bvec;
- i->nr_segs = nr_segs;
- i->iov_offset = 0;
- i->count = count;
+ *i = (struct iov_iter){
+ .iter_type = ITER_BVEC,
+ .data_source = direction,
+ .bvec = bvec,
+ .nr_segs = nr_segs,
+ .iov_offset = 0,
+ .count = count
+ };
}
EXPORT_SYMBOL(iov_iter_bvec);
@@ -1279,12 +1156,15 @@ void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
{
BUG_ON(direction != READ);
WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size));
- i->type = ITER_PIPE | READ;
- i->pipe = pipe;
- i->head = pipe->head;
- i->iov_offset = 0;
- i->count = count;
- i->start_head = i->head;
+ *i = (struct iov_iter){
+ .iter_type = ITER_PIPE,
+ .data_source = false,
+ .pipe = pipe,
+ .head = pipe->head,
+ .start_head = pipe->head,
+ .iov_offset = 0,
+ .count = count
+ };
}
EXPORT_SYMBOL(iov_iter_pipe);
@@ -1305,11 +1185,14 @@ void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
struct xarray *xarray, loff_t start, size_t count)
{
BUG_ON(direction & ~1);
- i->type = ITER_XARRAY | (direction & (READ | WRITE));
- i->xarray = xarray;
- i->xarray_start = start;
- i->count = count;
- i->iov_offset = 0;
+ *i = (struct iov_iter) {
+ .iter_type = ITER_XARRAY,
+ .data_source = direction,
+ .xarray = xarray,
+ .xarray_start = start,
+ .count = count,
+ .iov_offset = 0
+ };
}
EXPORT_SYMBOL(iov_iter_xarray);
@@ -1325,56 +1208,103 @@ EXPORT_SYMBOL(iov_iter_xarray);
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
{
BUG_ON(direction != READ);
- i->type = ITER_DISCARD | READ;
- i->count = count;
- i->iov_offset = 0;
+ *i = (struct iov_iter){
+ .iter_type = ITER_DISCARD,
+ .data_source = false,
+ .count = count,
+ .iov_offset = 0
+ };
}
EXPORT_SYMBOL(iov_iter_discard);
-unsigned long iov_iter_alignment(const struct iov_iter *i)
+static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
{
unsigned long res = 0;
size_t size = i->count;
+ size_t skip = i->iov_offset;
+ unsigned k;
+
+ for (k = 0; k < i->nr_segs; k++, skip = 0) {
+ size_t len = i->iov[k].iov_len - skip;
+ if (len) {
+ res |= (unsigned long)i->iov[k].iov_base + skip;
+ if (len > size)
+ len = size;
+ res |= len;
+ size -= len;
+ if (!size)
+ break;
+ }
+ }
+ return res;
+}
- if (unlikely(iov_iter_is_pipe(i))) {
+static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i)
+{
+ unsigned res = 0;
+ size_t size = i->count;
+ unsigned skip = i->iov_offset;
+ unsigned k;
+
+ for (k = 0; k < i->nr_segs; k++, skip = 0) {
+ size_t len = i->bvec[k].bv_len - skip;
+ res |= (unsigned long)i->bvec[k].bv_offset + skip;
+ if (len > size)
+ len = size;
+ res |= len;
+ size -= len;
+ if (!size)
+ break;
+ }
+ return res;
+}
+
+unsigned long iov_iter_alignment(const struct iov_iter *i)
+{
+ /* iovec and kvec have identical layouts */
+ if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
+ return iov_iter_alignment_iovec(i);
+
+ if (iov_iter_is_bvec(i))
+ return iov_iter_alignment_bvec(i);
+
+ if (iov_iter_is_pipe(i)) {
unsigned int p_mask = i->pipe->ring_size - 1;
+ size_t size = i->count;
if (size && i->iov_offset && allocated(&i->pipe->bufs[i->head & p_mask]))
return size | i->iov_offset;
return size;
}
- if (unlikely(iov_iter_is_xarray(i)))
+
+ if (iov_iter_is_xarray(i))
return (i->xarray_start + i->iov_offset) | i->count;
- iterate_all_kinds(i, size, v,
- (res |= (unsigned long)v.iov_base | v.iov_len, 0),
- res |= v.bv_offset | v.bv_len,
- res |= (unsigned long)v.iov_base | v.iov_len,
- res |= v.bv_offset | v.bv_len
- )
- return res;
+
+ return 0;
}
EXPORT_SYMBOL(iov_iter_alignment);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
unsigned long res = 0;
+ unsigned long v = 0;
size_t size = i->count;
+ unsigned k;
- if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
- WARN_ON(1);
+ if (WARN_ON(!iter_is_iovec(i)))
return ~0U;
- }
- iterate_all_kinds(i, size, v,
- (res |= (!res ? 0 : (unsigned long)v.iov_base) |
- (size != v.iov_len ? size : 0), 0),
- (res |= (!res ? 0 : (unsigned long)v.bv_offset) |
- (size != v.bv_len ? size : 0)),
- (res |= (!res ? 0 : (unsigned long)v.iov_base) |
- (size != v.iov_len ? size : 0)),
- (res |= (!res ? 0 : (unsigned long)v.bv_offset) |
- (size != v.bv_len ? size : 0))
- );
+ for (k = 0; k < i->nr_segs; k++) {
+ if (i->iov[k].iov_len) {
+ unsigned long base = (unsigned long)i->iov[k].iov_base;
+ if (v) // if not the first one
+ res |= base | v; // this start | previous end
+ v = base + i->iov[k].iov_len;
+ if (size <= i->iov[k].iov_len)
+ break;
+ size -= i->iov[k].iov_len;
+ }
+ }
return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);
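
iov_iter_alignment() still reports the OR of every segment's address and
length, now computed by per-type helpers; callers typically test it against a
device's alignment mask. A sketch where lbs stands for a hypothetical logical
block size:

    if (iov_iter_alignment(iter) & (lbs - 1))
            return -EINVAL; /* some segment is misaligned for this device */
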
@@ -1409,9 +1339,6 @@ static ssize_t pipe_get_pages(struct iov_iter *i,
unsigned int iter_head, npages;
size_t capacity;
- if (!maxsize)
- return 0;
-
if (!sanity(i))
return -EFAULT;
@@ -1492,29 +1419,67 @@ static ssize_t iter_xarray_get_pages(struct iov_iter *i,
return actual;
}
+/* must be done on non-empty ITER_IOVEC one */
+static unsigned long first_iovec_segment(const struct iov_iter *i,
+ size_t *size, size_t *start,
+ size_t maxsize, unsigned maxpages)
+{
+ size_t skip;
+ long k;
+
+ for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) {
+ unsigned long addr = (unsigned long)i->iov[k].iov_base + skip;
+ size_t len = i->iov[k].iov_len - skip;
+
+ if (unlikely(!len))
+ continue;
+ if (len > maxsize)
+ len = maxsize;
+ len += (*start = addr % PAGE_SIZE);
+ if (len > maxpages * PAGE_SIZE)
+ len = maxpages * PAGE_SIZE;
+ *size = len;
+ return addr & PAGE_MASK;
+ }
+ BUG(); // if it had been empty, we wouldn't get called
+}
+
+/* must be done on non-empty ITER_BVEC one */
+static struct page *first_bvec_segment(const struct iov_iter *i,
+ size_t *size, size_t *start,
+ size_t maxsize, unsigned maxpages)
+{
+ struct page *page;
+ size_t skip = i->iov_offset, len;
+
+ len = i->bvec->bv_len - skip;
+ if (len > maxsize)
+ len = maxsize;
+ skip += i->bvec->bv_offset;
+ page = i->bvec->bv_page + skip / PAGE_SIZE;
+ len += (*start = skip % PAGE_SIZE);
+ if (len > maxpages * PAGE_SIZE)
+ len = maxpages * PAGE_SIZE;
+ *size = len;
+ return page;
+}
+
ssize_t iov_iter_get_pages(struct iov_iter *i,
struct page **pages, size_t maxsize, unsigned maxpages,
size_t *start)
{
+ size_t len;
+ int n, res;
+
if (maxsize > i->count)
maxsize = i->count;
+ if (!maxsize)
+ return 0;
- if (unlikely(iov_iter_is_pipe(i)))
- return pipe_get_pages(i, pages, maxsize, maxpages, start);
- if (unlikely(iov_iter_is_xarray(i)))
- return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
- if (unlikely(iov_iter_is_discard(i)))
- return -EFAULT;
-
- iterate_all_kinds(i, maxsize, v, ({
- unsigned long addr = (unsigned long)v.iov_base;
- size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
- int n;
- int res;
+ if (likely(iter_is_iovec(i))) {
+ unsigned long addr;
- if (len > maxpages * PAGE_SIZE)
- len = maxpages * PAGE_SIZE;
- addr &= ~(PAGE_SIZE - 1);
+ addr = first_iovec_segment(i, &len, start, maxsize, maxpages);
n = DIV_ROUND_UP(len, PAGE_SIZE);
res = get_user_pages_fast(addr, n,
iov_iter_rw(i) != WRITE ? FOLL_WRITE : 0,
@@ -1522,17 +1487,21 @@ ssize_t iov_iter_get_pages(struct iov_iter *i,
if (unlikely(res < 0))
return res;
return (res == n ? len : res * PAGE_SIZE) - *start;
- 0;}),({
- /* can't be more than PAGE_SIZE */
- *start = v.bv_offset;
- get_page(*pages = v.bv_page);
- return v.bv_len;
- }),({
- return -EFAULT;
- }),
- 0
- )
- return 0;
+ }
+ if (iov_iter_is_bvec(i)) {
+ struct page *page;
+
+ page = first_bvec_segment(i, &len, start, maxsize, maxpages);
+ n = DIV_ROUND_UP(len, PAGE_SIZE);
+ while (n--)
+ get_page(*pages++ = page++);
+ return len - *start;
+ }
+ if (iov_iter_is_pipe(i))
+ return pipe_get_pages(i, pages, maxsize, maxpages, start);
+ if (iov_iter_is_xarray(i))
+ return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
+ return -EFAULT;
}
EXPORT_SYMBOL(iov_iter_get_pages);
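/*
 * Minimal caller sketch for the rewritten fast path (hypothetical helper,
 * not part of the patch): pin at most one page of a user-backed iterator.
 * Note that iov_iter_get_pages() does not advance the iterator itself.
 */
static ssize_t pin_first_chunk(struct iov_iter *iter, struct page **pages)
{
	size_t start;
	ssize_t len;

	len = iov_iter_get_pages(iter, pages, PAGE_SIZE, 1, &start);
	if (len > 0)
		iov_iter_advance(iter, len);
	return len;
}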
@@ -1549,9 +1518,6 @@ static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
unsigned int iter_head, npages;
ssize_t n;
- if (!maxsize)
- return 0;
-
if (!sanity(i))
return -EFAULT;
@@ -1624,24 +1590,18 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
size_t *start)
{
struct page **p;
+ size_t len;
+ int n, res;
if (maxsize > i->count)
maxsize = i->count;
+ if (!maxsize)
+ return 0;
- if (unlikely(iov_iter_is_pipe(i)))
- return pipe_get_pages_alloc(i, pages, maxsize, start);
- if (unlikely(iov_iter_is_xarray(i)))
- return iter_xarray_get_pages_alloc(i, pages, maxsize, start);
- if (unlikely(iov_iter_is_discard(i)))
- return -EFAULT;
-
- iterate_all_kinds(i, maxsize, v, ({
- unsigned long addr = (unsigned long)v.iov_base;
- size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
- int n;
- int res;
+ if (likely(iter_is_iovec(i))) {
+ unsigned long addr;
- addr &= ~(PAGE_SIZE - 1);
+ addr = first_iovec_segment(i, &len, start, maxsize, ~0U);
n = DIV_ROUND_UP(len, PAGE_SIZE);
p = get_pages_array(n);
if (!p)
@@ -1654,61 +1614,42 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
}
*pages = p;
return (res == n ? len : res * PAGE_SIZE) - *start;
- 0;}),({
- /* can't be more than PAGE_SIZE */
- *start = v.bv_offset;
- *pages = p = get_pages_array(1);
+ }
+ if (iov_iter_is_bvec(i)) {
+ struct page *page;
+
+ page = first_bvec_segment(i, &len, start, maxsize, ~0U);
+ n = DIV_ROUND_UP(len, PAGE_SIZE);
+ *pages = p = get_pages_array(n);
if (!p)
return -ENOMEM;
- get_page(*p = v.bv_page);
- return v.bv_len;
- }),({
- return -EFAULT;
- }), 0
- )
- return 0;
+ while (n--)
+ get_page(*p++ = page++);
+ return len - *start;
+ }
+ if (iov_iter_is_pipe(i))
+ return pipe_get_pages_alloc(i, pages, maxsize, start);
+ if (iov_iter_is_xarray(i))
+ return iter_xarray_get_pages_alloc(i, pages, maxsize, start);
+ return -EFAULT;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
struct iov_iter *i)
{
- char *to = addr;
__wsum sum, next;
- size_t off = 0;
sum = *csum;
if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
WARN_ON(1);
return 0;
}
- iterate_and_advance(i, bytes, v, ({
- next = csum_and_copy_from_user(v.iov_base,
- (to += v.iov_len) - v.iov_len,
- v.iov_len);
- if (next) {
- sum = csum_block_add(sum, next, off);
- off += v.iov_len;
- }
- next ? 0 : v.iov_len;
- }), ({
- char *p = kmap_atomic(v.bv_page);
- sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
- p + v.bv_offset, v.bv_len,
- sum, off);
- kunmap_atomic(p);
- off += v.bv_len;
- }),({
- sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
- v.iov_base, v.iov_len,
- sum, off);
- off += v.iov_len;
+ iterate_and_advance(i, bytes, base, len, off, ({
+ next = csum_and_copy_from_user(base, addr + off, len);
+ sum = csum_block_add(sum, next, off);
+ next ? 0 : len;
}), ({
- char *p = kmap_atomic(v.bv_page);
- sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
- p + v.bv_offset, v.bv_len,
- sum, off);
- kunmap_atomic(p);
- off += v.bv_len;
+ sum = csum_and_memcpy(addr + off, base, len, sum, off);
})
)
*csum = sum;
@@ -1716,104 +1657,30 @@ size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
}
EXPORT_SYMBOL(csum_and_copy_from_iter);
-bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
- struct iov_iter *i)
-{
- char *to = addr;
- __wsum sum, next;
- size_t off = 0;
- sum = *csum;
- if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
- WARN_ON(1);
- return false;
- }
- if (unlikely(i->count < bytes))
- return false;
- iterate_all_kinds(i, bytes, v, ({
- next = csum_and_copy_from_user(v.iov_base,
- (to += v.iov_len) - v.iov_len,
- v.iov_len);
- if (!next)
- return false;
- sum = csum_block_add(sum, next, off);
- off += v.iov_len;
- 0;
- }), ({
- char *p = kmap_atomic(v.bv_page);
- sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
- p + v.bv_offset, v.bv_len,
- sum, off);
- kunmap_atomic(p);
- off += v.bv_len;
- }),({
- sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
- v.iov_base, v.iov_len,
- sum, off);
- off += v.iov_len;
- }), ({
- char *p = kmap_atomic(v.bv_page);
- sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
- p + v.bv_offset, v.bv_len,
- sum, off);
- kunmap_atomic(p);
- off += v.bv_len;
- })
- )
- *csum = sum;
- iov_iter_advance(i, bytes);
- return true;
-}
-EXPORT_SYMBOL(csum_and_copy_from_iter_full);
-
size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
struct iov_iter *i)
{
struct csum_state *csstate = _csstate;
- const char *from = addr;
__wsum sum, next;
- size_t off;
-
- if (unlikely(iov_iter_is_pipe(i)))
- return csum_and_copy_to_pipe_iter(addr, bytes, _csstate, i);
- sum = csstate->csum;
- off = csstate->off;
if (unlikely(iov_iter_is_discard(i))) {
WARN_ON(1); /* for now */
return 0;
}
- iterate_and_advance(i, bytes, v, ({
- next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
- v.iov_base,
- v.iov_len);
- if (next) {
- sum = csum_block_add(sum, next, off);
- off += v.iov_len;
- }
- next ? 0 : v.iov_len;
- }), ({
- char *p = kmap_atomic(v.bv_page);
- sum = csum_and_memcpy(p + v.bv_offset,
- (from += v.bv_len) - v.bv_len,
- v.bv_len, sum, off);
- kunmap_atomic(p);
- off += v.bv_len;
- }),({
- sum = csum_and_memcpy(v.iov_base,
- (from += v.iov_len) - v.iov_len,
- v.iov_len, sum, off);
- off += v.iov_len;
+
+ sum = csum_shift(csstate->csum, csstate->off);
+ if (unlikely(iov_iter_is_pipe(i)))
+ bytes = csum_and_copy_to_pipe_iter(addr, bytes, i, &sum);
+ else iterate_and_advance(i, bytes, base, len, off, ({
+ next = csum_and_copy_to_user(addr + off, base, len);
+ sum = csum_block_add(sum, next, off);
+ next ? 0 : len;
}), ({
- char *p = kmap_atomic(v.bv_page);
- sum = csum_and_memcpy(p + v.bv_offset,
- (from += v.bv_len) - v.bv_len,
- v.bv_len, sum, off);
- kunmap_atomic(p);
- off += v.bv_len;
+ sum = csum_and_memcpy(base, addr + off, len, sum, off);
})
)
- csstate->csum = sum;
- csstate->off = off;
+ csstate->csum = csum_shift(sum, csstate->off);
+ csstate->off += bytes;
return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);
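/*
 * Note on the csum_shift() pairing above (behaviour as in the generic
 * helper): the Internet checksum commutes with byte order only at even
 * offsets, so a checksum carried at an odd csstate->off is rotated by
 * 8 bits (ror32(sum, 8)) before new data is folded in, and rotated back
 * before being stored; csum_shift() is a no-op for even offsets.
 */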
@@ -1837,19 +1704,56 @@ size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
}
EXPORT_SYMBOL(hash_and_copy_to_iter);
-int iov_iter_npages(const struct iov_iter *i, int maxpages)
+static int iov_npages(const struct iov_iter *i, int maxpages)
{
- size_t size = i->count;
+ size_t skip = i->iov_offset, size = i->count;
+ const struct iovec *p;
int npages = 0;
- if (!size)
- return 0;
- if (unlikely(iov_iter_is_discard(i)))
- return 0;
+ for (p = i->iov; size; skip = 0, p++) {
+ unsigned offs = offset_in_page(p->iov_base + skip);
+ size_t len = min(p->iov_len - skip, size);
- if (unlikely(iov_iter_is_pipe(i))) {
- struct pipe_inode_info *pipe = i->pipe;
+ if (len) {
+ size -= len;
+ npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
+ if (unlikely(npages > maxpages))
+ return maxpages;
+ }
+ }
+ return npages;
+}
+
+static int bvec_npages(const struct iov_iter *i, int maxpages)
+{
+ size_t skip = i->iov_offset, size = i->count;
+ const struct bio_vec *p;
+ int npages = 0;
+
+ for (p = i->bvec; size; skip = 0, p++) {
+ unsigned offs = (p->bv_offset + skip) % PAGE_SIZE;
+ size_t len = min(p->bv_len - skip, size);
+
+ size -= len;
+ npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
+ if (unlikely(npages > maxpages))
+ return maxpages;
+ }
+ return npages;
+}
+
+int iov_iter_npages(const struct iov_iter *i, int maxpages)
+{
+ if (unlikely(!i->count))
+ return 0;
+ /* iovec and kvec have identical layouts */
+ if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
+ return iov_npages(i, maxpages);
+ if (iov_iter_is_bvec(i))
+ return bvec_npages(i, maxpages);
+ if (iov_iter_is_pipe(i)) {
unsigned int iter_head;
+ int npages;
size_t off;
if (!sanity(i))
@@ -1857,44 +1761,15 @@ int iov_iter_npages(const struct iov_iter *i, int maxpages)
data_start(i, &iter_head, &off);
/* some of this one + all after this one */
- npages = pipe_space_for_user(iter_head, pipe->tail, pipe);
- if (npages >= maxpages)
- return maxpages;
- } else if (unlikely(iov_iter_is_xarray(i))) {
- unsigned offset;
-
- offset = (i->xarray_start + i->iov_offset) & ~PAGE_MASK;
-
- npages = 1;
- if (size > PAGE_SIZE - offset) {
- size -= PAGE_SIZE - offset;
- npages += size >> PAGE_SHIFT;
- size &= ~PAGE_MASK;
- if (size)
- npages++;
- }
- if (npages >= maxpages)
- return maxpages;
- } else iterate_all_kinds(i, size, v, ({
- unsigned long p = (unsigned long)v.iov_base;
- npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
- - p / PAGE_SIZE;
- if (npages >= maxpages)
- return maxpages;
- 0;}),({
- npages++;
- if (npages >= maxpages)
- return maxpages;
- }),({
- unsigned long p = (unsigned long)v.iov_base;
- npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
- - p / PAGE_SIZE;
- if (npages >= maxpages)
- return maxpages;
- }),
- 0
- )
- return npages;
+ npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
+ return min(npages, maxpages);
+ }
+ if (iov_iter_is_xarray(i)) {
+ unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE;
+ int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
+ return min(npages, maxpages);
+ }
+ return 0;
}
EXPORT_SYMBOL(iov_iter_npages);
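/*
 * Worked example for iov_npages() above (4K pages, illustrative): a single
 * iovec {base 0x1ff8, len 16} gives offs = 0xff8, and
 * DIV_ROUND_UP(0xff8 + 16, PAGE_SIZE) = 2, since those 16 bytes straddle a
 * page boundary even though far less than one page is covered.
 */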
@@ -2093,30 +1968,3 @@ int import_single_range(int rw, void __user *buf, size_t len,
return 0;
}
EXPORT_SYMBOL(import_single_range);
-
-int iov_iter_for_each_range(struct iov_iter *i, size_t bytes,
- int (*f)(struct kvec *vec, void *context),
- void *context)
-{
- struct kvec w;
- int err = -EINVAL;
- if (!bytes)
- return 0;
-
- iterate_all_kinds(i, bytes, v, -EINVAL, ({
- w.iov_base = kmap(v.bv_page) + v.bv_offset;
- w.iov_len = v.bv_len;
- err = f(&w, context);
- kunmap(v.bv_page);
- err;}), ({
- w = v;
- err = f(&w, context);}), ({
- w.iov_base = kmap(v.bv_page) + v.bv_offset;
- w.iov_len = v.bv_len;
- err = f(&w, context);
- kunmap(v.bv_page);
- err;})
- )
- return err;
-}
-EXPORT_SYMBOL(iov_iter_for_each_range);
diff --git a/lib/kstrtox.c b/lib/kstrtox.c
index a118b0b1e9b2..059b8b00dc53 100644
--- a/lib/kstrtox.c
+++ b/lib/kstrtox.c
@@ -14,11 +14,12 @@
*/
#include <linux/ctype.h>
#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/math64.h>
#include <linux/export.h>
+#include <linux/kstrtox.h>
+#include <linux/math64.h>
#include <linux/types.h>
#include <linux/uaccess.h>
+
#include "kstrtox.h"
const char *_parse_integer_fixup_radix(const char *s, unsigned int *base)
@@ -39,20 +40,22 @@ const char *_parse_integer_fixup_radix(const char *s, unsigned int *base)
/*
* Convert non-negative integer string representation in explicitly given radix
- * to an integer.
+ * to an integer. A maximum of max_chars characters will be converted.
+ *
* Return number of characters consumed maybe or-ed with overflow bit.
* If overflow occurs, result integer (incorrect) is still returned.
*
* Don't you dare use this function.
*/
-unsigned int _parse_integer(const char *s, unsigned int base, unsigned long long *p)
+unsigned int _parse_integer_limit(const char *s, unsigned int base, unsigned long long *p,
+ size_t max_chars)
{
unsigned long long res;
unsigned int rv;
res = 0;
rv = 0;
- while (1) {
+ while (max_chars--) {
unsigned int c = *s;
unsigned int lc = c | 0x20; /* don't tolower() this line */
unsigned int val;
@@ -82,6 +85,11 @@ unsigned int _parse_integer(const char *s, unsigned int base, unsigned long long
return rv;
}
+unsigned int _parse_integer(const char *s, unsigned int base, unsigned long long *p)
+{
+ return _parse_integer_limit(s, base, p, INT_MAX);
+}
+
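/*
 * Hypothetical caller sketch (not part of the patch): the width-limited
 * variant lets a scanf-style "%3u" conversion stop after three characters.
 */
static bool parse_three_digits(const char *s, unsigned long long *val)
{
	unsigned int rv = _parse_integer_limit(s, 10, val, 3);

	/* rv counts consumed characters; KSTRTOX_OVERFLOW flags overflow */
	return rv && !(rv & KSTRTOX_OVERFLOW);
}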
static int _kstrtoull(const char *s, unsigned int base, unsigned long long *res)
{
unsigned long long _res;
diff --git a/lib/kstrtox.h b/lib/kstrtox.h
index 3b4637bcd254..158c400ca865 100644
--- a/lib/kstrtox.h
+++ b/lib/kstrtox.h
@@ -4,6 +4,8 @@
#define KSTRTOX_OVERFLOW (1U << 31)
const char *_parse_integer_fixup_radix(const char *s, unsigned int *base);
+unsigned int _parse_integer_limit(const char *s, unsigned int base, unsigned long long *res,
+ size_t max_chars);
unsigned int _parse_integer(const char *s, unsigned int base, unsigned long long *res);
#endif
diff --git a/lib/kunit/debugfs.c b/lib/kunit/debugfs.c
index 9214c493d8b7..b71db0abc12b 100644
--- a/lib/kunit/debugfs.c
+++ b/lib/kunit/debugfs.c
@@ -64,7 +64,7 @@ static int debugfs_print_results(struct seq_file *seq, void *v)
debugfs_print_result(seq, suite, test_case);
seq_printf(seq, "%s %d - %s\n",
- kunit_status_to_string(success), 1, suite->name);
+ kunit_status_to_ok_not_ok(success), 1, suite->name);
return 0;
}
diff --git a/lib/kunit/executor.c b/lib/kunit/executor.c
index 15832ed44668..acd1de436f59 100644
--- a/lib/kunit/executor.c
+++ b/lib/kunit/executor.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/reboot.h>
#include <kunit/test.h>
#include <linux/glob.h>
#include <linux/moduleparam.h>
@@ -13,13 +14,17 @@ extern struct kunit_suite * const * const __kunit_suites_end[];
#if IS_BUILTIN(CONFIG_KUNIT)
-static char *filter_glob;
-module_param(filter_glob, charp, 0);
+static char *filter_glob_param;
+module_param_named(filter_glob, filter_glob_param, charp, 0);
MODULE_PARM_DESC(filter_glob,
"Filter which KUnit test suites run at boot-time, e.g. list*");
+static char *kunit_shutdown;
+core_param(kunit_shutdown, kunit_shutdown, charp, 0644);
+
static struct kunit_suite * const *
-kunit_filter_subsuite(struct kunit_suite * const * const subsuite)
+kunit_filter_subsuite(struct kunit_suite * const * const subsuite,
+ const char *filter_glob)
{
int i, n = 0;
struct kunit_suite **filtered;
@@ -52,19 +57,14 @@ struct suite_set {
struct kunit_suite * const * const *end;
};
-static struct suite_set kunit_filter_suites(void)
+static struct suite_set kunit_filter_suites(const struct suite_set *suite_set,
+ const char *filter_glob)
{
int i;
struct kunit_suite * const **copy, * const *filtered_subsuite;
struct suite_set filtered;
- const size_t max = __kunit_suites_end - __kunit_suites_start;
-
- if (!filter_glob) {
- filtered.start = __kunit_suites_start;
- filtered.end = __kunit_suites_end;
- return filtered;
- }
+ const size_t max = suite_set->end - suite_set->start;
copy = kmalloc_array(max, sizeof(*filtered.start), GFP_KERNEL);
filtered.start = copy;
@@ -74,7 +74,7 @@ static struct suite_set kunit_filter_suites(void)
}
for (i = 0; i < max; ++i) {
- filtered_subsuite = kunit_filter_subsuite(__kunit_suites_start[i]);
+ filtered_subsuite = kunit_filter_subsuite(suite_set->start[i], filter_glob);
if (filtered_subsuite)
*copy++ = filtered_subsuite;
}
@@ -82,6 +82,20 @@ static struct suite_set kunit_filter_suites(void)
return filtered;
}
+static void kunit_handle_shutdown(void)
+{
+ if (!kunit_shutdown)
+ return;
+
+ if (!strcmp(kunit_shutdown, "poweroff"))
+ kernel_power_off();
+ else if (!strcmp(kunit_shutdown, "halt"))
+ kernel_halt();
+ else if (!strcmp(kunit_shutdown, "reboot"))
+ kernel_restart(NULL);
+}
+
static void kunit_print_tap_header(struct suite_set *suite_set)
{
struct kunit_suite * const * const *suites, * const *subsuite;
@@ -98,21 +112,32 @@ static void kunit_print_tap_header(struct suite_set *suite_set)
int kunit_run_all_tests(void)
{
struct kunit_suite * const * const *suites;
+ struct suite_set suite_set = {
+ .start = __kunit_suites_start,
+ .end = __kunit_suites_end,
+ };
- struct suite_set suite_set = kunit_filter_suites();
+ if (filter_glob_param)
+ suite_set = kunit_filter_suites(&suite_set, filter_glob_param);
kunit_print_tap_header(&suite_set);
for (suites = suite_set.start; suites < suite_set.end; suites++)
__kunit_test_suites_init(*suites);
- if (filter_glob) { /* a copy was made of each array */
+ if (filter_glob_param) { /* a copy was made of each array */
for (suites = suite_set.start; suites < suite_set.end; suites++)
kfree(*suites);
kfree(suite_set.start);
}
+ kunit_handle_shutdown();
+
return 0;
}
+#if IS_BUILTIN(CONFIG_KUNIT_TEST)
+#include "executor_test.c"
+#endif
+
#endif /* IS_BUILTIN(CONFIG_KUNIT) */
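/*
 * Example invocation of the two parameters added above (the exact command
 * line is illustrative): booting a test kernel with
 *
 *	kunit.filter_glob=kunit_executor* kunit_shutdown=poweroff
 *
 * runs only the matching built-in suites and powers the machine off once
 * kunit_run_all_tests() returns, which lets a harness script a
 * run-and-exit cycle.
 */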
diff --git a/lib/kunit/executor_test.c b/lib/kunit/executor_test.c
new file mode 100644
index 000000000000..cdbe54b16501
--- /dev/null
+++ b/lib/kunit/executor_test.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit test for the KUnit executor.
+ *
+ * Copyright (C) 2021, Google LLC.
+ * Author: Daniel Latypov <dlatypov@google.com>
+ */
+
+#include <kunit/test.h>
+
+static void kfree_at_end(struct kunit *test, const void *to_free);
+static struct kunit_suite *alloc_fake_suite(struct kunit *test,
+ const char *suite_name);
+
+static void filter_subsuite_test(struct kunit *test)
+{
+ struct kunit_suite *subsuite[3] = {NULL, NULL, NULL};
+ struct kunit_suite * const *filtered;
+
+ subsuite[0] = alloc_fake_suite(test, "suite1");
+ subsuite[1] = alloc_fake_suite(test, "suite2");
+
+ /* Want: suite1, suite2, NULL -> suite2, NULL */
+ filtered = kunit_filter_subsuite(subsuite, "suite2*");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered);
+ kfree_at_end(test, filtered);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered[0]);
+ KUNIT_EXPECT_STREQ(test, (const char *)filtered[0]->name, "suite2");
+
+ KUNIT_EXPECT_FALSE(test, filtered[1]);
+}
+
+static void filter_subsuite_to_empty_test(struct kunit *test)
+{
+ struct kunit_suite *subsuite[3] = {NULL, NULL, NULL};
+ struct kunit_suite * const *filtered;
+
+ subsuite[0] = alloc_fake_suite(test, "suite1");
+ subsuite[1] = alloc_fake_suite(test, "suite2");
+
+ filtered = kunit_filter_subsuite(subsuite, "not_found");
+ kfree_at_end(test, filtered); /* just in case */
+
+ KUNIT_EXPECT_FALSE_MSG(test, filtered,
+ "should be NULL to indicate no match");
+}
+
+static void kfree_subsuites_at_end(struct kunit *test, struct suite_set *suite_set)
+{
+ struct kunit_suite * const * const *suites;
+
+ kfree_at_end(test, suite_set->start);
+ for (suites = suite_set->start; suites < suite_set->end; suites++)
+ kfree_at_end(test, *suites);
+}
+
+static void filter_suites_test(struct kunit *test)
+{
+ /* Suites per-file are stored as a NULL terminated array */
+ struct kunit_suite *subsuites[2][2] = {
+ {NULL, NULL},
+ {NULL, NULL},
+ };
+ /* Match the memory layout of suite_set */
+ struct kunit_suite * const * const suites[2] = {
+ subsuites[0], subsuites[1],
+ };
+
+ const struct suite_set suite_set = {
+ .start = suites,
+ .end = suites + 2,
+ };
+ struct suite_set filtered = {.start = NULL, .end = NULL};
+
+ /* Emulate two files, each having one suite */
+ subsuites[0][0] = alloc_fake_suite(test, "suite0");
+ subsuites[1][0] = alloc_fake_suite(test, "suite1");
+
+ /* Filter out suite1 */
+ filtered = kunit_filter_suites(&suite_set, "suite0");
+ kfree_subsuites_at_end(test, &filtered); /* let us use ASSERTs without leaking */
+ KUNIT_ASSERT_EQ(test, filtered.end - filtered.start, (ptrdiff_t)1);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered.start);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered.start[0]);
+ KUNIT_EXPECT_STREQ(test, (const char *)filtered.start[0][0]->name, "suite0");
+}
+
+static struct kunit_case executor_test_cases[] = {
+ KUNIT_CASE(filter_subsuite_test),
+ KUNIT_CASE(filter_subsuite_to_empty_test),
+ KUNIT_CASE(filter_suites_test),
+ {}
+};
+
+static struct kunit_suite executor_test_suite = {
+ .name = "kunit_executor_test",
+ .test_cases = executor_test_cases,
+};
+
+kunit_test_suites(&executor_test_suite);
+
+/* Test helpers */
+
+static void kfree_res_free(struct kunit_resource *res)
+{
+ kfree(res->data);
+}
+
+/* Use the resource API to register a call to kfree(to_free).
+ * Since we never actually use the resource, it's safe to use on const data.
+ */
+static void kfree_at_end(struct kunit *test, const void *to_free)
+{
+ /* kfree() handles NULL already, but avoid allocating a no-op cleanup. */
+ if (IS_ERR_OR_NULL(to_free))
+ return;
+ kunit_alloc_and_get_resource(test, NULL, kfree_res_free, GFP_KERNEL,
+ (void *)to_free);
+}
+
+static struct kunit_suite *alloc_fake_suite(struct kunit *test,
+ const char *suite_name)
+{
+ struct kunit_suite *suite;
+
+ /* We normally never expect to allocate suites, hence the non-const cast. */
+ suite = kunit_kzalloc(test, sizeof(*suite), GFP_KERNEL);
+ strncpy((char *)suite->name, suite_name, sizeof(suite->name) - 1);
+
+ return suite;
+}
diff --git a/lib/kunit/kunit-example-test.c b/lib/kunit/kunit-example-test.c
index be1164ecc476..51099b0ca29c 100644
--- a/lib/kunit/kunit-example-test.c
+++ b/lib/kunit/kunit-example-test.c
@@ -41,6 +41,35 @@ static int example_test_init(struct kunit *test)
}
/*
+ * This test should always be skipped.
+ */
+static void example_skip_test(struct kunit *test)
+{
+ /* This line should run */
+ kunit_info(test, "You should not see a line below.");
+
+ /* Skip (and abort) the test */
+ kunit_skip(test, "this test should be skipped");
+
+ /* This line should not execute */
+ KUNIT_FAIL(test, "You should not see this line.");
+}
+
+/*
+ * This test should always be marked skipped.
+ */
+static void example_mark_skipped_test(struct kunit *test)
+{
+ /* This line should run */
+ kunit_info(test, "You should see a line below.");
+
+ /* Skip (but do not abort) the test */
+ kunit_mark_skipped(test, "this test should be skipped");
+
+ /* This line should run */
+ kunit_info(test, "You should see this line.");
+}
+/*
* Here we make a list of all the test cases we want to add to the test suite
* below.
*/
@@ -52,6 +81,8 @@ static struct kunit_case example_test_cases[] = {
* test suite.
*/
KUNIT_CASE(example_simple_test),
+ KUNIT_CASE(example_skip_test),
+ KUNIT_CASE(example_mark_skipped_test),
{}
};
diff --git a/lib/kunit/kunit-test.c b/lib/kunit/kunit-test.c
index 69f902440a0e..d69efcbed624 100644
--- a/lib/kunit/kunit-test.c
+++ b/lib/kunit/kunit-test.c
@@ -437,7 +437,47 @@ static void kunit_log_test(struct kunit *test)
#endif
}
+static void kunit_status_set_failure_test(struct kunit *test)
+{
+ struct kunit fake;
+
+ kunit_init_test(&fake, "fake test", NULL);
+
+ KUNIT_EXPECT_EQ(test, fake.status, (enum kunit_status)KUNIT_SUCCESS);
+ kunit_set_failure(&fake);
+ KUNIT_EXPECT_EQ(test, fake.status, (enum kunit_status)KUNIT_FAILURE);
+}
+
+static void kunit_status_mark_skipped_test(struct kunit *test)
+{
+ struct kunit fake;
+
+ kunit_init_test(&fake, "fake test", NULL);
+
+ /* Before: Should be SUCCESS with no comment. */
+ KUNIT_EXPECT_EQ(test, fake.status, KUNIT_SUCCESS);
+ KUNIT_EXPECT_STREQ(test, fake.status_comment, "");
+
+ /* Mark the test as skipped. */
+ kunit_mark_skipped(&fake, "Accepts format string: %s", "YES");
+
+ /* After: Should be SKIPPED with our comment. */
+ KUNIT_EXPECT_EQ(test, fake.status, (enum kunit_status)KUNIT_SKIPPED);
+ KUNIT_EXPECT_STREQ(test, fake.status_comment, "Accepts format string: YES");
+}
+
+static struct kunit_case kunit_status_test_cases[] = {
+ KUNIT_CASE(kunit_status_set_failure_test),
+ KUNIT_CASE(kunit_status_mark_skipped_test),
+ {}
+};
+
+static struct kunit_suite kunit_status_test_suite = {
+ .name = "kunit_status",
+ .test_cases = kunit_status_test_cases,
+};
+
kunit_test_suites(&kunit_try_catch_test_suite, &kunit_resource_test_suite,
- &kunit_log_test_suite);
+ &kunit_log_test_suite, &kunit_status_test_suite);
MODULE_LICENSE("GPL v2");
diff --git a/lib/kunit/string-stream.h b/lib/kunit/string-stream.h
index fe98a00b75a9..5e94b623454f 100644
--- a/lib/kunit/string-stream.h
+++ b/lib/kunit/string-stream.h
@@ -35,9 +35,9 @@ struct string_stream *alloc_string_stream(struct kunit *test, gfp_t gfp);
int __printf(2, 3) string_stream_add(struct string_stream *stream,
const char *fmt, ...);
-int string_stream_vadd(struct string_stream *stream,
- const char *fmt,
- va_list args);
+int __printf(2, 0) string_stream_vadd(struct string_stream *stream,
+ const char *fmt,
+ va_list args);
char *string_stream_get_string(struct string_stream *stream);
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
index 2f6cc0123232..d79ecb86ea57 100644
--- a/lib/kunit/test.c
+++ b/lib/kunit/test.c
@@ -98,12 +98,14 @@ static void kunit_print_subtest_start(struct kunit_suite *suite)
static void kunit_print_ok_not_ok(void *test_or_suite,
bool is_test,
- bool is_ok,
+ enum kunit_status status,
size_t test_number,
- const char *description)
+ const char *description,
+ const char *directive)
{
struct kunit_suite *suite = is_test ? NULL : test_or_suite;
struct kunit *test = is_test ? test_or_suite : NULL;
+ const char *directive_header = (status == KUNIT_SKIPPED) ? " # SKIP " : "";
/*
* We do not log the test suite results as doing so would
@@ -114,25 +116,31 @@ static void kunit_print_ok_not_ok(void *test_or_suite,
* representation.
*/
if (suite)
- pr_info("%s %zd - %s\n",
- kunit_status_to_string(is_ok),
- test_number, description);
+ pr_info("%s %zd - %s%s%s\n",
+ kunit_status_to_ok_not_ok(status),
+ test_number, description, directive_header,
+ (status == KUNIT_SKIPPED) ? directive : "");
else
- kunit_log(KERN_INFO, test, KUNIT_SUBTEST_INDENT "%s %zd - %s",
- kunit_status_to_string(is_ok),
- test_number, description);
+ kunit_log(KERN_INFO, test,
+ KUNIT_SUBTEST_INDENT "%s %zd - %s%s%s",
+ kunit_status_to_ok_not_ok(status),
+ test_number, description, directive_header,
+ (status == KUNIT_SKIPPED) ? directive : "");
}
-bool kunit_suite_has_succeeded(struct kunit_suite *suite)
+enum kunit_status kunit_suite_has_succeeded(struct kunit_suite *suite)
{
const struct kunit_case *test_case;
+ enum kunit_status status = KUNIT_SKIPPED;
kunit_suite_for_each_test_case(suite, test_case) {
- if (!test_case->success)
- return false;
+ if (test_case->status == KUNIT_FAILURE)
+ return KUNIT_FAILURE;
+ else if (test_case->status == KUNIT_SUCCESS)
+ status = KUNIT_SUCCESS;
}
- return true;
+ return status;
}
EXPORT_SYMBOL_GPL(kunit_suite_has_succeeded);
@@ -143,7 +151,8 @@ static void kunit_print_subtest_end(struct kunit_suite *suite)
kunit_print_ok_not_ok((void *)suite, false,
kunit_suite_has_succeeded(suite),
kunit_suite_counter++,
- suite->name);
+ suite->name,
+ suite->status_comment);
}
unsigned int kunit_test_case_num(struct kunit_suite *suite,
@@ -252,7 +261,8 @@ void kunit_init_test(struct kunit *test, const char *name, char *log)
test->log = log;
if (test->log)
test->log[0] = '\0';
- test->success = true;
+ test->status = KUNIT_SUCCESS;
+ test->status_comment[0] = '\0';
}
EXPORT_SYMBOL_GPL(kunit_init_test);
@@ -376,7 +386,11 @@ static void kunit_run_case_catch_errors(struct kunit_suite *suite,
context.test_case = test_case;
kunit_try_catch_run(try_catch, &context);
- test_case->success = test->success;
+ /* Propagate the parameter result to the test case. */
+ if (test->status == KUNIT_FAILURE)
+ test_case->status = KUNIT_FAILURE;
+ else if (test_case->status != KUNIT_FAILURE && test->status == KUNIT_SUCCESS)
+ test_case->status = KUNIT_SUCCESS;
}
int kunit_run_tests(struct kunit_suite *suite)
@@ -388,7 +402,7 @@ int kunit_run_tests(struct kunit_suite *suite)
kunit_suite_for_each_test_case(suite, test_case) {
struct kunit test = { .param_value = NULL, .param_index = 0 };
- bool test_success = true;
+ test_case->status = KUNIT_SKIPPED;
if (test_case->generate_params) {
/* Get initial param. */
@@ -398,7 +412,6 @@ int kunit_run_tests(struct kunit_suite *suite)
do {
kunit_run_case_catch_errors(suite, test_case, &test);
- test_success &= test_case->success;
if (test_case->generate_params) {
if (param_desc[0] == '\0') {
@@ -410,7 +423,7 @@ int kunit_run_tests(struct kunit_suite *suite)
KUNIT_SUBTEST_INDENT
"# %s: %s %d - %s",
test_case->name,
- kunit_status_to_string(test.success),
+ kunit_status_to_ok_not_ok(test.status),
test.param_index + 1, param_desc);
/* Get next param. */
@@ -420,9 +433,10 @@ int kunit_run_tests(struct kunit_suite *suite)
}
} while (test.param_value);
- kunit_print_ok_not_ok(&test, true, test_success,
+ kunit_print_ok_not_ok(&test, true, test_case->status,
kunit_test_case_num(suite, test_case),
- test_case->name);
+ test_case->name,
+ test.status_comment);
}
kunit_print_subtest_end(suite);
@@ -434,6 +448,7 @@ EXPORT_SYMBOL_GPL(kunit_run_tests);
static void kunit_init_suite(struct kunit_suite *suite)
{
kunit_debugfs_create_suite(suite);
+ suite->status_comment[0] = '\0';
}
int __kunit_test_suites_init(struct kunit_suite * const * const suites)
@@ -475,6 +490,7 @@ int kunit_add_resource(struct kunit *test,
void *data)
{
int ret = 0;
+ unsigned long flags;
res->free = free;
kref_init(&res->refcount);
@@ -487,10 +503,10 @@ int kunit_add_resource(struct kunit *test,
res->data = data;
}
- spin_lock(&test->lock);
+ spin_lock_irqsave(&test->lock, flags);
list_add_tail(&res->node, &test->resources);
/* refcount for list is established by kref_init() */
- spin_unlock(&test->lock);
+ spin_unlock_irqrestore(&test->lock, flags);
return ret;
}
@@ -548,9 +564,11 @@ EXPORT_SYMBOL_GPL(kunit_alloc_and_get_resource);
void kunit_remove_resource(struct kunit *test, struct kunit_resource *res)
{
- spin_lock(&test->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&test->lock, flags);
list_del(&res->node);
- spin_unlock(&test->lock);
+ spin_unlock_irqrestore(&test->lock, flags);
kunit_put_resource(res);
}
EXPORT_SYMBOL_GPL(kunit_remove_resource);
@@ -573,41 +591,43 @@ int kunit_destroy_resource(struct kunit *test, kunit_resource_match_t match,
}
EXPORT_SYMBOL_GPL(kunit_destroy_resource);
-struct kunit_kmalloc_params {
+struct kunit_kmalloc_array_params {
+ size_t n;
size_t size;
gfp_t gfp;
};
-static int kunit_kmalloc_init(struct kunit_resource *res, void *context)
+static int kunit_kmalloc_array_init(struct kunit_resource *res, void *context)
{
- struct kunit_kmalloc_params *params = context;
+ struct kunit_kmalloc_array_params *params = context;
- res->data = kmalloc(params->size, params->gfp);
+ res->data = kmalloc_array(params->n, params->size, params->gfp);
if (!res->data)
return -ENOMEM;
return 0;
}
-static void kunit_kmalloc_free(struct kunit_resource *res)
+static void kunit_kmalloc_array_free(struct kunit_resource *res)
{
kfree(res->data);
}
-void *kunit_kmalloc(struct kunit *test, size_t size, gfp_t gfp)
+void *kunit_kmalloc_array(struct kunit *test, size_t n, size_t size, gfp_t gfp)
{
- struct kunit_kmalloc_params params = {
+ struct kunit_kmalloc_array_params params = {
.size = size,
+ .n = n,
.gfp = gfp
};
return kunit_alloc_resource(test,
- kunit_kmalloc_init,
- kunit_kmalloc_free,
+ kunit_kmalloc_array_init,
+ kunit_kmalloc_array_free,
gfp,
&params);
}
-EXPORT_SYMBOL_GPL(kunit_kmalloc);
+EXPORT_SYMBOL_GPL(kunit_kmalloc_array);
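/*
 * Expected call-site sketch (assumed; the old kunit_kmalloc() presumably
 * becomes a wrapper passing n == 1): allocations are tracked as resources
 * and released by kunit_cleanup() when the test ends.
 */
static void example_array_alloc(struct kunit *test)
{
	int *arr = kunit_kmalloc_array(test, 16, sizeof(*arr), GFP_KERNEL);

	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, arr);
}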
void kunit_kfree(struct kunit *test, const void *ptr)
{
@@ -630,6 +650,7 @@ EXPORT_SYMBOL_GPL(kunit_kfree);
void kunit_cleanup(struct kunit *test)
{
struct kunit_resource *res;
+ unsigned long flags;
/*
* test->resources is a stack - each allocation must be freed in the
@@ -641,9 +662,9 @@ void kunit_cleanup(struct kunit *test)
* protect against the current node being deleted, not the next.
*/
while (true) {
- spin_lock(&test->lock);
+ spin_lock_irqsave(&test->lock, flags);
if (list_empty(&test->resources)) {
- spin_unlock(&test->lock);
+ spin_unlock_irqrestore(&test->lock, flags);
break;
}
res = list_last_entry(&test->resources,
@@ -654,7 +675,7 @@ void kunit_cleanup(struct kunit *test)
* resource, and this can't happen if the test->lock
* is held.
*/
- spin_unlock(&test->lock);
+ spin_unlock_irqrestore(&test->lock, flags);
kunit_remove_resource(test, res);
}
current->kunit_test = NULL;
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index 2d85abac1744..161108e5d2fe 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -53,6 +53,7 @@ __setup("debug_locks_verbose=", setup_debug_locks_verbose);
#define LOCKTYPE_WW 0x10
#define LOCKTYPE_RTMUTEX 0x20
#define LOCKTYPE_LL 0x40
+#define LOCKTYPE_SPECIAL 0x80
static struct ww_acquire_ctx t, t2;
static struct ww_mutex o, o2, o3;
@@ -194,6 +195,7 @@ static void init_shared_classes(void)
#define HARDIRQ_ENTER() \
local_irq_disable(); \
__irq_enter(); \
+ lockdep_hardirq_threaded(); \
WARN_ON(!in_irq());
#define HARDIRQ_EXIT() \
@@ -2492,16 +2494,6 @@ static void rcu_sched_exit(int *_)
int rcu_sched_guard_##name __guard(rcu_sched_exit); \
rcu_read_lock_sched();
-static void rcu_callback_exit(int *_)
-{
- rcu_lock_release(&rcu_callback_map);
-}
-
-#define RCU_CALLBACK_CONTEXT(name, ...) \
- int rcu_callback_guard_##name __guard(rcu_callback_exit); \
- rcu_lock_acquire(&rcu_callback_map);
-
-
static void raw_spinlock_exit(raw_spinlock_t **lock)
{
raw_spin_unlock(*lock);
@@ -2558,8 +2550,6 @@ static void __maybe_unused inner##_in_##outer(void) \
* ---------------+-------+----------+------+-------
* RCU_BH | o | o | o | x
* ---------------+-------+----------+------+-------
- * RCU_CALLBACK | o | o | o | x
- * ---------------+-------+----------+------+-------
* RCU_SCHED | o | o | x | x
* ---------------+-------+----------+------+-------
* RAW_SPIN | o | o | x | x
@@ -2576,7 +2566,6 @@ GENERATE_2_CONTEXT_TESTCASE(NOTTHREADED_HARDIRQ, , inner, inner_lock) \
GENERATE_2_CONTEXT_TESTCASE(SOFTIRQ, , inner, inner_lock) \
GENERATE_2_CONTEXT_TESTCASE(RCU, , inner, inner_lock) \
GENERATE_2_CONTEXT_TESTCASE(RCU_BH, , inner, inner_lock) \
-GENERATE_2_CONTEXT_TESTCASE(RCU_CALLBACK, , inner, inner_lock) \
GENERATE_2_CONTEXT_TESTCASE(RCU_SCHED, , inner, inner_lock) \
GENERATE_2_CONTEXT_TESTCASE(RAW_SPINLOCK, raw_lock_A, inner, inner_lock) \
GENERATE_2_CONTEXT_TESTCASE(SPINLOCK, lock_A, inner, inner_lock) \
@@ -2638,10 +2627,6 @@ static void wait_context_tests(void)
DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(RCU_BH);
pr_cont("\n");
- print_testname("in RCU callback context");
- DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(RCU_CALLBACK);
- pr_cont("\n");
-
print_testname("in RCU-sched context");
DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(RCU_SCHED);
pr_cont("\n");
@@ -2744,6 +2729,66 @@ static void local_lock_tests(void)
pr_cont("\n");
}
+static void hardirq_deadlock_softirq_not_deadlock(void)
+{
+ /* mutex_A is hardirq-unsafe and softirq-unsafe */
+ /* mutex_A -> lock_C */
+ mutex_lock(&mutex_A);
+ HARDIRQ_DISABLE();
+ spin_lock(&lock_C);
+ spin_unlock(&lock_C);
+ HARDIRQ_ENABLE();
+ mutex_unlock(&mutex_A);
+
+ /* lock_A is hardirq-safe */
+ HARDIRQ_ENTER();
+ spin_lock(&lock_A);
+ spin_unlock(&lock_A);
+ HARDIRQ_EXIT();
+
+ /* lock_A -> lock_B */
+ HARDIRQ_DISABLE();
+ spin_lock(&lock_A);
+ spin_lock(&lock_B);
+ spin_unlock(&lock_B);
+ spin_unlock(&lock_A);
+ HARDIRQ_ENABLE();
+
+ /* lock_B -> lock_C */
+ HARDIRQ_DISABLE();
+ spin_lock(&lock_B);
+ spin_lock(&lock_C);
+ spin_unlock(&lock_C);
+ spin_unlock(&lock_B);
+ HARDIRQ_ENABLE();
+
+ /* lock_D is softirq-safe */
+ SOFTIRQ_ENTER();
+ spin_lock(&lock_D);
+ spin_unlock(&lock_D);
+ SOFTIRQ_EXIT();
+
+ /* And lock_D is hardirq-unsafe */
+ SOFTIRQ_DISABLE();
+ spin_lock(&lock_D);
+ spin_unlock(&lock_D);
+ SOFTIRQ_ENABLE();
+
+ /*
+ * mutex_A -> lock_C -> lock_D is softirq-unsafe -> softirq-safe, not
+ * deadlock.
+ *
+ * lock_A -> lock_B -> lock_C -> lock_D is hardirq-safe ->
+ * hardirq-unsafe, deadlock.
+ */
+ HARDIRQ_DISABLE();
+ spin_lock(&lock_C);
+ spin_lock(&lock_D);
+ spin_unlock(&lock_D);
+ spin_unlock(&lock_C);
+ HARDIRQ_ENABLE();
+}
+
void locking_selftest(void)
{
/*
@@ -2872,6 +2917,10 @@ void locking_selftest(void)
local_lock_tests();
+ print_testname("hardirq_unsafe_softirq_safe");
+ dotest(hardirq_deadlock_softirq_not_deadlock, FAILURE, LOCKTYPE_SPECIAL);
+ pr_cont("\n");
+
if (unexpected_testcase_failures) {
printk("-----------------------------------------------------------------\n");
debug_locks = 0;
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
index 8a7724a6ce2f..926f4823d5ea 100644
--- a/lib/lz4/lz4_decompress.c
+++ b/lib/lz4/lz4_decompress.c
@@ -481,7 +481,7 @@ int LZ4_decompress_fast(const char *source, char *dest, int originalSize)
/* ===== Instantiate a few more decoding cases, used more than once. ===== */
-int LZ4_decompress_safe_withPrefix64k(const char *source, char *dest,
+static int LZ4_decompress_safe_withPrefix64k(const char *source, char *dest,
int compressedSize, int maxOutputSize)
{
return LZ4_decompress_generic(source, dest,
diff --git a/lib/math/Makefile b/lib/math/Makefile
index 7456edb864fc..bfac26ddfc22 100644
--- a/lib/math/Makefile
+++ b/lib/math/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_PRIME_NUMBERS) += prime_numbers.o
obj-$(CONFIG_RATIONAL) += rational.o
obj-$(CONFIG_TEST_DIV64) += test_div64.o
+obj-$(CONFIG_RATIONAL_KUNIT_TEST) += rational-test.o
diff --git a/lib/math/rational-test.c b/lib/math/rational-test.c
new file mode 100644
index 000000000000..01611ddff420
--- /dev/null
+++ b/lib/math/rational-test.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <kunit/test.h>
+
+#include <linux/rational.h>
+
+struct rational_test_param {
+ unsigned long num, den;
+ unsigned long max_num, max_den;
+ unsigned long exp_num, exp_den;
+
+ const char *name;
+};
+
+static const struct rational_test_param test_parameters[] = {
+ { 1230, 10, 100, 20, 100, 1, "Exceeds bounds, semi-convergent term > 1/2 last term" },
+ { 34567,100, 120, 20, 120, 1, "Exceeds bounds, semi-convergent term < 1/2 last term" },
+ { 1, 30, 100, 10, 0, 1, "Closest to zero" },
+ { 1, 19, 100, 10, 1, 10, "Closest to smallest non-zero" },
+ { 27,32, 16, 16, 11, 13, "Use convergent" },
+ { 1155, 7735, 255, 255, 33, 221, "Exact answer" },
+ { 87, 32, 70, 32, 68, 25, "Semiconvergent, numerator limit" },
+ { 14533, 4626, 15000, 2400, 7433, 2366, "Semiconvergent, denominator limit" },
+};
+
+static void get_desc(const struct rational_test_param *param, char *desc)
+{
+ strscpy(desc, param->name, KUNIT_PARAM_DESC_SIZE);
+}
+
+/* Creates function rational_gen_params */
+KUNIT_ARRAY_PARAM(rational, test_parameters, get_desc);
+
+static void rational_test(struct kunit *test)
+{
+ const struct rational_test_param *param = (const struct rational_test_param *)test->param_value;
+ unsigned long n = 0, d = 0;
+
+ rational_best_approximation(param->num, param->den, param->max_num, param->max_den, &n, &d);
+ KUNIT_EXPECT_EQ(test, n, param->exp_num);
+ KUNIT_EXPECT_EQ(test, d, param->exp_den);
+}
+
+static struct kunit_case rational_test_cases[] = {
+ KUNIT_CASE_PARAM(rational_test, rational_gen_params),
+ {}
+};
+
+static struct kunit_suite rational_test_suite = {
+ .name = "rational",
+ .test_cases = rational_test_cases,
+};
+
+kunit_test_suites(&rational_test_suite);
+
+MODULE_LICENSE("GPL v2");
diff --git a/lib/math/rational.c b/lib/math/rational.c
index 9781d521963d..c0ab51d8fbb9 100644
--- a/lib/math/rational.c
+++ b/lib/math/rational.c
@@ -12,6 +12,7 @@
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/minmax.h>
+#include <linux/limits.h>
/*
* calculate best rational approximation for a given fraction
@@ -78,13 +79,18 @@ void rational_best_approximation(
* found below as 't'.
*/
if ((n2 > max_numerator) || (d2 > max_denominator)) {
- unsigned long t = min((max_numerator - n0) / n1,
- (max_denominator - d0) / d1);
+ unsigned long t = ULONG_MAX;
- /* This tests if the semi-convergent is closer
- * than the previous convergent.
+ if (d1)
+ t = (max_denominator - d0) / d1;
+ if (n1)
+ t = min(t, (max_numerator - n0) / n1);
+
+ /* This tests if the semi-convergent is closer than the previous
+ * convergent. If d1 is zero there is no previous convergent as this
+ * is the 1st iteration, so always choose the semi-convergent.
*/
- if (2u * t > a || (2u * t == a && d0 * dp > d1 * d)) {
+ if (!d1 || 2u * t > a || (2u * t == a && d0 * dp > d1 * d)) {
n1 = n0 + t * n1;
d1 = d0 + t * d1;
}
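/*
 * Worked examples matching the new KUnit table above:
 * rational_best_approximation(1155, 7735, 255, 255, &n, &d) still reduces
 * exactly to 33/221, and the "closest to zero" case (1, 30) bounded by
 * (100, 10), which previously could divide by zero while n1 or d1 was
 * still 0 on the first iteration, now yields 0/1.
 */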
diff --git a/lib/mpi/longlong.h b/lib/mpi/longlong.h
index afbd99987cf8..b6fa1d08fb55 100644
--- a/lib/mpi/longlong.h
+++ b/lib/mpi/longlong.h
@@ -48,8 +48,8 @@
/* Define auxiliary asm macros.
*
- * 1) umul_ppmm(high_prod, low_prod, multipler, multiplicand) multiplies two
- * UWtype integers MULTIPLER and MULTIPLICAND, and generates a two UWtype
+ * 1) umul_ppmm(high_prod, low_prod, multiplier, multiplicand) multiplies two
+ * UWtype integers MULTIPLIER and MULTIPLICAND, and generates a two UWtype
* word product in HIGH_PROD and LOW_PROD.
*
* 2) __umulsidi3(a,b) multiplies two UWtype integers A and B, and returns a
diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c
index 7ea225b2204f..39c4c6731094 100644
--- a/lib/mpi/mpicoder.c
+++ b/lib/mpi/mpicoder.c
@@ -234,11 +234,11 @@ static int count_lzeros(MPI a)
}
/**
- * mpi_read_buffer() - read MPI to a bufer provided by user (msb first)
+ * mpi_read_buffer() - read MPI to a buffer provided by user (msb first)
*
* @a: a multi precision integer
- * @buf: bufer to which the output will be written to. Needs to be at
- * leaset mpi_get_size(a) long.
+ * @buf: buffer to which the output will be written to. Needs to be at
+ * least mpi_get_size(a) long.
* @buf_len: size of the buf.
* @nbytes: receives the actual length of the data written on success and
* the data to-be-written on -EOVERFLOW in case buf_len was too
diff --git a/lib/mpi/mpiutil.c b/lib/mpi/mpiutil.c
index 3c63710c20c6..9a75ca3f7edf 100644
--- a/lib/mpi/mpiutil.c
+++ b/lib/mpi/mpiutil.c
@@ -80,7 +80,7 @@ EXPORT_SYMBOL_GPL(mpi_const);
/****************
* Note: It was a bad idea to use the number of limbs to allocate
* because on a alpha the limbs are large but we normally need
- * integers of n bits - So we should chnage this to bits (or bytes).
+ * integers of n bits - So we should change this to bits (or bytes).
*
* But mpi_alloc is used in a lot of places :-)
*/
diff --git a/lib/parser.c b/lib/parser.c
index f1a6d90b8c34..bcb23484100e 100644
--- a/lib/parser.c
+++ b/lib/parser.c
@@ -6,6 +6,7 @@
#include <linux/ctype.h>
#include <linux/types.h>
#include <linux/export.h>
+#include <linux/kstrtox.h>
#include <linux/parser.h>
#include <linux/slab.h>
#include <linux/string.h>
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index a1071cdefb5a..af9302141bcf 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -275,7 +275,7 @@ static void __percpu_ref_switch_mode(struct percpu_ref *ref,
wait_event_lock_irq(percpu_ref_switch_waitq, !data->confirm_switch,
percpu_ref_switch_lock);
- if (data->force_atomic || (ref->percpu_count_ptr & __PERCPU_REF_DEAD))
+ if (data->force_atomic || percpu_ref_is_dying(ref))
__percpu_ref_switch_to_atomic(ref, confirm_switch);
else
__percpu_ref_switch_to_percpu(ref);
@@ -385,7 +385,7 @@ void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
spin_lock_irqsave(&percpu_ref_switch_lock, flags);
- WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_DEAD,
+ WARN_ONCE(percpu_ref_is_dying(ref),
"%s called more than once on %ps!", __func__,
ref->data->release);
@@ -465,7 +465,7 @@ void percpu_ref_resurrect(struct percpu_ref *ref)
spin_lock_irqsave(&percpu_ref_switch_lock, flags);
- WARN_ON_ONCE(!(ref->percpu_count_ptr & __PERCPU_REF_DEAD));
+ WARN_ON_ONCE(!percpu_ref_is_dying(ref));
WARN_ON_ONCE(__ref_is_percpu(ref, &percpu_count));
ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD;
diff --git a/lib/seq_buf.c b/lib/seq_buf.c
index 707453f5d58e..6dafde851333 100644
--- a/lib/seq_buf.c
+++ b/lib/seq_buf.c
@@ -229,8 +229,10 @@ int seq_buf_putmem_hex(struct seq_buf *s, const void *mem,
WARN_ON(s->size == 0);
+ BUILD_BUG_ON(MAX_MEMHEX_BYTES * 2 >= HEX_CHARS);
+
while (len) {
- start_len = min(len, HEX_CHARS - 1);
+ start_len = min(len, MAX_MEMHEX_BYTES);
#ifdef __BIG_ENDIAN
for (i = 0, j = 0; i < start_len; i++) {
#else
@@ -243,12 +245,14 @@ int seq_buf_putmem_hex(struct seq_buf *s, const void *mem,
break;
/* j increments twice per loop */
- len -= j / 2;
hex[j++] = ' ';
seq_buf_putmem(s, hex, j);
if (seq_buf_has_overflowed(s))
return -1;
+
+ len -= start_len;
+ data += start_len;
}
return 0;
}
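/*
 * Summary of the fix above: each source byte emits two hex characters, so
 * clamping start_len by HEX_CHARS - 1 *bytes* could overrun the local hex[]
 * buffer, and since 'data' was never advanced the loop re-dumped the first
 * chunk for inputs longer than MAX_MEMHEX_BYTES.  The new BUILD_BUG_ON
 * documents the two-characters-per-byte assumption at compile time.
 */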
diff --git a/lib/slub_kunit.c b/lib/slub_kunit.c
new file mode 100644
index 000000000000..8662dc6cb509
--- /dev/null
+++ b/lib/slub_kunit.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <kunit/test.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include "../mm/slab.h"
+
+static struct kunit_resource resource;
+static int slab_errors;
+
+static void test_clobber_zone(struct kunit *test)
+{
+ struct kmem_cache *s = kmem_cache_create("TestSlub_RZ_alloc", 64, 0,
+ SLAB_RED_ZONE, NULL);
+ u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
+
+ kasan_disable_current();
+ p[64] = 0x12;
+
+ validate_slab_cache(s);
+ KUNIT_EXPECT_EQ(test, 2, slab_errors);
+
+ kasan_enable_current();
+ kmem_cache_free(s, p);
+ kmem_cache_destroy(s);
+}
+
+#ifndef CONFIG_KASAN
+static void test_next_pointer(struct kunit *test)
+{
+ struct kmem_cache *s = kmem_cache_create("TestSlub_next_ptr_free", 64, 0,
+ SLAB_POISON, NULL);
+ u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
+ unsigned long tmp;
+ unsigned long *ptr_addr;
+
+ kmem_cache_free(s, p);
+
+ ptr_addr = (unsigned long *)(p + s->offset);
+ tmp = *ptr_addr;
+ p[s->offset] = 0x12;
+
+ /*
+ * Expecting three errors: one for the corrupted freechain, one for the
+ * wrong count of objects in use, and a third reported while fixing the
+ * broken cache.
+ */
+ validate_slab_cache(s);
+ KUNIT_EXPECT_EQ(test, 3, slab_errors);
+
+ /*
+ * Try to repair the corrupted freepointer.
+ * Still expecting two errors: the first for the wrong count of objects
+ * in use, the second reported while fixing the broken cache.
+ */
+ *ptr_addr = tmp;
+ slab_errors = 0;
+
+ validate_slab_cache(s);
+ KUNIT_EXPECT_EQ(test, 2, slab_errors);
+
+ /*
+ * Previous validation repaired the count of objects in use.
+ * Now expecting no error.
+ */
+ slab_errors = 0;
+ validate_slab_cache(s);
+ KUNIT_EXPECT_EQ(test, 0, slab_errors);
+
+ kmem_cache_destroy(s);
+}
+
+static void test_first_word(struct kunit *test)
+{
+ struct kmem_cache *s = kmem_cache_create("TestSlub_1th_word_free", 64, 0,
+ SLAB_POISON, NULL);
+ u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
+
+ kmem_cache_free(s, p);
+ *p = 0x78;
+
+ validate_slab_cache(s);
+ KUNIT_EXPECT_EQ(test, 2, slab_errors);
+
+ kmem_cache_destroy(s);
+}
+
+static void test_clobber_50th_byte(struct kunit *test)
+{
+ struct kmem_cache *s = kmem_cache_create("TestSlub_50th_word_free", 64, 0,
+ SLAB_POISON, NULL);
+ u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
+
+ kmem_cache_free(s, p);
+ p[50] = 0x9a;
+
+ validate_slab_cache(s);
+ KUNIT_EXPECT_EQ(test, 2, slab_errors);
+
+ kmem_cache_destroy(s);
+}
+#endif
+
+static void test_clobber_redzone_free(struct kunit *test)
+{
+ struct kmem_cache *s = kmem_cache_create("TestSlub_RZ_free", 64, 0,
+ SLAB_RED_ZONE, NULL);
+ u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
+
+ kasan_disable_current();
+ kmem_cache_free(s, p);
+ p[64] = 0xab;
+
+ validate_slab_cache(s);
+ KUNIT_EXPECT_EQ(test, 2, slab_errors);
+
+ kasan_enable_current();
+ kmem_cache_destroy(s);
+}
+
+static int test_init(struct kunit *test)
+{
+ slab_errors = 0;
+
+ kunit_add_named_resource(test, NULL, NULL, &resource,
+ "slab_errors", &slab_errors);
+ return 0;
+}
+
+static struct kunit_case test_cases[] = {
+ KUNIT_CASE(test_clobber_zone),
+
+#ifndef CONFIG_KASAN
+ KUNIT_CASE(test_next_pointer),
+ KUNIT_CASE(test_first_word),
+ KUNIT_CASE(test_clobber_50th_byte),
+#endif
+
+ KUNIT_CASE(test_clobber_redzone_free),
+ {}
+};
+
+static struct kunit_suite test_suite = {
+ .name = "slub_test",
+ .init = test_init,
+ .test_cases = test_cases,
+};
+kunit_test_suite(test_suite);
+
+MODULE_LICENSE("GPL");
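/*
 * Counterpart sketch (assumed shape of the mm/slub.c side, which is not in
 * this diff): while a KUnit test runs, the validation code bumps the
 * 'slab_errors' counter through the named resource registered in
 * test_init() instead of only printing to dmesg.
 */
static void slab_add_kunit_errors(void)
{
	struct kunit_resource *res;

	if (!current->kunit_test)
		return;
	res = kunit_find_named_resource(current->kunit_test, "slab_errors");
	if (res) {
		(*(int *)res->data)++;
		kunit_put_resource(res);
	}
}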
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 1c1dbd300325..046ac6297c78 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -19,11 +19,7 @@ unsigned int check_preemption_disabled(const char *what1, const char *what2)
if (irqs_disabled())
goto out;
- /*
- * Kernel threads bound to a single CPU can safely use
- * smp_processor_id():
- */
- if (current->nr_cpus_allowed == 1)
+ if (is_percpu_thread())
goto out;
#ifdef CONFIG_SMP
diff --git a/lib/string.c b/lib/string.c
index 7548eb715ddb..77bd0b1d3296 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -977,7 +977,7 @@ void *memscan(void *addr, int c, size_t size)
unsigned char *p = addr;
while (size) {
- if (*p == c)
+ if (*p == (unsigned char)c)
return (void *)p;
p++;
size--;
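/*
 * Why the cast matters (illustrative): 'c' arrives as an int, so before the
 * fix memscan(buf, 0x100 | 'a', len) could never match any byte, whereas
 * memchr() with the same argument finds 'a'; the cast restores the
 * memchr()-like truncation of 'c' to an unsigned char.
 */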
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
index 7f2d5fbaf243..5a35c7e16e96 100644
--- a/lib/string_helpers.c
+++ b/lib/string_helpers.c
@@ -452,18 +452,20 @@ static bool escape_hex(unsigned char c, char **dst, char *end)
* The process of escaping byte buffer includes several parts. They are applied
* in the following sequence.
*
- * 1. The character is matched to the printable class, if asked, and in
- * case of match it passes through to the output.
- * 2. The character is not matched to the one from @only string and thus
+ * 1. The character is not matched to the one from @only string and thus
* must go as-is to the output.
- * 3. The character is checked if it falls into the class given by @flags.
+ * 2. The character is matched to the printable and ASCII classes, if asked,
+ * and in case of match it passes through to the output.
+ * 3. The character is matched to the printable or ASCII class, if asked,
+ * and in case of match it passes through to the output.
+ * 4. The character is checked if it falls into the class given by @flags.
* %ESCAPE_OCTAL and %ESCAPE_HEX are going last since they cover any
* character. Note that they actually can't go together, otherwise
* %ESCAPE_HEX will be ignored.
*
* Caller must provide valid source and destination pointers. Be aware that
* destination buffer will not be NULL-terminated, thus caller have to append
- * it if needs. The supported flags are::
+ * it if needs. The supported flags are::
*
* %ESCAPE_SPACE: (special white space, not space itself)
* '\f' - form feed
@@ -482,11 +484,27 @@ static bool escape_hex(unsigned char c, char **dst, char *end)
* %ESCAPE_ANY:
* all previous together
* %ESCAPE_NP:
- * escape only non-printable characters (checked by isprint)
+ * escape only non-printable characters, checked by isprint()
* %ESCAPE_ANY_NP:
* all previous together
* %ESCAPE_HEX:
* '\xHH' - byte with hexadecimal value HH (2 digits)
+ * %ESCAPE_NA:
+ * escape only non-ascii characters, checked by isascii()
+ * %ESCAPE_NAP:
+ * escape only non-printable or non-ascii characters
+ * %ESCAPE_APPEND:
+ * append characters from @only to be escaped by the given classes
+ *
+ * %ESCAPE_APPEND helps to pass additional characters to the escaped set when
+ * one of %ESCAPE_NP, %ESCAPE_NA, or %ESCAPE_NAP is provided.
+ *
+ * One notable caveat: %ESCAPE_NAP, %ESCAPE_NP and %ESCAPE_NA have higher
+ * priority than the rest of the flags (%ESCAPE_NAP is the highest).
+ * It doesn't make much sense to use either of them without %ESCAPE_OCTAL
+ * or %ESCAPE_HEX, because they cover most of the other character classes.
+ * %ESCAPE_NAP can utilize %ESCAPE_SPACE or %ESCAPE_SPECIAL in addition to
+ * the above.
*
* Return:
* The total size of the escaped output that would be generated for
@@ -500,67 +518,69 @@ int string_escape_mem(const char *src, size_t isz, char *dst, size_t osz,
char *p = dst;
char *end = p + osz;
bool is_dict = only && *only;
+ bool is_append = flags & ESCAPE_APPEND;
while (isz--) {
unsigned char c = *src++;
+ bool in_dict = is_dict && strchr(only, c);
/*
* Apply rules in the following sequence:
- * - the character is printable, when @flags has
- * %ESCAPE_NP bit set
* - the @only string is supplied and does not contain a
* character under question
+ * - the character is printable and ASCII, when @flags has
+ * %ESCAPE_NAP bit set
+ * - the character is printable, when @flags has
+ * %ESCAPE_NP bit set
+ * - the character is ASCII, when @flags has
+ * %ESCAPE_NA bit set
* - the character doesn't fall into a class of symbols
* defined by given @flags
* In these cases we just pass through a character to the
* output buffer.
+ *
+ * When %ESCAPE_APPEND is passed, the characters from @only
+ * have been excluded from the %ESCAPE_NAP, %ESCAPE_NP, and
+ * %ESCAPE_NA cases.
*/
- if ((flags & ESCAPE_NP && isprint(c)) ||
- (is_dict && !strchr(only, c))) {
- /* do nothing */
- } else {
- if (flags & ESCAPE_SPACE && escape_space(c, &p, end))
- continue;
+ if (!(is_append || in_dict) && is_dict &&
+ escape_passthrough(c, &p, end))
+ continue;
- if (flags & ESCAPE_SPECIAL && escape_special(c, &p, end))
- continue;
+ if (!(is_append && in_dict) && isascii(c) && isprint(c) &&
+ flags & ESCAPE_NAP && escape_passthrough(c, &p, end))
+ continue;
- if (flags & ESCAPE_NULL && escape_null(c, &p, end))
- continue;
+ if (!(is_append && in_dict) && isprint(c) &&
+ flags & ESCAPE_NP && escape_passthrough(c, &p, end))
+ continue;
- /* ESCAPE_OCTAL and ESCAPE_HEX always go last */
- if (flags & ESCAPE_OCTAL && escape_octal(c, &p, end))
- continue;
+ if (!(is_append && in_dict) && isascii(c) &&
+ flags & ESCAPE_NA && escape_passthrough(c, &p, end))
+ continue;
- if (flags & ESCAPE_HEX && escape_hex(c, &p, end))
- continue;
- }
+ if (flags & ESCAPE_SPACE && escape_space(c, &p, end))
+ continue;
- escape_passthrough(c, &p, end);
- }
+ if (flags & ESCAPE_SPECIAL && escape_special(c, &p, end))
+ continue;
- return p - dst;
-}
-EXPORT_SYMBOL(string_escape_mem);
+ if (flags & ESCAPE_NULL && escape_null(c, &p, end))
+ continue;
-int string_escape_mem_ascii(const char *src, size_t isz, char *dst,
- size_t osz)
-{
- char *p = dst;
- char *end = p + osz;
+ /* ESCAPE_OCTAL and ESCAPE_HEX always go last */
+ if (flags & ESCAPE_OCTAL && escape_octal(c, &p, end))
+ continue;
- while (isz--) {
- unsigned char c = *src++;
+ if (flags & ESCAPE_HEX && escape_hex(c, &p, end))
+ continue;
- if (!isprint(c) || !isascii(c) || c == '"' || c == '\\')
- escape_hex(c, &p, end);
- else
- escape_passthrough(c, &p, end);
+ escape_passthrough(c, &p, end);
}
return p - dst;
}
-EXPORT_SYMBOL(string_escape_mem_ascii);
+EXPORT_SYMBOL(string_escape_mem);
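/*
 * Usage sketch for the extended flags (hypothetical helper; the expected
 * output is taken from the new vectors in lib/test-string_helpers.c below):
 */
static int escape_non_ascii(const char *in, size_t len, char *out, size_t osz)
{
	/*
	 * For in = "\007 \eb\"\x90\xCF\r" this produces
	 * "\007 \eb\"\\x90\\xcf\r": only the two non-ASCII bytes change,
	 * matching the ESCAPE_HEX | ESCAPE_NA test case.
	 */
	return string_escape_mem(in, len, out, osz, ESCAPE_HEX | ESCAPE_NA, NULL);
}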
/*
* Return an allocated string that has been escaped of special characters
diff --git a/lib/syscall.c b/lib/syscall.c
index ba13e924c430..006e256d2264 100644
--- a/lib/syscall.c
+++ b/lib/syscall.c
@@ -68,13 +68,13 @@ static int collect_syscall(struct task_struct *target, struct syscall_info *info
*/
int task_current_syscall(struct task_struct *target, struct syscall_info *info)
{
- long state;
unsigned long ncsw;
+ unsigned int state;
if (target == current)
return collect_syscall(target, info);
- state = target->state;
+ state = READ_ONCE(target->__state);
if (unlikely(!state))
return -EAGAIN;
diff --git a/lib/test-string_helpers.c b/lib/test-string_helpers.c
index 10360d4ea273..2185d71704f0 100644
--- a/lib/test-string_helpers.c
+++ b/lib/test-string_helpers.c
@@ -19,7 +19,7 @@ static __init bool test_string_check_buf(const char *name, unsigned int flags,
if (q_real == q_test && !memcmp(out_test, out_real, q_test))
return true;
- pr_warn("Test '%s' failed: flags = %u\n", name, flags);
+ pr_warn("Test '%s' failed: flags = %#x\n", name, flags);
print_hex_dump(KERN_WARNING, "Input: ", DUMP_PREFIX_NONE, 16, 1,
in, p, true);
@@ -136,7 +136,7 @@ static const struct test_string_2 escape0[] __initconst = {{
.flags = ESCAPE_SPACE | ESCAPE_HEX,
},{
/* terminator */
- }},
+ }}
},{
.in = "\\h\\\"\a\e\\",
.s1 = {{
@@ -150,7 +150,7 @@ static const struct test_string_2 escape0[] __initconst = {{
.flags = ESCAPE_SPECIAL | ESCAPE_HEX,
},{
/* terminator */
- }},
+ }}
},{
.in = "\eb \\C\007\"\x90\r]",
.s1 = {{
@@ -201,12 +201,26 @@ static const struct test_string_2 escape0[] __initconst = {{
.flags = ESCAPE_NP | ESCAPE_HEX,
},{
/* terminator */
- }},
+ }}
+},{
+ .in = "\007 \eb\"\x90\xCF\r",
+ .s1 = {{
+ .out = "\007 \eb\"\\220\\317\r",
+ .flags = ESCAPE_OCTAL | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\\x90\\xcf\r",
+ .flags = ESCAPE_HEX | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\xCF\r",
+ .flags = ESCAPE_NA,
+ },{
+ /* terminator */
+ }}
},{
/* terminator */
}};
-#define TEST_STRING_2_DICT_1 "b\\ \t\r"
+#define TEST_STRING_2_DICT_1 "b\\ \t\r\xCF"
static const struct test_string_2 escape1[] __initconst = {{
.in = "\f\\ \n\r\t\v",
.s1 = {{
@@ -216,16 +230,40 @@ static const struct test_string_2 escape1[] __initconst = {{
.out = "\f\\x5c\\x20\n\\x0d\\x09\v",
.flags = ESCAPE_HEX,
},{
+ .out = "\f\\134\\040\n\\015\\011\v",
+ .flags = ESCAPE_ANY | ESCAPE_APPEND,
+ },{
+ .out = "\\014\\134\\040\\012\\015\\011\\013",
+ .flags = ESCAPE_OCTAL | ESCAPE_APPEND | ESCAPE_NAP,
+ },{
+ .out = "\\x0c\\x5c\\x20\\x0a\\x0d\\x09\\x0b",
+ .flags = ESCAPE_HEX | ESCAPE_APPEND | ESCAPE_NAP,
+ },{
+ .out = "\f\\134\\040\n\\015\\011\v",
+ .flags = ESCAPE_OCTAL | ESCAPE_APPEND | ESCAPE_NA,
+ },{
+ .out = "\f\\x5c\\x20\n\\x0d\\x09\v",
+ .flags = ESCAPE_HEX | ESCAPE_APPEND | ESCAPE_NA,
+ },{
/* terminator */
- }},
+ }}
},{
- .in = "\\h\\\"\a\e\\",
+ .in = "\\h\\\"\a\xCF\e\\",
.s1 = {{
- .out = "\\134h\\134\"\a\e\\134",
+ .out = "\\134h\\134\"\a\\317\e\\134",
.flags = ESCAPE_OCTAL,
},{
+ .out = "\\134h\\134\"\a\\317\e\\134",
+ .flags = ESCAPE_ANY | ESCAPE_APPEND,
+ },{
+ .out = "\\134h\\134\"\\007\\317\\033\\134",
+ .flags = ESCAPE_OCTAL | ESCAPE_APPEND | ESCAPE_NAP,
+ },{
+ .out = "\\134h\\134\"\a\\317\e\\134",
+ .flags = ESCAPE_OCTAL | ESCAPE_APPEND | ESCAPE_NA,
+ },{
/* terminator */
- }},
+ }}
},{
.in = "\eb \\C\007\"\x90\r]",
.s1 = {{
@@ -233,7 +271,89 @@ static const struct test_string_2 escape1[] __initconst = {{
.flags = ESCAPE_OCTAL,
},{
/* terminator */
- }},
+ }}
+},{
+ .in = "\007 \eb\"\x90\xCF\r",
+ .s1 = {{
+ .out = "\007 \eb\"\x90\xCF\r",
+ .flags = ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\xCF\r",
+ .flags = ESCAPE_SPACE | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\xCF\r",
+ .flags = ESCAPE_SPECIAL | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\xCF\r",
+ .flags = ESCAPE_SPACE | ESCAPE_SPECIAL | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\\317\r",
+ .flags = ESCAPE_OCTAL | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\\317\r",
+ .flags = ESCAPE_SPACE | ESCAPE_OCTAL | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\\317\r",
+ .flags = ESCAPE_SPECIAL | ESCAPE_OCTAL | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\\317\r",
+ .flags = ESCAPE_ANY | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\\xcf\r",
+ .flags = ESCAPE_HEX | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\\xcf\r",
+ .flags = ESCAPE_SPACE | ESCAPE_HEX | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\\xcf\r",
+ .flags = ESCAPE_SPECIAL | ESCAPE_HEX | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\\xcf\r",
+ .flags = ESCAPE_SPACE | ESCAPE_SPECIAL | ESCAPE_HEX | ESCAPE_NA,
+ },{
+ /* terminator */
+ }}
+},{
+ .in = "\007 \eb\"\x90\xCF\r",
+ .s1 = {{
+ .out = "\007 \eb\"\x90\xCF\r",
+ .flags = ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\xCF\\r",
+ .flags = ESCAPE_SPACE | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\xCF\r",
+ .flags = ESCAPE_SPECIAL | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\xCF\\r",
+ .flags = ESCAPE_SPACE | ESCAPE_SPECIAL | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\\317\\015",
+ .flags = ESCAPE_OCTAL | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\\317\\r",
+ .flags = ESCAPE_SPACE | ESCAPE_OCTAL | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\\317\\015",
+ .flags = ESCAPE_SPECIAL | ESCAPE_OCTAL | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\\317\r",
+ .flags = ESCAPE_ANY | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\\xcf\\x0d",
+ .flags = ESCAPE_HEX | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\\xcf\\r",
+ .flags = ESCAPE_SPACE | ESCAPE_HEX | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\\xcf\\x0d",
+ .flags = ESCAPE_SPECIAL | ESCAPE_HEX | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\\xcf\\r",
+ .flags = ESCAPE_SPACE | ESCAPE_SPECIAL | ESCAPE_HEX | ESCAPE_NAP,
+ },{
+ /* terminator */
+ }}
},{
/* terminator */
}};
@@ -290,7 +410,7 @@ test_string_escape_overflow(const char *in, int p, unsigned int flags, const cha
q_real = string_escape_mem(in, p, NULL, 0, flags, esc);
if (q_real != q_test)
- pr_warn("Test '%s' failed: flags = %u, osz = 0, expected %d, got %d\n",
+ pr_warn("Test '%s' failed: flags = %#x, osz = 0, expected %d, got %d\n",
name, flags, q_test, q_real);
}
@@ -315,8 +435,13 @@ static __init void test_string_escape(const char *name,
/* NULL injection */
if (flags & ESCAPE_NULL) {
in[p++] = '\0';
- out_test[q_test++] = '\\';
- out_test[q_test++] = '0';
+ /* '\0' passes isascii() test */
+ if (flags & ESCAPE_NA && !(flags & ESCAPE_APPEND && esc)) {
+ out_test[q_test++] = '\0';
+ } else {
+ out_test[q_test++] = '\\';
+ out_test[q_test++] = '0';
+ }
}
/* Don't try strings that have no output */
@@ -459,17 +584,17 @@ static int __init test_string_helpers_init(void)
unsigned int i;
pr_info("Running tests...\n");
- for (i = 0; i < UNESCAPE_ANY + 1; i++)
+ for (i = 0; i < UNESCAPE_ALL_MASK + 1; i++)
test_string_unescape("unescape", i, false);
test_string_unescape("unescape inplace",
get_random_int() % (UNESCAPE_ANY + 1), true);
/* Without dictionary */
- for (i = 0; i < (ESCAPE_ANY_NP | ESCAPE_HEX) + 1; i++)
+ for (i = 0; i < ESCAPE_ALL_MASK + 1; i++)
test_string_escape("escape 0", escape0, i, TEST_STRING_2_DICT_0);
/* With dictionary */
- for (i = 0; i < (ESCAPE_ANY_NP | ESCAPE_HEX) + 1; i++)
+ for (i = 0; i < ESCAPE_ALL_MASK + 1; i++)
test_string_escape("escape 1", escape1, i, TEST_STRING_2_DICT_1);
/* Test string_get_size() */
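To decode the new escape1 expectations: under ESCAPE_APPEND the dict selects which characters the class flags escape, while the ESCAPE_NA/ESCAPE_NAP bypasses pass everything else through. A worked sketch built from the table entry above (assumed call, not from the patch):

	char out[32];
	/*
	 * dict = "b\\ \t\r\xCF", flags = ESCAPE_OCTAL | ESCAPE_APPEND | ESCAPE_NA:
	 * '\f', '\n', '\v' are ASCII and not in the dict -> passed through;
	 * '\\', ' ', '\r', '\t' are in the dict -> octal-escaped.
	 */
	int n = string_escape_mem("\f\\ \n\r\t\v", 7, out, sizeof(out),
				  ESCAPE_OCTAL | ESCAPE_APPEND | ESCAPE_NA,
				  "b\\ \t\r\xCF");
	/* out holds "\f\134\040\n\015\011\v" (19 bytes, not NUL-terminated); n == 19 */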
diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
index 9cd575583180..4ea73f5aed41 100644
--- a/lib/test_bitmap.c
+++ b/lib/test_bitmap.c
@@ -366,6 +366,13 @@ static const struct test_bitmap_parselist parselist_tests[] __initconst = {
{0, "0-31:1/3,1-31:1/3,2-31:1/3", &exp1[8 * step], 32, 0},
{0, "1-10:8/12,8-31:24/29,0-31:0/3", &exp1[9 * step], 32, 0},
+ {0, "all", &exp1[8 * step], 32, 0},
+ {0, "0, 1, all, ", &exp1[8 * step], 32, 0},
+ {0, "all:1/2", &exp1[4 * step], 32, 0},
+ {0, "ALL:1/2", &exp1[4 * step], 32, 0},
+ {-EINVAL, "al", NULL, 8, 0},
+ {-EINVAL, "alll", NULL, 8, 0},
+
{-EINVAL, "-1", NULL, 8, 0},
{-EINVAL, "-0", NULL, 8, 0},
{-EINVAL, "10-1", NULL, 8, 0},
diff --git a/lib/test_hmm.c b/lib/test_hmm.c
index 80a78877bd93..8c55c4723692 100644
--- a/lib/test_hmm.c
+++ b/lib/test_hmm.c
@@ -25,6 +25,7 @@
#include <linux/swapops.h>
#include <linux/sched/mm.h>
#include <linux/platform_device.h>
+#include <linux/rmap.h>
#include "test_hmm_uapi.h"
@@ -46,6 +47,7 @@ struct dmirror_bounce {
unsigned long cpages;
};
+#define DPT_XA_TAG_ATOMIC 1UL
#define DPT_XA_TAG_WRITE 3UL
/*
@@ -218,7 +220,7 @@ static bool dmirror_interval_invalidate(struct mmu_interval_notifier *mni,
* the invalidation is handled as part of the migration process.
*/
if (range->event == MMU_NOTIFY_MIGRATE &&
- range->migrate_pgmap_owner == dmirror->mdevice)
+ range->owner == dmirror->mdevice)
return true;
if (mmu_notifier_range_blockable(range))
@@ -619,6 +621,54 @@ static void dmirror_migrate_alloc_and_copy(struct migrate_vma *args,
}
}
+static int dmirror_check_atomic(struct dmirror *dmirror, unsigned long start,
+ unsigned long end)
+{
+ unsigned long pfn;
+
+ for (pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++) {
+ void *entry;
+
+		entry = xa_load(&dmirror->pt, pfn);
+ if (xa_pointer_tag(entry) == DPT_XA_TAG_ATOMIC)
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+static int dmirror_atomic_map(unsigned long start, unsigned long end,
+ struct page **pages, struct dmirror *dmirror)
+{
+ unsigned long pfn, mapped = 0;
+ int i;
+
+ /* Map the migrated pages into the device's page tables. */
+ mutex_lock(&dmirror->mutex);
+
+ for (i = 0, pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++, i++) {
+ void *entry;
+
+ if (!pages[i])
+ continue;
+
+ entry = pages[i];
+ entry = xa_tag_pointer(entry, DPT_XA_TAG_ATOMIC);
+ entry = xa_store(&dmirror->pt, pfn, entry, GFP_ATOMIC);
+ if (xa_is_err(entry)) {
+ mutex_unlock(&dmirror->mutex);
+ return xa_err(entry);
+ }
+
+ mapped++;
+ }
+
+ mutex_unlock(&dmirror->mutex);
+ return mapped;
+}
+
static int dmirror_migrate_finalize_and_map(struct migrate_vma *args,
struct dmirror *dmirror)
{
@@ -661,6 +711,72 @@ static int dmirror_migrate_finalize_and_map(struct migrate_vma *args,
return 0;
}
+static int dmirror_exclusive(struct dmirror *dmirror,
+ struct hmm_dmirror_cmd *cmd)
+{
+ unsigned long start, end, addr;
+ unsigned long size = cmd->npages << PAGE_SHIFT;
+ struct mm_struct *mm = dmirror->notifier.mm;
+ struct page *pages[64];
+ struct dmirror_bounce bounce;
+ unsigned long next;
+ int ret;
+
+ start = cmd->addr;
+ end = start + size;
+ if (end < start)
+ return -EINVAL;
+
+ /* Since the mm is for the mirrored process, get a reference first. */
+ if (!mmget_not_zero(mm))
+ return -EINVAL;
+
+ mmap_read_lock(mm);
+ for (addr = start; addr < end; addr = next) {
+ unsigned long mapped;
+ int i;
+
+ if (end < addr + (ARRAY_SIZE(pages) << PAGE_SHIFT))
+ next = end;
+ else
+ next = addr + (ARRAY_SIZE(pages) << PAGE_SHIFT);
+
+ ret = make_device_exclusive_range(mm, addr, next, pages, NULL);
+ mapped = dmirror_atomic_map(addr, next, pages, dmirror);
+ for (i = 0; i < ret; i++) {
+ if (pages[i]) {
+ unlock_page(pages[i]);
+ put_page(pages[i]);
+ }
+ }
+
+ if (addr + (mapped << PAGE_SHIFT) < next) {
+ mmap_read_unlock(mm);
+ mmput(mm);
+ return -EBUSY;
+ }
+ }
+ mmap_read_unlock(mm);
+ mmput(mm);
+
+ /* Return the migrated data for verification. */
+ ret = dmirror_bounce_init(&bounce, start, size);
+ if (ret)
+ return ret;
+ mutex_lock(&dmirror->mutex);
+ ret = dmirror_do_read(dmirror, start, end, &bounce);
+ mutex_unlock(&dmirror->mutex);
+ if (ret == 0) {
+ if (copy_to_user(u64_to_user_ptr(cmd->ptr), bounce.ptr,
+ bounce.size))
+ ret = -EFAULT;
+ }
+
+ cmd->cpages = bounce.cpages;
+ dmirror_bounce_fini(&bounce);
+ return ret;
+}
+
static int dmirror_migrate(struct dmirror *dmirror,
struct hmm_dmirror_cmd *cmd)
{
@@ -686,9 +802,8 @@ static int dmirror_migrate(struct dmirror *dmirror,
mmap_read_lock(mm);
for (addr = start; addr < end; addr = next) {
- vma = find_vma(mm, addr);
- if (!vma || addr < vma->vm_start ||
- !(vma->vm_flags & VM_READ)) {
+ vma = vma_lookup(mm, addr);
+ if (!vma || !(vma->vm_flags & VM_READ)) {
ret = -EINVAL;
goto out;
}
@@ -949,6 +1064,15 @@ static long dmirror_fops_unlocked_ioctl(struct file *filp,
ret = dmirror_migrate(dmirror, &cmd);
break;
+ case HMM_DMIRROR_EXCLUSIVE:
+ ret = dmirror_exclusive(dmirror, &cmd);
+ break;
+
+ case HMM_DMIRROR_CHECK_EXCLUSIVE:
+ ret = dmirror_check_atomic(dmirror, cmd.addr,
+ cmd.addr + (cmd.npages << PAGE_SHIFT));
+ break;
+
case HMM_DMIRROR_SNAPSHOT:
ret = dmirror_snapshot(dmirror, &cmd);
break;
diff --git a/lib/test_hmm_uapi.h b/lib/test_hmm_uapi.h
index 670b4ef2a5b6..f14dea5dcd06 100644
--- a/lib/test_hmm_uapi.h
+++ b/lib/test_hmm_uapi.h
@@ -33,6 +33,8 @@ struct hmm_dmirror_cmd {
#define HMM_DMIRROR_WRITE _IOWR('H', 0x01, struct hmm_dmirror_cmd)
#define HMM_DMIRROR_MIGRATE _IOWR('H', 0x02, struct hmm_dmirror_cmd)
#define HMM_DMIRROR_SNAPSHOT _IOWR('H', 0x03, struct hmm_dmirror_cmd)
+#define HMM_DMIRROR_EXCLUSIVE _IOWR('H', 0x04, struct hmm_dmirror_cmd)
+#define HMM_DMIRROR_CHECK_EXCLUSIVE _IOWR('H', 0x05, struct hmm_dmirror_cmd)
/*
* Values returned in hmm_dmirror_cmd.ptr for HMM_DMIRROR_SNAPSHOT.
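A hypothetical userspace sequence for the two new ioctls (fd is an open dmirror device node; buf and out are mapped buffers — names assumed, not from the patch):

	struct hmm_dmirror_cmd cmd = {
		.addr   = (__u64)(uintptr_t)buf,	/* start of the range */
		.ptr    = (__u64)(uintptr_t)out,	/* data copied back here */
		.npages = 1,
	};

	if (ioctl(fd, HMM_DMIRROR_EXCLUSIVE, &cmd) == 0)
		/* -EPERM here means an entry is still tagged DPT_XA_TAG_ATOMIC */
		ret = ioctl(fd, HMM_DMIRROR_CHECK_EXCLUSIVE, &cmd);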
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index dc05cfc2d12f..f5f54766cbc2 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -55,7 +55,6 @@ static int kasan_test_init(struct kunit *test)
multishot = kasan_save_enable_multi_shot();
kasan_set_tagging_report_once(false);
fail_data.report_found = false;
- fail_data.report_expected = false;
kunit_add_named_resource(test, NULL, NULL, &resource,
"kasan_data", &fail_data);
return 0;
@@ -94,34 +93,30 @@ static void kasan_test_exit(struct kunit *test)
!kasan_async_mode_enabled()) \
migrate_disable(); \
KUNIT_EXPECT_FALSE(test, READ_ONCE(fail_data.report_found)); \
- WRITE_ONCE(fail_data.report_expected, true); \
barrier(); \
expression; \
barrier(); \
- KUNIT_EXPECT_EQ(test, \
- READ_ONCE(fail_data.report_expected), \
- READ_ONCE(fail_data.report_found)); \
+ if (!READ_ONCE(fail_data.report_found)) { \
+ KUNIT_FAIL(test, KUNIT_SUBTEST_INDENT "KASAN failure " \
+ "expected in \"" #expression \
+ "\", but none occurred"); \
+ } \
if (IS_ENABLED(CONFIG_KASAN_HW_TAGS)) { \
if (READ_ONCE(fail_data.report_found)) \
kasan_enable_tagging_sync(); \
migrate_enable(); \
} \
WRITE_ONCE(fail_data.report_found, false); \
- WRITE_ONCE(fail_data.report_expected, false); \
} while (0)
#define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do { \
- if (!IS_ENABLED(config)) { \
- kunit_info((test), "skipping, " #config " required"); \
- return; \
- } \
+ if (!IS_ENABLED(config)) \
+ kunit_skip((test), "Test requires " #config "=y"); \
} while (0)
#define KASAN_TEST_NEEDS_CONFIG_OFF(test, config) do { \
- if (IS_ENABLED(config)) { \
- kunit_info((test), "skipping, " #config " enabled"); \
- return; \
- } \
+ if (IS_ENABLED(config)) \
+ kunit_skip((test), "Test requires " #config "=n"); \
} while (0)
static void kmalloc_oob_right(struct kunit *test)
@@ -654,8 +649,20 @@ static char global_array[10];
static void kasan_global_oob(struct kunit *test)
{
- volatile int i = 3;
- char *p = &global_array[ARRAY_SIZE(global_array) + i];
+ /*
+ * Deliberate out-of-bounds access. To prevent CONFIG_UBSAN_LOCAL_BOUNDS
+	 * from failing here and panicking the kernel, access the array via a
+ * volatile pointer, which will prevent the compiler from being able to
+ * determine the array bounds.
+ *
+ * This access uses a volatile pointer to char (char *volatile) rather
+ * than the more conventional pointer to volatile char (volatile char *)
+ * because we want to prevent the compiler from making inferences about
+ * the pointer itself (i.e. its array bounds), not the data that it
+ * refers to.
+ */
+ char *volatile array = global_array;
+ char *p = &array[ARRAY_SIZE(global_array) + 3];
/* Only generic mode instruments globals. */
KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
@@ -703,8 +710,9 @@ static void ksize_uaf(struct kunit *test)
static void kasan_stack_oob(struct kunit *test)
{
char stack_array[10];
- volatile int i = OOB_TAG_OFF;
- char *p = &stack_array[ARRAY_SIZE(stack_array) + i];
+ /* See comment in kasan_global_oob. */
+ char *volatile array = stack_array;
+ char *p = &array[ARRAY_SIZE(stack_array) + OOB_TAG_OFF];
KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);
@@ -715,7 +723,9 @@ static void kasan_alloca_oob_left(struct kunit *test)
{
volatile int i = 10;
char alloca_array[i];
- char *p = alloca_array - 1;
+ /* See comment in kasan_global_oob. */
+ char *volatile array = alloca_array;
+ char *p = array - 1;
/* Only generic mode instruments dynamic allocas. */
KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
@@ -728,7 +738,9 @@ static void kasan_alloca_oob_right(struct kunit *test)
{
volatile int i = 10;
char alloca_array[i];
- char *p = alloca_array + i;
+ /* See comment in kasan_global_oob. */
+ char *volatile array = alloca_array;
+ char *p = array + i;
/* Only generic mode instruments dynamic allocas. */
KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
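The pointer-qualifier distinction the comments rely on, in isolation:

	char buf[10];
	volatile char *p = buf;	/* the bytes are volatile; the compiler may
				 * still reason about p and the array bounds */
	char *volatile q = buf;	/* the pointer itself is volatile; its value,
				 * and hence any bounds, are opaque */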
diff --git a/lib/test_list_sort.c b/lib/test_list_sort.c
index 00daaf23316f..ade7a1ea0c8e 100644
--- a/lib/test_list_sort.c
+++ b/lib/test_list_sort.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-#define pr_fmt(fmt) "list_sort_test: " fmt
+#include <kunit/test.h>
#include <linux/kernel.h>
#include <linux/list_sort.h>
@@ -23,68 +23,52 @@ struct debug_el {
struct list_head list;
unsigned int poison2;
int value;
- unsigned serial;
+ unsigned int serial;
};
-/* Array, containing pointers to all elements in the test list */
-static struct debug_el **elts __initdata;
-
-static int __init check(struct debug_el *ela, struct debug_el *elb)
+static void check(struct kunit *test, struct debug_el *ela, struct debug_el *elb)
{
- if (ela->serial >= TEST_LIST_LEN) {
- pr_err("error: incorrect serial %d\n", ela->serial);
- return -EINVAL;
- }
- if (elb->serial >= TEST_LIST_LEN) {
- pr_err("error: incorrect serial %d\n", elb->serial);
- return -EINVAL;
- }
- if (elts[ela->serial] != ela || elts[elb->serial] != elb) {
- pr_err("error: phantom element\n");
- return -EINVAL;
- }
- if (ela->poison1 != TEST_POISON1 || ela->poison2 != TEST_POISON2) {
- pr_err("error: bad poison: %#x/%#x\n",
- ela->poison1, ela->poison2);
- return -EINVAL;
- }
- if (elb->poison1 != TEST_POISON1 || elb->poison2 != TEST_POISON2) {
- pr_err("error: bad poison: %#x/%#x\n",
- elb->poison1, elb->poison2);
- return -EINVAL;
- }
- return 0;
+ struct debug_el **elts = test->priv;
+
+ KUNIT_EXPECT_LT_MSG(test, ela->serial, (unsigned int)TEST_LIST_LEN, "incorrect serial");
+ KUNIT_EXPECT_LT_MSG(test, elb->serial, (unsigned int)TEST_LIST_LEN, "incorrect serial");
+
+ KUNIT_EXPECT_PTR_EQ_MSG(test, elts[ela->serial], ela, "phantom element");
+ KUNIT_EXPECT_PTR_EQ_MSG(test, elts[elb->serial], elb, "phantom element");
+
+ KUNIT_EXPECT_EQ_MSG(test, ela->poison1, TEST_POISON1, "bad poison");
+ KUNIT_EXPECT_EQ_MSG(test, ela->poison2, TEST_POISON2, "bad poison");
+
+ KUNIT_EXPECT_EQ_MSG(test, elb->poison1, TEST_POISON1, "bad poison");
+ KUNIT_EXPECT_EQ_MSG(test, elb->poison2, TEST_POISON2, "bad poison");
}
-static int __init cmp(void *priv, const struct list_head *a,
- const struct list_head *b)
+/* `priv` is the test pointer so check() can fail the test if the list is invalid. */
+static int cmp(void *priv, const struct list_head *a, const struct list_head *b)
{
struct debug_el *ela, *elb;
ela = container_of(a, struct debug_el, list);
elb = container_of(b, struct debug_el, list);
- check(ela, elb);
+ check(priv, ela, elb);
return ela->value - elb->value;
}
-static int __init list_sort_test(void)
+static void list_sort_test(struct kunit *test)
{
- int i, count = 1, err = -ENOMEM;
- struct debug_el *el;
+ int i, count = 1;
+ struct debug_el *el, **elts;
struct list_head *cur;
LIST_HEAD(head);
- pr_debug("start testing list_sort()\n");
-
- elts = kcalloc(TEST_LIST_LEN, sizeof(*elts), GFP_KERNEL);
- if (!elts)
- return err;
+ elts = kunit_kcalloc(test, TEST_LIST_LEN, sizeof(*elts), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elts);
+ test->priv = elts;
for (i = 0; i < TEST_LIST_LEN; i++) {
- el = kmalloc(sizeof(*el), GFP_KERNEL);
- if (!el)
- goto exit;
+ el = kunit_kmalloc(test, sizeof(*el), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, el);
/* force some equivalencies */
el->value = prandom_u32() % (TEST_LIST_LEN / 3);
@@ -95,55 +79,44 @@ static int __init list_sort_test(void)
list_add_tail(&el->list, &head);
}
- list_sort(NULL, &head, cmp);
+ list_sort(test, &head, cmp);
- err = -EINVAL;
for (cur = head.next; cur->next != &head; cur = cur->next) {
struct debug_el *el1;
int cmp_result;
- if (cur->next->prev != cur) {
- pr_err("error: list is corrupted\n");
- goto exit;
- }
+ KUNIT_ASSERT_PTR_EQ_MSG(test, cur->next->prev, cur,
+ "list is corrupted");
- cmp_result = cmp(NULL, cur, cur->next);
- if (cmp_result > 0) {
- pr_err("error: list is not sorted\n");
- goto exit;
- }
+ cmp_result = cmp(test, cur, cur->next);
+ KUNIT_ASSERT_LE_MSG(test, cmp_result, 0, "list is not sorted");
el = container_of(cur, struct debug_el, list);
el1 = container_of(cur->next, struct debug_el, list);
- if (cmp_result == 0 && el->serial >= el1->serial) {
- pr_err("error: order of equivalent elements not "
- "preserved\n");
- goto exit;
+ if (cmp_result == 0) {
+ KUNIT_ASSERT_LE_MSG(test, el->serial, el1->serial,
+ "order of equivalent elements not preserved");
}
- if (check(el, el1)) {
- pr_err("error: element check failed\n");
- goto exit;
- }
+ check(test, el, el1);
count++;
}
- if (head.prev != cur) {
- pr_err("error: list is corrupted\n");
- goto exit;
- }
+ KUNIT_EXPECT_PTR_EQ_MSG(test, head.prev, cur, "list is corrupted");
+ KUNIT_EXPECT_EQ_MSG(test, count, TEST_LIST_LEN,
+ "list length changed after sorting!");
+}
- if (count != TEST_LIST_LEN) {
- pr_err("error: bad list length %d", count);
- goto exit;
- }
+static struct kunit_case list_sort_cases[] = {
+ KUNIT_CASE(list_sort_test),
+ {}
+};
+
+static struct kunit_suite list_sort_suite = {
+ .name = "list_sort",
+ .test_cases = list_sort_cases,
+};
+
+kunit_test_suites(&list_sort_suite);
- err = 0;
-exit:
- for (i = 0; i < TEST_LIST_LEN; i++)
- kfree(elts[i]);
- kfree(elts);
- return err;
-}
-module_init(list_sort_test);
MODULE_LICENSE("GPL");
diff --git a/lib/test_printf.c b/lib/test_printf.c
index ec0d5976bb69..8ac71aee46af 100644
--- a/lib/test_printf.c
+++ b/lib/test_printf.c
@@ -528,6 +528,11 @@ time_and_date(void)
test("0119-00-04T15:32:23", "%ptTr", &t);
test("15:32:23|2019-01-04", "%ptTt|%ptTd", &t, &t);
test("15:32:23|0119-00-04", "%ptTtr|%ptTdr", &t, &t);
+
+ test("2019-01-04 15:32:23", "%ptTs", &t);
+ test("0119-00-04 15:32:23", "%ptTsr", &t);
+ test("15:32:23|2019-01-04", "%ptTts|%ptTds", &t, &t);
+ test("15:32:23|0119-00-04", "%ptTtrs|%ptTdrs", &t, &t);
}
static void __init
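The new 's' modifier replaces the ISO 8601 'T' separator with a space when both date and time are printed, and composes with 'r' (raw). A sketch:

	time64_t t = 1546615943;	/* 2019-01-04 15:32:23 UTC */

	printk("%ptTs\n", &t);	/* "2019-01-04 15:32:23", not "...T15:32:23" */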
diff --git a/lib/test_scanf.c b/lib/test_scanf.c
new file mode 100644
index 000000000000..48ff5747a4da
--- /dev/null
+++ b/lib/test_scanf.c
@@ -0,0 +1,750 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Test cases for sscanf facility.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bitops.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/overflow.h>
+#include <linux/printk.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include "../tools/testing/selftests/kselftest_module.h"
+
+#define BUF_SIZE 1024
+
+KSTM_MODULE_GLOBALS();
+static char *test_buffer __initdata;
+static char *fmt_buffer __initdata;
+static struct rnd_state rnd_state __initdata;
+
+typedef int (*check_fn)(const void *check_data, const char *string,
+ const char *fmt, int n_args, va_list ap);
+
+static void __scanf(4, 6) __init
+_test(check_fn fn, const void *check_data, const char *string, const char *fmt,
+ int n_args, ...)
+{
+ va_list ap, ap_copy;
+ int ret;
+
+ total_tests++;
+
+ va_start(ap, n_args);
+ va_copy(ap_copy, ap);
+ ret = vsscanf(string, fmt, ap_copy);
+ va_end(ap_copy);
+
+ if (ret != n_args) {
+ pr_warn("vsscanf(\"%s\", \"%s\", ...) returned %d expected %d\n",
+ string, fmt, ret, n_args);
+ goto fail;
+ }
+
+ ret = (*fn)(check_data, string, fmt, n_args, ap);
+ if (ret)
+ goto fail;
+
+ va_end(ap);
+
+ return;
+
+fail:
+ failed_tests++;
+ va_end(ap);
+}
+
+#define _check_numbers_template(arg_fmt, expect, str, fmt, n_args, ap) \
+do { \
+ pr_debug("\"%s\", \"%s\" ->\n", str, fmt); \
+ for (; n_args > 0; n_args--, expect++) { \
+ typeof(*expect) got = *va_arg(ap, typeof(expect)); \
+ pr_debug("\t" arg_fmt "\n", got); \
+ if (got != *expect) { \
+ pr_warn("vsscanf(\"%s\", \"%s\", ...) expected " arg_fmt " got " arg_fmt "\n", \
+ str, fmt, *expect, got); \
+ return 1; \
+ } \
+ } \
+ return 0; \
+} while (0)
+
+static int __init check_ull(const void *check_data, const char *string,
+ const char *fmt, int n_args, va_list ap)
+{
+ const unsigned long long *pval = check_data;
+
+ _check_numbers_template("%llu", pval, string, fmt, n_args, ap);
+}
+
+static int __init check_ll(const void *check_data, const char *string,
+ const char *fmt, int n_args, va_list ap)
+{
+ const long long *pval = check_data;
+
+ _check_numbers_template("%lld", pval, string, fmt, n_args, ap);
+}
+
+static int __init check_ulong(const void *check_data, const char *string,
+ const char *fmt, int n_args, va_list ap)
+{
+ const unsigned long *pval = check_data;
+
+ _check_numbers_template("%lu", pval, string, fmt, n_args, ap);
+}
+
+static int __init check_long(const void *check_data, const char *string,
+ const char *fmt, int n_args, va_list ap)
+{
+ const long *pval = check_data;
+
+ _check_numbers_template("%ld", pval, string, fmt, n_args, ap);
+}
+
+static int __init check_uint(const void *check_data, const char *string,
+ const char *fmt, int n_args, va_list ap)
+{
+ const unsigned int *pval = check_data;
+
+ _check_numbers_template("%u", pval, string, fmt, n_args, ap);
+}
+
+static int __init check_int(const void *check_data, const char *string,
+ const char *fmt, int n_args, va_list ap)
+{
+ const int *pval = check_data;
+
+ _check_numbers_template("%d", pval, string, fmt, n_args, ap);
+}
+
+static int __init check_ushort(const void *check_data, const char *string,
+ const char *fmt, int n_args, va_list ap)
+{
+ const unsigned short *pval = check_data;
+
+ _check_numbers_template("%hu", pval, string, fmt, n_args, ap);
+}
+
+static int __init check_short(const void *check_data, const char *string,
+ const char *fmt, int n_args, va_list ap)
+{
+ const short *pval = check_data;
+
+ _check_numbers_template("%hd", pval, string, fmt, n_args, ap);
+}
+
+static int __init check_uchar(const void *check_data, const char *string,
+ const char *fmt, int n_args, va_list ap)
+{
+ const unsigned char *pval = check_data;
+
+ _check_numbers_template("%hhu", pval, string, fmt, n_args, ap);
+}
+
+static int __init check_char(const void *check_data, const char *string,
+ const char *fmt, int n_args, va_list ap)
+{
+ const signed char *pval = check_data;
+
+ _check_numbers_template("%hhd", pval, string, fmt, n_args, ap);
+}
+
+/* Selection of interesting numbers to test, copied from test-kstrtox.c */
+static const unsigned long long numbers[] __initconst = {
+ 0x0ULL,
+ 0x1ULL,
+ 0x7fULL,
+ 0x80ULL,
+ 0x81ULL,
+ 0xffULL,
+ 0x100ULL,
+ 0x101ULL,
+ 0x7fffULL,
+ 0x8000ULL,
+ 0x8001ULL,
+ 0xffffULL,
+ 0x10000ULL,
+ 0x10001ULL,
+ 0x7fffffffULL,
+ 0x80000000ULL,
+ 0x80000001ULL,
+ 0xffffffffULL,
+ 0x100000000ULL,
+ 0x100000001ULL,
+ 0x7fffffffffffffffULL,
+ 0x8000000000000000ULL,
+ 0x8000000000000001ULL,
+ 0xfffffffffffffffeULL,
+ 0xffffffffffffffffULL,
+};
+
+#define value_representable_in_type(T, val) \
+(is_signed_type(T) \
+ ? ((long long)(val) >= type_min(T)) && ((long long)(val) <= type_max(T)) \
+ : ((unsigned long long)(val) <= type_max(T)))
+
+#define test_one_number(T, gen_fmt, scan_fmt, val, fn) \
+do { \
+ const T expect_val = (T)(val); \
+ T result = ~expect_val; /* should be overwritten */ \
+ \
+ snprintf(test_buffer, BUF_SIZE, gen_fmt, expect_val); \
+ _test(fn, &expect_val, test_buffer, "%" scan_fmt, 1, &result); \
+} while (0)
+
+#define simple_numbers_loop(T, gen_fmt, scan_fmt, fn) \
+do { \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(numbers); i++) { \
+ if (value_representable_in_type(T, numbers[i])) \
+ test_one_number(T, gen_fmt, scan_fmt, \
+ numbers[i], fn); \
+ \
+ if (value_representable_in_type(T, -numbers[i])) \
+ test_one_number(T, gen_fmt, scan_fmt, \
+ -numbers[i], fn); \
+ } \
+} while (0)
+
+static void __init numbers_simple(void)
+{
+ simple_numbers_loop(unsigned long long, "%llu", "llu", check_ull);
+ simple_numbers_loop(long long, "%lld", "lld", check_ll);
+ simple_numbers_loop(long long, "%lld", "lli", check_ll);
+ simple_numbers_loop(unsigned long long, "%llx", "llx", check_ull);
+ simple_numbers_loop(long long, "%llx", "llx", check_ll);
+ simple_numbers_loop(long long, "0x%llx", "lli", check_ll);
+ simple_numbers_loop(unsigned long long, "0x%llx", "llx", check_ull);
+ simple_numbers_loop(long long, "0x%llx", "llx", check_ll);
+
+ simple_numbers_loop(unsigned long, "%lu", "lu", check_ulong);
+ simple_numbers_loop(long, "%ld", "ld", check_long);
+ simple_numbers_loop(long, "%ld", "li", check_long);
+ simple_numbers_loop(unsigned long, "%lx", "lx", check_ulong);
+ simple_numbers_loop(long, "%lx", "lx", check_long);
+ simple_numbers_loop(long, "0x%lx", "li", check_long);
+ simple_numbers_loop(unsigned long, "0x%lx", "lx", check_ulong);
+ simple_numbers_loop(long, "0x%lx", "lx", check_long);
+
+ simple_numbers_loop(unsigned int, "%u", "u", check_uint);
+ simple_numbers_loop(int, "%d", "d", check_int);
+ simple_numbers_loop(int, "%d", "i", check_int);
+ simple_numbers_loop(unsigned int, "%x", "x", check_uint);
+ simple_numbers_loop(int, "%x", "x", check_int);
+ simple_numbers_loop(int, "0x%x", "i", check_int);
+ simple_numbers_loop(unsigned int, "0x%x", "x", check_uint);
+ simple_numbers_loop(int, "0x%x", "x", check_int);
+
+ simple_numbers_loop(unsigned short, "%hu", "hu", check_ushort);
+ simple_numbers_loop(short, "%hd", "hd", check_short);
+ simple_numbers_loop(short, "%hd", "hi", check_short);
+ simple_numbers_loop(unsigned short, "%hx", "hx", check_ushort);
+ simple_numbers_loop(short, "%hx", "hx", check_short);
+ simple_numbers_loop(short, "0x%hx", "hi", check_short);
+ simple_numbers_loop(unsigned short, "0x%hx", "hx", check_ushort);
+ simple_numbers_loop(short, "0x%hx", "hx", check_short);
+
+ simple_numbers_loop(unsigned char, "%hhu", "hhu", check_uchar);
+ simple_numbers_loop(signed char, "%hhd", "hhd", check_char);
+ simple_numbers_loop(signed char, "%hhd", "hhi", check_char);
+ simple_numbers_loop(unsigned char, "%hhx", "hhx", check_uchar);
+ simple_numbers_loop(signed char, "%hhx", "hhx", check_char);
+ simple_numbers_loop(signed char, "0x%hhx", "hhi", check_char);
+ simple_numbers_loop(unsigned char, "0x%hhx", "hhx", check_uchar);
+ simple_numbers_loop(signed char, "0x%hhx", "hhx", check_char);
+}
+
+/*
+ * This gives a better variety of number "lengths" in a small sample than
+ * the raw prandom*() functions (Not mathematically rigorous!!).
+ * Variability of length and value is more important than perfect randomness.
+ */
+static u32 __init next_test_random(u32 max_bits)
+{
+ u32 n_bits = hweight32(prandom_u32_state(&rnd_state)) % (max_bits + 1);
+
+ return prandom_u32_state(&rnd_state) & (UINT_MAX >> (32 - n_bits));
+}
+
+static unsigned long long __init next_test_random_ull(void)
+{
+ u32 rand1 = prandom_u32_state(&rnd_state);
+ u32 n_bits = (hweight32(rand1) * 3) % 64;
+ u64 val = (u64)prandom_u32_state(&rnd_state) * rand1;
+
+ return val & (ULLONG_MAX >> (64 - n_bits));
+}
+
+#define random_for_type(T) \
+ ((T)(sizeof(T) <= sizeof(u32) \
+ ? next_test_random(BITS_PER_TYPE(T)) \
+ : next_test_random_ull()))
+
+/*
+ * Define a pattern of negative and positive numbers to ensure we get
+ * some of both within the small number of samples in a test string.
+ */
+#define NEGATIVES_PATTERN 0x3246 /* 00110010 01000110 */
+
+#define fill_random_array(arr) \
+do { \
+ unsigned int neg_pattern = NEGATIVES_PATTERN; \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(arr); i++, neg_pattern >>= 1) { \
+ (arr)[i] = random_for_type(typeof((arr)[0])); \
+ if (is_signed_type(typeof((arr)[0])) && (neg_pattern & 1)) \
+ (arr)[i] = -(arr)[i]; \
+ } \
+} while (0)
+
+/*
+ * Convenience wrapper around snprintf() to append at buf_pos in buf,
+ * updating buf_pos and returning the number of characters appended.
+ * On error buf_pos is not changed and return value is 0.
+ */
+static int __init __printf(4, 5)
+append_fmt(char *buf, int *buf_pos, int buf_len, const char *val_fmt, ...)
+{
+ va_list ap;
+ int field_len;
+
+ va_start(ap, val_fmt);
+ field_len = vsnprintf(buf + *buf_pos, buf_len - *buf_pos, val_fmt, ap);
+ va_end(ap);
+
+ if (field_len < 0)
+ field_len = 0;
+
+ *buf_pos += field_len;
+
+ return field_len;
+}
+
+/*
+ * Convenience function to append the field delimiter string
+ * to both the value string and format string buffers.
+ */
+static void __init append_delim(char *str_buf, int *str_buf_pos, int str_buf_len,
+ char *fmt_buf, int *fmt_buf_pos, int fmt_buf_len,
+ const char *delim_str)
+{
+ append_fmt(str_buf, str_buf_pos, str_buf_len, delim_str);
+ append_fmt(fmt_buf, fmt_buf_pos, fmt_buf_len, delim_str);
+}
+
+#define test_array_8(fn, check_data, string, fmt, arr) \
+do { \
+ BUILD_BUG_ON(ARRAY_SIZE(arr) != 8); \
+ _test(fn, check_data, string, fmt, 8, \
+ &(arr)[0], &(arr)[1], &(arr)[2], &(arr)[3], \
+ &(arr)[4], &(arr)[5], &(arr)[6], &(arr)[7]); \
+} while (0)
+
+#define numbers_list_8(T, gen_fmt, field_sep, scan_fmt, fn) \
+do { \
+ int i, pos = 0, fmt_pos = 0; \
+ T expect[8], result[8]; \
+ \
+ fill_random_array(expect); \
+ \
+ for (i = 0; i < ARRAY_SIZE(expect); i++) { \
+ if (i != 0) \
+ append_delim(test_buffer, &pos, BUF_SIZE, \
+ fmt_buffer, &fmt_pos, BUF_SIZE, \
+ field_sep); \
+ \
+ append_fmt(test_buffer, &pos, BUF_SIZE, gen_fmt, expect[i]); \
+ append_fmt(fmt_buffer, &fmt_pos, BUF_SIZE, "%%%s", scan_fmt); \
+ } \
+ \
+ test_array_8(fn, expect, test_buffer, fmt_buffer, result); \
+} while (0)
+
+#define numbers_list_fix_width(T, gen_fmt, field_sep, width, scan_fmt, fn) \
+do { \
+ char full_fmt[16]; \
+ \
+ snprintf(full_fmt, sizeof(full_fmt), "%u%s", width, scan_fmt); \
+ numbers_list_8(T, gen_fmt, field_sep, full_fmt, fn); \
+} while (0)
+
+#define numbers_list_val_width(T, gen_fmt, field_sep, scan_fmt, fn) \
+do { \
+ int i, val_len, pos = 0, fmt_pos = 0; \
+ T expect[8], result[8]; \
+ \
+ fill_random_array(expect); \
+ \
+ for (i = 0; i < ARRAY_SIZE(expect); i++) { \
+ if (i != 0) \
+ append_delim(test_buffer, &pos, BUF_SIZE, \
+ fmt_buffer, &fmt_pos, BUF_SIZE, field_sep);\
+ \
+ val_len = append_fmt(test_buffer, &pos, BUF_SIZE, gen_fmt, \
+ expect[i]); \
+ append_fmt(fmt_buffer, &fmt_pos, BUF_SIZE, \
+ "%%%u%s", val_len, scan_fmt); \
+ } \
+ \
+ test_array_8(fn, expect, test_buffer, fmt_buffer, result); \
+} while (0)
+
+static void __init numbers_list(const char *delim)
+{
+ numbers_list_8(unsigned long long, "%llu", delim, "llu", check_ull);
+ numbers_list_8(long long, "%lld", delim, "lld", check_ll);
+ numbers_list_8(long long, "%lld", delim, "lli", check_ll);
+ numbers_list_8(unsigned long long, "%llx", delim, "llx", check_ull);
+ numbers_list_8(unsigned long long, "0x%llx", delim, "llx", check_ull);
+ numbers_list_8(long long, "0x%llx", delim, "lli", check_ll);
+
+ numbers_list_8(unsigned long, "%lu", delim, "lu", check_ulong);
+ numbers_list_8(long, "%ld", delim, "ld", check_long);
+ numbers_list_8(long, "%ld", delim, "li", check_long);
+ numbers_list_8(unsigned long, "%lx", delim, "lx", check_ulong);
+ numbers_list_8(unsigned long, "0x%lx", delim, "lx", check_ulong);
+ numbers_list_8(long, "0x%lx", delim, "li", check_long);
+
+ numbers_list_8(unsigned int, "%u", delim, "u", check_uint);
+ numbers_list_8(int, "%d", delim, "d", check_int);
+ numbers_list_8(int, "%d", delim, "i", check_int);
+ numbers_list_8(unsigned int, "%x", delim, "x", check_uint);
+ numbers_list_8(unsigned int, "0x%x", delim, "x", check_uint);
+ numbers_list_8(int, "0x%x", delim, "i", check_int);
+
+ numbers_list_8(unsigned short, "%hu", delim, "hu", check_ushort);
+ numbers_list_8(short, "%hd", delim, "hd", check_short);
+ numbers_list_8(short, "%hd", delim, "hi", check_short);
+ numbers_list_8(unsigned short, "%hx", delim, "hx", check_ushort);
+ numbers_list_8(unsigned short, "0x%hx", delim, "hx", check_ushort);
+ numbers_list_8(short, "0x%hx", delim, "hi", check_short);
+
+ numbers_list_8(unsigned char, "%hhu", delim, "hhu", check_uchar);
+ numbers_list_8(signed char, "%hhd", delim, "hhd", check_char);
+ numbers_list_8(signed char, "%hhd", delim, "hhi", check_char);
+ numbers_list_8(unsigned char, "%hhx", delim, "hhx", check_uchar);
+ numbers_list_8(unsigned char, "0x%hhx", delim, "hhx", check_uchar);
+ numbers_list_8(signed char, "0x%hhx", delim, "hhi", check_char);
+}
+
+/*
+ * List of numbers separated by delim. Each field width specifier covers
+ * the maximum possible number of digits for the given type and base.
+ */
+static void __init numbers_list_field_width_typemax(const char *delim)
+{
+ numbers_list_fix_width(unsigned long long, "%llu", delim, 20, "llu", check_ull);
+ numbers_list_fix_width(long long, "%lld", delim, 20, "lld", check_ll);
+ numbers_list_fix_width(long long, "%lld", delim, 20, "lli", check_ll);
+ numbers_list_fix_width(unsigned long long, "%llx", delim, 16, "llx", check_ull);
+ numbers_list_fix_width(unsigned long long, "0x%llx", delim, 18, "llx", check_ull);
+ numbers_list_fix_width(long long, "0x%llx", delim, 18, "lli", check_ll);
+
+#if BITS_PER_LONG == 64
+ numbers_list_fix_width(unsigned long, "%lu", delim, 20, "lu", check_ulong);
+ numbers_list_fix_width(long, "%ld", delim, 20, "ld", check_long);
+ numbers_list_fix_width(long, "%ld", delim, 20, "li", check_long);
+ numbers_list_fix_width(unsigned long, "%lx", delim, 16, "lx", check_ulong);
+ numbers_list_fix_width(unsigned long, "0x%lx", delim, 18, "lx", check_ulong);
+ numbers_list_fix_width(long, "0x%lx", delim, 18, "li", check_long);
+#else
+ numbers_list_fix_width(unsigned long, "%lu", delim, 10, "lu", check_ulong);
+ numbers_list_fix_width(long, "%ld", delim, 11, "ld", check_long);
+ numbers_list_fix_width(long, "%ld", delim, 11, "li", check_long);
+ numbers_list_fix_width(unsigned long, "%lx", delim, 8, "lx", check_ulong);
+ numbers_list_fix_width(unsigned long, "0x%lx", delim, 10, "lx", check_ulong);
+ numbers_list_fix_width(long, "0x%lx", delim, 10, "li", check_long);
+#endif
+
+ numbers_list_fix_width(unsigned int, "%u", delim, 10, "u", check_uint);
+ numbers_list_fix_width(int, "%d", delim, 11, "d", check_int);
+ numbers_list_fix_width(int, "%d", delim, 11, "i", check_int);
+ numbers_list_fix_width(unsigned int, "%x", delim, 8, "x", check_uint);
+ numbers_list_fix_width(unsigned int, "0x%x", delim, 10, "x", check_uint);
+ numbers_list_fix_width(int, "0x%x", delim, 10, "i", check_int);
+
+ numbers_list_fix_width(unsigned short, "%hu", delim, 5, "hu", check_ushort);
+ numbers_list_fix_width(short, "%hd", delim, 6, "hd", check_short);
+ numbers_list_fix_width(short, "%hd", delim, 6, "hi", check_short);
+ numbers_list_fix_width(unsigned short, "%hx", delim, 4, "hx", check_ushort);
+ numbers_list_fix_width(unsigned short, "0x%hx", delim, 6, "hx", check_ushort);
+ numbers_list_fix_width(short, "0x%hx", delim, 6, "hi", check_short);
+
+ numbers_list_fix_width(unsigned char, "%hhu", delim, 3, "hhu", check_uchar);
+ numbers_list_fix_width(signed char, "%hhd", delim, 4, "hhd", check_char);
+ numbers_list_fix_width(signed char, "%hhd", delim, 4, "hhi", check_char);
+ numbers_list_fix_width(unsigned char, "%hhx", delim, 2, "hhx", check_uchar);
+ numbers_list_fix_width(unsigned char, "0x%hhx", delim, 4, "hhx", check_uchar);
+ numbers_list_fix_width(signed char, "0x%hhx", delim, 4, "hhi", check_char);
+}
+
+/*
+ * List of numbers separated by delim. Each field width specifier is the
+ * exact length of the corresponding value digits in the string being scanned.
+ */
+static void __init numbers_list_field_width_val_width(const char *delim)
+{
+ numbers_list_val_width(unsigned long long, "%llu", delim, "llu", check_ull);
+ numbers_list_val_width(long long, "%lld", delim, "lld", check_ll);
+ numbers_list_val_width(long long, "%lld", delim, "lli", check_ll);
+ numbers_list_val_width(unsigned long long, "%llx", delim, "llx", check_ull);
+ numbers_list_val_width(unsigned long long, "0x%llx", delim, "llx", check_ull);
+ numbers_list_val_width(long long, "0x%llx", delim, "lli", check_ll);
+
+ numbers_list_val_width(unsigned long, "%lu", delim, "lu", check_ulong);
+ numbers_list_val_width(long, "%ld", delim, "ld", check_long);
+ numbers_list_val_width(long, "%ld", delim, "li", check_long);
+ numbers_list_val_width(unsigned long, "%lx", delim, "lx", check_ulong);
+ numbers_list_val_width(unsigned long, "0x%lx", delim, "lx", check_ulong);
+ numbers_list_val_width(long, "0x%lx", delim, "li", check_long);
+
+ numbers_list_val_width(unsigned int, "%u", delim, "u", check_uint);
+ numbers_list_val_width(int, "%d", delim, "d", check_int);
+ numbers_list_val_width(int, "%d", delim, "i", check_int);
+ numbers_list_val_width(unsigned int, "%x", delim, "x", check_uint);
+ numbers_list_val_width(unsigned int, "0x%x", delim, "x", check_uint);
+ numbers_list_val_width(int, "0x%x", delim, "i", check_int);
+
+ numbers_list_val_width(unsigned short, "%hu", delim, "hu", check_ushort);
+ numbers_list_val_width(short, "%hd", delim, "hd", check_short);
+ numbers_list_val_width(short, "%hd", delim, "hi", check_short);
+ numbers_list_val_width(unsigned short, "%hx", delim, "hx", check_ushort);
+ numbers_list_val_width(unsigned short, "0x%hx", delim, "hx", check_ushort);
+ numbers_list_val_width(short, "0x%hx", delim, "hi", check_short);
+
+ numbers_list_val_width(unsigned char, "%hhu", delim, "hhu", check_uchar);
+ numbers_list_val_width(signed char, "%hhd", delim, "hhd", check_char);
+ numbers_list_val_width(signed char, "%hhd", delim, "hhi", check_char);
+ numbers_list_val_width(unsigned char, "%hhx", delim, "hhx", check_uchar);
+ numbers_list_val_width(unsigned char, "0x%hhx", delim, "hhx", check_uchar);
+ numbers_list_val_width(signed char, "0x%hhx", delim, "hhi", check_char);
+}
+
+/*
+ * Slice a continuous string of digits without field delimiters, containing
+ * numbers of varying length, using the field width to extract each group
+ * of digits. For example the hex values c0,3,bf01,303 would have a
+ * string representation of "c03bf01303" and be extracted with "%2x%1x%4x%3x".
+ */
+static void __init numbers_slice(void)
+{
+ numbers_list_field_width_val_width("");
+}
+
+#define test_number_prefix(T, str, scan_fmt, expect0, expect1, n_args, fn) \
+do { \
+ const T expect[2] = { expect0, expect1 }; \
+ T result[2] = {~expect[0], ~expect[1]}; \
+ \
+ _test(fn, &expect, str, scan_fmt, n_args, &result[0], &result[1]); \
+} while (0)
+
+/*
+ * Number prefix is >= field width.
+ * Expected behaviour is derived from testing userland sscanf.
+ */
+static void __init numbers_prefix_overflow(void)
+{
+ /*
+	 * A negative decimal with a field width of 1: scanning should stop
+	 * and return 0.
+ */
+ test_number_prefix(long long, "-1 1", "%1lld %lld", 0, 0, 0, check_ll);
+ test_number_prefix(long, "-1 1", "%1ld %ld", 0, 0, 0, check_long);
+ test_number_prefix(int, "-1 1", "%1d %d", 0, 0, 0, check_int);
+ test_number_prefix(short, "-1 1", "%1hd %hd", 0, 0, 0, check_short);
+ test_number_prefix(signed char, "-1 1", "%1hhd %hhd", 0, 0, 0, check_char);
+
+ test_number_prefix(long long, "-1 1", "%1lli %lli", 0, 0, 0, check_ll);
+ test_number_prefix(long, "-1 1", "%1li %li", 0, 0, 0, check_long);
+ test_number_prefix(int, "-1 1", "%1i %i", 0, 0, 0, check_int);
+ test_number_prefix(short, "-1 1", "%1hi %hi", 0, 0, 0, check_short);
+ test_number_prefix(signed char, "-1 1", "%1hhi %hhi", 0, 0, 0, check_char);
+
+ /*
+ * 0x prefix in a field of width 1: 0 is a valid digit so should
+ * convert. Next field scan starts at the 'x' which isn't a digit so
+ * scan quits with one field converted.
+ */
+ test_number_prefix(unsigned long long, "0xA7", "%1llx%llx", 0, 0, 1, check_ull);
+ test_number_prefix(unsigned long, "0xA7", "%1lx%lx", 0, 0, 1, check_ulong);
+ test_number_prefix(unsigned int, "0xA7", "%1x%x", 0, 0, 1, check_uint);
+ test_number_prefix(unsigned short, "0xA7", "%1hx%hx", 0, 0, 1, check_ushort);
+ test_number_prefix(unsigned char, "0xA7", "%1hhx%hhx", 0, 0, 1, check_uchar);
+ test_number_prefix(long long, "0xA7", "%1lli%llx", 0, 0, 1, check_ll);
+ test_number_prefix(long, "0xA7", "%1li%lx", 0, 0, 1, check_long);
+ test_number_prefix(int, "0xA7", "%1i%x", 0, 0, 1, check_int);
+ test_number_prefix(short, "0xA7", "%1hi%hx", 0, 0, 1, check_short);
+ test_number_prefix(char, "0xA7", "%1hhi%hhx", 0, 0, 1, check_char);
+
+ /*
+ * 0x prefix in a field of width 2 using %x conversion: first field
+ * converts to 0. Next field scan starts at the character after "0x".
+ * Both fields will convert.
+ */
+ test_number_prefix(unsigned long long, "0xA7", "%2llx%llx", 0, 0xa7, 2, check_ull);
+ test_number_prefix(unsigned long, "0xA7", "%2lx%lx", 0, 0xa7, 2, check_ulong);
+ test_number_prefix(unsigned int, "0xA7", "%2x%x", 0, 0xa7, 2, check_uint);
+ test_number_prefix(unsigned short, "0xA7", "%2hx%hx", 0, 0xa7, 2, check_ushort);
+ test_number_prefix(unsigned char, "0xA7", "%2hhx%hhx", 0, 0xa7, 2, check_uchar);
+
+ /*
+ * 0x prefix in a field of width 2 using %i conversion: first field
+ * converts to 0. Next field scan starts at the character after "0x",
+	 * which will convert if it can be interpreted as decimal but will fail
+ * if it contains any hex digits (since no 0x prefix).
+ */
+ test_number_prefix(long long, "0x67", "%2lli%lli", 0, 67, 2, check_ll);
+ test_number_prefix(long, "0x67", "%2li%li", 0, 67, 2, check_long);
+ test_number_prefix(int, "0x67", "%2i%i", 0, 67, 2, check_int);
+ test_number_prefix(short, "0x67", "%2hi%hi", 0, 67, 2, check_short);
+ test_number_prefix(char, "0x67", "%2hhi%hhi", 0, 67, 2, check_char);
+
+ test_number_prefix(long long, "0xA7", "%2lli%lli", 0, 0, 1, check_ll);
+ test_number_prefix(long, "0xA7", "%2li%li", 0, 0, 1, check_long);
+ test_number_prefix(int, "0xA7", "%2i%i", 0, 0, 1, check_int);
+ test_number_prefix(short, "0xA7", "%2hi%hi", 0, 0, 1, check_short);
+ test_number_prefix(char, "0xA7", "%2hhi%hhi", 0, 0, 1, check_char);
+}
+
+#define _test_simple_strtoxx(T, fn, gen_fmt, expect, base) \
+do { \
+ T got; \
+ char *endp; \
+ int len; \
+ bool fail = false; \
+ \
+ total_tests++; \
+ len = snprintf(test_buffer, BUF_SIZE, gen_fmt, expect); \
+ got = (fn)(test_buffer, &endp, base); \
+ pr_debug(#fn "(\"%s\", %d) -> " gen_fmt "\n", test_buffer, base, got); \
+ if (got != (expect)) { \
+ fail = true; \
+ pr_warn(#fn "(\"%s\", %d): got " gen_fmt " expected " gen_fmt "\n", \
+ test_buffer, base, got, expect); \
+ } else if (endp != test_buffer + len) { \
+ fail = true; \
+ pr_warn(#fn "(\"%s\", %d) startp=0x%px got endp=0x%px expected 0x%px\n", \
+			test_buffer, base, test_buffer,			\
+			endp, test_buffer + len);			\
+ } \
+ \
+ if (fail) \
+ failed_tests++; \
+} while (0)
+
+#define test_simple_strtoxx(T, fn, gen_fmt, base) \
+do { \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(numbers); i++) { \
+ _test_simple_strtoxx(T, fn, gen_fmt, (T)numbers[i], base); \
+ \
+ if (is_signed_type(T)) \
+ _test_simple_strtoxx(T, fn, gen_fmt, \
+ -(T)numbers[i], base); \
+ } \
+} while (0)
+
+static void __init test_simple_strtoull(void)
+{
+ test_simple_strtoxx(unsigned long long, simple_strtoull, "%llu", 10);
+ test_simple_strtoxx(unsigned long long, simple_strtoull, "%llu", 0);
+ test_simple_strtoxx(unsigned long long, simple_strtoull, "%llx", 16);
+ test_simple_strtoxx(unsigned long long, simple_strtoull, "0x%llx", 16);
+ test_simple_strtoxx(unsigned long long, simple_strtoull, "0x%llx", 0);
+}
+
+static void __init test_simple_strtoll(void)
+{
+ test_simple_strtoxx(long long, simple_strtoll, "%lld", 10);
+ test_simple_strtoxx(long long, simple_strtoll, "%lld", 0);
+ test_simple_strtoxx(long long, simple_strtoll, "%llx", 16);
+ test_simple_strtoxx(long long, simple_strtoll, "0x%llx", 16);
+ test_simple_strtoxx(long long, simple_strtoll, "0x%llx", 0);
+}
+
+static void __init test_simple_strtoul(void)
+{
+ test_simple_strtoxx(unsigned long, simple_strtoul, "%lu", 10);
+ test_simple_strtoxx(unsigned long, simple_strtoul, "%lu", 0);
+ test_simple_strtoxx(unsigned long, simple_strtoul, "%lx", 16);
+ test_simple_strtoxx(unsigned long, simple_strtoul, "0x%lx", 16);
+ test_simple_strtoxx(unsigned long, simple_strtoul, "0x%lx", 0);
+}
+
+static void __init test_simple_strtol(void)
+{
+ test_simple_strtoxx(long, simple_strtol, "%ld", 10);
+ test_simple_strtoxx(long, simple_strtol, "%ld", 0);
+ test_simple_strtoxx(long, simple_strtol, "%lx", 16);
+ test_simple_strtoxx(long, simple_strtol, "0x%lx", 16);
+ test_simple_strtoxx(long, simple_strtol, "0x%lx", 0);
+}
+
+/* Selection of common delimiters/separators between numbers in a string. */
+static const char * const number_delimiters[] __initconst = {
+ " ", ":", ",", "-", "/",
+};
+
+static void __init test_numbers(void)
+{
+ int i;
+
+ /* String containing only one number. */
+ numbers_simple();
+
+ /* String with multiple numbers separated by delimiter. */
+ for (i = 0; i < ARRAY_SIZE(number_delimiters); i++) {
+ numbers_list(number_delimiters[i]);
+
+ /* Field width may be longer than actual field digits. */
+ numbers_list_field_width_typemax(number_delimiters[i]);
+
+ /* Each field width exactly length of actual field digits. */
+ numbers_list_field_width_val_width(number_delimiters[i]);
+ }
+
+ /* Slice continuous sequence of digits using field widths. */
+ numbers_slice();
+
+ numbers_prefix_overflow();
+}
+
+static void __init selftest(void)
+{
+ test_buffer = kmalloc(BUF_SIZE, GFP_KERNEL);
+ if (!test_buffer)
+ return;
+
+ fmt_buffer = kmalloc(BUF_SIZE, GFP_KERNEL);
+ if (!fmt_buffer) {
+ kfree(test_buffer);
+ return;
+ }
+
+ prandom_seed_state(&rnd_state, 3141592653589793238ULL);
+
+ test_numbers();
+
+ test_simple_strtoull();
+ test_simple_strtoll();
+ test_simple_strtoul();
+ test_simple_strtol();
+
+ kfree(fmt_buffer);
+ kfree(test_buffer);
+}
+
+KSTM_MODULE_LOADERS(test_scanf);
+MODULE_AUTHOR("Richard Fitzgerald <rf@opensource.cirrus.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/lib/test_string.c b/lib/test_string.c
index 7b31f4a505bf..9dfd6f52de92 100644
--- a/lib/test_string.c
+++ b/lib/test_string.c
@@ -179,6 +179,10 @@ static __init int strnchr_selftest(void)
return 0;
}
+static __exit void string_selftest_remove(void)
+{
+}
+
static __init int string_selftest_init(void)
{
int test, subtest;
@@ -216,4 +220,5 @@ fail:
}
module_init(string_selftest_init);
+module_exit(string_selftest_remove);
MODULE_LICENSE("GPL v2");
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index f0c35d9b65bf..2926cc27623f 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -53,6 +53,31 @@
#include <linux/string_helpers.h>
#include "kstrtox.h"
+static unsigned long long simple_strntoull(const char *startp, size_t max_chars,
+ char **endp, unsigned int base)
+{
+ const char *cp;
+ unsigned long long result = 0ULL;
+ size_t prefix_chars;
+ unsigned int rv;
+
+ cp = _parse_integer_fixup_radix(startp, &base);
+ prefix_chars = cp - startp;
+ if (prefix_chars < max_chars) {
+ rv = _parse_integer_limit(cp, base, &result, max_chars - prefix_chars);
+ /* FIXME */
+ cp += (rv & ~KSTRTOX_OVERFLOW);
+ } else {
+ /* Field too short for prefix + digit, skip over without converting */
+ cp = startp + max_chars;
+ }
+
+ if (endp)
+ *endp = (char *)cp;
+
+ return result;
+}
+
/**
* simple_strtoull - convert a string to an unsigned long long
* @cp: The start of the string
@@ -61,20 +86,10 @@
*
* This function has caveats. Please use kstrtoull instead.
*/
+noinline
unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base)
{
- unsigned long long result;
- unsigned int rv;
-
- cp = _parse_integer_fixup_radix(cp, &base);
- rv = _parse_integer(cp, base, &result);
- /* FIXME */
- cp += (rv & ~KSTRTOX_OVERFLOW);
-
- if (endp)
- *endp = (char *)cp;
-
- return result;
+ return simple_strntoull(cp, INT_MAX, endp, base);
}
EXPORT_SYMBOL(simple_strtoull);
@@ -109,6 +124,21 @@ long simple_strtol(const char *cp, char **endp, unsigned int base)
}
EXPORT_SYMBOL(simple_strtol);
+static long long simple_strntoll(const char *cp, size_t max_chars, char **endp,
+ unsigned int base)
+{
+ /*
+	 * If cp[0] == '-' && max_chars == 1, simple_strntoull() is called
+	 * with max_chars == 0, which it handles safely.
+	 * If max_chars == 0 on entry we likewise drop through to
+	 * simple_strntoull(); the content of *cp is then irrelevant.
+ */
+ if (*cp == '-' && max_chars > 0)
+ return -simple_strntoull(cp + 1, max_chars - 1, endp, base);
+
+ return simple_strntoull(cp, max_chars, endp, base);
+}
+
/**
* simple_strtoll - convert a string to a signed long long
* @cp: The start of the string
@@ -119,10 +149,7 @@ EXPORT_SYMBOL(simple_strtol);
*/
long long simple_strtoll(const char *cp, char **endp, unsigned int base)
{
- if (*cp == '-')
- return -simple_strtoull(cp + 1, endp, base);
-
- return simple_strtoull(cp, endp, base);
+ return simple_strntoll(cp, INT_MAX, endp, base);
}
EXPORT_SYMBOL(simple_strtoll);
@@ -1834,7 +1861,8 @@ char *rtc_str(char *buf, char *end, const struct rtc_time *tm,
struct printf_spec spec, const char *fmt)
{
bool have_t = true, have_d = true;
- bool raw = false;
+ bool raw = false, iso8601_separator = true;
+ bool found = true;
int count = 2;
if (check_pointer(&buf, end, tm, spec))
@@ -1851,14 +1879,25 @@ char *rtc_str(char *buf, char *end, const struct rtc_time *tm,
break;
}
- raw = fmt[count] == 'r';
+ do {
+ switch (fmt[count++]) {
+ case 'r':
+ raw = true;
+ break;
+ case 's':
+ iso8601_separator = false;
+ break;
+ default:
+ found = false;
+ break;
+ }
+ } while (found);
if (have_d)
buf = date_str(buf, end, tm, raw);
if (have_d && have_t) {
- /* Respect ISO 8601 */
if (buf < end)
- *buf = 'T';
+ *buf = iso8601_separator ? 'T' : ' ';
buf++;
}
if (have_t)
@@ -2186,7 +2225,7 @@ char *fwnode_string(char *buf, char *end, struct fwnode_handle *fwnode,
bool no_hash_pointers __ro_after_init;
EXPORT_SYMBOL_GPL(no_hash_pointers);
-static int __init no_hash_pointers_enable(char *str)
+int __init no_hash_pointers_enable(char *str)
{
if (no_hash_pointers)
return 0;
@@ -2298,7 +2337,7 @@ early_param("no_hash_pointers", no_hash_pointers_enable);
* - 'd[234]' For a dentry name (optionally 2-4 last components)
* - 'D[234]' Same as 'd' but for a struct file
* - 'g' For block_device name (gendisk + partition number)
- * - 't[RT][dt][r]' For time and date as represented by:
+ * - 't[RT][dt][r][s]' For time and date as represented by:
* R struct rtc_time
* T time64_t
* - 'C' For a clock, it prints the name (Common Clock Framework) or address
@@ -3565,8 +3604,12 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
str = skip_spaces(str);
digit = *str;
- if (is_sign && digit == '-')
+ if (is_sign && digit == '-') {
+ if (field_width == 1)
+ break;
+
digit = *(str + 1);
+ }
if (!digit
|| (base == 16 && !isxdigit(digit))
@@ -3576,25 +3619,13 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
break;
if (is_sign)
- val.s = qualifier != 'L' ?
- simple_strtol(str, &next, base) :
- simple_strtoll(str, &next, base);
+ val.s = simple_strntoll(str,
+ field_width >= 0 ? field_width : INT_MAX,
+ &next, base);
else
- val.u = qualifier != 'L' ?
- simple_strtoul(str, &next, base) :
- simple_strtoull(str, &next, base);
-
- if (field_width > 0 && next - str > field_width) {
- if (base == 0)
- _parse_integer_fixup_radix(str, &base);
- while (next - str > field_width) {
- if (is_sign)
- val.s = div_s64(val.s, base);
- else
- val.u = div_u64(val.u, base);
- --next;
- }
- }
+ val.u = simple_strntoull(str,
+ field_width >= 0 ? field_width : INT_MAX,
+ &next, base);
switch (qualifier) {
case 'H': /* that's 'hh' in format */
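The net effect of replacing the divide-down loop with simple_strntoll()/simple_strntoull() is that a field width now bounds how many characters are consumed, matching userspace sscanf(). For example:

	int v;
	char rest[8];

	sscanf("123456", "%3d%s", &v, rest);	/* v == 123, rest == "456" */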
diff --git a/lib/xz/xz_dec_bcj.c b/lib/xz/xz_dec_bcj.c
index 72ddac6ef2ec..ef449e97d1a1 100644
--- a/lib/xz/xz_dec_bcj.c
+++ b/lib/xz/xz_dec_bcj.c
@@ -422,7 +422,7 @@ XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s,
/*
* Flush pending already filtered data to the output buffer. Return
- * immediatelly if we couldn't flush everything, or if the next
+ * immediately if we couldn't flush everything, or if the next
* filter in the chain had already returned XZ_STREAM_END.
*/
if (s->temp.filtered > 0) {
diff --git a/lib/xz/xz_dec_lzma2.c b/lib/xz/xz_dec_lzma2.c
index ca2603abee08..7a6781e3f47b 100644
--- a/lib/xz/xz_dec_lzma2.c
+++ b/lib/xz/xz_dec_lzma2.c
@@ -147,8 +147,8 @@ struct lzma_dec {
/*
* LZMA properties or related bit masks (number of literal
- * context bits, a mask dervied from the number of literal
- * position bits, and a mask dervied from the number
+ * context bits, a mask derived from the number of literal
+ * position bits, and a mask derived from the number
* position bits)
*/
uint32_t lc;
@@ -484,7 +484,7 @@ static __always_inline void rc_normalize(struct rc_dec *rc)
}
/*
- * Decode one bit. In some versions, this function has been splitted in three
+ * Decode one bit. In some versions, this function has been split in three
* functions so that the compiler is supposed to be able to more easily avoid
* an extra branch. In this particular version of the LZMA decoder, this
* doesn't seem to be a good idea (tested with GCC 3.3.6, 3.4.6, and 4.3.3
@@ -761,7 +761,7 @@ static bool lzma_main(struct xz_dec_lzma2 *s)
}
/*
- * Reset the LZMA decoder and range decoder state. Dictionary is nore reset
+ * Reset the LZMA decoder and range decoder state. Dictionary is not reset
* here, because LZMA state may be reset without resetting the dictionary.
*/
static void lzma_reset(struct xz_dec_lzma2 *s)
diff --git a/lib/zlib_inflate/inffast.c b/lib/zlib_inflate/inffast.c
index ed1f3df27260..f19c4fbe1be7 100644
--- a/lib/zlib_inflate/inffast.c
+++ b/lib/zlib_inflate/inffast.c
@@ -15,7 +15,7 @@ union uu {
unsigned char b[2];
};
-/* Endian independed version */
+/* Endian independent version */
static inline unsigned short
get_unaligned16(const unsigned short *p)
{
diff --git a/lib/zstd/huf.h b/lib/zstd/huf.h
index 2143da28d952..923218d12e28 100644
--- a/lib/zstd/huf.h
+++ b/lib/zstd/huf.h
@@ -134,7 +134,7 @@ typedef enum {
HUF_repeat_none, /**< Cannot use the previous table */
HUF_repeat_check, /**< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1,
4}X_repeat */
- HUF_repeat_valid /**< Can use the previous table and it is asumed to be valid */
+ HUF_repeat_valid /**< Can use the previous table and it is assumed to be valid */
} HUF_repeat;
/** HUF_compress4X_repeat() :
* Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.