author		Linus Torvalds <torvalds@linux-foundation.org>	2020-10-16 11:31:55 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-10-16 11:31:55 -0700
commit		c4cf498dc0241fa2d758dba177634268446afb06 (patch)
tree		77e86dd4f211c6681ff9fbab481295732dc23422 /lib
parent		9ff9b0d392ea08090cd1780fb196f36dbb586529 (diff)
parent		4d0e9df5e43dba52d38b251e3b909df8fa1110be (diff)
download	linux-c4cf498dc0241fa2d758dba177634268446afb06.tar.gz
		linux-c4cf498dc0241fa2d758dba177634268446afb06.tar.bz2
		linux-c4cf498dc0241fa2d758dba177634268446afb06.zip
Merge branch 'akpm' (patches from Andrew)
Merge more updates from Andrew Morton:
 "155 patches.

  Subsystems affected by this patch series: mm (dax, debug, thp,
  readahead, page-poison, util, memory-hotplug, zram, cleanups), misc,
  core-kernel, get_maintainer, MAINTAINERS, lib, bitops, checkpatch,
  binfmt, ramfs, autofs, nilfs, rapidio, panic, relay, kgdb, ubsan,
  romfs, and fault-injection"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (155 commits)
  lib, uaccess: add failure injection to usercopy functions
  lib, include/linux: add usercopy failure capability
  ROMFS: support inode blocks calculation
  ubsan: introduce CONFIG_UBSAN_LOCAL_BOUNDS for Clang
  sched.h: drop in_ubsan field when UBSAN is in trap mode
  scripts/gdb/tasks: add headers and improve spacing format
  scripts/gdb/proc: add struct mount & struct super_block addr in lx-mounts command
  kernel/relay.c: drop unneeded initialization
  panic: dump registers on panic_on_warn
  rapidio: fix the missed put_device() for rio_mport_add_riodev
  rapidio: fix error handling path
  nilfs2: fix some kernel-doc warnings for nilfs2
  autofs: harden ioctl table
  ramfs: fix nommu mmap with gaps in the page cache
  mm: remove the now-unnecessary mmget_still_valid() hack
  mm/gup: take mmap_lock in get_dump_page()
  binfmt_elf, binfmt_elf_fdpic: use a VMA list snapshot
  coredump: rework elf/elf_fdpic vma_dump_size() into common helper
  coredump: refactor page range dumping into common helper
  coredump: let dump_emit() bail out on short writes
  ...
Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug             7
-rw-r--r--  lib/Kconfig.ubsan            14
-rw-r--r--  lib/Makefile                  1
-rw-r--r--  lib/bitmap.c                  2
-rw-r--r--  lib/crc32.c                   2
-rw-r--r--  lib/decompress_bunzip2.c      2
-rw-r--r--  lib/dynamic_queue_limits.c    4
-rw-r--r--  lib/earlycpio.c               2
-rw-r--r--  lib/fault-inject-usercopy.c  39
-rw-r--r--  lib/find_bit.c                1
-rw-r--r--  lib/hexdump.c                 1
-rw-r--r--  lib/idr.c                     9
-rw-r--r--  lib/iov_iter.c                5
-rw-r--r--  lib/libcrc32c.c               2
-rw-r--r--  lib/math/rational.c           2
-rw-r--r--  lib/math/reciprocal_div.c     1
-rw-r--r--  lib/mpi/mpi-bit.c             2
-rw-r--r--  lib/percpu_counter.c          2
-rw-r--r--  lib/radix-tree.c              2
-rw-r--r--  lib/scatterlist.c             2
-rw-r--r--  lib/strncpy_from_user.c       3
-rw-r--r--  lib/syscall.c                 2
-rw-r--r--  lib/test_hmm.c                2
-rw-r--r--  lib/test_sysctl.c             2
-rw-r--r--  lib/test_xarray.c            65
-rw-r--r--  lib/usercopy.c                5
-rw-r--r--  lib/xarray.c                208
27 files changed, 361 insertions(+), 28 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 491789a793ae..ebe5ab111e65 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1768,6 +1768,13 @@ config FAIL_PAGE_ALLOC
help
Provide fault-injection capability for alloc_pages().
+config FAULT_INJECTION_USERCOPY
+ bool "Fault injection capability for usercopy functions"
+ depends on FAULT_INJECTION
+ help
+ Provides fault-injection capability to inject failures
+ in usercopy functions (copy_from_user(), get_user(), ...).
+
config FAIL_MAKE_REQUEST
bool "Fault-injection capability for disk IO"
depends on FAULT_INJECTION && BLOCK
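The point of the new option is to force the rarely-taken error branches after usercopy calls to actually run. A minimal sketch of the kind of caller it exercises (hypothetical code; demo_ioctl() and struct demo_args are illustrative, not part of this series):

    #include <linux/fs.h>
    #include <linux/types.h>
    #include <linux/uaccess.h>

    struct demo_args {
            u32 flags;
    };

    static long demo_ioctl(struct file *file, unsigned int cmd,
                           unsigned long arg)
    {
            struct demo_args args;

            /*
             * With fail_usercopy armed, copy_from_user() reports a
             * failure even for a valid user pointer, so the -EFAULT
             * path below finally gets test coverage.
             */
            if (copy_from_user(&args, (void __user *)arg, sizeof(args)))
                    return -EFAULT;

            return 0;
    }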
diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan
index 774315de555a..58f8d03d037b 100644
--- a/lib/Kconfig.ubsan
+++ b/lib/Kconfig.ubsan
@@ -47,6 +47,20 @@ config UBSAN_BOUNDS
to the {str,mem}*cpy() family of functions (that is addressed
by CONFIG_FORTIFY_SOURCE).
+config UBSAN_LOCAL_BOUNDS
+ bool "Perform array local bounds checking"
+ depends on UBSAN_TRAP
+ depends on CC_IS_CLANG
+ depends on !UBSAN_KCOV_BROKEN
+ help
+ This option enables -fsanitize=local-bounds which traps when an
+ exception/error is detected. Therefore, it should be enabled only
+ if trapping is expected.
+ Enabling this option detects errors due to accesses through a
+ pointer that is derived from an object of a statically-known size,
+ where an added offset (which may not be known statically) is
+ out-of-bounds.
+
config UBSAN_MISC
bool "Enable all other Undefined Behavior sanity checks"
default UBSAN
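To make the help text concrete, here is a sketch of the bug class that -fsanitize=local-bounds traps on (hypothetical code, not from this series):

    static int table[16];

    int demo_lookup(unsigned int idx)
    {
            /*
             * 'table' has a statically-known size, but 'idx' comes from
             * the caller; local-bounds instruments this access and traps
             * at runtime if idx >= 16, instead of silently reading out
             * of bounds.
             */
            return table[idx];
    }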
diff --git a/lib/Makefile b/lib/Makefile
index 49a2a9e36224..1c7577b2e86a 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -210,6 +210,7 @@ obj-$(CONFIG_AUDIT_COMPAT_GENERIC) += compat_audit.o
obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o
obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o
+obj-$(CONFIG_FAULT_INJECTION_USERCOPY) += fault-inject-usercopy.o
obj-$(CONFIG_NOTIFIER_ERROR_INJECTION) += notifier-error-inject.o
obj-$(CONFIG_PM_NOTIFIER_ERROR_INJECT) += pm-notifier-error-inject.o
obj-$(CONFIG_NETDEV_NOTIFIER_ERROR_INJECT) += netdev-notifier-error-inject.o
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 61394890788e..75006c4036e9 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -23,7 +23,7 @@
/**
* DOC: bitmap introduction
*
- * bitmaps provide an array of bits, implemented using an an
+ * bitmaps provide an array of bits, implemented using an
* array of unsigned longs. The number of valid bits in a
* given bitmap does _not_ need to be an exact multiple of
* BITS_PER_LONG.
diff --git a/lib/crc32.c b/lib/crc32.c
index 35a03d03f973..2a68dfd3b96c 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -331,7 +331,7 @@ static inline u32 __pure crc32_be_generic(u32 crc, unsigned char const *p,
return crc;
}
-#if CRC_LE_BITS == 1
+#if CRC_BE_BITS == 1
u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
{
return crc32_be_generic(crc, p, len, NULL, CRC32_POLY_BE);
diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
index f9628f3924ce..c72c865032fa 100644
--- a/lib/decompress_bunzip2.c
+++ b/lib/decompress_bunzip2.c
@@ -390,7 +390,7 @@ static int INIT get_next_block(struct bunzip_data *bd)
j = (bd->inbufBits >> bd->inbufBitCount)&
((1 << hufGroup->maxLen)-1);
got_huff_bits:
- /* Figure how how many bits are in next symbol and
+ /* Figure how many bits are in next symbol and
* unget extras */
i = hufGroup->minLen;
while (j > limit[i])
diff --git a/lib/dynamic_queue_limits.c b/lib/dynamic_queue_limits.c
index e659a027036e..fde0aa244148 100644
--- a/lib/dynamic_queue_limits.c
+++ b/lib/dynamic_queue_limits.c
@@ -60,8 +60,8 @@ void dql_completed(struct dql *dql, unsigned int count)
* A decrease is only considered if the queue has been busy in
* the whole interval (the check above).
*
- * If there is slack, the amount of execess data queued above
- * the the amount needed to prevent starvation, the queue limit
+ * If there is slack, the amount of excess data queued above
+ * the amount needed to prevent starvation, the queue limit
* can be decreased. To avoid hysteresis we consider the
* minimum amount of slack found over several iterations of the
* completion routine.
diff --git a/lib/earlycpio.c b/lib/earlycpio.c
index c001e084829e..e83628882001 100644
--- a/lib/earlycpio.c
+++ b/lib/earlycpio.c
@@ -42,7 +42,7 @@ enum cpio_fields {
/**
* cpio_data find_cpio_data - Search for files in an uncompressed cpio
* @path: The directory to search for, including a slash at the end
- * @data: Pointer to the the cpio archive or a header inside
+ * @data: Pointer to the cpio archive or a header inside
* @len: Remaining length of the cpio based on data pointer
* @nextoff: When a matching file is found, this is the offset from the
* beginning of the cpio to the beginning of the next file, not the
diff --git a/lib/fault-inject-usercopy.c b/lib/fault-inject-usercopy.c
new file mode 100644
index 000000000000..77558b6c29ca
--- /dev/null
+++ b/lib/fault-inject-usercopy.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/fault-inject.h>
+#include <linux/fault-inject-usercopy.h>
+
+static struct {
+ struct fault_attr attr;
+} fail_usercopy = {
+ .attr = FAULT_ATTR_INITIALIZER,
+};
+
+static int __init setup_fail_usercopy(char *str)
+{
+ return setup_fault_attr(&fail_usercopy.attr, str);
+}
+__setup("fail_usercopy=", setup_fail_usercopy);
+
+#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
+
+static int __init fail_usercopy_debugfs(void)
+{
+ struct dentry *dir;
+
+ dir = fault_create_debugfs_attr("fail_usercopy", NULL,
+ &fail_usercopy.attr);
+ if (IS_ERR(dir))
+ return PTR_ERR(dir);
+
+ return 0;
+}
+
+late_initcall(fail_usercopy_debugfs);
+
+#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
+
+bool should_fail_usercopy(void)
+{
+ return should_fail(&fail_usercopy.attr, 1);
+}
+EXPORT_SYMBOL_GPL(should_fail_usercopy);
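Like the other fault_attr-based injectors, the new attribute is driven from the kernel command line via the __setup() hook above, or from debugfs when CONFIG_FAULT_INJECTION_DEBUG_FS is enabled. A usage sketch, assuming the standard <interval>,<probability>,<space>,<times> fault_attr format:

    # boot-time: fail every usercopy, with no limit on the count
    fail_usercopy=1,100,0,-1

    # run-time equivalents via the common fault_attr debugfs entries
    echo 100 > /sys/kernel/debug/fail_usercopy/probability
    echo -1 > /sys/kernel/debug/fail_usercopy/times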
diff --git a/lib/find_bit.c b/lib/find_bit.c
index 49f875f1baf7..4a8751010d59 100644
--- a/lib/find_bit.c
+++ b/lib/find_bit.c
@@ -16,6 +16,7 @@
#include <linux/bitmap.h>
#include <linux/export.h>
#include <linux/kernel.h>
+#include <linux/minmax.h>
#if !defined(find_next_bit) || !defined(find_next_zero_bit) || \
!defined(find_next_bit_le) || !defined(find_next_zero_bit_le) || \
diff --git a/lib/hexdump.c b/lib/hexdump.c
index 147133f8eb2f..9301578f98e8 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -7,6 +7,7 @@
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/kernel.h>
+#include <linux/minmax.h>
#include <linux/export.h>
#include <asm/unaligned.h>
diff --git a/lib/idr.c b/lib/idr.c
index c2cf2c52bbde..3fa8be43696f 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -372,7 +372,8 @@ EXPORT_SYMBOL(idr_replace);
* Allocate an ID between @min and @max, inclusive. The allocated ID will
* not exceed %INT_MAX, even if @max is larger.
*
- * Context: Any context.
+ * Context: Any context. It is safe to call this function without
+ * locking in your code.
* Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
* or %-ENOSPC if there are no free IDs.
*/
@@ -479,7 +480,8 @@ EXPORT_SYMBOL(ida_alloc_range);
* @ida: IDA handle.
* @id: Previously allocated ID.
*
- * Context: Any context.
+ * Context: Any context. It is safe to call this function without
+ * locking in your code.
*/
void ida_free(struct ida *ida, unsigned int id)
{
@@ -531,7 +533,8 @@ EXPORT_SYMBOL(ida_free);
* or freed. If the IDA is already empty, there is no need to call this
* function.
*
- * Context: Any context.
+ * Context: Any context. It is safe to call this function without
+ * locking in your code.
*/
void ida_destroy(struct ida *ida)
{
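The clarified wording deserves a tiny example: the IDA serializes internally, so a (hypothetical) driver needs no lock of its own around these calls:

    #include <linux/idr.h>

    static DEFINE_IDA(demo_ida);

    int demo_get_id(void)
    {
            /* No external locking needed; pass GFP_ATOMIC instead when
             * allocating from atomic context. */
            return ida_alloc_range(&demo_ida, 0, 1023, GFP_KERNEL);
    }

    void demo_put_id(int id)
    {
            ida_free(&demo_ida, id);        /* likewise safe without a lock */
    }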
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 14cae2584db4..1635111c5bd2 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -2,6 +2,7 @@
#include <crypto/hash.h>
#include <linux/export.h>
#include <linux/bvec.h>
+#include <linux/fault-inject-usercopy.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
@@ -140,6 +141,8 @@
static int copyout(void __user *to, const void *from, size_t n)
{
+ if (should_fail_usercopy())
+ return n;
if (access_ok(to, n)) {
instrument_copy_to_user(to, from, n);
n = raw_copy_to_user(to, from, n);
@@ -149,6 +152,8 @@ static int copyout(void __user *to, const void *from, size_t n)
static int copyin(void *to, const void __user *from, size_t n)
{
+ if (should_fail_usercopy())
+ return n;
if (access_ok(from, n)) {
instrument_copy_from_user(to, from, n);
n = raw_copy_from_user(to, from, n);
diff --git a/lib/libcrc32c.c b/lib/libcrc32c.c
index 77ab839644c5..5ca0d815a95d 100644
--- a/lib/libcrc32c.c
+++ b/lib/libcrc32c.c
@@ -12,7 +12,7 @@
* pages = {},
* month = {June},
*}
- * Used by the iSCSI driver, possibly others, and derived from the
+ * Used by the iSCSI driver, possibly others, and derived from
* the iscsi-crc.c module of the linux-iscsi driver at
* http://linux-iscsi.sourceforge.net.
*
diff --git a/lib/math/rational.c b/lib/math/rational.c
index df75c8809693..9781d521963d 100644
--- a/lib/math/rational.c
+++ b/lib/math/rational.c
@@ -11,7 +11,7 @@
#include <linux/rational.h>
#include <linux/compiler.h>
#include <linux/export.h>
-#include <linux/kernel.h>
+#include <linux/minmax.h>
/*
* calculate best rational approximation for a given fraction
diff --git a/lib/math/reciprocal_div.c b/lib/math/reciprocal_div.c
index bf043258fa00..32436dd4171e 100644
--- a/lib/math/reciprocal_div.c
+++ b/lib/math/reciprocal_div.c
@@ -4,6 +4,7 @@
#include <asm/div64.h>
#include <linux/reciprocal_div.h>
#include <linux/export.h>
+#include <linux/minmax.h>
/*
* For a description of the algorithm please have a look at
diff --git a/lib/mpi/mpi-bit.c b/lib/mpi/mpi-bit.c
index a5119a2bcdd4..142b680835df 100644
--- a/lib/mpi/mpi-bit.c
+++ b/lib/mpi/mpi-bit.c
@@ -1,4 +1,4 @@
-/* mpi-bit.c - MPI bit level fucntions
+/* mpi-bit.c - MPI bit level functions
* Copyright (C) 1998, 1999 Free Software Foundation, Inc.
*
* This file is part of GnuPG.
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index f61689a96e85..00f666d94486 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -85,7 +85,7 @@ void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
preempt_disable();
count = __this_cpu_read(*fbc->counters) + amount;
- if (count >= batch || count <= -batch) {
+ if (abs(count) >= batch) {
unsigned long flags;
raw_spin_lock_irqsave(&fbc->lock, flags);
fbc->count += count;
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 8e4a3a4397f2..005ced92a4a9 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -325,7 +325,7 @@ static __must_check int __radix_tree_preload(gfp_t gfp_mask, unsigned nr)
int ret = -ENOMEM;
/*
- * Nodes preloaded by one cgroup can be be used by another cgroup, so
+ * Nodes preloaded by one cgroup can be used by another cgroup, so
* they should never be accounted to any particular memory cgroup.
*/
gfp_mask &= ~__GFP_ACCOUNT;
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 5d63a8857f36..d94628fa3349 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -504,7 +504,7 @@ struct scatterlist *sgl_alloc_order(unsigned long long length,
nalloc++;
}
sgl = kmalloc_array(nalloc, sizeof(struct scatterlist),
- (gfp & ~GFP_DMA) | __GFP_ZERO);
+ gfp & ~GFP_DMA);
if (!sgl)
return NULL;
diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
index 34696a348864..e6d5fcc2cdf3 100644
--- a/lib/strncpy_from_user.c
+++ b/lib/strncpy_from_user.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/compiler.h>
#include <linux/export.h>
+#include <linux/fault-inject-usercopy.h>
#include <linux/kasan-checks.h>
#include <linux/thread_info.h>
#include <linux/uaccess.h>
@@ -99,6 +100,8 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
unsigned long max_addr, src_addr;
might_fault();
+ if (should_fail_usercopy())
+ return -EFAULT;
if (unlikely(count <= 0))
return 0;
diff --git a/lib/syscall.c b/lib/syscall.c
index fb328e7ccb08..8533d2fea2d7 100644
--- a/lib/syscall.c
+++ b/lib/syscall.c
@@ -44,7 +44,7 @@ static int collect_syscall(struct task_struct *target, struct syscall_info *info
* .data.instruction_pointer - filled with user PC
*
* If @target is blocked in a system call, returns zero with @info.data.nr
- * set to the the call's number and @info.data.args filled in with its
+ * set to the call's number and @info.data.args filled in with its
* arguments. Registers not used for system call arguments may not be available
* and it is not kosher to use &struct user_regset calls while the system
* call is still in progress. Note we may get this result if @target
diff --git a/lib/test_hmm.c b/lib/test_hmm.c
index e151a7f10519..80a78877bd93 100644
--- a/lib/test_hmm.c
+++ b/lib/test_hmm.c
@@ -461,7 +461,7 @@ static bool dmirror_allocate_chunk(struct dmirror_device *mdevice,
devmem = kzalloc(sizeof(*devmem), GFP_KERNEL);
if (!devmem)
- return -ENOMEM;
+ return false;
res = request_free_mem_region(&iomem_resource, DEVMEM_CHUNK_SIZE,
"hmm_dmirror");
diff --git a/lib/test_sysctl.c b/lib/test_sysctl.c
index 98bc92a91662..3750323973f4 100644
--- a/lib/test_sysctl.c
+++ b/lib/test_sysctl.c
@@ -16,7 +16,7 @@
*/
/*
- * This module provides an interface to the the proc sysctl interfaces. This
+ * This module provides an interface to the proc sysctl interfaces. This
* driver requires CONFIG_PROC_SYSCTL. It will not normally be loaded by the
* system unless explicitly requested by name. You can also build this driver
* into your kernel.
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index d4f97925dbd8..8262c3f05a5d 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -1503,6 +1503,49 @@ static noinline void check_store_range(struct xarray *xa)
}
}
+#ifdef CONFIG_XARRAY_MULTI
+static void check_split_1(struct xarray *xa, unsigned long index,
+ unsigned int order)
+{
+ XA_STATE(xas, xa, index);
+ void *entry;
+ unsigned int i = 0;
+
+ xa_store_order(xa, index, order, xa, GFP_KERNEL);
+
+ xas_split_alloc(&xas, xa, order, GFP_KERNEL);
+ xas_lock(&xas);
+ xas_split(&xas, xa, order);
+ xas_unlock(&xas);
+
+ xa_for_each(xa, index, entry) {
+ XA_BUG_ON(xa, entry != xa);
+ i++;
+ }
+ XA_BUG_ON(xa, i != 1 << order);
+
+ xa_set_mark(xa, index, XA_MARK_0);
+ XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));
+
+ xa_destroy(xa);
+}
+
+static noinline void check_split(struct xarray *xa)
+{
+ unsigned int order;
+
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ for (order = 1; order < 2 * XA_CHUNK_SHIFT; order++) {
+ check_split_1(xa, 0, order);
+ check_split_1(xa, 1UL << order, order);
+ check_split_1(xa, 3UL << order, order);
+ }
+}
+#else
+static void check_split(struct xarray *xa) { }
+#endif
+
static void check_align_1(struct xarray *xa, char *name)
{
int i;
@@ -1649,6 +1692,26 @@ static noinline void check_account(struct xarray *xa)
#endif
}
+static noinline void check_get_order(struct xarray *xa)
+{
+ unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
+ unsigned int order;
+ unsigned long i, j;
+
+ for (i = 0; i < 3; i++)
+ XA_BUG_ON(xa, xa_get_order(xa, i) != 0);
+
+ for (order = 0; order < max_order; order++) {
+ for (i = 0; i < 10; i++) {
+ xa_store_order(xa, i << order, order,
+ xa_mk_index(i << order), GFP_KERNEL);
+ for (j = i << order; j < (i + 1) << order; j++)
+ XA_BUG_ON(xa, xa_get_order(xa, j) != order);
+ xa_erase(xa, i << order);
+ }
+ }
+}
+
static noinline void check_destroy(struct xarray *xa)
{
unsigned long index;
@@ -1697,6 +1760,7 @@ static int xarray_checks(void)
check_reserve(&array);
check_reserve(&xa0);
check_multi_store(&array);
+ check_get_order(&array);
check_xa_alloc();
check_find(&array);
check_find_entry(&array);
@@ -1708,6 +1772,7 @@ static int xarray_checks(void)
check_store_range(&array);
check_store_iter(&array);
check_align(&xa0);
+ check_split(&array);
check_workingset(&array, 0);
check_workingset(&array, 64);
diff --git a/lib/usercopy.c b/lib/usercopy.c
index b26509f112f9..7413dd300516 100644
--- a/lib/usercopy.c
+++ b/lib/usercopy.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/bitops.h>
+#include <linux/fault-inject-usercopy.h>
#include <linux/instrumented.h>
#include <linux/uaccess.h>
@@ -10,7 +11,7 @@ unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n
{
unsigned long res = n;
might_fault();
- if (likely(access_ok(from, n))) {
+ if (!should_fail_usercopy() && likely(access_ok(from, n))) {
instrument_copy_from_user(to, from, n);
res = raw_copy_from_user(to, from, n);
}
@@ -25,6 +26,8 @@ EXPORT_SYMBOL(_copy_from_user);
unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_fault();
+ if (should_fail_usercopy())
+ return n;
if (likely(access_ok(to, n))) {
instrument_copy_to_user(to, from, n);
n = raw_copy_to_user(to, from, n);
diff --git a/lib/xarray.c b/lib/xarray.c
index e9e641d3c0c3..b76eea7b314c 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -266,13 +266,14 @@ static void xa_node_free(struct xa_node *node)
*/
static void xas_destroy(struct xa_state *xas)
{
- struct xa_node *node = xas->xa_alloc;
+ struct xa_node *next, *node = xas->xa_alloc;
- if (!node)
- return;
- XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
- kmem_cache_free(radix_tree_node_cachep, node);
- xas->xa_alloc = NULL;
+ while (node) {
+ XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
+ next = rcu_dereference_raw(node->parent);
+ radix_tree_node_rcu_free(&node->rcu_head);
+ xas->xa_alloc = node = next;
+ }
}
/**
@@ -304,6 +305,7 @@ bool xas_nomem(struct xa_state *xas, gfp_t gfp)
xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
if (!xas->xa_alloc)
return false;
+ xas->xa_alloc->parent = NULL;
XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
xas->xa_node = XAS_RESTART;
return true;
@@ -339,6 +341,7 @@ static bool __xas_nomem(struct xa_state *xas, gfp_t gfp)
}
if (!xas->xa_alloc)
return false;
+ xas->xa_alloc->parent = NULL;
XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
xas->xa_node = XAS_RESTART;
return true;
@@ -403,7 +406,7 @@ static unsigned long xas_size(const struct xa_state *xas)
/*
* Use this to calculate the maximum index that will need to be created
* in order to add the entry described by @xas. Because we cannot store a
- * multiple-index entry at index 0, the calculation is a little more complex
+ * multi-index entry at index 0, the calculation is a little more complex
* than you might expect.
*/
static unsigned long xas_max(struct xa_state *xas)
@@ -946,6 +949,153 @@ void xas_init_marks(const struct xa_state *xas)
}
EXPORT_SYMBOL_GPL(xas_init_marks);
+#ifdef CONFIG_XARRAY_MULTI
+static unsigned int node_get_marks(struct xa_node *node, unsigned int offset)
+{
+ unsigned int marks = 0;
+ xa_mark_t mark = XA_MARK_0;
+
+ for (;;) {
+ if (node_get_mark(node, offset, mark))
+ marks |= 1 << (__force unsigned int)mark;
+ if (mark == XA_MARK_MAX)
+ break;
+ mark_inc(mark);
+ }
+
+ return marks;
+}
+
+static void node_set_marks(struct xa_node *node, unsigned int offset,
+ struct xa_node *child, unsigned int marks)
+{
+ xa_mark_t mark = XA_MARK_0;
+
+ for (;;) {
+ if (marks & (1 << (__force unsigned int)mark)) {
+ node_set_mark(node, offset, mark);
+ if (child)
+ node_mark_all(child, mark);
+ }
+ if (mark == XA_MARK_MAX)
+ break;
+ mark_inc(mark);
+ }
+}
+
+/**
+ * xas_split_alloc() - Allocate memory for splitting an entry.
+ * @xas: XArray operation state.
+ * @entry: New entry which will be stored in the array.
+ * @order: New entry order.
+ * @gfp: Memory allocation flags.
+ *
+ * This function should be called before calling xas_split().
+ * If necessary, it will allocate new nodes (and fill them with @entry)
+ * to prepare for the upcoming split of an entry of @order size into
+ * entries of the order stored in the @xas.
+ *
+ * Context: May sleep if @gfp flags permit.
+ */
+void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order,
+ gfp_t gfp)
+{
+ unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
+ unsigned int mask = xas->xa_sibs;
+
+ /* XXX: no support for splitting really large entries yet */
+ if (WARN_ON(xas->xa_shift + 2 * XA_CHUNK_SHIFT < order))
+ goto nomem;
+ if (xas->xa_shift + XA_CHUNK_SHIFT > order)
+ return;
+
+ do {
+ unsigned int i;
+ void *sibling;
+ struct xa_node *node;
+
+ node = kmem_cache_alloc(radix_tree_node_cachep, gfp);
+ if (!node)
+ goto nomem;
+ node->array = xas->xa;
+ for (i = 0; i < XA_CHUNK_SIZE; i++) {
+ if ((i & mask) == 0) {
+ RCU_INIT_POINTER(node->slots[i], entry);
+ sibling = xa_mk_sibling(0);
+ } else {
+ RCU_INIT_POINTER(node->slots[i], sibling);
+ }
+ }
+ RCU_INIT_POINTER(node->parent, xas->xa_alloc);
+ xas->xa_alloc = node;
+ } while (sibs-- > 0);
+
+ return;
+nomem:
+ xas_destroy(xas);
+ xas_set_err(xas, -ENOMEM);
+}
+EXPORT_SYMBOL_GPL(xas_split_alloc);
+
+/**
+ * xas_split() - Split a multi-index entry into smaller entries.
+ * @xas: XArray operation state.
+ * @entry: New entry to store in the array.
+ * @order: New entry order.
+ *
+ * The value in the entry is copied to all the replacement entries.
+ *
+ * Context: Any context. The caller should hold the xa_lock.
+ */
+void xas_split(struct xa_state *xas, void *entry, unsigned int order)
+{
+ unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
+ unsigned int offset, marks;
+ struct xa_node *node;
+ void *curr = xas_load(xas);
+ int values = 0;
+
+ node = xas->xa_node;
+ if (xas_top(node))
+ return;
+
+ marks = node_get_marks(node, xas->xa_offset);
+
+ offset = xas->xa_offset + sibs;
+ do {
+ if (xas->xa_shift < node->shift) {
+ struct xa_node *child = xas->xa_alloc;
+
+ xas->xa_alloc = rcu_dereference_raw(child->parent);
+ child->shift = node->shift - XA_CHUNK_SHIFT;
+ child->offset = offset;
+ child->count = XA_CHUNK_SIZE;
+ child->nr_values = xa_is_value(entry) ?
+ XA_CHUNK_SIZE : 0;
+ RCU_INIT_POINTER(child->parent, node);
+ node_set_marks(node, offset, child, marks);
+ rcu_assign_pointer(node->slots[offset],
+ xa_mk_node(child));
+ if (xa_is_value(curr))
+ values--;
+ } else {
+ unsigned int canon = offset - xas->xa_sibs;
+
+ node_set_marks(node, canon, NULL, marks);
+ rcu_assign_pointer(node->slots[canon], entry);
+ while (offset > canon)
+ rcu_assign_pointer(node->slots[offset--],
+ xa_mk_sibling(canon));
+ values += (xa_is_value(entry) - xa_is_value(curr)) *
+ (xas->xa_sibs + 1);
+ }
+ } while (offset-- > xas->xa_offset);
+
+ node->nr_values += values;
+}
+EXPORT_SYMBOL_GPL(xas_split);
+#endif
+
/**
* xas_pause() - Pause a walk to drop a lock.
* @xas: XArray operation state.
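Putting the two new entry points together: a caller preallocates with xas_split_alloc() outside the lock, then performs the split under it, as in the check_split_1() test earlier in this series. A sketch, assuming 'xa' holds one order-9 'entry' at 'index' that should be split into order-0 entries:

    #include <linux/xarray.h>

    static int demo_split(struct xarray *xa, unsigned long index, void *entry)
    {
            XA_STATE(xas, xa, index);

            /* Preallocate the child nodes; may sleep under GFP_KERNEL. */
            xas_split_alloc(&xas, entry, 9, GFP_KERNEL);
            if (xas_error(&xas))
                    return xas_error(&xas);

            xas_lock(&xas);
            xas_split(&xas, entry, 9);      /* consumes the preallocation */
            xas_unlock(&xas);
            return 0;
    }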
@@ -1407,7 +1557,7 @@ EXPORT_SYMBOL(__xa_store);
* @gfp: Memory allocation flags.
*
* After this function returns, loads from this index will return @entry.
- * Storing into an existing multislot entry updates the entry of every index.
+ * Storing into an existing multi-index entry updates the entry of every index.
* The marks associated with @index are unaffected unless @entry is %NULL.
*
* Context: Any context. Takes and releases the xa_lock.
@@ -1549,7 +1699,7 @@ static void xas_set_range(struct xa_state *xas, unsigned long first,
*
* After this function returns, loads from any index between @first and @last,
* inclusive will return @entry.
- * Storing into an existing multislot entry updates the entry of every index.
+ * Storing into an existing multi-index entry updates the entry of every index.
* The marks associated with @index are unaffected unless @entry is %NULL.
*
* Context: Process context. Takes and releases the xa_lock. May sleep
@@ -1592,6 +1742,46 @@ unlock:
return xas_result(&xas, NULL);
}
EXPORT_SYMBOL(xa_store_range);
+
+/**
+ * xa_get_order() - Get the order of an entry.
+ * @xa: XArray.
+ * @index: Index of the entry.
+ *
+ * Return: A number between 0 and 63 indicating the order of the entry.
+ */
+int xa_get_order(struct xarray *xa, unsigned long index)
+{
+ XA_STATE(xas, xa, index);
+ void *entry;
+ int order = 0;
+
+ rcu_read_lock();
+ entry = xas_load(&xas);
+
+ if (!entry)
+ goto unlock;
+
+ if (!xas.xa_node)
+ goto unlock;
+
+ for (;;) {
+ unsigned int slot = xas.xa_offset + (1 << order);
+
+ if (slot >= XA_CHUNK_SIZE)
+ break;
+ if (!xa_is_sibling(xas.xa_node->slots[slot]))
+ break;
+ order++;
+ }
+
+ order += xas.xa_node->shift;
+unlock:
+ rcu_read_unlock();
+
+ return order;
+}
+EXPORT_SYMBOL(xa_get_order);
#endif /* CONFIG_XARRAY_MULTI */
/**
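Finally, a quick sketch of the new xa_get_order() in use, mirroring the check_get_order() test above (xa_store_order() is the helper from test_xarray.c, not a public API):

    /* Store one order-4 entry covering indices 16..31 ... */
    xa_store_order(xa, 16, 4, xa_mk_value(42), GFP_KERNEL);

    /* ... then every index inside that range reports order 4. */
    BUG_ON(xa_get_order(xa, 16) != 4);
    BUG_ON(xa_get_order(xa, 31) != 4);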