Diffstat (limited to 'lib')
-rw-r--r-- lib/Kconfig | 7
-rw-r--r-- lib/Kconfig.debug | 70
-rw-r--r-- lib/Kconfig.kasan | 31
-rw-r--r-- lib/Kconfig.kcsan | 5
-rw-r--r-- lib/Kconfig.kgdb | 15
-rw-r--r-- lib/Kconfig.ubsan | 14
-rw-r--r-- lib/Makefile | 8
-rw-r--r-- lib/bitfield_kunit.c (renamed from lib/test_bitfield.c) | 90
-rw-r--r-- lib/bitmap.c | 4
-rw-r--r-- lib/checksum.c | 11
-rw-r--r-- lib/crc32.c | 2
-rw-r--r-- lib/crypto/chacha20poly1305.c | 4
-rw-r--r-- lib/debugobjects.c | 55
-rw-r--r-- lib/decompress_bunzip2.c | 2
-rw-r--r-- lib/decompress_unzstd.c | 7
-rw-r--r-- lib/devres.c | 20
-rw-r--r-- lib/dynamic_debug.c | 27
-rw-r--r-- lib/dynamic_queue_limits.c | 4
-rw-r--r-- lib/earlycpio.c | 2
-rw-r--r-- lib/fault-inject-usercopy.c | 39
-rw-r--r-- lib/find_bit.c | 1
-rw-r--r-- lib/fonts/Kconfig | 7
-rw-r--r-- lib/fonts/Makefile | 1
-rw-r--r-- lib/fonts/font_6x8.c | 2576
-rw-r--r-- lib/fonts/fonts.c | 3
-rw-r--r-- lib/hexdump.c | 1
-rw-r--r-- lib/idr.c | 10
-rw-r--r-- lib/iov_iter.c | 252
-rw-r--r-- lib/kunit/Makefile | 3
-rw-r--r-- lib/kunit/executor.c | 43
-rw-r--r-- lib/kunit/test.c | 26
-rw-r--r-- lib/libcrc32c.c | 2
-rw-r--r-- lib/locking-selftest.c | 445
-rw-r--r-- lib/math/rational.c | 2
-rw-r--r-- lib/math/reciprocal_div.c | 1
-rw-r--r-- lib/mpi/Makefile | 6
-rw-r--r-- lib/mpi/ec.c | 1509
-rw-r--r-- lib/mpi/mpi-add.c | 155
-rw-r--r-- lib/mpi/mpi-bit.c | 253
-rw-r--r-- lib/mpi/mpi-cmp.c | 46
-rw-r--r-- lib/mpi/mpi-div.c | 234
-rw-r--r-- lib/mpi/mpi-internal.h | 53
-rw-r--r-- lib/mpi/mpi-inv.c | 143
-rw-r--r-- lib/mpi/mpi-mod.c | 155
-rw-r--r-- lib/mpi/mpi-mul.c | 91
-rw-r--r-- lib/mpi/mpicoder.c | 336
-rw-r--r-- lib/mpi/mpih-div.c | 294
-rw-r--r-- lib/mpi/mpih-mul.c | 25
-rw-r--r-- lib/mpi/mpiutil.c | 204
-rw-r--r-- lib/nlattr.c | 122
-rw-r--r-- lib/nmi_backtrace.c | 6
-rw-r--r-- lib/percpu-refcount.c | 131
-rw-r--r-- lib/percpu_counter.c | 6
-rw-r--r-- lib/radix-tree.c | 3
-rw-r--r-- lib/random32.c | 525
-rw-r--r-- lib/scatterlist.c | 137
-rw-r--r-- lib/string_helpers.c | 23
-rw-r--r-- lib/strncpy_from_user.c | 3
-rw-r--r-- lib/syscall.c | 2
-rw-r--r-- lib/test_bitmap.c | 91
-rw-r--r-- lib/test_firmware.c | 154
-rw-r--r-- lib/test_free_pages.c | 42
-rw-r--r-- lib/test_hmm.c | 65
-rw-r--r-- lib/test_kasan.c | 728
-rw-r--r-- lib/test_kasan_module.c | 111
-rw-r--r-- lib/test_sysctl.c | 2
-rw-r--r-- lib/test_xarray.c | 97
-rw-r--r-- lib/usercopy.c | 5
-rw-r--r-- lib/vsprintf.c | 4
-rw-r--r-- lib/xarray.c | 233
70 files changed, 8663 insertions, 1121 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index b26721cf51bc..46806332a8cc 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -635,7 +635,12 @@ config UACCESS_MEMCPY
config ARCH_HAS_UACCESS_FLUSHCACHE
bool
-config ARCH_HAS_UACCESS_MCSAFE
+# arch has a concept of a recoverable synchronous exception due to a
+# memory-read error like x86 machine-check or ARM data-abort, and
+# implements copy_mc_to_{user,kernel} to abort and report
+# 'bytes-transferred' if that exception fires when accessing the source
+# buffer.
+config ARCH_HAS_COPY_MC
bool
# Temporary. Goes away when all archs are cleaned up
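
As a usage sketch of the renamed interface (the helper name below is invented for illustration; copy_mc_to_kernel() is the API this option gates, and like copy_from_user() it returns the number of bytes it failed to copy):

	/*
	 * Illustrative sketch only: a non-zero return from
	 * copy_mc_to_kernel() means a machine-check (or equivalent
	 * exception) fired while reading the source buffer, and
	 * size - rem bytes were transferred before the abort.
	 */
	static int read_possibly_poisoned(void *dst, const void *src, size_t size)
	{
		unsigned long rem = copy_mc_to_kernel(dst, src, size);

		return rem ? -EIO : 0;
	}
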
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 87410f62b501..c63fc68f9bf2 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -212,9 +212,10 @@ config DEBUG_INFO
If unsure, say N.
+if DEBUG_INFO
+
config DEBUG_INFO_REDUCED
bool "Reduce debugging information"
- depends on DEBUG_INFO
help
If you say Y here gcc is instructed to generate less debugging
information for structure types. This means that tools that
@@ -227,7 +228,6 @@ config DEBUG_INFO_REDUCED
config DEBUG_INFO_COMPRESSED
bool "Compressed debugging information"
- depends on DEBUG_INFO
depends on $(cc-option,-gz=zlib)
depends on $(ld-option,--compress-debug-sections=zlib)
help
@@ -243,7 +243,6 @@ config DEBUG_INFO_COMPRESSED
config DEBUG_INFO_SPLIT
bool "Produce split debuginfo in .dwo files"
- depends on DEBUG_INFO
depends on $(cc-option,-gsplit-dwarf)
help
Generate debug info into separate .dwo files. This significantly
@@ -259,7 +258,6 @@ config DEBUG_INFO_SPLIT
config DEBUG_INFO_DWARF4
bool "Generate dwarf4 debuginfo"
- depends on DEBUG_INFO
depends on $(cc-option,-gdwarf-4)
help
Generate dwarf4 debug info. This requires recent versions
@@ -269,7 +267,6 @@ config DEBUG_INFO_DWARF4
config DEBUG_INFO_BTF
bool "Generate BTF typeinfo"
- depends on DEBUG_INFO
depends on !DEBUG_INFO_SPLIT && !DEBUG_INFO_REDUCED
depends on !GCC_PLUGIN_RANDSTRUCT || COMPILE_TEST
help
@@ -279,7 +276,6 @@ config DEBUG_INFO_BTF
config GDB_SCRIPTS
bool "Provide GDB scripts for kernel debugging"
- depends on DEBUG_INFO
help
This creates the required links to GDB helper scripts in the
build directory. If you load vmlinux into gdb, the helper
@@ -288,6 +284,8 @@ config GDB_SCRIPTS
instance. See Documentation/dev-tools/gdb-kernel-debugging.rst
for further details.
+endif # DEBUG_INFO
+
config ENABLE_MUST_CHECK
bool "Enable __must_check logic"
default y
@@ -1367,6 +1365,27 @@ config WW_MUTEX_SELFTEST
Say M if you want these self tests to build as a module.
Say N if you are unsure.
+config SCF_TORTURE_TEST
+ tristate "torture tests for smp_call_function*()"
+ depends on DEBUG_KERNEL
+ select TORTURE_TEST
+ help
+ This option provides a kernel module that runs torture tests
+ on the smp_call_function() family of primitives. The kernel
+ module may be built after the fact on the running kernel to
+ be tested, if desired.
+
+config CSD_LOCK_WAIT_DEBUG
+ bool "Debugging for csd_lock_wait(), called from smp_call_function*()"
+ depends on DEBUG_KERNEL
+ depends on 64BIT
+ default n
+ help
+ This option enables debug prints when CPUs are slow to respond
+ to the smp_call_function*() IPI wrappers. These debug prints
+ include the IPI handler function currently executing (if any)
+ and relevant stack traces.
+
endmenu # lock debugging
config TRACE_IRQFLAGS
@@ -1768,6 +1787,13 @@ config FAIL_PAGE_ALLOC
help
Provide fault-injection capability for alloc_pages().
+config FAULT_INJECTION_USERCOPY
+ bool "Fault injection capability for usercopy functions"
+ depends on FAULT_INJECTION
+ help
+ Provides fault-injection capability to inject failures
+ in usercopy functions (copy_from_user(), get_user(), ...).
+
config FAIL_MAKE_REQUEST
bool "Fault-injection capability for disk IO"
depends on FAULT_INJECTION && BLOCK
@@ -2035,13 +2061,6 @@ config TEST_BITMAP
If unsure, say N.
-config TEST_BITFIELD
- tristate "Test bitfield functions at runtime"
- help
- Enable this option to test the bitfield functions at boot.
-
- If unsure, say N.
-
config TEST_UUID
tristate "Test functions located in the uuid module at runtime"
@@ -2191,6 +2210,22 @@ config TEST_SYSCTL
If unsure, say N.
+config BITFIELD_KUNIT
+ tristate "KUnit test bitfield functions at runtime"
+ depends on KUNIT
+ help
+ Enable this option to test the bitfield functions at boot.
+
+ KUnit tests run during boot and output the results to the debug log
+ in TAP format (http://testanything.org/). Only useful for kernel devs
+ running the KUnit test harness, and not intended for inclusion into a
+ production build.
+
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
config SYSCTL_KUNIT_TEST
tristate "KUnit test for sysctl" if !KUNIT_ALL_TESTS
depends on KUNIT
@@ -2367,6 +2402,15 @@ config TEST_HMM
If unsure, say N.
+config TEST_FREE_PAGES
+ tristate "Test freeing pages"
+ help
+ Test that a memory leak does not occur due to a race between
+ freeing a block of pages and a speculative page reference.
+ Loading this module is safe if your kernel has the bug fixed.
+ If the bug is not fixed, it will leak gigabytes of memory and
+ probably OOM your system.
+
config TEST_FPU
tristate "Test floating point operations in kernel space"
depends on X86 && !KCOV_INSTRUMENT_ALL
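
As a rough illustration of what TEST_FREE_PAGES exercises (a simplified sketch, not the module source): the test repeatedly frees an order-N block while holding an extra reference, standing in for a speculative page-cache lookup, and watches whether memory leaks:

	static void free_pages_race_sketch(unsigned int order)
	{
		unsigned int i;

		for (i = 0; i < 1000 * 1000; i++) {
			unsigned long addr = __get_free_pages(GFP_KERNEL, order);
			struct page *page;

			if (!addr)
				break;
			page = virt_to_page((void *)addr);
			get_page(page);	/* simulated speculative reference */
			free_pages(addr, order);
			put_page(page);	/* must free the block on a fixed kernel */
		}
	}
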
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index 047b53dbfd58..542a9c18398e 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -54,9 +54,9 @@ config KASAN_GENERIC
Enables generic KASAN mode.
This mode is supported in both GCC and Clang. With GCC it requires
- version 8.3.0 or later. With Clang it requires version 7.0.0 or
- later, but detection of out-of-bounds accesses for global variables
- is supported only since Clang 11.
+ version 8.3.0 or later. Any supported Clang version is compatible,
+ but detection of out-of-bounds accesses for global variables is
+ supported only since Clang 11.
This mode consumes about 1/8th of available memory at kernel start
and introduces an overhead of ~x1.5 for the rest of the allocations.
@@ -78,8 +78,7 @@ config KASAN_SW_TAGS
Enables software tag-based KASAN mode.
This mode requires Top Byte Ignore support by the CPU and therefore
- is only supported for arm64. This mode requires Clang version 7.0.0
- or later.
+ is only supported for arm64. This mode requires Clang.
This mode consumes about 1/16th of available memory at kernel start
and introduces an overhead of ~20% for the rest of the allocations.
@@ -167,12 +166,24 @@ config KASAN_VMALLOC
for KASAN to detect more sorts of errors (and to support vmapped
stacks), but at the cost of higher memory usage.
-config TEST_KASAN
- tristate "Module for testing KASAN for bug detection"
- depends on m
+config KASAN_KUNIT_TEST
+ tristate "KUnit-compatible tests of KASAN bug detection capabilities" if !KUNIT_ALL_TESTS
+ depends on KASAN && KUNIT
+ default KUNIT_ALL_TESTS
help
- This is a test module doing various nasty things like
- out of bounds accesses, use after free. It is useful for testing
+ This is a KUnit test suite doing various nasty things like
+ out of bounds and use after free accesses. It is useful for testing
kernel debugging features like KASAN.
+ For more information on KUnit and unit tests in general, please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit
+
+config TEST_KASAN_MODULE
+ tristate "KUnit-incompatible tests of KASAN bug detection capabilities"
+ depends on m && KASAN
+ help
+ This is a part of the KASAN test suite that is incompatible with
+ KUnit. Currently includes tests that do bad copy_from/to_user
+ accesses.
+
endif # KASAN
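
For the KUnit-compatible side, a minimal test-case shape, modeled on the converted lib/test_kasan.c in this series (KUNIT_EXPECT_KASAN_FAIL() is defined there; the case below sketches the pattern rather than quoting the file):

	static void kmalloc_oob_right(struct kunit *test)
	{
		size_t size = 123;
		char *ptr = kmalloc(size, GFP_KERNEL);

		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
		/* the expression must produce a KASAN report for the case to pass */
		KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'x');
		kfree(ptr);
	}
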
diff --git a/lib/Kconfig.kcsan b/lib/Kconfig.kcsan
index 3d282d51849b..f271ff5fbb5a 100644
--- a/lib/Kconfig.kcsan
+++ b/lib/Kconfig.kcsan
@@ -40,6 +40,11 @@ menuconfig KCSAN
if KCSAN
+# Compiler capabilities that should not fail the test if they are unavailable.
+config CC_HAS_TSAN_COMPOUND_READ_BEFORE_WRITE
+ def_bool (CC_IS_CLANG && $(cc-option,-fsanitize=thread -mllvm -tsan-compound-read-before-write=1)) || \
+ (CC_IS_GCC && $(cc-option,-fsanitize=thread --param tsan-compound-read-before-write=1))
+
config KCSAN_VERBOSE
bool "Show verbose reports with more information about system state"
depends on PROVE_LOCKING
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
index 256f2486f9bd..05dae05b6cc9 100644
--- a/lib/Kconfig.kgdb
+++ b/lib/Kconfig.kgdb
@@ -24,6 +24,21 @@ menuconfig KGDB
if KGDB
+config KGDB_HONOUR_BLOCKLIST
+ bool "KGDB: use kprobe blocklist to prohibit unsafe breakpoints"
+ depends on HAVE_KPROBES
+ depends on MODULES
+ select KPROBES
+ default y
+ help
+ If set to Y the debug core will use the kprobe blocklist to
+ identify symbols where it is unsafe to set breakpoints.
+ In particular this disallows instrumentation of functions
+ called during debug trap handling and thus makes it very
+ difficult to inadvertently provoke recursive trap handling.
+
+ If unsure, say Y.
+
config KGDB_SERIAL_CONSOLE
tristate "KGDB: use kgdb over the serial console"
select CONSOLE_POLL
diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan
index 774315de555a..58f8d03d037b 100644
--- a/lib/Kconfig.ubsan
+++ b/lib/Kconfig.ubsan
@@ -47,6 +47,20 @@ config UBSAN_BOUNDS
to the {str,mem}*cpy() family of functions (that is addressed
by CONFIG_FORTIFY_SOURCE).
+config UBSAN_LOCAL_BOUNDS
+ bool "Perform array local bounds checking"
+ depends on UBSAN_TRAP
+ depends on CC_IS_CLANG
+ depends on !UBSAN_KCOV_BROKEN
+ help
+ This option enables -fsanitize=local-bounds which traps when an
+ exception/error is detected. Therefore, it should be enabled only
+ if trapping is expected.
+ Enabling this option detects errors due to accesses through a
+ pointer that is derived from an object of a statically-known size,
+ where an added offset (which may not be known statically) is
+ out-of-bounds.
+
config UBSAN_MISC
bool "Enable all other Undefined Behavior sanity checks"
default UBSAN
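
A hedged example of the bug class -fsanitize=local-bounds traps on (array and function names invented for illustration):

	/* The object's size is statically known, the offset is not:
	 * with UBSAN_LOCAL_BOUNDS an out-of-range access traps instead
	 * of silently reading past the array. */
	static int table[16];

	int table_lookup(int idx)
	{
		return table[idx];	/* traps at runtime if idx is out of range */
	}
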
diff --git a/lib/Makefile b/lib/Makefile
index 34011a11fbb3..431d7d894bf7 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -65,9 +65,11 @@ CFLAGS_test_bitops.o += -Werror
obj-$(CONFIG_TEST_SYSCTL) += test_sysctl.o
obj-$(CONFIG_TEST_HASH) += test_hash.o test_siphash.o
obj-$(CONFIG_TEST_IDA) += test_ida.o
-obj-$(CONFIG_TEST_KASAN) += test_kasan.o
+obj-$(CONFIG_KASAN_KUNIT_TEST) += test_kasan.o
CFLAGS_test_kasan.o += -fno-builtin
CFLAGS_test_kasan.o += $(call cc-disable-warning, vla)
+obj-$(CONFIG_TEST_KASAN_MODULE) += test_kasan_module.o
+CFLAGS_test_kasan_module.o += -fno-builtin
obj-$(CONFIG_TEST_UBSAN) += test_ubsan.o
CFLAGS_test_ubsan.o += $(call cc-disable-warning, vla)
UBSAN_SANITIZE_test_ubsan.o := y
@@ -85,7 +87,6 @@ obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o
obj-$(CONFIG_TEST_PRINTF) += test_printf.o
obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o
obj-$(CONFIG_TEST_STRSCPY) += test_strscpy.o
-obj-$(CONFIG_TEST_BITFIELD) += test_bitfield.o
obj-$(CONFIG_TEST_UUID) += test_uuid.o
obj-$(CONFIG_TEST_XARRAY) += test_xarray.o
obj-$(CONFIG_TEST_PARMAN) += test_parman.o
@@ -99,6 +100,7 @@ obj-$(CONFIG_TEST_BLACKHOLE_DEV) += test_blackhole_dev.o
obj-$(CONFIG_TEST_MEMINIT) += test_meminit.o
obj-$(CONFIG_TEST_LOCKUP) += test_lockup.o
obj-$(CONFIG_TEST_HMM) += test_hmm.o
+obj-$(CONFIG_TEST_FREE_PAGES) += test_free_pages.o
#
# CFLAGS for compiling floating point code inside the kernel. x86/Makefile turns
@@ -207,6 +209,7 @@ obj-$(CONFIG_AUDIT_COMPAT_GENERIC) += compat_audit.o
obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o
obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o
+obj-$(CONFIG_FAULT_INJECTION_USERCOPY) += fault-inject-usercopy.o
obj-$(CONFIG_NOTIFIER_ERROR_INJECTION) += notifier-error-inject.o
obj-$(CONFIG_PM_NOTIFIER_ERROR_INJECT) += pm-notifier-error-inject.o
obj-$(CONFIG_NETDEV_NOTIFIER_ERROR_INJECT) += netdev-notifier-error-inject.o
@@ -345,6 +348,7 @@ obj-$(CONFIG_OBJAGG) += objagg.o
obj-$(CONFIG_PLDMFW) += pldmfw/
# KUnit tests
+obj-$(CONFIG_BITFIELD_KUNIT) += bitfield_kunit.o
obj-$(CONFIG_LIST_KUNIT_TEST) += list-test.o
obj-$(CONFIG_LINEAR_RANGES_TEST) += test_linear_ranges.o
obj-$(CONFIG_BITS_TEST) += test_bits.o
diff --git a/lib/test_bitfield.c b/lib/bitfield_kunit.c
index 5b8f4108662d..1473d8b4bf0f 100644
--- a/lib/test_bitfield.c
+++ b/lib/bitfield_kunit.c
@@ -5,8 +5,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/kernel.h>
-#include <linux/module.h>
+#include <kunit/test.h>
#include <linux/bitfield.h>
#define CHECK_ENC_GET_U(tp, v, field, res) do { \
@@ -14,13 +13,11 @@
u##tp _res; \
\
_res = u##tp##_encode_bits(v, field); \
- if (_res != res) { \
- pr_warn("u" #tp "_encode_bits(" #v ", " #field ") is 0x%llx != " #res "\n",\
- (u64)_res); \
- return -EINVAL; \
- } \
- if (u##tp##_get_bits(_res, field) != v) \
- return -EINVAL; \
+ KUNIT_ASSERT_FALSE_MSG(context, _res != res, \
+ "u" #tp "_encode_bits(" #v ", " #field ") is 0x%llx != " #res "\n", \
+ (u64)_res); \
+ KUNIT_ASSERT_FALSE(context, \
+ u##tp##_get_bits(_res, field) != v); \
} \
} while (0)
@@ -29,14 +26,13 @@
__le##tp _res; \
\
_res = le##tp##_encode_bits(v, field); \
- if (_res != cpu_to_le##tp(res)) { \
- pr_warn("le" #tp "_encode_bits(" #v ", " #field ") is 0x%llx != 0x%llx\n",\
- (u64)le##tp##_to_cpu(_res), \
- (u64)(res)); \
- return -EINVAL; \
- } \
- if (le##tp##_get_bits(_res, field) != v) \
- return -EINVAL; \
+ KUNIT_ASSERT_FALSE_MSG(context, \
+ _res != cpu_to_le##tp(res), \
+ "le" #tp "_encode_bits(" #v ", " #field ") is 0x%llx != 0x%llx",\
+ (u64)le##tp##_to_cpu(_res), \
+ (u64)(res)); \
+ KUNIT_ASSERT_FALSE(context, \
+ le##tp##_get_bits(_res, field) != v);\
} \
} while (0)
@@ -45,14 +41,13 @@
__be##tp _res; \
\
_res = be##tp##_encode_bits(v, field); \
- if (_res != cpu_to_be##tp(res)) { \
- pr_warn("be" #tp "_encode_bits(" #v ", " #field ") is 0x%llx != 0x%llx\n",\
- (u64)be##tp##_to_cpu(_res), \
- (u64)(res)); \
- return -EINVAL; \
- } \
- if (be##tp##_get_bits(_res, field) != v) \
- return -EINVAL; \
+ KUNIT_ASSERT_FALSE_MSG(context, \
+ _res != cpu_to_be##tp(res), \
+ "be" #tp "_encode_bits(" #v ", " #field ") is 0x%llx != 0x%llx", \
+ (u64)be##tp##_to_cpu(_res), \
+ (u64)(res)); \
+ KUNIT_ASSERT_FALSE(context, \
+ be##tp##_get_bits(_res, field) != v);\
} \
} while (0)
@@ -62,7 +57,7 @@
CHECK_ENC_GET_BE(tp, v, field, res); \
} while (0)
-static int test_constants(void)
+static void __init test_bitfields_constants(struct kunit *context)
{
/*
* NOTE
@@ -95,19 +90,17 @@ static int test_constants(void)
CHECK_ENC_GET(64, 7, 0x00f0000000000000ull, 0x0070000000000000ull);
CHECK_ENC_GET(64, 14, 0x0f00000000000000ull, 0x0e00000000000000ull);
CHECK_ENC_GET(64, 15, 0xf000000000000000ull, 0xf000000000000000ull);
-
- return 0;
}
#define CHECK(tp, mask) do { \
u64 v; \
\
for (v = 0; v < 1 << hweight32(mask); v++) \
- if (tp##_encode_bits(v, mask) != v << __ffs64(mask)) \
- return -EINVAL; \
+ KUNIT_ASSERT_FALSE(context, \
+ tp##_encode_bits(v, mask) != v << __ffs64(mask));\
} while (0)
-static int test_variables(void)
+static void __init test_bitfields_variables(struct kunit *context)
{
CHECK(u8, 0x0f);
CHECK(u8, 0xf0);
@@ -130,39 +123,32 @@ static int test_variables(void)
CHECK(u64, 0x000000007f000000ull);
CHECK(u64, 0x0000000018000000ull);
CHECK(u64, 0x0000001f8000000ull);
-
- return 0;
}
-static int __init test_bitfields(void)
-{
- int ret = test_constants();
-
- if (ret) {
- pr_warn("constant tests failed!\n");
- return ret;
- }
-
- ret = test_variables();
- if (ret) {
- pr_warn("variable tests failed!\n");
- return ret;
- }
-
#ifdef TEST_BITFIELD_COMPILE
+static void __init test_bitfields_compile(struct kunit *context)
+{
/* these should fail compilation */
CHECK_ENC_GET(16, 16, 0x0f00, 0x1000);
u32_encode_bits(7, 0x06000000);
/* this should at least give a warning */
u16_encode_bits(0, 0x60000);
+}
#endif
- pr_info("tests passed\n");
+static struct kunit_case __refdata bitfields_test_cases[] = {
+ KUNIT_CASE(test_bitfields_constants),
+ KUNIT_CASE(test_bitfields_variables),
+ {}
+};
- return 0;
-}
-module_init(test_bitfields)
+static struct kunit_suite bitfields_test_suite = {
+ .name = "bitfields",
+ .test_cases = bitfields_test_cases,
+};
+
+kunit_test_suites(&bitfields_test_suite);
MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
MODULE_LICENSE("GPL");
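
For reference, the round-trip property the CHECK_ENC_GET* macros above assert, as a hand-checkable sketch:

	#include <linux/bitfield.h>

	static void bitfield_roundtrip_example(void)
	{
		/* the mask defines the field: 0x00f0 covers bits 4..7 */
		u32 reg = u32_encode_bits(7, 0x00f0);	/* 7 << 4 == 0x0070 */
		u32 val = u32_get_bits(reg, 0x00f0);	/* extracts 7 again */

		WARN_ON(val != 7);
	}
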
diff --git a/lib/bitmap.c b/lib/bitmap.c
index c13d859bc7ab..75006c4036e9 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -23,7 +23,7 @@
/**
* DOC: bitmap introduction
*
- * bitmaps provide an array of bits, implemented using an an
+ * bitmaps provide an array of bits, implemented using an
* array of unsigned longs. The number of valid bits in a
* given bitmap does _not_ need to be an exact multiple of
* BITS_PER_LONG.
@@ -552,7 +552,7 @@ static inline bool end_of_region(char c)
}
/*
- * The format allows commas and whitespases at the beginning
+ * The format allows commas and whitespaces at the beginning
* of the region.
*/
static const char *bitmap_find_region(const char *str)
diff --git a/lib/checksum.c b/lib/checksum.c
index c7861e84c526..6860d6b05a17 100644
--- a/lib/checksum.c
+++ b/lib/checksum.c
@@ -145,17 +145,6 @@ __sum16 ip_compute_csum(const void *buff, int len)
}
EXPORT_SYMBOL(ip_compute_csum);
-/*
- * copy from ds while checksumming, otherwise like csum_partial
- */
-__wsum
-csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
-{
- memcpy(dst, src, len);
- return csum_partial(dst, len, sum);
-}
-EXPORT_SYMBOL(csum_partial_copy_nocheck);
-
#ifndef csum_tcpudp_nofold
static inline u32 from64to32(u64 x)
{
diff --git a/lib/crc32.c b/lib/crc32.c
index 35a03d03f973..2a68dfd3b96c 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -331,7 +331,7 @@ static inline u32 __pure crc32_be_generic(u32 crc, unsigned char const *p,
return crc;
}
-#if CRC_LE_BITS == 1
+#if CRC_BE_BITS == 1
u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
{
return crc32_be_generic(crc, p, len, NULL, CRC32_POLY_BE);
diff --git a/lib/crypto/chacha20poly1305.c b/lib/crypto/chacha20poly1305.c
index 431e04280332..5850f3b87359 100644
--- a/lib/crypto/chacha20poly1305.c
+++ b/lib/crypto/chacha20poly1305.c
@@ -251,9 +251,7 @@ bool chacha20poly1305_crypt_sg_inplace(struct scatterlist *src,
poly1305_update(&poly1305_state, pad0, 0x10 - (ad_len & 0xf));
}
- flags = SG_MITER_TO_SG;
- if (!preemptible())
- flags |= SG_MITER_ATOMIC;
+ flags = SG_MITER_TO_SG | SG_MITER_ATOMIC;
sg_miter_start(&miter, src, sg_nents(src), flags);
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index fe4557955d97..9e14ae02306b 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -19,6 +19,7 @@
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>
+#include <linux/cpu.h>
#define ODEBUG_HASH_BITS 14
#define ODEBUG_HASH_SIZE (1 << ODEBUG_HASH_BITS)
@@ -90,7 +91,7 @@ static int debug_objects_pool_size __read_mostly
= ODEBUG_POOL_SIZE;
static int debug_objects_pool_min_level __read_mostly
= ODEBUG_POOL_MIN_LEVEL;
-static struct debug_obj_descr *descr_test __read_mostly;
+static const struct debug_obj_descr *descr_test __read_mostly;
static struct kmem_cache *obj_cache __read_mostly;
/*
@@ -223,7 +224,7 @@ static struct debug_obj *__alloc_object(struct hlist_head *list)
* Must be called with interrupts disabled.
*/
static struct debug_obj *
-alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
+alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr)
{
struct debug_percpu_free *percpu_pool = this_cpu_ptr(&percpu_obj_pool);
struct debug_obj *obj;
@@ -433,6 +434,25 @@ static void free_object(struct debug_obj *obj)
}
}
+#ifdef CONFIG_HOTPLUG_CPU
+static int object_cpu_offline(unsigned int cpu)
+{
+ struct debug_percpu_free *percpu_pool;
+ struct hlist_node *tmp;
+ struct debug_obj *obj;
+
+ /* Remote access is safe as the CPU is dead already */
+ percpu_pool = per_cpu_ptr(&percpu_obj_pool, cpu);
+ hlist_for_each_entry_safe(obj, tmp, &percpu_pool->free_objs, node) {
+ hlist_del(&obj->node);
+ kmem_cache_free(obj_cache, obj);
+ }
+ percpu_pool->obj_free = 0;
+
+ return 0;
+}
+#endif
+
/*
* We run out of memory. That means we probably have tons of objects
* allocated.
@@ -475,7 +495,7 @@ static struct debug_bucket *get_bucket(unsigned long addr)
static void debug_print_object(struct debug_obj *obj, char *msg)
{
- struct debug_obj_descr *descr = obj->descr;
+ const struct debug_obj_descr *descr = obj->descr;
static int limit;
if (limit < 5 && descr != descr_test) {
@@ -529,7 +549,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
}
static void
-__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
+__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
{
enum debug_obj_state state;
bool check_stack = false;
@@ -587,7 +607,7 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
* @addr: address of the object
* @descr: pointer to an object specific debug description structure
*/
-void debug_object_init(void *addr, struct debug_obj_descr *descr)
+void debug_object_init(void *addr, const struct debug_obj_descr *descr)
{
if (!debug_objects_enabled)
return;
@@ -602,7 +622,7 @@ EXPORT_SYMBOL_GPL(debug_object_init);
* @addr: address of the object
* @descr: pointer to an object specific debug description structure
*/
-void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
+void debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr)
{
if (!debug_objects_enabled)
return;
@@ -617,7 +637,7 @@ EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
* @descr: pointer to an object specific debug description structure
* Returns 0 for success, -EINVAL for check failed.
*/
-int debug_object_activate(void *addr, struct debug_obj_descr *descr)
+int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
{
enum debug_obj_state state;
struct debug_bucket *db;
@@ -695,7 +715,7 @@ EXPORT_SYMBOL_GPL(debug_object_activate);
* @addr: address of the object
* @descr: pointer to an object specific debug description structure
*/
-void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
+void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
{
struct debug_bucket *db;
struct debug_obj *obj;
@@ -747,7 +767,7 @@ EXPORT_SYMBOL_GPL(debug_object_deactivate);
* @addr: address of the object
* @descr: pointer to an object specific debug description structure
*/
-void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
+void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
{
enum debug_obj_state state;
struct debug_bucket *db;
@@ -797,7 +817,7 @@ EXPORT_SYMBOL_GPL(debug_object_destroy);
* @addr: address of the object
* @descr: pointer to an object specific debug description structure
*/
-void debug_object_free(void *addr, struct debug_obj_descr *descr)
+void debug_object_free(void *addr, const struct debug_obj_descr *descr)
{
enum debug_obj_state state;
struct debug_bucket *db;
@@ -838,7 +858,7 @@ EXPORT_SYMBOL_GPL(debug_object_free);
* @addr: address of the object
* @descr: pointer to an object specific debug description structure
*/
-void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
+void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
{
struct debug_bucket *db;
struct debug_obj *obj;
@@ -886,7 +906,7 @@ EXPORT_SYMBOL_GPL(debug_object_assert_init);
* @next: state to move to if expected state is found
*/
void
-debug_object_active_state(void *addr, struct debug_obj_descr *descr,
+debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
unsigned int expect, unsigned int next)
{
struct debug_bucket *db;
@@ -934,7 +954,7 @@ EXPORT_SYMBOL_GPL(debug_object_active_state);
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
- struct debug_obj_descr *descr;
+ const struct debug_obj_descr *descr;
enum debug_obj_state state;
struct debug_bucket *db;
struct hlist_node *tmp;
@@ -1052,7 +1072,7 @@ struct self_test {
unsigned long dummy2[3];
};
-static __initdata struct debug_obj_descr descr_type_test;
+static __initconst const struct debug_obj_descr descr_type_test;
static bool __init is_static_object(void *addr)
{
@@ -1177,7 +1197,7 @@ out:
return res;
}
-static __initdata struct debug_obj_descr descr_type_test = {
+static __initconst const struct debug_obj_descr descr_type_test = {
.name = "selftest",
.is_static_object = is_static_object,
.fixup_init = fixup_init,
@@ -1367,6 +1387,11 @@ void __init debug_objects_mem_init(void)
} else
debug_objects_selftest();
+#ifdef CONFIG_HOTPLUG_CPU
+ cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL,
+ object_cpu_offline);
+#endif
+
/*
* Increase the thresholds for allocating and freeing objects
* according to the number of possible CPUs available in the system.
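
With the descriptor constified throughout, callers can now place it in rodata; a hedged sketch (struct debug_obj_descr and debug_object_init() are the real API, the my_work names are invented):

	static const struct debug_obj_descr my_work_debug_descr = {
		.name = "my_work",	/* descriptor now lives in rodata */
	};

	static void my_work_setup(struct my_work *w)
	{
		debug_object_init(w, &my_work_debug_descr);
	}
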
diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
index f9628f3924ce..c72c865032fa 100644
--- a/lib/decompress_bunzip2.c
+++ b/lib/decompress_bunzip2.c
@@ -390,7 +390,7 @@ static int INIT get_next_block(struct bunzip_data *bd)
j = (bd->inbufBits >> bd->inbufBitCount)&
((1 << hufGroup->maxLen)-1);
got_huff_bits:
- /* Figure how how many bits are in next symbol and
+ /* Figure how many bits are in next symbol and
* unget extras */
i = hufGroup->minLen;
while (j > limit[i])
diff --git a/lib/decompress_unzstd.c b/lib/decompress_unzstd.c
index 0ad2c15479ed..790abc472f5b 100644
--- a/lib/decompress_unzstd.c
+++ b/lib/decompress_unzstd.c
@@ -178,8 +178,13 @@ static int INIT __unzstd(unsigned char *in_buf, long in_len,
int err;
size_t ret;
+ /*
+ * ZSTD decompression code won't be happy if the buffer size is so big
+ * that its end address overflows. When the size is not provided, make
+ * it as big as possible without having the end address overflow.
+ */
if (out_len == 0)
- out_len = LONG_MAX; /* no limit */
+ out_len = UINTPTR_MAX - (uintptr_t)out_buf;
if (fill == NULL && flush == NULL)
/*
diff --git a/lib/devres.c b/lib/devres.c
index ebb1573d9ae3..2a4ff5d64288 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -162,13 +162,15 @@ __devm_ioremap_resource(struct device *dev, const struct resource *res,
* region and ioremaps it. All operations are managed and will be undone
* on driver detach.
*
- * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
- * on failure. Usage example:
+ * Usage example:
*
* res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
* base = devm_ioremap_resource(&pdev->dev, res);
* if (IS_ERR(base))
* return PTR_ERR(base);
+ *
+ * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
+ * on failure.
*/
void __iomem *devm_ioremap_resource(struct device *dev,
const struct resource *res)
@@ -183,8 +185,8 @@ EXPORT_SYMBOL(devm_ioremap_resource);
* @dev: generic device to handle the resource for
* @res: resource to be handled
*
- * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
- * on failure. Usage example:
+ * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
+ * on failure.
*/
void __iomem *devm_ioremap_resource_wc(struct device *dev,
const struct resource *res)
@@ -207,8 +209,8 @@ void __iomem *devm_ioremap_resource_wc(struct device *dev,
* @node: The device-tree node where the resource resides
* @index: index of the MMIO range in the "reg" property
* @size: Returns the size of the resource (pass NULL if not needed)
- * Returns a pointer to the requested and mapped memory or an ERR_PTR() encoded
- * error code on failure. Usage example:
+ *
+ * Usage example:
*
* base = devm_of_iomap(&pdev->dev, node, 0, NULL);
* if (IS_ERR(base))
@@ -217,8 +219,10 @@ void __iomem *devm_ioremap_resource_wc(struct device *dev,
* Please Note: This is not a one-to-one replacement for of_iomap() because the
* of_iomap() function does not track whether the region is already mapped. If
* two drivers try to map the same memory, the of_iomap() function will succeed
- * but the the devm_of_iomap() function will return -EBUSY.
+ * but the devm_of_iomap() function will return -EBUSY.
*
+ * Return: a pointer to the requested and mapped memory or an ERR_PTR() encoded
+ * error code on failure.
*/
void __iomem *devm_of_iomap(struct device *dev, struct device_node *node, int index,
resource_size_t *size)
@@ -256,6 +260,8 @@ static int devm_ioport_map_match(struct device *dev, void *res,
*
* Managed ioport_map(). Map is automatically unmapped on driver
* detach.
+ *
+ * Return: a pointer to the remapped memory or NULL on failure.
*/
void __iomem *devm_ioport_map(struct device *dev, unsigned long port,
unsigned int nr)
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 2d4dfd44b0fa..bd7b3aaa93c3 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -384,10 +384,13 @@ static int ddebug_parse_query(char *words[], int nwords,
query->module = modname;
for (i = 0; i < nwords; i += 2) {
- if (!strcmp(words[i], "func")) {
- rc = check_set(&query->function, words[i+1], "func");
- } else if (!strcmp(words[i], "file")) {
- if (check_set(&query->filename, words[i+1], "file"))
+ char *keyword = words[i];
+ char *arg = words[i+1];
+
+ if (!strcmp(keyword, "func")) {
+ rc = check_set(&query->function, arg, "func");
+ } else if (!strcmp(keyword, "file")) {
+ if (check_set(&query->filename, arg, "file"))
return -EINVAL;
/* tail :$info is function or line-range */
@@ -403,18 +406,18 @@ static int ddebug_parse_query(char *words[], int nwords,
if (parse_linerange(query, fline))
return -EINVAL;
}
- } else if (!strcmp(words[i], "module")) {
- rc = check_set(&query->module, words[i+1], "module");
- } else if (!strcmp(words[i], "format")) {
- string_unescape_inplace(words[i+1], UNESCAPE_SPACE |
+ } else if (!strcmp(keyword, "module")) {
+ rc = check_set(&query->module, arg, "module");
+ } else if (!strcmp(keyword, "format")) {
+ string_unescape_inplace(arg, UNESCAPE_SPACE |
UNESCAPE_OCTAL |
UNESCAPE_SPECIAL);
- rc = check_set(&query->format, words[i+1], "format");
- } else if (!strcmp(words[i], "line")) {
- if (parse_linerange(query, words[i+1]))
+ rc = check_set(&query->format, arg, "format");
+ } else if (!strcmp(keyword, "line")) {
+ if (parse_linerange(query, arg))
return -EINVAL;
} else {
- pr_err("unknown keyword \"%s\"\n", words[i]);
+ pr_err("unknown keyword \"%s\"\n", keyword);
return -EINVAL;
}
if (rc)
diff --git a/lib/dynamic_queue_limits.c b/lib/dynamic_queue_limits.c
index e659a027036e..fde0aa244148 100644
--- a/lib/dynamic_queue_limits.c
+++ b/lib/dynamic_queue_limits.c
@@ -60,8 +60,8 @@ void dql_completed(struct dql *dql, unsigned int count)
* A decrease is only considered if the queue has been busy in
* the whole interval (the check above).
*
- * If there is slack, the amount of execess data queued above
- * the the amount needed to prevent starvation, the queue limit
+ * If there is slack, the amount of excess data queued above
+ * the amount needed to prevent starvation, the queue limit
* can be decreased. To avoid hysteresis we consider the
* minimum amount of slack found over several iterations of the
* completion routine.
diff --git a/lib/earlycpio.c b/lib/earlycpio.c
index c001e084829e..e83628882001 100644
--- a/lib/earlycpio.c
+++ b/lib/earlycpio.c
@@ -42,7 +42,7 @@ enum cpio_fields {
/**
* cpio_data find_cpio_data - Search for files in an uncompressed cpio
* @path: The directory to search for, including a slash at the end
- * @data: Pointer to the the cpio archive or a header inside
+ * @data: Pointer to the cpio archive or a header inside
* @len: Remaining length of the cpio based on data pointer
* @nextoff: When a matching file is found, this is the offset from the
* beginning of the cpio to the beginning of the next file, not the
diff --git a/lib/fault-inject-usercopy.c b/lib/fault-inject-usercopy.c
new file mode 100644
index 000000000000..77558b6c29ca
--- /dev/null
+++ b/lib/fault-inject-usercopy.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/fault-inject.h>
+#include <linux/fault-inject-usercopy.h>
+
+static struct {
+ struct fault_attr attr;
+} fail_usercopy = {
+ .attr = FAULT_ATTR_INITIALIZER,
+};
+
+static int __init setup_fail_usercopy(char *str)
+{
+ return setup_fault_attr(&fail_usercopy.attr, str);
+}
+__setup("fail_usercopy=", setup_fail_usercopy);
+
+#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
+
+static int __init fail_usercopy_debugfs(void)
+{
+ struct dentry *dir;
+
+ dir = fault_create_debugfs_attr("fail_usercopy", NULL,
+ &fail_usercopy.attr);
+ if (IS_ERR(dir))
+ return PTR_ERR(dir);
+
+ return 0;
+}
+
+late_initcall(fail_usercopy_debugfs);
+
+#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
+
+bool should_fail_usercopy(void)
+{
+ return should_fail(&fail_usercopy.attr, 1);
+}
+EXPORT_SYMBOL_GPL(should_fail_usercopy);
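
A sketch of how a consumer is expected to use this hook (the real call sites land in the core uaccess helpers elsewhere in the series; the wrapper below is illustrative only):

	static unsigned long
	checked_copy_from_user(void *to, const void __user *from, unsigned long n)
	{
		/* when the fault attribute fires, report that nothing was
		 * copied, exactly as a faulting user access would */
		if (should_fail_usercopy())
			return n;
		return copy_from_user(to, from, n);
	}
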
diff --git a/lib/find_bit.c b/lib/find_bit.c
index 49f875f1baf7..4a8751010d59 100644
--- a/lib/find_bit.c
+++ b/lib/find_bit.c
@@ -16,6 +16,7 @@
#include <linux/bitmap.h>
#include <linux/export.h>
#include <linux/kernel.h>
+#include <linux/minmax.h>
#if !defined(find_next_bit) || !defined(find_next_zero_bit) || \
!defined(find_next_bit_le) || !defined(find_next_zero_bit_le) || \
diff --git a/lib/fonts/Kconfig b/lib/fonts/Kconfig
index 37baa79cdd71..c035fde66aeb 100644
--- a/lib/fonts/Kconfig
+++ b/lib/fonts/Kconfig
@@ -119,6 +119,12 @@ config FONT_TER16x32
This is the high resolution, large version for use with HiDPI screens.
If the standard font is unreadable for you, say Y, otherwise say N.
+config FONT_6x8
+ bool "OLED 6x8 font" if FONTS
+ depends on FRAMEBUFFER_CONSOLE
+ help
+ This font is useful for small displays (OLED).
+
config FONT_AUTOSELECT
def_bool y
depends on !FONT_8x8
@@ -132,6 +138,7 @@ config FONT_AUTOSELECT
depends on !FONT_SUN12x22
depends on !FONT_10x18
depends on !FONT_TER16x32
+ depends on !FONT_6x8
select FONT_8x16
endif # FONT_SUPPORT
diff --git a/lib/fonts/Makefile b/lib/fonts/Makefile
index ed95070860de..e16f68492174 100644
--- a/lib/fonts/Makefile
+++ b/lib/fonts/Makefile
@@ -15,6 +15,7 @@ font-objs-$(CONFIG_FONT_ACORN_8x8) += font_acorn_8x8.o
font-objs-$(CONFIG_FONT_MINI_4x6) += font_mini_4x6.o
font-objs-$(CONFIG_FONT_6x10) += font_6x10.o
font-objs-$(CONFIG_FONT_TER16x32) += font_ter16x32.o
+font-objs-$(CONFIG_FONT_6x8) += font_6x8.o
font-objs += $(font-objs-y)
diff --git a/lib/fonts/font_6x8.c b/lib/fonts/font_6x8.c
new file mode 100644
index 000000000000..700039a9ceae
--- /dev/null
+++ b/lib/fonts/font_6x8.c
@@ -0,0 +1,2576 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/font.h>
+
+#define FONTDATAMAX 2048
+
+static struct font_data fontdata_6x8 = {
+ { 0, 0, FONTDATAMAX, 0 }, {
+ /* 0 0x00 '^@' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 1 0x01 '^A' */
+ 0x78, /* 011110 */
+ 0x84, /* 100001 */
+ 0xCC, /* 110011 */
+ 0x84, /* 100001 */
+ 0xCC, /* 110011 */
+ 0xB4, /* 101101 */
+ 0x78, /* 011110 */
+ 0x00, /* 000000 */
+
+ /* 2 0x02 '^B' */
+ 0x78, /* 011110 */
+ 0xFC, /* 111111 */
+ 0xB4, /* 101101 */
+ 0xFC, /* 111111 */
+ 0xB4, /* 101101 */
+ 0xCC, /* 110011 */
+ 0x78, /* 011110 */
+ 0x00, /* 000000 */
+
+ /* 3 0x03 '^C' */
+ 0x00, /* 000000 */
+ 0x28, /* 001010 */
+ 0x7C, /* 011111 */
+ 0x7C, /* 011111 */
+ 0x38, /* 001110 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 4 0x04 '^D' */
+ 0x00, /* 000000 */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x7C, /* 011111 */
+ 0x38, /* 001110 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 5 0x05 '^E' */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x38, /* 001110 */
+ 0x6C, /* 011011 */
+ 0x6C, /* 011011 */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 6 0x06 '^F' */
+ 0x00, /* 000000 */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x7C, /* 011111 */
+ 0x7C, /* 011111 */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 7 0x07 '^G' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x30, /* 001100 */
+ 0x78, /* 011110 */
+ 0x30, /* 001100 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 8 0x08 '^H' */
+ 0xFC, /* 111111 */
+ 0xFC, /* 111111 */
+ 0xCC, /* 110011 */
+ 0x84, /* 100001 */
+ 0xCC, /* 110011 */
+ 0xFC, /* 111111 */
+ 0xFC, /* 111111 */
+ 0xFC, /* 111111 */
+
+ /* 9 0x09 '^I' */
+ 0x00, /* 000000 */
+ 0x30, /* 001100 */
+ 0x48, /* 010010 */
+ 0x84, /* 100001 */
+ 0x48, /* 010010 */
+ 0x30, /* 001100 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 10 0x0A '^J' */
+ 0xFC, /* 111111 */
+ 0xCC, /* 110011 */
+ 0xB4, /* 101101 */
+ 0x78, /* 011110 */
+ 0xB4, /* 101101 */
+ 0xCC, /* 110011 */
+ 0xFC, /* 111111 */
+ 0xFC, /* 111111 */
+
+ /* 11 0x0B '^K' */
+ 0x3C, /* 001111 */
+ 0x14, /* 000101 */
+ 0x20, /* 001000 */
+ 0x78, /* 011110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 12 0x0C '^L' */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+
+ /* 13 0x0D '^M' */
+ 0x18, /* 000110 */
+ 0x14, /* 000101 */
+ 0x14, /* 000101 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x70, /* 011100 */
+ 0x60, /* 011000 */
+ 0x00, /* 000000 */
+
+ /* 14 0x0E '^N' */
+ 0x3C, /* 001111 */
+ 0x24, /* 001001 */
+ 0x3C, /* 001111 */
+ 0x24, /* 001001 */
+ 0x24, /* 001001 */
+ 0x6C, /* 011011 */
+ 0x6C, /* 011011 */
+ 0x00, /* 000000 */
+
+ /* 15 0x0F '^O' */
+ 0x10, /* 000100 */
+ 0x54, /* 010101 */
+ 0x38, /* 001110 */
+ 0x6C, /* 011011 */
+ 0x38, /* 001110 */
+ 0x54, /* 010101 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+
+ /* 16 0x10 '^P' */
+ 0x40, /* 010000 */
+ 0x60, /* 011000 */
+ 0x70, /* 011100 */
+ 0x78, /* 011110 */
+ 0x70, /* 011100 */
+ 0x60, /* 011000 */
+ 0x40, /* 010000 */
+ 0x00, /* 000000 */
+
+ /* 17 0x11 '^Q' */
+ 0x04, /* 000001 */
+ 0x0C, /* 000011 */
+ 0x1C, /* 000111 */
+ 0x3C, /* 001111 */
+ 0x1C, /* 000111 */
+ 0x0C, /* 000011 */
+ 0x04, /* 000001 */
+ 0x00, /* 000000 */
+
+ /* 18 0x12 '^R' */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x54, /* 010101 */
+ 0x10, /* 000100 */
+ 0x54, /* 010101 */
+ 0x38, /* 001110 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+
+ /* 19 0x13 '^S' */
+ 0x48, /* 010010 */
+ 0x48, /* 010010 */
+ 0x48, /* 010010 */
+ 0x48, /* 010010 */
+ 0x48, /* 010010 */
+ 0x00, /* 000000 */
+ 0x48, /* 010010 */
+ 0x00, /* 000000 */
+
+ /* 20 0x14 '^T' */
+ 0x3C, /* 001111 */
+ 0x54, /* 010101 */
+ 0x54, /* 010101 */
+ 0x3C, /* 001111 */
+ 0x14, /* 000101 */
+ 0x14, /* 000101 */
+ 0x14, /* 000101 */
+ 0x00, /* 000000 */
+
+ /* 21 0x15 '^U' */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x30, /* 001100 */
+ 0x28, /* 001010 */
+ 0x14, /* 000101 */
+ 0x0C, /* 000011 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+
+ /* 22 0x16 '^V' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0xF8, /* 111110 */
+ 0xF8, /* 111110 */
+ 0xF8, /* 111110 */
+ 0x00, /* 000000 */
+
+ /* 23 0x17 '^W' */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x54, /* 010101 */
+ 0x10, /* 000100 */
+ 0x54, /* 010101 */
+ 0x38, /* 001110 */
+ 0x10, /* 000100 */
+ 0x7C, /* 011111 */
+
+ /* 24 0x18 '^X' */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x54, /* 010101 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+
+ /* 25 0x19 '^Y' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x54, /* 010101 */
+ 0x38, /* 001110 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+
+ /* 26 0x1A '^Z' */
+ 0x00, /* 000000 */
+ 0x10, /* 000100 */
+ 0x08, /* 000010 */
+ 0x7C, /* 011111 */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 27 0x1B '^[' */
+ 0x00, /* 000000 */
+ 0x10, /* 000100 */
+ 0x20, /* 001000 */
+ 0x7C, /* 011111 */
+ 0x20, /* 001000 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 28 0x1C '^\' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x78, /* 011110 */
+ 0x00, /* 000000 */
+
+ /* 29 0x1D '^]' */
+ 0x00, /* 000000 */
+ 0x48, /* 010010 */
+ 0x84, /* 100001 */
+ 0xFC, /* 111111 */
+ 0x84, /* 100001 */
+ 0x48, /* 010010 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 30 0x1E '^^' */
+ 0x00, /* 000000 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x38, /* 001110 */
+ 0x7C, /* 011111 */
+ 0x7C, /* 011111 */
+ 0x00, /* 000000 */
+
+ /* 31 0x1F '^_' */
+ 0x00, /* 000000 */
+ 0x7C, /* 011111 */
+ 0x7C, /* 011111 */
+ 0x38, /* 001110 */
+ 0x38, /* 001110 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+
+ /* 32 0x20 ' ' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 33 0x21 '!' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+
+ /* 34 0x22 '"' */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 35 0x23 '#' */
+ 0x00, /* 000000 */
+ 0x28, /* 001010 */
+ 0x7C, /* 011111 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x7C, /* 011111 */
+ 0x28, /* 001010 */
+ 0x00, /* 000000 */
+
+ /* 36 0x24 '$' */
+ 0x10, /* 000000 */
+ 0x38, /* 001000 */
+ 0x40, /* 010000 */
+ 0x30, /* 001000 */
+ 0x08, /* 000000 */
+ 0x70, /* 011000 */
+ 0x20, /* 001000 */
+ 0x00, /* 000000 */
+
+ /* 37 0x25 '%' */
+ 0x64, /* 011001 */
+ 0x64, /* 011001 */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x20, /* 001000 */
+ 0x4C, /* 010011 */
+ 0x4C, /* 010011 */
+ 0x00, /* 000000 */
+
+ /* 38 0x26 '&' */
+ 0x30, /* 001100 */
+ 0x48, /* 010010 */
+ 0x50, /* 010100 */
+ 0x20, /* 001000 */
+ 0x54, /* 010101 */
+ 0x48, /* 010010 */
+ 0x34, /* 001101 */
+ 0x00, /* 000000 */
+
+ /* 39 0x27 ''' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 40 0x28 '(' */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x20, /* 001000 */
+ 0x20, /* 001000 */
+ 0x20, /* 001000 */
+ 0x10, /* 000100 */
+ 0x08, /* 000010 */
+ 0x00, /* 000000 */
+
+ /* 41 0x29 ')' */
+ 0x20, /* 001000 */
+ 0x10, /* 000100 */
+ 0x08, /* 000010 */
+ 0x08, /* 000010 */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x20, /* 001000 */
+ 0x00, /* 000000 */
+
+ /* 42 0x2A '*' */
+ 0x10, /* 000100 */
+ 0x54, /* 010101 */
+ 0x38, /* 001110 */
+ 0x54, /* 010101 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 43 0x2B '+' */
+ 0x00, /* 000000 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x7C, /* 011111 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 44 0x2C ',' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x30, /* 001100 */
+ 0x30, /* 001100 */
+ 0x20, /* 001000 */
+
+ /* 45 0x2D '-' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x7C, /* 011111 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 46 0x2E '.' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x18, /* 000110 */
+ 0x18, /* 000110 */
+ 0x00, /* 000000 */
+
+ /* 47 0x2F '/' */
+ 0x04, /* 000001 */
+ 0x08, /* 000010 */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x20, /* 001000 */
+ 0x20, /* 001000 */
+ 0x40, /* 010000 */
+
+ /* 48 0x30 '0' */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x4C, /* 010011 */
+ 0x54, /* 010101 */
+ 0x64, /* 011001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 49 0x31 '1' */
+ 0x10, /* 000100 */
+ 0x30, /* 001100 */
+ 0x50, /* 010100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x7C, /* 011111 */
+ 0x00, /* 000000 */
+
+ /* 50 0x32 '2' */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x04, /* 000001 */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x20, /* 001000 */
+ 0x7C, /* 011111 */
+ 0x00, /* 000000 */
+
+ /* 51 0x33 '3' */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x04, /* 000001 */
+ 0x18, /* 000110 */
+ 0x04, /* 000001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 52 0x34 '4' */
+ 0x08, /* 000010 */
+ 0x18, /* 000110 */
+ 0x28, /* 001010 */
+ 0x48, /* 010010 */
+ 0x7C, /* 011111 */
+ 0x08, /* 000010 */
+ 0x08, /* 000010 */
+ 0x00, /* 000000 */
+
+ /* 53 0x35 '5' */
+ 0x7C, /* 011111 */
+ 0x40, /* 010000 */
+ 0x78, /* 011110 */
+ 0x04, /* 000001 */
+ 0x04, /* 000001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 54 0x36 '6' */
+ 0x18, /* 000110 */
+ 0x20, /* 001000 */
+ 0x40, /* 010000 */
+ 0x78, /* 011110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 55 0x37 '7' */
+ 0x7C, /* 011111 */
+ 0x04, /* 000001 */
+ 0x04, /* 000001 */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+
+ /* 56 0x38 '8' */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 57 0x39 '9' */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x3C, /* 001111 */
+ 0x04, /* 000001 */
+ 0x08, /* 000010 */
+ 0x30, /* 001100 */
+ 0x00, /* 000000 */
+
+ /* 58 0x3A ':' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x18, /* 000110 */
+ 0x18, /* 000110 */
+ 0x00, /* 000000 */
+ 0x18, /* 000110 */
+ 0x18, /* 000110 */
+ 0x00, /* 000000 */
+
+ /* 59 0x3B ';' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x30, /* 001100 */
+ 0x30, /* 001100 */
+ 0x00, /* 000000 */
+ 0x30, /* 001100 */
+ 0x30, /* 001100 */
+ 0x20, /* 001000 */
+
+ /* 60 0x3C '<' */
+ 0x04, /* 000001 */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x20, /* 001000 */
+ 0x10, /* 000100 */
+ 0x08, /* 000010 */
+ 0x04, /* 000001 */
+ 0x00, /* 000000 */
+
+ /* 61 0x3D '=' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x7C, /* 011111 */
+ 0x00, /* 000000 */
+ 0x7C, /* 011111 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 62 0x3E '>' */
+ 0x20, /* 001000 */
+ 0x10, /* 000100 */
+ 0x08, /* 000010 */
+ 0x04, /* 000001 */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x20, /* 001000 */
+ 0x00, /* 000000 */
+
+ /* 63 0x3F '?' */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x04, /* 000001 */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+
+ /* 64 0x40 '@' */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x5C, /* 010111 */
+ 0x54, /* 010101 */
+ 0x5C, /* 010111 */
+ 0x40, /* 010000 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 65 0x41 'A' */
+ 0x10, /* 000100 */
+ 0x28, /* 001010 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x7C, /* 011111 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x00, /* 000000 */
+
+ /* 66 0x42 'B' */
+ 0x78, /* 011110 */
+ 0x24, /* 001001 */
+ 0x24, /* 001001 */
+ 0x38, /* 001110 */
+ 0x24, /* 001001 */
+ 0x24, /* 001001 */
+ 0x78, /* 011110 */
+ 0x00, /* 000000 */
+
+ /* 67 0x43 'C' */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 68 0x44 'D' */
+ 0x78, /* 011110 */
+ 0x24, /* 001001 */
+ 0x24, /* 001001 */
+ 0x24, /* 001001 */
+ 0x24, /* 001001 */
+ 0x24, /* 001001 */
+ 0x78, /* 011110 */
+ 0x00, /* 000000 */
+
+ /* 69 0x45 'E' */
+ 0x7C, /* 011111 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x78, /* 011110 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x7C, /* 011111 */
+ 0x00, /* 000000 */
+
+ /* 70 0x46 'F' */
+ 0x7C, /* 011111 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x78, /* 011110 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x00, /* 000000 */
+
+ /* 71 0x47 'G' */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x40, /* 010000 */
+ 0x5C, /* 010111 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 72 0x48 'H' */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x7C, /* 011111 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x00, /* 000000 */
+
+ /* 73 0x49 'I' */
+ 0x38, /* 001110 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 74 0x4A 'J' */
+ 0x1C, /* 000111 */
+ 0x08, /* 000010 */
+ 0x08, /* 000010 */
+ 0x08, /* 000010 */
+ 0x48, /* 010010 */
+ 0x48, /* 010010 */
+ 0x30, /* 001100 */
+ 0x00, /* 000000 */
+
+ /* 75 0x4B 'K' */
+ 0x44, /* 010001 */
+ 0x48, /* 010010 */
+ 0x50, /* 010100 */
+ 0x60, /* 011000 */
+ 0x50, /* 010100 */
+ 0x48, /* 010010 */
+ 0x44, /* 010001 */
+ 0x00, /* 000000 */
+
+ /* 76 0x4C 'L' */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x7C, /* 011111 */
+ 0x00, /* 000000 */
+
+ /* 77 0x4D 'M' */
+ 0x44, /* 010001 */
+ 0x6C, /* 011011 */
+ 0x54, /* 010101 */
+ 0x54, /* 010101 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x00, /* 000000 */
+
+ /* 78 0x4E 'N' */
+ 0x44, /* 010001 */
+ 0x64, /* 011001 */
+ 0x54, /* 010101 */
+ 0x4C, /* 010011 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x00, /* 000000 */
+
+ /* 79 0x4F 'O' */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 80 0x50 'P' */
+ 0x78, /* 011110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x78, /* 011110 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x00, /* 000000 */
+
+ /* 81 0x51 'Q' */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x54, /* 010101 */
+ 0x48, /* 010010 */
+ 0x34, /* 001101 */
+ 0x00, /* 000000 */
+
+ /* 82 0x52 'R' */
+ 0x78, /* 011110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x78, /* 011110 */
+ 0x50, /* 010100 */
+ 0x48, /* 010010 */
+ 0x44, /* 010001 */
+ 0x00, /* 000000 */
+
+ /* 83 0x53 'S' */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x40, /* 010000 */
+ 0x38, /* 001110 */
+ 0x04, /* 000001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 84 0x54 'T' */
+ 0x7C, /* 011111 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+
+ /* 85 0x55 'U' */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 86 0x56 'V' */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x28, /* 001010 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+
+ /* 87 0x57 'W' */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x54, /* 010101 */
+ 0x54, /* 010101 */
+ 0x6C, /* 011011 */
+ 0x44, /* 010001 */
+ 0x00, /* 000000 */
+
+ /* 88 0x58 'X' */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x28, /* 001010 */
+ 0x10, /* 000100 */
+ 0x28, /* 001010 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x00, /* 000000 */
+
+ /* 89 0x59 'Y' */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x28, /* 001010 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+
+ /* 90 0x5A 'Z' */
+ 0x7C, /* 011111 */
+ 0x04, /* 000001 */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x20, /* 001000 */
+ 0x40, /* 010000 */
+ 0x7C, /* 011111 */
+ 0x00, /* 000000 */
+
+ /* 91 0x5B '[' */
+ 0x18, /* 000110 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x18, /* 000110 */
+ 0x00, /* 000000 */
+
+ /* 92 0x5C '\' */
+ 0x40, /* 010000 */
+ 0x20, /* 001000 */
+ 0x20, /* 001000 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x08, /* 000010 */
+ 0x08, /* 000010 */
+ 0x04, /* 000001 */
+
+ /* 93 0x5D ']' */
+ 0x30, /* 001100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x30, /* 001100 */
+ 0x00, /* 000000 */
+
+ /* 94 0x5E '^' */
+ 0x10, /* 000100 */
+ 0x28, /* 001010 */
+ 0x44, /* 010001 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 95 0x5F '_' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x7C, /* 011111 */
+
+ /* 96 0x60 '`' */
+ 0x20, /* 001000 */
+ 0x10, /* 000100 */
+ 0x08, /* 000010 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 97 0x61 'a' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x04, /* 000001 */
+ 0x3C, /* 001111 */
+ 0x44, /* 010001 */
+ 0x3C, /* 001111 */
+ 0x00, /* 000000 */
+
+ /* 98 0x62 'b' */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x58, /* 010110 */
+ 0x64, /* 011001 */
+ 0x44, /* 010001 */
+ 0x64, /* 011001 */
+ 0x58, /* 010110 */
+ 0x00, /* 000000 */
+
+ /* 99 0x63 'c' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x40, /* 010000 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 100 0x64 'd' */
+ 0x04, /* 000001 */
+ 0x04, /* 000001 */
+ 0x34, /* 001101 */
+ 0x4C, /* 010011 */
+ 0x44, /* 010001 */
+ 0x4C, /* 010011 */
+ 0x34, /* 001101 */
+ 0x00, /* 000000 */
+
+ /* 101 0x65 'e' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x7C, /* 011111 */
+ 0x40, /* 010000 */
+ 0x3C, /* 001111 */
+ 0x00, /* 000000 */
+
+ /* 102 0x66 'f' */
+ 0x0C, /* 000011 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+
+ /* 103 0x67 'g' */
+ 0x00, /* 000000 */
+ 0x34, /* 001101 */
+ 0x4C, /* 010011 */
+ 0x44, /* 010001 */
+ 0x4C, /* 010011 */
+ 0x34, /* 001101 */
+ 0x04, /* 000001 */
+ 0x38, /* 001110 */
+
+ /* 104 0x68 'h' */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x78, /* 011110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x00, /* 000000 */
+
+ /* 105 0x69 'i' */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x30, /* 001100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 106 0x6A 'j' */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x30, /* 001100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x60, /* 011000 */
+
+ /* 107 0x6B 'k' */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x48, /* 010010 */
+ 0x50, /* 010100 */
+ 0x70, /* 011100 */
+ 0x48, /* 010010 */
+ 0x44, /* 010001 */
+ 0x00, /* 000000 */
+
+ /* 108 0x6C 'l' */
+ 0x30, /* 001100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 109 0x6D 'm' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x68, /* 011010 */
+ 0x54, /* 010101 */
+ 0x54, /* 010101 */
+ 0x54, /* 010101 */
+ 0x54, /* 010101 */
+ 0x00, /* 000000 */
+
+ /* 110 0x6E 'n' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x58, /* 010110 */
+ 0x64, /* 011001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x00, /* 000000 */
+
+ /* 111 0x6F 'o' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 112 0x70 'p' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x78, /* 011110 */
+ 0x44, /* 010001 */
+ 0x64, /* 011001 */
+ 0x58, /* 010110 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+
+ /* 113 0x71 'q' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x3C, /* 001111 */
+ 0x44, /* 010001 */
+ 0x4C, /* 010011 */
+ 0x34, /* 001101 */
+ 0x04, /* 000001 */
+ 0x04, /* 000001 */
+
+ /* 114 0x72 'r' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x58, /* 010110 */
+ 0x64, /* 011001 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x00, /* 000000 */
+
+ /* 115 0x73 's' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x3C, /* 001111 */
+ 0x40, /* 010000 */
+ 0x38, /* 001110 */
+ 0x04, /* 000001 */
+ 0x78, /* 011110 */
+ 0x00, /* 000000 */
+
+ /* 116 0x74 't' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x0C, /* 000011 */
+ 0x00, /* 000000 */
+
+ /* 117 0x75 'u' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x4C, /* 010011 */
+ 0x34, /* 001101 */
+ 0x00, /* 000000 */
+
+ /* 118 0x76 'v' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x28, /* 001010 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+
+ /* 119 0x77 'w' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x54, /* 010101 */
+ 0x54, /* 010101 */
+ 0x54, /* 010101 */
+ 0x54, /* 010101 */
+ 0x28, /* 001010 */
+ 0x00, /* 000000 */
+
+ /* 120 0x78 'x' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x44, /* 010001 */
+ 0x28, /* 001010 */
+ 0x10, /* 000100 */
+ 0x28, /* 001010 */
+ 0x44, /* 010001 */
+ 0x00, /* 000000 */
+
+ /* 121 0x79 'y' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x3C, /* 001111 */
+ 0x04, /* 000001 */
+ 0x38, /* 001110 */
+
+ /* 122 0x7A 'z' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x7C, /* 011111 */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x20, /* 001000 */
+ 0x7C, /* 011111 */
+ 0x00, /* 000000 */
+
+ /* 123 0x7B '{' */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x20, /* 001000 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x08, /* 000010 */
+ 0x00, /* 000000 */
+
+ /* 124 0x7C '|' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+
+ /* 125 0x7D '}' */
+ 0x20, /* 001000 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x20, /* 001000 */
+ 0x00, /* 000000 */
+
+ /* 126 0x7E '~' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x20, /* 001000 */
+ 0x54, /* 010101 */
+ 0x08, /* 000010 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 127 0x7F '\177' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x10, /* 000100 */
+ 0x28, /* 001010 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x7C, /* 011111 */
+ 0x00, /* 000000 */
+
+ /* 128 0x80 '\200' */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x40, /* 010000 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x10, /* 000100 */
+ 0x20, /* 001000 */
+
+ /* 129 0x81 '\201' */
+ 0x00, /* 000000 */
+ 0x28, /* 001010 */
+ 0x00, /* 000000 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x4C, /* 010011 */
+ 0x34, /* 001101 */
+ 0x00, /* 000000 */
+
+ /* 130 0x82 '\202' */
+ 0x18, /* 000110 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x7C, /* 011111 */
+ 0x40, /* 010000 */
+ 0x3C, /* 001111 */
+ 0x00, /* 000000 */
+
+ /* 131 0x83 '\203' */
+ 0x18, /* 000110 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x04, /* 000001 */
+ 0x3C, /* 001111 */
+ 0x44, /* 010001 */
+ 0x3C, /* 001111 */
+ 0x00, /* 000000 */
+
+ /* 132 0x84 '\204' */
+ 0x28, /* 001010 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x04, /* 000001 */
+ 0x3C, /* 001111 */
+ 0x44, /* 010001 */
+ 0x3C, /* 001111 */
+ 0x00, /* 000000 */
+
+ /* 133 0x85 '\205' */
+ 0x18, /* 000110 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x04, /* 000001 */
+ 0x3C, /* 001111 */
+ 0x44, /* 010001 */
+ 0x3C, /* 001111 */
+ 0x00, /* 000000 */
+
+ /* 134 0x86 '\206' */
+ 0x3C, /* 001111 */
+ 0x18, /* 000110 */
+ 0x38, /* 001110 */
+ 0x04, /* 000001 */
+ 0x3C, /* 001111 */
+ 0x44, /* 010001 */
+ 0x3C, /* 001111 */
+ 0x00, /* 000000 */
+
+ /* 135 0x87 '\207' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x40, /* 010000 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x10, /* 000100 */
+
+ /* 136 0x88 '\210' */
+ 0x18, /* 000110 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x7C, /* 011111 */
+ 0x40, /* 010000 */
+ 0x3C, /* 001111 */
+ 0x00, /* 000000 */
+
+ /* 137 0x89 '\211' */
+ 0x28, /* 001010 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x7C, /* 011111 */
+ 0x40, /* 010000 */
+ 0x3C, /* 001111 */
+ 0x00, /* 000000 */
+
+ /* 138 0x8A '\212' */
+ 0x18, /* 000110 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x7C, /* 011111 */
+ 0x40, /* 010000 */
+ 0x3C, /* 001111 */
+ 0x00, /* 000000 */
+
+ /* 139 0x8B '\213' */
+ 0x28, /* 001010 */
+ 0x00, /* 000000 */
+ 0x30, /* 001100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 140 0x8C '\214' */
+ 0x18, /* 000110 */
+ 0x00, /* 000000 */
+ 0x30, /* 001100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 141 0x8D '\215' */
+ 0x18, /* 000110 */
+ 0x00, /* 000000 */
+ 0x30, /* 001100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 142 0x8E '\216' */
+ 0x44, /* 010001 */
+ 0x10, /* 000100 */
+ 0x28, /* 001010 */
+ 0x44, /* 010001 */
+ 0x7C, /* 011111 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x00, /* 000000 */
+
+ /* 143 0x8F '\217' */
+ 0x30, /* 001100 */
+ 0x48, /* 010010 */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x7C, /* 011111 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x00, /* 000000 */
+
+ /* 144 0x90 '\220' */
+ 0x10, /* 000100 */
+ 0x7C, /* 011111 */
+ 0x40, /* 010000 */
+ 0x78, /* 011110 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x7C, /* 011111 */
+ 0x00, /* 000000 */
+
+ /* 145 0x91 '\221' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x78, /* 011110 */
+ 0x14, /* 000101 */
+ 0x7C, /* 011111 */
+ 0x50, /* 010100 */
+ 0x3C, /* 001111 */
+ 0x00, /* 000000 */
+
+ /* 146 0x92 '\222' */
+ 0x3C, /* 001111 */
+ 0x50, /* 010100 */
+ 0x50, /* 010100 */
+ 0x78, /* 011110 */
+ 0x50, /* 010100 */
+ 0x50, /* 010100 */
+ 0x5C, /* 010111 */
+ 0x00, /* 000000 */
+
+ /* 147 0x93 '\223' */
+ 0x18, /* 000110 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 148 0x94 '\224' */
+ 0x28, /* 001010 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 149 0x95 '\225' */
+ 0x18, /* 000110 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 150 0x96 '\226' */
+ 0x10, /* 000100 */
+ 0x28, /* 001010 */
+ 0x00, /* 000000 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x4C, /* 010011 */
+ 0x34, /* 001101 */
+ 0x00, /* 000000 */
+
+ /* 151 0x97 '\227' */
+ 0x20, /* 001000 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x4C, /* 010011 */
+ 0x34, /* 001101 */
+ 0x00, /* 000000 */
+
+ /* 152 0x98 '\230' */
+ 0x28, /* 001010 */
+ 0x00, /* 000000 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x3C, /* 001111 */
+ 0x04, /* 000001 */
+ 0x38, /* 001110 */
+
+ /* 153 0x99 '\231' */
+ 0x84, /* 100001 */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 154 0x9A '\232' */
+ 0x88, /* 100010 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 155 0x9B '\233' */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x54, /* 010101 */
+ 0x50, /* 010100 */
+ 0x54, /* 010101 */
+ 0x38, /* 001110 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+
+ /* 156 0x9C '\234' */
+ 0x30, /* 001100 */
+ 0x48, /* 010010 */
+ 0x40, /* 010000 */
+ 0x70, /* 011100 */
+ 0x40, /* 010000 */
+ 0x44, /* 010001 */
+ 0x78, /* 011110 */
+ 0x00, /* 000000 */
+
+ /* 157 0x9D '\235' */
+ 0x44, /* 010001 */
+ 0x28, /* 001010 */
+ 0x7C, /* 011111 */
+ 0x10, /* 000100 */
+ 0x7C, /* 011111 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+
+ /* 158 0x9E '\236' */
+ 0x70, /* 011100 */
+ 0x48, /* 010010 */
+ 0x70, /* 011100 */
+ 0x48, /* 010010 */
+ 0x5C, /* 010111 */
+ 0x48, /* 010010 */
+ 0x44, /* 010001 */
+ 0x00, /* 000000 */
+
+ /* 159 0x9F '\237' */
+ 0x0C, /* 000011 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x60, /* 011000 */
+ 0x00, /* 000000 */
+
+ /* 160 0xA0 '\240' */
+ 0x18, /* 000110 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x04, /* 000001 */
+ 0x3C, /* 001111 */
+ 0x44, /* 010001 */
+ 0x3C, /* 001111 */
+ 0x00, /* 000000 */
+
+ /* 161 0xA1 '\241' */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x30, /* 001100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 162 0xA2 '\242' */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 163 0xA3 '\243' */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x4C, /* 010011 */
+ 0x34, /* 001101 */
+ 0x00, /* 000000 */
+
+ /* 164 0xA4 '\244' */
+ 0x34, /* 001101 */
+ 0x58, /* 010110 */
+ 0x00, /* 000000 */
+ 0x58, /* 010110 */
+ 0x64, /* 011001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x00, /* 000000 */
+
+ /* 165 0xA5 '\245' */
+ 0x58, /* 010110 */
+ 0x44, /* 010001 */
+ 0x64, /* 011001 */
+ 0x54, /* 010101 */
+ 0x4C, /* 010011 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x00, /* 000000 */
+
+ /* 166 0xA6 '\246' */
+ 0x38, /* 001110 */
+ 0x04, /* 000001 */
+ 0x3C, /* 001111 */
+ 0x44, /* 010001 */
+ 0x3C, /* 001111 */
+ 0x00, /* 000000 */
+ 0x7C, /* 011111 */
+ 0x00, /* 000000 */
+
+ /* 167 0xA7 '\247' */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+ 0x7C, /* 011111 */
+ 0x00, /* 000000 */
+
+ /* 168 0xA8 '\250' */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x10, /* 000100 */
+ 0x20, /* 001000 */
+ 0x40, /* 010000 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 169 0xA9 '\251' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x7C, /* 011111 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 170 0xAA '\252' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x7C, /* 011111 */
+ 0x04, /* 000001 */
+ 0x04, /* 000001 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 171 0xAB '\253' */
+ 0x20, /* 001000 */
+ 0x24, /* 001001 */
+ 0x28, /* 001010 */
+ 0x10, /* 000100 */
+ 0x28, /* 001010 */
+ 0x44, /* 010001 */
+ 0x08, /* 000010 */
+ 0x1C, /* 000111 */
+
+ /* 172 0xAC '\254' */
+ 0x20, /* 001000 */
+ 0x24, /* 001001 */
+ 0x28, /* 001010 */
+ 0x10, /* 000100 */
+ 0x28, /* 001010 */
+ 0x58, /* 010110 */
+ 0x3C, /* 001111 */
+ 0x08, /* 000010 */
+
+ /* 173 0xAD '\255' */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+
+ /* 174 0xAE '\256' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x24, /* 001001 */
+ 0x48, /* 010010 */
+ 0x90, /* 100100 */
+ 0x48, /* 010010 */
+ 0x24, /* 001001 */
+ 0x00, /* 000000 */
+
+ /* 175 0xAF '\257' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x90, /* 100100 */
+ 0x48, /* 010010 */
+ 0x24, /* 001001 */
+ 0x48, /* 010010 */
+ 0x90, /* 100100 */
+ 0x00, /* 000000 */
+
+ /* 176 0xB0 '\260' */
+ 0x10, /* 000100 */
+ 0x44, /* 010001 */
+ 0x10, /* 000100 */
+ 0x44, /* 010001 */
+ 0x10, /* 000100 */
+ 0x44, /* 010001 */
+ 0x10, /* 000100 */
+ 0x44, /* 010001 */
+
+ /* 177 0xB1 '\261' */
+ 0xA8, /* 101010 */
+ 0x54, /* 010101 */
+ 0xA8, /* 101010 */
+ 0x54, /* 010101 */
+ 0xA8, /* 101010 */
+ 0x54, /* 010101 */
+ 0xA8, /* 101010 */
+ 0x54, /* 010101 */
+
+ /* 178 0xB2 '\262' */
+ 0xDC, /* 110111 */
+ 0x74, /* 011101 */
+ 0xDC, /* 110111 */
+ 0x74, /* 011101 */
+ 0xDC, /* 110111 */
+ 0x74, /* 011101 */
+ 0xDC, /* 110111 */
+ 0x74, /* 011101 */
+
+ /* 179 0xB3 '\263' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+
+ /* 180 0xB4 '\264' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0xF0, /* 111100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+
+ /* 181 0xB5 '\265' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0xF0, /* 111100 */
+ 0x10, /* 000100 */
+ 0xF0, /* 111100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+
+ /* 182 0xB6 '\266' */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0xE8, /* 111010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+
+ /* 183 0xB7 '\267' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0xF8, /* 111110 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+
+ /* 184 0xB8 '\270' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0xF0, /* 111100 */
+ 0x10, /* 000100 */
+ 0xF0, /* 111100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+
+ /* 185 0xB9 '\271' */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0xE8, /* 111010 */
+ 0x08, /* 000010 */
+ 0xE8, /* 111010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+
+ /* 186 0xBA '\272' */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+
+ /* 187 0xBB '\273' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0xF8, /* 111110 */
+ 0x08, /* 000010 */
+ 0xE8, /* 111010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+
+ /* 188 0xBC '\274' */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0xE8, /* 111010 */
+ 0x08, /* 000010 */
+ 0xF8, /* 111110 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 189 0xBD '\275' */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0xF8, /* 111110 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 190 0xBE '\276' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0xF0, /* 111100 */
+ 0x10, /* 000100 */
+ 0xF0, /* 111100 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 191 0xBF '\277' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0xF0, /* 111100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+
+ /* 192 0xC0 '\300' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x1C, /* 000111 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 193 0xC1 '\301' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0xFC, /* 111111 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 194 0xC2 '\302' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0xFC, /* 111111 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+
+ /* 195 0xC3 '\303' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x1C, /* 000111 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+
+ /* 196 0xC4 '\304' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0xFC, /* 111111 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 197 0xC5 '\305' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0xFC, /* 111111 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+
+ /* 198 0xC6 '\306' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x1C, /* 000111 */
+ 0x10, /* 000100 */
+ 0x1C, /* 000111 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+
+ /* 199 0xC7 '\307' */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x2C, /* 001011 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+
+ /* 200 0xC8 '\310' */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x2C, /* 001011 */
+ 0x20, /* 001000 */
+ 0x3C, /* 001111 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 201 0xC9 '\311' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x3C, /* 001111 */
+ 0x20, /* 001000 */
+ 0x2C, /* 001011 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+
+ /* 202 0xCA '\312' */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0xEC, /* 111011 */
+ 0x00, /* 000000 */
+ 0xFC, /* 111111 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 203 0xCB '\313' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0xFC, /* 111111 */
+ 0x00, /* 000000 */
+ 0xEC, /* 111011 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+
+ /* 204 0xCC '\314' */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x2C, /* 001011 */
+ 0x20, /* 001000 */
+ 0x2C, /* 001011 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+
+ /* 205 0xCD '\315' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0xFC, /* 111111 */
+ 0x00, /* 000000 */
+ 0xFC, /* 111111 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 206 0xCE '\316' */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0xEC, /* 111011 */
+ 0x00, /* 000000 */
+ 0xEC, /* 111011 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+
+ /* 207 0xCF '\317' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0xFC, /* 111111 */
+ 0x00, /* 000000 */
+ 0xFC, /* 111111 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 208 0xD0 '\320' */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0xFC, /* 111111 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 209 0xD1 '\321' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0xFC, /* 111111 */
+ 0x00, /* 000000 */
+ 0xFC, /* 111111 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+
+ /* 210 0xD2 '\322' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0xFC, /* 111111 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+
+ /* 211 0xD3 '\323' */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x3C, /* 001111 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 212 0xD4 '\324' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x1C, /* 000111 */
+ 0x10, /* 000100 */
+ 0x1C, /* 000111 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 213 0xD5 '\325' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x1C, /* 000111 */
+ 0x10, /* 000100 */
+ 0x1C, /* 000111 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+
+ /* 214 0xD6 '\326' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x3C, /* 001111 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+
+ /* 215 0xD7 '\327' */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0xFC, /* 111111 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+
+ /* 216 0xD8 '\330' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0xFC, /* 111111 */
+ 0x10, /* 000100 */
+ 0xFC, /* 111111 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+
+ /* 217 0xD9 '\331' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0xF0, /* 111100 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 218 0xDA '\332' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x1C, /* 000111 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+
+ /* 219 0xDB '\333' */
+ 0xFC, /* 111111 */
+ 0xFC, /* 111111 */
+ 0xFC, /* 111111 */
+ 0xFC, /* 111111 */
+ 0xFC, /* 111111 */
+ 0xFC, /* 111111 */
+ 0xFC, /* 111111 */
+ 0xFC, /* 111111 */
+
+ /* 220 0xDC '\334' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0xFC, /* 111111 */
+ 0xFC, /* 111111 */
+ 0xFC, /* 111111 */
+ 0xFC, /* 111111 */
+
+ /* 221 0xDD '\335' */
+ 0xE0, /* 111000 */
+ 0xE0, /* 111000 */
+ 0xE0, /* 111000 */
+ 0xE0, /* 111000 */
+ 0xE0, /* 111000 */
+ 0xE0, /* 111000 */
+ 0xE0, /* 111000 */
+ 0xE0, /* 111000 */
+
+ /* 222 0xDE '\336' */
+ 0x1C, /* 000111 */
+ 0x1C, /* 000111 */
+ 0x1C, /* 000111 */
+ 0x1C, /* 000111 */
+ 0x1C, /* 000111 */
+ 0x1C, /* 000111 */
+ 0x1C, /* 000111 */
+ 0x1C, /* 000111 */
+
+ /* 223 0xDF '\337' */
+ 0xFC, /* 111111 */
+ 0xFC, /* 111111 */
+ 0xFC, /* 111111 */
+ 0xFC, /* 111111 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 224 0xE0 '\340' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x34, /* 001101 */
+ 0x48, /* 010010 */
+ 0x48, /* 010010 */
+ 0x48, /* 010010 */
+ 0x34, /* 001101 */
+ 0x00, /* 000000 */
+
+ /* 225 0xE1 '\341' */
+ 0x24, /* 001001 */
+ 0x44, /* 010001 */
+ 0x48, /* 010010 */
+ 0x48, /* 010010 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x58, /* 010110 */
+ 0x40, /* 010000 */
+
+ /* 226 0xE2 '\342' */
+ 0x7C, /* 011111 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x00, /* 000000 */
+
+ /* 227 0xE3 '\343' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x7C, /* 011111 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x00, /* 000000 */
+
+ /* 228 0xE4 '\344' */
+ 0x7C, /* 011111 */
+ 0x24, /* 001001 */
+ 0x10, /* 000100 */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x24, /* 001001 */
+ 0x7C, /* 011111 */
+ 0x00, /* 000000 */
+
+ /* 229 0xE5 '\345' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x3C, /* 001111 */
+ 0x48, /* 010010 */
+ 0x48, /* 010010 */
+ 0x48, /* 010010 */
+ 0x30, /* 001100 */
+ 0x00, /* 000000 */
+
+ /* 230 0xE6 '\346' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x48, /* 010010 */
+ 0x48, /* 010010 */
+ 0x48, /* 010010 */
+ 0x48, /* 010010 */
+ 0x74, /* 011101 */
+ 0x40, /* 010000 */
+
+ /* 231 0xE7 '\347' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x7C, /* 011111 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x0C, /* 000011 */
+ 0x00, /* 000000 */
+
+ /* 232 0xE8 '\350' */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 233 0xE9 '\351' */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x7C, /* 011111 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 234 0xEA '\352' */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x28, /* 001010 */
+ 0x6C, /* 011011 */
+ 0x00, /* 000000 */
+
+ /* 235 0xEB '\353' */
+ 0x18, /* 000110 */
+ 0x20, /* 001000 */
+ 0x18, /* 000110 */
+ 0x24, /* 001001 */
+ 0x24, /* 001001 */
+ 0x24, /* 001001 */
+ 0x18, /* 000110 */
+ 0x00, /* 000000 */
+
+ /* 236 0xEC '\354' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x54, /* 010101 */
+ 0x54, /* 010101 */
+ 0x54, /* 010101 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 237 0xED '\355' */
+ 0x00, /* 000000 */
+ 0x04, /* 000001 */
+ 0x38, /* 001110 */
+ 0x54, /* 010101 */
+ 0x54, /* 010101 */
+ 0x38, /* 001110 */
+ 0x40, /* 010000 */
+ 0x00, /* 000000 */
+
+ /* 238 0xEE '\356' */
+ 0x3C, /* 001111 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x38, /* 001110 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x3C, /* 001111 */
+ 0x00, /* 000000 */
+
+ /* 239 0xEF '\357' */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x00, /* 000000 */
+
+ /* 240 0xF0 '\360' */
+ 0x00, /* 000000 */
+ 0xFC, /* 111111 */
+ 0x00, /* 000000 */
+ 0xFC, /* 111111 */
+ 0x00, /* 000000 */
+ 0xFC, /* 111111 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 241 0xF1 '\361' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x7C, /* 011111 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x7C, /* 011111 */
+ 0x00, /* 000000 */
+
+ /* 242 0xF2 '\362' */
+ 0x20, /* 001000 */
+ 0x10, /* 000100 */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x20, /* 001000 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 243 0xF3 '\363' */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x20, /* 001000 */
+ 0x10, /* 000100 */
+ 0x08, /* 000010 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 244 0xF4 '\364' */
+ 0x0C, /* 000011 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+
+ /* 245 0xF5 '\365' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x60, /* 011000 */
+
+ /* 246 0xF6 '\366' */
+ 0x00, /* 000000 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x7C, /* 011111 */
+ 0x00, /* 000000 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 247 0xF7 '\367' */
+ 0x00, /* 000000 */
+ 0x20, /* 001000 */
+ 0x54, /* 010101 */
+ 0x08, /* 000010 */
+ 0x20, /* 001000 */
+ 0x54, /* 010101 */
+ 0x08, /* 000010 */
+ 0x00, /* 000000 */
+
+ /* 248 0xF8 '\370' */
+ 0x30, /* 001100 */
+ 0x48, /* 010010 */
+ 0x48, /* 010010 */
+ 0x30, /* 001100 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 249 0xF9 '\371' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 250 0xFA '\372' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 251 0xFB '\373' */
+ 0x04, /* 000001 */
+ 0x08, /* 000010 */
+ 0x08, /* 000010 */
+ 0x50, /* 010100 */
+ 0x50, /* 010100 */
+ 0x20, /* 001000 */
+ 0x20, /* 001000 */
+ 0x00, /* 000000 */
+
+ /* 252 0xFC '\374' */
+ 0x60, /* 011000 */
+ 0x50, /* 010100 */
+ 0x50, /* 010100 */
+ 0x50, /* 010100 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 253 0xFD '\375' */
+ 0x60, /* 011000 */
+ 0x10, /* 000100 */
+ 0x20, /* 001000 */
+ 0x70, /* 011100 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 254 0xFE '\376' */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x38, /* 001110 */
+ 0x38, /* 001110 */
+ 0x38, /* 001110 */
+ 0x38, /* 001110 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 255 0xFF '\377' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+} };
+
+const struct font_desc font_6x8 = {
+ .idx = FONT6x8_IDX,
+ .name = "6x8",
+ .width = 6,
+ .height = 8,
+ .data = fontdata_6x8.data,
+ .pref = 0,
+};
diff --git a/lib/fonts/fonts.c b/lib/fonts/fonts.c
index e7258d8c252b..5f4b07b56cd9 100644
--- a/lib/fonts/fonts.c
+++ b/lib/fonts/fonts.c
@@ -57,6 +57,9 @@ static const struct font_desc *fonts[] = {
#ifdef CONFIG_FONT_TER16x32
&font_ter_16x32,
#endif
+#ifdef CONFIG_FONT_6x8
+ &font_6x8,
+#endif
};
#define num_fonts ARRAY_SIZE(fonts)
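
With the table entry above in place, the new font is discoverable at run
time. A minimal sketch, assuming CONFIG_FONT_6x8=y; find_font() is the
lookup helper exported from lib/fonts/fonts.c, and demo_font_lookup() is
an illustrative name only:

	#include <linux/font.h>
	#include <linux/printk.h>

	static void demo_font_lookup(void)
	{
		const struct font_desc *f = find_font("6x8");

		if (!f)
			return;		/* font not compiled in */

		/* one byte per row, 8 rows per glyph, MSB-first bits */
		pr_info("font %s: %dx%d, %d bytes per glyph\n",
			f->name, f->width, f->height, f->height);
	}
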
diff --git a/lib/hexdump.c b/lib/hexdump.c
index 147133f8eb2f..9301578f98e8 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -7,6 +7,7 @@
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/kernel.h>
+#include <linux/minmax.h>
#include <linux/export.h>
#include <asm/unaligned.h>
diff --git a/lib/idr.c b/lib/idr.c
index c2cf2c52bbde..f4ab4f4aa3c7 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -372,7 +372,8 @@ EXPORT_SYMBOL(idr_replace);
* Allocate an ID between @min and @max, inclusive. The allocated ID will
* not exceed %INT_MAX, even if @max is larger.
*
- * Context: Any context.
+ * Context: Any context. It is safe to call this function without
+ * locking in your code.
* Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
* or %-ENOSPC if there are no free IDs.
*/
@@ -470,6 +471,7 @@ alloc:
goto retry;
nospc:
xas_unlock_irqrestore(&xas, flags);
+ kfree(alloc);
return -ENOSPC;
}
EXPORT_SYMBOL(ida_alloc_range);
@@ -479,7 +481,8 @@ EXPORT_SYMBOL(ida_alloc_range);
* @ida: IDA handle.
* @id: Previously allocated ID.
*
- * Context: Any context.
+ * Context: Any context. It is safe to call this function without
+ * locking in your code.
*/
void ida_free(struct ida *ida, unsigned int id)
{
@@ -531,7 +534,8 @@ EXPORT_SYMBOL(ida_free);
* or freed. If the IDA is already empty, there is no need to call this
* function.
*
- * Context: Any context.
+ * Context: Any context. It is safe to call this function without
+ * locking in your code.
*/
void ida_destroy(struct ida *ida)
{
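
The hunk above plugs a leak of the preallocated bitmap on the -ENOSPC
path and, in the kernel-doc, spells out that the IDA API needs no
caller-side locking. A minimal sketch of that contract; the demo_*
names are illustrative:

	#include <linux/idr.h>

	static DEFINE_IDA(demo_ida);

	static int demo_get_id(void)
	{
		/* any context, no external lock, per the updated kernel-doc */
		int id = ida_alloc_range(&demo_ida, 0, 127, GFP_KERNEL);

		if (id < 0)
			return id;	/* -ENOMEM, or -ENOSPC without leaking */
		/* ... use id ... */
		ida_free(&demo_ida, id);
		return 0;
	}
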
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 5e40786c8f12..1635111c5bd2 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -2,11 +2,13 @@
#include <crypto/hash.h>
#include <linux/export.h>
#include <linux/bvec.h>
+#include <linux/fault-inject-usercopy.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
+#include <linux/compat.h>
#include <net/checksum.h>
#include <linux/scatterlist.h>
#include <linux/instrumented.h>
@@ -139,6 +141,8 @@
static int copyout(void __user *to, const void *from, size_t n)
{
+ if (should_fail_usercopy())
+ return n;
if (access_ok(to, n)) {
instrument_copy_to_user(to, from, n);
n = raw_copy_to_user(to, from, n);
@@ -148,6 +152,8 @@ static int copyout(void __user *to, const void *from, size_t n)
static int copyin(void *to, const void __user *from, size_t n)
{
+ if (should_fail_usercopy())
+ return n;
if (access_ok(from, n)) {
instrument_copy_from_user(to, from, n);
n = raw_copy_from_user(to, from, n);
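
Both hunks above gate user copies on should_fail_usercopy(); returning
n mimics a copy that made no progress, which is what copy_to_user() and
copy_from_user() report on a full fault. A sketch of the same pattern in
caller code, with demo_copy_to_user() as an illustrative name:

	#include <linux/fault-inject-usercopy.h>
	#include <linux/uaccess.h>

	static unsigned long demo_copy_to_user(void __user *to,
					       const void *from, size_t n)
	{
		if (should_fail_usercopy())
			return n;	/* behave as if nothing was copied */
		return copy_to_user(to, from, n);
	}
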
@@ -581,7 +587,7 @@ static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
__wsum sum, size_t off)
{
- __wsum next = csum_partial_copy_nocheck(from, to, len, 0);
+ __wsum next = csum_partial_copy_nocheck(from, to, len);
return csum_block_add(sum, next, off);
}
@@ -637,30 +643,30 @@ size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
}
EXPORT_SYMBOL(_copy_to_iter);
-#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE
-static int copyout_mcsafe(void __user *to, const void *from, size_t n)
+#ifdef CONFIG_ARCH_HAS_COPY_MC
+static int copyout_mc(void __user *to, const void *from, size_t n)
{
if (access_ok(to, n)) {
instrument_copy_to_user(to, from, n);
- n = copy_to_user_mcsafe((__force void *) to, from, n);
+ n = copy_mc_to_user((__force void *) to, from, n);
}
return n;
}
-static unsigned long memcpy_mcsafe_to_page(struct page *page, size_t offset,
+static unsigned long copy_mc_to_page(struct page *page, size_t offset,
const char *from, size_t len)
{
unsigned long ret;
char *to;
to = kmap_atomic(page);
- ret = memcpy_mcsafe(to + offset, from, len);
+ ret = copy_mc_to_kernel(to + offset, from, len);
kunmap_atomic(to);
return ret;
}
-static size_t copy_pipe_to_iter_mcsafe(const void *addr, size_t bytes,
+static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
struct iov_iter *i)
{
struct pipe_inode_info *pipe = i->pipe;
@@ -678,7 +684,7 @@ static size_t copy_pipe_to_iter_mcsafe(const void *addr, size_t bytes,
size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
unsigned long rem;
- rem = memcpy_mcsafe_to_page(pipe->bufs[i_head & p_mask].page,
+ rem = copy_mc_to_page(pipe->bufs[i_head & p_mask].page,
off, addr, chunk);
i->head = i_head;
i->iov_offset = off + chunk - rem;
@@ -695,18 +701,17 @@ static size_t copy_pipe_to_iter_mcsafe(const void *addr, size_t bytes,
}
/**
- * _copy_to_iter_mcsafe - copy to user with source-read error exception handling
+ * _copy_mc_to_iter - copy to iter with source memory error exception handling
* @addr: source kernel address
* @bytes: total transfer length
* @iter: destination iterator
*
- * The pmem driver arranges for filesystem-dax to use this facility via
- * dax_copy_to_iter() for protecting read/write to persistent memory.
- * Unless / until an architecture can guarantee identical performance
- * between _copy_to_iter_mcsafe() and _copy_to_iter() it would be a
- * performance regression to switch more users to the mcsafe version.
+ * The pmem driver deploys this for the dax operation
+ * (dax_copy_to_iter()) for dax reads, which bypass the page-cache and
+ * the block-layer. Upon #MC, read(2) aborts and returns either EIO or
+ * the number of bytes successfully copied.
*
- * Otherwise, the main differences between this and typical _copy_to_iter().
+ * The main differences between this and typical _copy_to_iter() are:
*
* * Typical tail/residue handling after a fault retries the copy
* byte-by-byte until the fault happens again. Re-triggering machine
@@ -717,23 +722,22 @@ static size_t copy_pipe_to_iter_mcsafe(const void *addr, size_t bytes,
* * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
* Compare to copy_to_iter() where only ITER_IOVEC attempts might return
* a short copy.
- *
- * See MCSAFE_TEST for self-test.
*/
-size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i)
+size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
const char *from = addr;
unsigned long rem, curr_addr, s_addr = (unsigned long) addr;
if (unlikely(iov_iter_is_pipe(i)))
- return copy_pipe_to_iter_mcsafe(addr, bytes, i);
+ return copy_mc_pipe_to_iter(addr, bytes, i);
if (iter_is_iovec(i))
might_fault();
iterate_and_advance(i, bytes, v,
- copyout_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
+ copyout_mc(v.iov_base, (from += v.iov_len) - v.iov_len,
+ v.iov_len),
({
- rem = memcpy_mcsafe_to_page(v.bv_page, v.bv_offset,
- (from += v.bv_len) - v.bv_len, v.bv_len);
+ rem = copy_mc_to_page(v.bv_page, v.bv_offset,
+ (from += v.bv_len) - v.bv_len, v.bv_len);
if (rem) {
curr_addr = (unsigned long) from;
bytes = curr_addr - s_addr - rem;
@@ -741,8 +745,8 @@ size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i)
}
}),
({
- rem = memcpy_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len,
- v.iov_len);
+ rem = copy_mc_to_kernel(v.iov_base, (from += v.iov_len)
+ - v.iov_len, v.iov_len);
if (rem) {
curr_addr = (unsigned long) from;
bytes = curr_addr - s_addr - rem;
@@ -753,8 +757,8 @@ size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i)
return bytes;
}
-EXPORT_SYMBOL_GPL(_copy_to_iter_mcsafe);
-#endif /* CONFIG_ARCH_HAS_UACCESS_MCSAFE */
+EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
+#endif /* CONFIG_ARCH_HAS_COPY_MC */
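
The short-copy contract described in the kernel-doc above, restated as a
hedged sketch; demo_mc_read() is illustrative, and copy_mc_to_kernel()
returns the number of bytes it could not copy when the source read takes
a machine check:

	static ssize_t demo_mc_read(void *dst, const void *src, size_t len)
	{
		unsigned long rem = copy_mc_to_kernel(dst, src, len);

		if (rem == len)
			return -EIO;		/* nothing was copied */
		return len - rem;		/* report partial progress */
	}
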
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
@@ -1449,15 +1453,14 @@ size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
return 0;
}
iterate_and_advance(i, bytes, v, ({
- int err = 0;
next = csum_and_copy_from_user(v.iov_base,
(to += v.iov_len) - v.iov_len,
- v.iov_len, 0, &err);
- if (!err) {
+ v.iov_len);
+ if (next) {
sum = csum_block_add(sum, next, off);
off += v.iov_len;
}
- err ? v.iov_len : 0;
+ next ? 0 : v.iov_len;
}), ({
char *p = kmap_atomic(v.bv_page);
sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
@@ -1491,11 +1494,10 @@ bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
if (unlikely(i->count < bytes))
return false;
iterate_all_kinds(i, bytes, v, ({
- int err = 0;
next = csum_and_copy_from_user(v.iov_base,
(to += v.iov_len) - v.iov_len,
- v.iov_len, 0, &err);
- if (err)
+ v.iov_len);
+ if (!next)
return false;
sum = csum_block_add(sum, next, off);
off += v.iov_len;
@@ -1537,15 +1539,14 @@ size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump,
return 0;
}
iterate_and_advance(i, bytes, v, ({
- int err = 0;
next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
v.iov_base,
- v.iov_len, 0, &err);
- if (!err) {
+ v.iov_len);
+ if (next) {
sum = csum_block_add(sum, next, off);
off += v.iov_len;
}
- err ? v.iov_len : 0;
+ next ? 0 : v.iov_len;
}), ({
char *p = kmap_atomic(v.bv_page);
sum = csum_and_memcpy(p + v.bv_offset,
@@ -1650,16 +1651,145 @@ const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
}
EXPORT_SYMBOL(dup_iter);
+static int copy_compat_iovec_from_user(struct iovec *iov,
+ const struct iovec __user *uvec, unsigned long nr_segs)
+{
+ const struct compat_iovec __user *uiov =
+ (const struct compat_iovec __user *)uvec;
+ int ret = -EFAULT, i;
+
+ if (!user_access_begin(uvec, nr_segs * sizeof(*uvec)))
+ return -EFAULT;
+
+ for (i = 0; i < nr_segs; i++) {
+ compat_uptr_t buf;
+ compat_ssize_t len;
+
+ unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
+ unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);
+
+ /* check for compat_size_t not fitting in compat_ssize_t ... */
+ if (len < 0) {
+ ret = -EINVAL;
+ goto uaccess_end;
+ }
+ iov[i].iov_base = compat_ptr(buf);
+ iov[i].iov_len = len;
+ }
+
+ ret = 0;
+uaccess_end:
+ user_access_end();
+ return ret;
+}
+
+static int copy_iovec_from_user(struct iovec *iov,
+ const struct iovec __user *uvec, unsigned long nr_segs)
+{
+ unsigned long seg;
+
+ if (copy_from_user(iov, uvec, nr_segs * sizeof(*uvec)))
+ return -EFAULT;
+ for (seg = 0; seg < nr_segs; seg++) {
+ if ((ssize_t)iov[seg].iov_len < 0)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+struct iovec *iovec_from_user(const struct iovec __user *uvec,
+ unsigned long nr_segs, unsigned long fast_segs,
+ struct iovec *fast_iov, bool compat)
+{
+ struct iovec *iov = fast_iov;
+ int ret;
+
+ /*
+ * SuS says "The readv() function *may* fail if the iovcnt argument was
+ * less than or equal to 0, or greater than {IOV_MAX}." Linux has
+ * traditionally returned zero for zero segments, so...
+ */
+ if (nr_segs == 0)
+ return iov;
+ if (nr_segs > UIO_MAXIOV)
+ return ERR_PTR(-EINVAL);
+ if (nr_segs > fast_segs) {
+ iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
+ if (!iov)
+ return ERR_PTR(-ENOMEM);
+ }
+
+ if (compat)
+ ret = copy_compat_iovec_from_user(iov, uvec, nr_segs);
+ else
+ ret = copy_iovec_from_user(iov, uvec, nr_segs);
+ if (ret) {
+ if (iov != fast_iov)
+ kfree(iov);
+ return ERR_PTR(ret);
+ }
+
+ return iov;
+}
+
+ssize_t __import_iovec(int type, const struct iovec __user *uvec,
+ unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
+ struct iov_iter *i, bool compat)
+{
+ ssize_t total_len = 0;
+ unsigned long seg;
+ struct iovec *iov;
+
+ iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat);
+ if (IS_ERR(iov)) {
+ *iovp = NULL;
+ return PTR_ERR(iov);
+ }
+
+ /*
+ * According to the Single Unix Specification we should return EINVAL if
+ * an element length is < 0 when cast to ssize_t or if the total length
+ * would overflow the ssize_t return value of the system call.
+ *
+ * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
+ * overflow case.
+ */
+ for (seg = 0; seg < nr_segs; seg++) {
+ ssize_t len = (ssize_t)iov[seg].iov_len;
+
+ if (!access_ok(iov[seg].iov_base, len)) {
+ if (iov != *iovp)
+ kfree(iov);
+ *iovp = NULL;
+ return -EFAULT;
+ }
+
+ if (len > MAX_RW_COUNT - total_len) {
+ len = MAX_RW_COUNT - total_len;
+ iov[seg].iov_len = len;
+ }
+ total_len += len;
+ }
+
+ iov_iter_init(i, type, iov, nr_segs, total_len);
+ if (iov == *iovp)
+ *iovp = NULL;
+ else
+ *iovp = iov;
+ return total_len;
+}
+
/**
* import_iovec() - Copy an array of &struct iovec from userspace
* into the kernel, check that it is valid, and initialize a new
* &struct iov_iter iterator to access it.
*
* @type: One of %READ or %WRITE.
- * @uvector: Pointer to the userspace array.
+ * @uvec: Pointer to the userspace array.
* @nr_segs: Number of elements in userspace array.
* @fast_segs: Number of elements in @iov.
- * @iov: (input and output parameter) Pointer to pointer to (usually small
+ * @iovp: (input and output parameter) Pointer to pointer to (usually small
* on-stack) kernel array.
* @i: Pointer to iterator that will be initialized on success.
*
@@ -1672,51 +1802,15 @@ EXPORT_SYMBOL(dup_iter);
*
* Return: Negative error code on error, bytes imported on success
*/
-ssize_t import_iovec(int type, const struct iovec __user * uvector,
+ssize_t import_iovec(int type, const struct iovec __user *uvec,
unsigned nr_segs, unsigned fast_segs,
- struct iovec **iov, struct iov_iter *i)
+ struct iovec **iovp, struct iov_iter *i)
{
- ssize_t n;
- struct iovec *p;
- n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
- *iov, &p);
- if (n < 0) {
- if (p != *iov)
- kfree(p);
- *iov = NULL;
- return n;
- }
- iov_iter_init(i, type, p, nr_segs, n);
- *iov = p == *iov ? NULL : p;
- return n;
+ return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
+ in_compat_syscall());
}
EXPORT_SYMBOL(import_iovec);
-#ifdef CONFIG_COMPAT
-#include <linux/compat.h>
-
-ssize_t compat_import_iovec(int type,
- const struct compat_iovec __user * uvector,
- unsigned nr_segs, unsigned fast_segs,
- struct iovec **iov, struct iov_iter *i)
-{
- ssize_t n;
- struct iovec *p;
- n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
- *iov, &p);
- if (n < 0) {
- if (p != *iov)
- kfree(p);
- *iov = NULL;
- return n;
- }
- iov_iter_init(i, type, p, nr_segs, n);
- *iov = p == *iov ? NULL : p;
- return n;
-}
-EXPORT_SYMBOL(compat_import_iovec);
-#endif
-
int import_single_range(int rw, void __user *buf, size_t len,
struct iovec *iov, struct iov_iter *i)
{
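
With compat handling folded into __import_iovec() via in_compat_syscall(),
callers no longer need a separate compat entry point. A sketch of a
readv-style consumer, using only the interfaces declared above;
demo_readv() is an illustrative name:

	#include <linux/uio.h>
	#include <linux/slab.h>

	static ssize_t demo_readv(const struct iovec __user *uvec,
				  unsigned nr_segs)
	{
		struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
		struct iov_iter iter;
		ssize_t ret;

		ret = import_iovec(READ, uvec, nr_segs, UIO_FASTIOV,
				   &iov, &iter);
		if (ret < 0)
			return ret;

		/* ... produce data, e.g. copy_to_iter(buf, len, &iter) ... */

		kfree(iov);	/* NULL if the on-stack array was used */
		return ret;
	}
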
diff --git a/lib/kunit/Makefile b/lib/kunit/Makefile
index 724b94311ca3..c49f4ffb6273 100644
--- a/lib/kunit/Makefile
+++ b/lib/kunit/Makefile
@@ -3,7 +3,8 @@ obj-$(CONFIG_KUNIT) += kunit.o
kunit-objs += test.o \
string-stream.o \
assert.o \
- try-catch.o
+ try-catch.o \
+ executor.o
ifeq ($(CONFIG_KUNIT_DEBUGFS),y)
kunit-objs += debugfs.o
diff --git a/lib/kunit/executor.c b/lib/kunit/executor.c
new file mode 100644
index 000000000000..a95742a4ece7
--- /dev/null
+++ b/lib/kunit/executor.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <kunit/test.h>
+
+/*
+ * These symbols point to the .kunit_test_suites section and are defined in
+ * include/asm-generic/vmlinux.lds.h, and consequently must be extern.
+ */
+extern struct kunit_suite * const * const __kunit_suites_start[];
+extern struct kunit_suite * const * const __kunit_suites_end[];
+
+#if IS_BUILTIN(CONFIG_KUNIT)
+
+static void kunit_print_tap_header(void)
+{
+ struct kunit_suite * const * const *suites, * const *subsuite;
+ int num_of_suites = 0;
+
+ for (suites = __kunit_suites_start;
+ suites < __kunit_suites_end;
+ suites++)
+ for (subsuite = *suites; *subsuite != NULL; subsuite++)
+ num_of_suites++;
+
+ pr_info("TAP version 14\n");
+ pr_info("1..%d\n", num_of_suites);
+}
+
+int kunit_run_all_tests(void)
+{
+ struct kunit_suite * const * const *suites;
+
+ kunit_print_tap_header();
+
+ for (suites = __kunit_suites_start;
+ suites < __kunit_suites_end;
+ suites++)
+ __kunit_test_suites_init(*suites);
+
+ return 0;
+}
+
+#endif /* IS_BUILTIN(CONFIG_KUNIT) */
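
The section the executor walks is populated by the kunit_test_suites()
registration macro, which for built-in KUnit places the suite pointer
between __kunit_suites_start and __kunit_suites_end. A minimal suite for
illustration; the demo_* names are not part of the patch:

	#include <kunit/test.h>

	static void demo_add_test(struct kunit *test)
	{
		KUNIT_EXPECT_EQ(test, 2 + 2, 4);
	}

	static struct kunit_case demo_cases[] = {
		KUNIT_CASE(demo_add_test),
		{}
	};

	static struct kunit_suite demo_suite = {
		.name = "demo",
		.test_cases = demo_cases,
	};
	kunit_test_suites(&demo_suite);
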
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
index c36037200310..750704abe89a 100644
--- a/lib/kunit/test.c
+++ b/lib/kunit/test.c
@@ -10,26 +10,12 @@
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/sched/debug.h>
+#include <linux/sched.h>
#include "debugfs.h"
#include "string-stream.h"
#include "try-catch-impl.h"
-static void kunit_set_failure(struct kunit *test)
-{
- WRITE_ONCE(test->success, false);
-}
-
-static void kunit_print_tap_version(void)
-{
- static bool kunit_has_printed_tap_version;
-
- if (!kunit_has_printed_tap_version) {
- pr_info("TAP version 14\n");
- kunit_has_printed_tap_version = true;
- }
-}
-
/*
* Append formatted message to log, size of which is limited to
* KUNIT_LOG_SIZE bytes (including null terminating byte).
@@ -69,7 +55,6 @@ EXPORT_SYMBOL_GPL(kunit_suite_num_test_cases);
static void kunit_print_subtest_start(struct kunit_suite *suite)
{
- kunit_print_tap_version();
kunit_log(KERN_INFO, suite, KUNIT_SUBTEST_INDENT "# Subtest: %s",
suite->name);
kunit_log(KERN_INFO, suite, KUNIT_SUBTEST_INDENT "1..%zd",
@@ -288,6 +273,10 @@ static void kunit_try_run_case(void *data)
struct kunit_suite *suite = ctx->suite;
struct kunit_case *test_case = ctx->test_case;
+#if (IS_ENABLED(CONFIG_KASAN) && IS_ENABLED(CONFIG_KUNIT))
+ current->kunit_test = test;
+#endif /* IS_ENABLED(CONFIG_KASAN) && IS_ENABLED(CONFIG_KUNIT) */
+
/*
* kunit_run_case_internal may encounter a fatal error; if it does,
* abort will be called, this thread will exit, and finally the parent
@@ -381,7 +370,7 @@ static void kunit_init_suite(struct kunit_suite *suite)
kunit_debugfs_create_suite(suite);
}
-int __kunit_test_suites_init(struct kunit_suite **suites)
+int __kunit_test_suites_init(struct kunit_suite * const * const suites)
{
unsigned int i;
@@ -602,6 +591,9 @@ void kunit_cleanup(struct kunit *test)
spin_unlock(&test->lock);
kunit_remove_resource(test, res);
}
+#if (IS_ENABLED(CONFIG_KASAN) && IS_ENABLED(CONFIG_KUNIT))
+ current->kunit_test = NULL;
+#endif /* IS_ENABLED(CONFIG_KASAN) && IS_ENABLED(CONFIG_KUNIT) */
}
EXPORT_SYMBOL_GPL(kunit_cleanup);
diff --git a/lib/libcrc32c.c b/lib/libcrc32c.c
index 77ab839644c5..5ca0d815a95d 100644
--- a/lib/libcrc32c.c
+++ b/lib/libcrc32c.c
@@ -12,7 +12,7 @@
* pages = {},
* month = {June},
*}
- * Used by the iSCSI driver, possibly others, and derived from the
+ * Used by the iSCSI driver, possibly others, and derived from
* the iscsi-crc.c module of the linux-iscsi driver at
* http://linux-iscsi.sourceforge.net.
*
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index 14f44f59e733..a899b3f0e2e5 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -28,6 +28,7 @@
* Change this to 1 if you want to see the failure printouts:
*/
static unsigned int debug_locks_verbose;
+unsigned int force_read_lock_recursive;
static DEFINE_WD_CLASS(ww_lockdep);
@@ -399,6 +400,49 @@ static void rwsem_ABBA1(void)
* read_lock(A)
* spin_lock(B)
* spin_lock(B)
+ * write_lock(A)
+ *
+ * This test case checks whether the chain cache prevents us from
+ * detecting a read-lock/lock-write deadlock: if the chain cache doesn't
+ * distinguish read locks from write locks, the following case may happen:
+ *
+ * { read_lock(A)->lock(B) dependency exists }
+ *
+ * P0:
+ * lock(B);
+ * read_lock(A);
+ *
+ * { Not a deadlock, B -> A is added in the chain cache }
+ *
+ * P1:
+ * lock(B);
+ * write_lock(A);
+ *
+ * { B->A found in chain cache, not reported as a deadlock }
+ *
+ */
+static void rlock_chaincache_ABBA1(void)
+{
+ RL(X1);
+ L(Y1);
+ U(Y1);
+ RU(X1);
+
+ L(Y1);
+ RL(X1);
+ RU(X1);
+ U(Y1);
+
+ L(Y1);
+ WL(X1);
+ WU(X1);
+ U(Y1); // should fail
+}
+
+/*
+ * read_lock(A)
+ * spin_lock(B)
+ * spin_lock(B)
* read_lock(A)
*/
static void rlock_ABBA2(void)
@@ -991,6 +1035,133 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_wlock)
#undef E3
/*
+ * write-read / write-read / write-read deadlock even if read is recursive
+ */
+
+#define E1() \
+ \
+ WL(X1); \
+ RL(Y1); \
+ RU(Y1); \
+ WU(X1);
+
+#define E2() \
+ \
+ WL(Y1); \
+ RL(Z1); \
+ RU(Z1); \
+ WU(Y1);
+
+#define E3() \
+ \
+ WL(Z1); \
+ RL(X1); \
+ RU(X1); \
+ WU(Z1);
+
+#include "locking-selftest-rlock.h"
+GENERATE_PERMUTATIONS_3_EVENTS(W1R2_W2R3_W3R1)
+
+#undef E1
+#undef E2
+#undef E3
+
+/*
+ * write-write / read-read / write-read deadlock even if read is recursive
+ */
+
+#define E1() \
+ \
+ WL(X1); \
+ WL(Y1); \
+ WU(Y1); \
+ WU(X1);
+
+#define E2() \
+ \
+ RL(Y1); \
+ RL(Z1); \
+ RU(Z1); \
+ RU(Y1);
+
+#define E3() \
+ \
+ WL(Z1); \
+ RL(X1); \
+ RU(X1); \
+ WU(Z1);
+
+#include "locking-selftest-rlock.h"
+GENERATE_PERMUTATIONS_3_EVENTS(W1W2_R2R3_W3R1)
+
+#undef E1
+#undef E2
+#undef E3
+
+/*
+ * write-write / read-read / read-write is not deadlock when read is recursive
+ */
+
+#define E1() \
+ \
+ WL(X1); \
+ WL(Y1); \
+ WU(Y1); \
+ WU(X1);
+
+#define E2() \
+ \
+ RL(Y1); \
+ RL(Z1); \
+ RU(Z1); \
+ RU(Y1);
+
+#define E3() \
+ \
+ RL(Z1); \
+ WL(X1); \
+ WU(X1); \
+ RU(Z1);
+
+#include "locking-selftest-rlock.h"
+GENERATE_PERMUTATIONS_3_EVENTS(W1R2_R2R3_W3W1)
+
+#undef E1
+#undef E2
+#undef E3
+
+/*
+ * write-read / read-read / write-write is not deadlock when read is recursive
+ */
+
+#define E1() \
+ \
+ WL(X1); \
+ RL(Y1); \
+ RU(Y1); \
+ WU(X1);
+
+#define E2() \
+ \
+ RL(Y1); \
+ RL(Z1); \
+ RU(Z1); \
+ RU(Y1);
+
+#define E3() \
+ \
+ WL(Z1); \
+ WL(X1); \
+ WU(X1); \
+ WU(Z1);
+
+#include "locking-selftest-rlock.h"
+GENERATE_PERMUTATIONS_3_EVENTS(W1W2_R2R3_R3W1)
+
+#undef E1
+#undef E2
+#undef E3
+/*
* read-lock / write-lock recursion that is actually safe.
*/
@@ -1009,20 +1180,28 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_wlock)
#define E3() \
\
IRQ_ENTER(); \
- RL(A); \
+ LOCK(A); \
L(B); \
U(B); \
- RU(A); \
+ UNLOCK(A); \
IRQ_EXIT();
/*
- * Generate 12 testcases:
+ * Generate 24 testcases:
*/
#include "locking-selftest-hardirq.h"
-GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_hard)
+#include "locking-selftest-rlock.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_hard_rlock)
+
+#include "locking-selftest-wlock.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_hard_wlock)
#include "locking-selftest-softirq.h"
-GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft)
+#include "locking-selftest-rlock.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft_rlock)
+
+#include "locking-selftest-wlock.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft_wlock)
#undef E1
#undef E2
@@ -1036,8 +1215,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft)
\
IRQ_DISABLE(); \
L(B); \
- WL(A); \
- WU(A); \
+ LOCK(A); \
+ UNLOCK(A); \
U(B); \
IRQ_ENABLE();
@@ -1054,13 +1233,75 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft)
IRQ_EXIT();
/*
- * Generate 12 testcases:
+ * Generate 24 testcases:
*/
#include "locking-selftest-hardirq.h"
-// GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_hard)
+#include "locking-selftest-rlock.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_hard_rlock)
+
+#include "locking-selftest-wlock.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_hard_wlock)
#include "locking-selftest-softirq.h"
-// GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_soft)
+#include "locking-selftest-rlock.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_soft_rlock)
+
+#include "locking-selftest-wlock.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_soft_wlock)
+
+#undef E1
+#undef E2
+#undef E3
+/*
+ * read-lock / write-lock recursion that is unsafe.
+ *
+ * A is a ENABLED_*_READ lock
+ * B is a USED_IN_*_READ lock
+ *
+ * read_lock(A);
+ * write_lock(B);
+ * <interrupt>
+ * read_lock(B);
+ * write_lock(A); // if this one is read_lock(), no deadlock
+ */
+
+#define E1() \
+ \
+ IRQ_DISABLE(); \
+ WL(B); \
+ LOCK(A); \
+ UNLOCK(A); \
+ WU(B); \
+ IRQ_ENABLE();
+
+#define E2() \
+ \
+ RL(A); \
+ RU(A); \
+
+#define E3() \
+ \
+ IRQ_ENTER(); \
+ RL(B); \
+ RU(B); \
+ IRQ_EXIT();
+
+/*
+ * Generate 24 testcases:
+ */
+#include "locking-selftest-hardirq.h"
+#include "locking-selftest-rlock.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_hard_rlock)
+
+#include "locking-selftest-wlock.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_hard_wlock)
+
+#include "locking-selftest-softirq.h"
+#include "locking-selftest-rlock.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_soft_rlock)
+
+#include "locking-selftest-wlock.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_soft_wlock)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define I_SPINLOCK(x) lockdep_reset_lock(&lock_##x.dep_map)
@@ -1199,6 +1440,19 @@ static inline void print_testname(const char *testname)
dotest(name##_##nr, FAILURE, LOCKTYPE_RWLOCK); \
pr_cont("\n");
+#define DO_TESTCASE_1RR(desc, name, nr) \
+ print_testname(desc"/"#nr); \
+ pr_cont(" |"); \
+ dotest(name##_##nr, SUCCESS, LOCKTYPE_RWLOCK); \
+ pr_cont("\n");
+
+#define DO_TESTCASE_1RRB(desc, name, nr) \
+ print_testname(desc"/"#nr); \
+ pr_cont(" |"); \
+ dotest(name##_##nr, FAILURE, LOCKTYPE_RWLOCK); \
+ pr_cont("\n");
+
+
#define DO_TESTCASE_3(desc, name, nr) \
print_testname(desc"/"#nr); \
dotest(name##_spin_##nr, FAILURE, LOCKTYPE_SPIN); \
@@ -1213,6 +1467,25 @@ static inline void print_testname(const char *testname)
dotest(name##_rlock_##nr, SUCCESS, LOCKTYPE_RWLOCK); \
pr_cont("\n");
+#define DO_TESTCASE_2RW(desc, name, nr) \
+ print_testname(desc"/"#nr); \
+ pr_cont(" |"); \
+ dotest(name##_wlock_##nr, FAILURE, LOCKTYPE_RWLOCK); \
+ dotest(name##_rlock_##nr, SUCCESS, LOCKTYPE_RWLOCK); \
+ pr_cont("\n");
+
+#define DO_TESTCASE_2x2RW(desc, name, nr) \
+ DO_TESTCASE_2RW("hard-"desc, name##_hard, nr) \
+ DO_TESTCASE_2RW("soft-"desc, name##_soft, nr) \
+
+#define DO_TESTCASE_6x2x2RW(desc, name) \
+ DO_TESTCASE_2x2RW(desc, name, 123); \
+ DO_TESTCASE_2x2RW(desc, name, 132); \
+ DO_TESTCASE_2x2RW(desc, name, 213); \
+ DO_TESTCASE_2x2RW(desc, name, 231); \
+ DO_TESTCASE_2x2RW(desc, name, 312); \
+ DO_TESTCASE_2x2RW(desc, name, 321);
+
#define DO_TESTCASE_6(desc, name) \
print_testname(desc); \
dotest(name##_spin, FAILURE, LOCKTYPE_SPIN); \
@@ -1289,6 +1562,22 @@ static inline void print_testname(const char *testname)
DO_TESTCASE_2IB(desc, name, 312); \
DO_TESTCASE_2IB(desc, name, 321);
+#define DO_TESTCASE_6x1RR(desc, name) \
+ DO_TESTCASE_1RR(desc, name, 123); \
+ DO_TESTCASE_1RR(desc, name, 132); \
+ DO_TESTCASE_1RR(desc, name, 213); \
+ DO_TESTCASE_1RR(desc, name, 231); \
+ DO_TESTCASE_1RR(desc, name, 312); \
+ DO_TESTCASE_1RR(desc, name, 321);
+
+#define DO_TESTCASE_6x1RRB(desc, name) \
+ DO_TESTCASE_1RRB(desc, name, 123); \
+ DO_TESTCASE_1RRB(desc, name, 132); \
+ DO_TESTCASE_1RRB(desc, name, 213); \
+ DO_TESTCASE_1RRB(desc, name, 231); \
+ DO_TESTCASE_1RRB(desc, name, 312); \
+ DO_TESTCASE_1RRB(desc, name, 321);
+
#define DO_TESTCASE_6x6(desc, name) \
DO_TESTCASE_6I(desc, name, 123); \
DO_TESTCASE_6I(desc, name, 132); \
@@ -1966,6 +2255,108 @@ static void ww_tests(void)
pr_cont("\n");
}
+
+/*
+ * <in hardirq handler>
+ * read_lock(&A);
+ * <hardirq disable>
+ * spin_lock(&B);
+ * spin_lock(&B);
+ * read_lock(&A);
+ *
+ * is a deadlock.
+ */
+static void queued_read_lock_hardirq_RE_Er(void)
+{
+ HARDIRQ_ENTER();
+ read_lock(&rwlock_A);
+ LOCK(B);
+ UNLOCK(B);
+ read_unlock(&rwlock_A);
+ HARDIRQ_EXIT();
+
+ HARDIRQ_DISABLE();
+ LOCK(B);
+ read_lock(&rwlock_A);
+ read_unlock(&rwlock_A);
+ UNLOCK(B);
+ HARDIRQ_ENABLE();
+}
+
+/*
+ * <in hardirq handler>
+ * spin_lock(&B);
+ * <hardirq disable>
+ * read_lock(&A);
+ * read_lock(&A);
+ * spin_lock(&B);
+ *
+ * is not a deadlock.
+ */
+static void queued_read_lock_hardirq_ER_rE(void)
+{
+ HARDIRQ_ENTER();
+ LOCK(B);
+ read_lock(&rwlock_A);
+ read_unlock(&rwlock_A);
+ UNLOCK(B);
+ HARDIRQ_EXIT();
+
+ HARDIRQ_DISABLE();
+ read_lock(&rwlock_A);
+ LOCK(B);
+ UNLOCK(B);
+ read_unlock(&rwlock_A);
+ HARDIRQ_ENABLE();
+}
+
+/*
+ * <hardirq disable>
+ * spin_lock(&B);
+ * read_lock(&A);
+ *			<in hardirq handler>
+ *			spin_lock(&B);
+ * read_lock(&A);
+ *
+ * is a deadlock, because the two read_lock()s are both non-recursive readers.
+ */
+static void queued_read_lock_hardirq_inversion(void)
+{
+
+ HARDIRQ_ENTER();
+ LOCK(B);
+ UNLOCK(B);
+ HARDIRQ_EXIT();
+
+ HARDIRQ_DISABLE();
+ LOCK(B);
+ read_lock(&rwlock_A);
+ read_unlock(&rwlock_A);
+ UNLOCK(B);
+ HARDIRQ_ENABLE();
+
+ read_lock(&rwlock_A);
+ read_unlock(&rwlock_A);
+}
+
+static void queued_read_lock_tests(void)
+{
+ printk(" --------------------------------------------------------------------------\n");
+ printk(" | queued read lock tests |\n");
+ printk(" ---------------------------\n");
+ print_testname("hardirq read-lock/lock-read");
+ dotest(queued_read_lock_hardirq_RE_Er, FAILURE, LOCKTYPE_RWLOCK);
+ pr_cont("\n");
+
+ print_testname("hardirq lock-read/read-lock");
+ dotest(queued_read_lock_hardirq_ER_rE, SUCCESS, LOCKTYPE_RWLOCK);
+ pr_cont("\n");
+
+ print_testname("hardirq inversion");
+ dotest(queued_read_lock_hardirq_inversion, FAILURE, LOCKTYPE_RWLOCK);
+ pr_cont("\n");
+}
+
void locking_selftest(void)
{
/*
@@ -1979,6 +2370,11 @@ void locking_selftest(void)
}
/*
+ * Treat read_lock() as a recursive read lock for testing purposes.
+ */
+ force_read_lock_recursive = 1;
+
+ /*
* Run the testsuite:
*/
printk("------------------------\n");
@@ -2033,14 +2429,6 @@ void locking_selftest(void)
print_testname("mixed read-lock/lock-write ABBA");
pr_cont(" |");
dotest(rlock_ABBA1, FAILURE, LOCKTYPE_RWLOCK);
-#ifdef CONFIG_PROVE_LOCKING
- /*
- * Lockdep does indeed fail here, but there's nothing we can do about
- * that now. Don't kill lockdep for it.
- */
- unexpected_testcase_failures--;
-#endif
-
pr_cont(" |");
dotest(rwsem_ABBA1, FAILURE, LOCKTYPE_RWSEM);
@@ -2056,6 +2444,15 @@ void locking_selftest(void)
pr_cont(" |");
dotest(rwsem_ABBA3, FAILURE, LOCKTYPE_RWSEM);
+ print_testname("chain cached mixed R-L/L-W ABBA");
+ pr_cont(" |");
+ dotest(rlock_chaincache_ABBA1, FAILURE, LOCKTYPE_RWLOCK);
+
+ DO_TESTCASE_6x1RRB("rlock W1R2/W2R3/W3R1", W1R2_W2R3_W3R1);
+ DO_TESTCASE_6x1RRB("rlock W1W2/R2R3/W3R1", W1W2_R2R3_W3R1);
+ DO_TESTCASE_6x1RR("rlock W1W2/R2R3/R3W1", W1W2_R2R3_R3W1);
+ DO_TESTCASE_6x1RR("rlock W1R2/R2R3/W3W1", W1R2_R2R3_W3W1);
+
printk(" --------------------------------------------------------------------------\n");
/*
@@ -2068,11 +2465,19 @@ void locking_selftest(void)
DO_TESTCASE_6x6("safe-A + unsafe-B #2", irqsafe4);
DO_TESTCASE_6x6RW("irq lock-inversion", irq_inversion);
- DO_TESTCASE_6x2("irq read-recursion", irq_read_recursion);
-// DO_TESTCASE_6x2B("irq read-recursion #2", irq_read_recursion2);
+ DO_TESTCASE_6x2x2RW("irq read-recursion", irq_read_recursion);
+ DO_TESTCASE_6x2x2RW("irq read-recursion #2", irq_read_recursion2);
+ DO_TESTCASE_6x2x2RW("irq read-recursion #3", irq_read_recursion3);
ww_tests();
+ force_read_lock_recursive = 0;
+ /*
+ * queued_read_lock() specific test cases can be put here
+ */
+ if (IS_ENABLED(CONFIG_QUEUED_RWLOCKS))
+ queued_read_lock_tests();
+
if (unexpected_testcase_failures) {
printk("-----------------------------------------------------------------\n");
debug_locks = 0;
diff --git a/lib/math/rational.c b/lib/math/rational.c
index df75c8809693..9781d521963d 100644
--- a/lib/math/rational.c
+++ b/lib/math/rational.c
@@ -11,7 +11,7 @@
#include <linux/rational.h>
#include <linux/compiler.h>
#include <linux/export.h>
-#include <linux/kernel.h>
+#include <linux/minmax.h>
/*
* calculate best rational approximation for a given fraction
diff --git a/lib/math/reciprocal_div.c b/lib/math/reciprocal_div.c
index bf043258fa00..32436dd4171e 100644
--- a/lib/math/reciprocal_div.c
+++ b/lib/math/reciprocal_div.c
@@ -4,6 +4,7 @@
#include <asm/div64.h>
#include <linux/reciprocal_div.h>
#include <linux/export.h>
+#include <linux/minmax.h>
/*
* For a description of the algorithm please have a look at
diff --git a/lib/mpi/Makefile b/lib/mpi/Makefile
index 43b8fce14079..6e6ef9a34fe1 100644
--- a/lib/mpi/Makefile
+++ b/lib/mpi/Makefile
@@ -13,10 +13,16 @@ mpi-y = \
generic_mpih-rshift.o \
generic_mpih-sub1.o \
generic_mpih-add1.o \
+ ec.o \
mpicoder.o \
+ mpi-add.o \
mpi-bit.o \
mpi-cmp.o \
mpi-sub-ui.o \
+ mpi-div.o \
+ mpi-inv.o \
+ mpi-mod.o \
+ mpi-mul.o \
mpih-cmp.o \
mpih-div.o \
mpih-mul.o \
diff --git a/lib/mpi/ec.c b/lib/mpi/ec.c
new file mode 100644
index 000000000000..c21470122dfc
--- /dev/null
+++ b/lib/mpi/ec.c
@@ -0,0 +1,1509 @@
+/* ec.c - Elliptic Curve functions
+ * Copyright (C) 2007 Free Software Foundation, Inc.
+ * Copyright (C) 2013 g10 Code GmbH
+ *
+ * This file is part of Libgcrypt.
+ *
+ * Libgcrypt is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * Libgcrypt is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "mpi-internal.h"
+#include "longlong.h"
+
+#define point_init(a) mpi_point_init((a))
+#define point_free(a) mpi_point_free_parts((a))
+
+#define log_error(fmt, ...) pr_err(fmt, ##__VA_ARGS__)
+#define log_fatal(fmt, ...) pr_err(fmt, ##__VA_ARGS__)
+
+#define DIM(v) (sizeof(v)/sizeof((v)[0]))
+
+
+/* Create a new point object. NBITS gives the size in bits of one
+ * coordinate; it is only used to pre-allocate some resources and
+ * might also be passed as 0 to use a default value.
+ */
+MPI_POINT mpi_point_new(unsigned int nbits)
+{
+ MPI_POINT p;
+
+ (void)nbits; /* Currently not used. */
+
+ p = kmalloc(sizeof(*p), GFP_KERNEL);
+ if (p)
+ mpi_point_init(p);
+ return p;
+}
+EXPORT_SYMBOL_GPL(mpi_point_new);
+
+/* Release the point object P. P may be NULL. */
+void mpi_point_release(MPI_POINT p)
+{
+ if (p) {
+ mpi_point_free_parts(p);
+ kfree(p);
+ }
+}
+EXPORT_SYMBOL_GPL(mpi_point_release);
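+
+/*
+ * Illustrative usage sketch (editorial; the helper name is hypothetical
+ * and not part of this patch): allocating a point, touching a coordinate
+ * and releasing it again.
+ */
+static void point_lifetime_sketch(void)
+{
+	MPI_POINT p = mpi_point_new(0);
+
+	if (!p)
+		return;
+	mpi_set_ui(p->x, 1);	/* the coordinates are ordinary MPIs */
+	mpi_point_release(p);	/* frees the coordinates and P itself */
+}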
+
+/* Initialize the fields of a point object. mpi_point_free_parts()
+ * may be used to release the fields.
+ */
+void mpi_point_init(MPI_POINT p)
+{
+ p->x = mpi_new(0);
+ p->y = mpi_new(0);
+ p->z = mpi_new(0);
+}
+EXPORT_SYMBOL_GPL(mpi_point_init);
+
+/* Release the parts of a point object. */
+void mpi_point_free_parts(MPI_POINT p)
+{
+ mpi_free(p->x); p->x = NULL;
+ mpi_free(p->y); p->y = NULL;
+ mpi_free(p->z); p->z = NULL;
+}
+EXPORT_SYMBOL_GPL(mpi_point_free_parts);
+
+/* Set the value from S into D. */
+static void point_set(MPI_POINT d, MPI_POINT s)
+{
+ mpi_set(d->x, s->x);
+ mpi_set(d->y, s->y);
+ mpi_set(d->z, s->z);
+}
+
+static void point_resize(MPI_POINT p, struct mpi_ec_ctx *ctx)
+{
+ size_t nlimbs = ctx->p->nlimbs;
+
+ mpi_resize(p->x, nlimbs);
+ p->x->nlimbs = nlimbs;
+ mpi_resize(p->z, nlimbs);
+ p->z->nlimbs = nlimbs;
+
+ if (ctx->model != MPI_EC_MONTGOMERY) {
+ mpi_resize(p->y, nlimbs);
+ p->y->nlimbs = nlimbs;
+ }
+}
+
+static void point_swap_cond(MPI_POINT d, MPI_POINT s, unsigned long swap,
+ struct mpi_ec_ctx *ctx)
+{
+ mpi_swap_cond(d->x, s->x, swap);
+ if (ctx->model != MPI_EC_MONTGOMERY)
+ mpi_swap_cond(d->y, s->y, swap);
+ mpi_swap_cond(d->z, s->z, swap);
+}
+
+
+/* W = W mod P. */
+static void ec_mod(MPI w, struct mpi_ec_ctx *ec)
+{
+ if (ec->t.p_barrett)
+ mpi_mod_barrett(w, w, ec->t.p_barrett);
+ else
+ mpi_mod(w, w, ec->p);
+}
+
+static void ec_addm(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx)
+{
+ mpi_add(w, u, v);
+ ec_mod(w, ctx);
+}
+
+static void ec_subm(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ec)
+{
+ mpi_sub(w, u, v);
+ while (w->sign)
+ mpi_add(w, w, ec->p);
+ /*ec_mod(w, ec);*/
+}
+
+static void ec_mulm(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx)
+{
+ mpi_mul(w, u, v);
+ ec_mod(w, ctx);
+}
+
+/* W = 2 * U mod P. */
+static void ec_mul2(MPI w, MPI u, struct mpi_ec_ctx *ctx)
+{
+ mpi_lshift(w, u, 1);
+ ec_mod(w, ctx);
+}
+
+static void ec_powm(MPI w, const MPI b, const MPI e,
+ struct mpi_ec_ctx *ctx)
+{
+ mpi_powm(w, b, e, ctx->p);
+ /* mpi_abs(w); */
+}
+
+/* Shortcut for
+ * ec_powm(B, B, mpi_const(MPI_C_TWO), ctx);
+ * for easier optimization.
+ */
+static void ec_pow2(MPI w, const MPI b, struct mpi_ec_ctx *ctx)
+{
+ /* Using mpi_mul is slightly faster (at least on amd64). */
+ /* mpi_powm(w, b, mpi_const(MPI_C_TWO), ctx->p); */
+ ec_mulm(w, b, b, ctx);
+}
+
+/* Shortcut for
+ * ec_powm(B, B, mpi_const(MPI_C_THREE), ctx);
+ * for easier optimization.
+ */
+static void ec_pow3(MPI w, const MPI b, struct mpi_ec_ctx *ctx)
+{
+ mpi_powm(w, b, mpi_const(MPI_C_THREE), ctx->p);
+}
+
+static void ec_invm(MPI x, MPI a, struct mpi_ec_ctx *ctx)
+{
+ if (!mpi_invm(x, a, ctx->p))
+ log_error("ec_invm: inverse does not exist:\n");
+}
+
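+/* Constant-time conditional copy: MASK is all-ones when SET is 1 and
+ * zero when SET is 0, so WP either takes UP's limbs or keeps its old
+ * value while performing the same loads and stores in both cases.
+ */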
+static void mpih_set_cond(mpi_ptr_t wp, mpi_ptr_t up,
+ mpi_size_t usize, unsigned long set)
+{
+ mpi_size_t i;
+ mpi_limb_t mask = ((mpi_limb_t)0) - set;
+ mpi_limb_t x;
+
+ for (i = 0; i < usize; i++) {
+ x = mask & (wp[i] ^ up[i]);
+ wp[i] = wp[i] ^ x;
+ }
+}
+
+/* Routines for 2^255 - 19. */
+
+#define LIMB_SIZE_25519 ((256+BITS_PER_MPI_LIMB-1)/BITS_PER_MPI_LIMB)
+
+static void ec_addm_25519(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx)
+{
+ mpi_ptr_t wp, up, vp;
+ mpi_size_t wsize = LIMB_SIZE_25519;
+ mpi_limb_t n[LIMB_SIZE_25519];
+ mpi_limb_t borrow;
+
+ if (w->nlimbs != wsize || u->nlimbs != wsize || v->nlimbs != wsize)
+ log_bug("addm_25519: different sizes\n");
+
+ memset(n, 0, sizeof(n));
+ up = u->d;
+ vp = v->d;
+ wp = w->d;
+
+ mpihelp_add_n(wp, up, vp, wsize);
+ borrow = mpihelp_sub_n(wp, wp, ctx->p->d, wsize);
+ mpih_set_cond(n, ctx->p->d, wsize, (borrow != 0UL));
+ mpihelp_add_n(wp, wp, n, wsize);
+ wp[LIMB_SIZE_25519-1] &= ~((mpi_limb_t)1 << (255 % BITS_PER_MPI_LIMB));
+}
+
+static void ec_subm_25519(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx)
+{
+ mpi_ptr_t wp, up, vp;
+ mpi_size_t wsize = LIMB_SIZE_25519;
+ mpi_limb_t n[LIMB_SIZE_25519];
+ mpi_limb_t borrow;
+
+ if (w->nlimbs != wsize || u->nlimbs != wsize || v->nlimbs != wsize)
+ log_bug("subm_25519: different sizes\n");
+
+ memset(n, 0, sizeof(n));
+ up = u->d;
+ vp = v->d;
+ wp = w->d;
+
+ borrow = mpihelp_sub_n(wp, up, vp, wsize);
+ mpih_set_cond(n, ctx->p->d, wsize, (borrow != 0UL));
+ mpihelp_add_n(wp, wp, n, wsize);
+ wp[LIMB_SIZE_25519-1] &= ~((mpi_limb_t)1 << (255 % BITS_PER_MPI_LIMB));
+}
+
+static void ec_mulm_25519(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx)
+{
+ mpi_ptr_t wp, up, vp;
+ mpi_size_t wsize = LIMB_SIZE_25519;
+ mpi_limb_t n[LIMB_SIZE_25519*2];
+ mpi_limb_t m[LIMB_SIZE_25519+1];
+ mpi_limb_t cy;
+ int msb;
+
+ (void)ctx;
+ if (w->nlimbs != wsize || u->nlimbs != wsize || v->nlimbs != wsize)
+ log_bug("mulm_25519: different sizes\n");
+
+ up = u->d;
+ vp = v->d;
+ wp = w->d;
+
+ mpihelp_mul_n(n, up, vp, wsize);
+ memcpy(wp, n, wsize * BYTES_PER_MPI_LIMB);
+ wp[LIMB_SIZE_25519-1] &= ~((mpi_limb_t)1 << (255 % BITS_PER_MPI_LIMB));
+
+ memcpy(m, n+LIMB_SIZE_25519-1, (wsize+1) * BYTES_PER_MPI_LIMB);
+ mpihelp_rshift(m, m, LIMB_SIZE_25519+1, (255 % BITS_PER_MPI_LIMB));
+
+ memcpy(n, m, wsize * BYTES_PER_MPI_LIMB);
+ cy = mpihelp_lshift(m, m, LIMB_SIZE_25519, 4);
+ m[LIMB_SIZE_25519] = cy;
+ cy = mpihelp_add_n(m, m, n, wsize);
+ m[LIMB_SIZE_25519] += cy;
+ cy = mpihelp_add_n(m, m, n, wsize);
+ m[LIMB_SIZE_25519] += cy;
+ cy = mpihelp_add_n(m, m, n, wsize);
+ m[LIMB_SIZE_25519] += cy;
+
+ cy = mpihelp_add_n(wp, wp, m, wsize);
+ m[LIMB_SIZE_25519] += cy;
+
+ memset(m, 0, wsize * BYTES_PER_MPI_LIMB);
+ msb = (wp[LIMB_SIZE_25519-1] >> (255 % BITS_PER_MPI_LIMB));
+ m[0] = (m[LIMB_SIZE_25519] * 2 + msb) * 19;
+ wp[LIMB_SIZE_25519-1] &= ~((mpi_limb_t)1 << (255 % BITS_PER_MPI_LIMB));
+ mpihelp_add_n(wp, wp, m, wsize);
+
+ m[0] = 0;
+ cy = mpihelp_sub_n(wp, wp, ctx->p->d, wsize);
+ mpih_set_cond(m, ctx->p->d, wsize, (cy != 0UL));
+ mpihelp_add_n(wp, wp, m, wsize);
+}
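+
+/*
+ * Editorial note on the reduction above: for p = 2^255 - 19 we have
+ * 2^255 == 19 (mod p), so a double-width product n = h*2^255 + l reduces
+ * to 19*h + l. The left shift by 4 followed by three additions computes
+ * 19*h as 16*h + 3*h.
+ */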
+
+static void ec_mul2_25519(MPI w, MPI u, struct mpi_ec_ctx *ctx)
+{
+ ec_addm_25519(w, u, u, ctx);
+}
+
+static void ec_pow2_25519(MPI w, const MPI b, struct mpi_ec_ctx *ctx)
+{
+ ec_mulm_25519(w, b, b, ctx);
+}
+
+/* Routines for 2^448 - 2^224 - 1. */
+
+#define LIMB_SIZE_448 ((448+BITS_PER_MPI_LIMB-1)/BITS_PER_MPI_LIMB)
+#define LIMB_SIZE_HALF_448 ((LIMB_SIZE_448+1)/2)
+
+static void ec_addm_448(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx)
+{
+ mpi_ptr_t wp, up, vp;
+ mpi_size_t wsize = LIMB_SIZE_448;
+ mpi_limb_t n[LIMB_SIZE_448];
+ mpi_limb_t cy;
+
+ if (w->nlimbs != wsize || u->nlimbs != wsize || v->nlimbs != wsize)
+ log_bug("addm_448: different sizes\n");
+
+ memset(n, 0, sizeof(n));
+ up = u->d;
+ vp = v->d;
+ wp = w->d;
+
+ cy = mpihelp_add_n(wp, up, vp, wsize);
+ mpih_set_cond(n, ctx->p->d, wsize, (cy != 0UL));
+ mpihelp_sub_n(wp, wp, n, wsize);
+}
+
+static void ec_subm_448(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx)
+{
+ mpi_ptr_t wp, up, vp;
+ mpi_size_t wsize = LIMB_SIZE_448;
+ mpi_limb_t n[LIMB_SIZE_448];
+ mpi_limb_t borrow;
+
+ if (w->nlimbs != wsize || u->nlimbs != wsize || v->nlimbs != wsize)
+ log_bug("subm_448: different sizes\n");
+
+ memset(n, 0, sizeof(n));
+ up = u->d;
+ vp = v->d;
+ wp = w->d;
+
+ borrow = mpihelp_sub_n(wp, up, vp, wsize);
+ mpih_set_cond(n, ctx->p->d, wsize, (borrow != 0UL));
+ mpihelp_add_n(wp, wp, n, wsize);
+}
+
+static void ec_mulm_448(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx)
+{
+ mpi_ptr_t wp, up, vp;
+ mpi_size_t wsize = LIMB_SIZE_448;
+ mpi_limb_t n[LIMB_SIZE_448*2];
+ mpi_limb_t a2[LIMB_SIZE_HALF_448];
+ mpi_limb_t a3[LIMB_SIZE_HALF_448];
+ mpi_limb_t b0[LIMB_SIZE_HALF_448];
+ mpi_limb_t b1[LIMB_SIZE_HALF_448];
+ mpi_limb_t cy;
+ int i;
+#if (LIMB_SIZE_HALF_448 > LIMB_SIZE_448/2)
+ mpi_limb_t b1_rest, a3_rest;
+#endif
+
+ if (w->nlimbs != wsize || u->nlimbs != wsize || v->nlimbs != wsize)
+ log_bug("mulm_448: different sizes\n");
+
+ up = u->d;
+ vp = v->d;
+ wp = w->d;
+
+ mpihelp_mul_n(n, up, vp, wsize);
+
+ for (i = 0; i < (wsize + 1) / 2; i++) {
+ b0[i] = n[i];
+ b1[i] = n[i+wsize/2];
+ a2[i] = n[i+wsize];
+ a3[i] = n[i+wsize+wsize/2];
+ }
+
+#if (LIMB_SIZE_HALF_448 > LIMB_SIZE_448/2)
+ b0[LIMB_SIZE_HALF_448-1] &= ((mpi_limb_t)1UL << 32)-1;
+ a2[LIMB_SIZE_HALF_448-1] &= ((mpi_limb_t)1UL << 32)-1;
+
+ b1_rest = 0;
+ a3_rest = 0;
+
+ for (i = (wsize + 1) / 2 - 1; i >= 0; i--) {
+ mpi_limb_t b1v, a3v;
+ b1v = b1[i];
+ a3v = a3[i];
+ b1[i] = (b1_rest << 32) | (b1v >> 32);
+ a3[i] = (a3_rest << 32) | (a3v >> 32);
+ b1_rest = b1v & (((mpi_limb_t)1UL << 32)-1);
+ a3_rest = a3v & (((mpi_limb_t)1UL << 32)-1);
+ }
+#endif
+
+ cy = mpihelp_add_n(b0, b0, a2, LIMB_SIZE_HALF_448);
+ cy += mpihelp_add_n(b0, b0, a3, LIMB_SIZE_HALF_448);
+ for (i = 0; i < (wsize + 1) / 2; i++)
+ wp[i] = b0[i];
+#if (LIMB_SIZE_HALF_448 > LIMB_SIZE_448/2)
+ wp[LIMB_SIZE_HALF_448-1] &= (((mpi_limb_t)1UL << 32)-1);
+#endif
+
+#if (LIMB_SIZE_HALF_448 > LIMB_SIZE_448/2)
+ cy = b0[LIMB_SIZE_HALF_448-1] >> 32;
+#endif
+
+ cy = mpihelp_add_1(b1, b1, LIMB_SIZE_HALF_448, cy);
+ cy += mpihelp_add_n(b1, b1, a2, LIMB_SIZE_HALF_448);
+ cy += mpihelp_add_n(b1, b1, a3, LIMB_SIZE_HALF_448);
+ cy += mpihelp_add_n(b1, b1, a3, LIMB_SIZE_HALF_448);
+#if (LIMB_SIZE_HALF_448 > LIMB_SIZE_448/2)
+ b1_rest = 0;
+ for (i = (wsize + 1) / 2 - 1; i >= 0; i--) {
+ mpi_limb_t b1v = b1[i];
+ b1[i] = (b1_rest << 32) | (b1v >> 32);
+ b1_rest = b1v & (((mpi_limb_t)1UL << 32)-1);
+ }
+ wp[LIMB_SIZE_HALF_448-1] |= (b1_rest << 32);
+#endif
+ for (i = 0; i < wsize / 2; i++)
+ wp[i+(wsize + 1) / 2] = b1[i];
+
+#if (LIMB_SIZE_HALF_448 > LIMB_SIZE_448/2)
+ cy = b1[LIMB_SIZE_HALF_448-1];
+#endif
+
+ memset(n, 0, wsize * BYTES_PER_MPI_LIMB);
+
+#if (LIMB_SIZE_HALF_448 > LIMB_SIZE_448/2)
+ n[LIMB_SIZE_HALF_448-1] = cy << 32;
+#else
+ n[LIMB_SIZE_HALF_448] = cy;
+#endif
+ n[0] = cy;
+ mpihelp_add_n(wp, wp, n, wsize);
+
+ memset(n, 0, wsize * BYTES_PER_MPI_LIMB);
+ cy = mpihelp_sub_n(wp, wp, ctx->p->d, wsize);
+ mpih_set_cond(n, ctx->p->d, wsize, (cy != 0UL));
+ mpihelp_add_n(wp, wp, n, wsize);
+}
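+
+/*
+ * Editorial note: with p = 2^448 - 2^224 - 1, 2^448 == 2^224 + 1 (mod p).
+ * Splitting the 896-bit product into 224-bit quarters
+ * n = b0 + b1*2^224 + a2*2^448 + a3*2^672 and folding twice yields
+ * n == (b0 + a2 + a3) + (b1 + a2 + 2*a3)*2^224 (mod p), which is the
+ * addition pattern implemented above.
+ */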
+
+static void ec_mul2_448(MPI w, MPI u, struct mpi_ec_ctx *ctx)
+{
+ ec_addm_448(w, u, u, ctx);
+}
+
+static void ec_pow2_448(MPI w, const MPI b, struct mpi_ec_ctx *ctx)
+{
+ ec_mulm_448(w, b, b, ctx);
+}
+
+struct field_table {
+ const char *p;
+
+ /* computation routines for the field. */
+ void (*addm)(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx);
+ void (*subm)(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx);
+ void (*mulm)(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx);
+ void (*mul2)(MPI w, MPI u, struct mpi_ec_ctx *ctx);
+ void (*pow2)(MPI w, const MPI b, struct mpi_ec_ctx *ctx);
+};
+
+static const struct field_table field_table[] = {
+ {
+ "0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFED",
+ ec_addm_25519,
+ ec_subm_25519,
+ ec_mulm_25519,
+ ec_mul2_25519,
+ ec_pow2_25519
+ },
+ {
+ "0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFE"
+ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF",
+ ec_addm_448,
+ ec_subm_448,
+ ec_mulm_448,
+ ec_mul2_448,
+ ec_pow2_448
+ },
+ { NULL, NULL, NULL, NULL, NULL, NULL },
+};
+
+/* Force recomputation of all helper variables. */
+static void mpi_ec_get_reset(struct mpi_ec_ctx *ec)
+{
+ ec->t.valid.a_is_pminus3 = 0;
+ ec->t.valid.two_inv_p = 0;
+}
+
+/* Accessor for helper variable. */
+static int ec_get_a_is_pminus3(struct mpi_ec_ctx *ec)
+{
+ MPI tmp;
+
+ if (!ec->t.valid.a_is_pminus3) {
+ ec->t.valid.a_is_pminus3 = 1;
+ tmp = mpi_alloc_like(ec->p);
+ mpi_sub_ui(tmp, ec->p, 3);
+ ec->t.a_is_pminus3 = !mpi_cmp(ec->a, tmp);
+ mpi_free(tmp);
+ }
+
+ return ec->t.a_is_pminus3;
+}
+
+/* Accessor for helper variable. */
+static MPI ec_get_two_inv_p(struct mpi_ec_ctx *ec)
+{
+ if (!ec->t.valid.two_inv_p) {
+ ec->t.valid.two_inv_p = 1;
+ if (!ec->t.two_inv_p)
+ ec->t.two_inv_p = mpi_alloc(0);
+ ec_invm(ec->t.two_inv_p, mpi_const(MPI_C_TWO), ec);
+ }
+ return ec->t.two_inv_p;
+}
+
+static const char *const curve25519_bad_points[] = {
+ "0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffed",
+ "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "0x0000000000000000000000000000000000000000000000000000000000000001",
+ "0x00b8495f16056286fdb1329ceb8d09da6ac49ff1fae35616aeb8413b7c7aebe0",
+ "0x57119fd0dd4e22d8868e1c58c45c44045bef839c55b1d0b1248c50a3bc959c5f",
+ "0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffec",
+ "0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffee",
+ NULL
+};
+
+static const char *const curve448_bad_points[] = {
+ "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffe"
+ "ffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "0x00000000000000000000000000000000000000000000000000000000"
+ "00000000000000000000000000000000000000000000000000000000",
+ "0x00000000000000000000000000000000000000000000000000000000"
+ "00000000000000000000000000000000000000000000000000000001",
+ "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffe"
+ "fffffffffffffffffffffffffffffffffffffffffffffffffffffffe",
+ "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
+ "00000000000000000000000000000000000000000000000000000000",
+ NULL
+};
+
+static const char *const *bad_points_table[] = {
+ curve25519_bad_points,
+ curve448_bad_points,
+};
+
+static void mpi_ec_coefficient_normalize(MPI a, MPI p)
+{
+ if (a->sign) {
+ mpi_resize(a, p->nlimbs);
+ mpihelp_sub_n(a->d, p->d, a->d, p->nlimbs);
+ a->nlimbs = p->nlimbs;
+ a->sign = 0;
+ }
+}
+
+/* This function initializes a context for an elliptic curve over the
+ * field GF(p). P is the prime specifying this field, A is the first
+ * coefficient. CTX is expected to be zeroized.
+ */
+void mpi_ec_init(struct mpi_ec_ctx *ctx, enum gcry_mpi_ec_models model,
+ enum ecc_dialects dialect,
+ int flags, MPI p, MPI a, MPI b)
+{
+ int i;
+ static int use_barrett = -1 /* TODO: 1 or -1 */;
+
+ mpi_ec_coefficient_normalize(a, p);
+ mpi_ec_coefficient_normalize(b, p);
+
+ /* Fixme: Do we want to check some constraints? e.g. a < p */
+
+ ctx->model = model;
+ ctx->dialect = dialect;
+ ctx->flags = flags;
+ if (dialect == ECC_DIALECT_ED25519)
+ ctx->nbits = 256;
+ else
+ ctx->nbits = mpi_get_nbits(p);
+ ctx->p = mpi_copy(p);
+ ctx->a = mpi_copy(a);
+ ctx->b = mpi_copy(b);
+
+ ctx->t.p_barrett = use_barrett > 0 ? mpi_barrett_init(ctx->p, 0) : NULL;
+
+ mpi_ec_get_reset(ctx);
+
+ if (model == MPI_EC_MONTGOMERY) {
+ for (i = 0; i < DIM(bad_points_table); i++) {
+ MPI p_candidate = mpi_scanval(bad_points_table[i][0]);
+ int match_p = !mpi_cmp(ctx->p, p_candidate);
+ int j;
+
+ mpi_free(p_candidate);
+ if (!match_p)
+ continue;
+
+ for (j = 0; j < DIM(ctx->t.scratch) && bad_points_table[i][j]; j++)
+ ctx->t.scratch[j] = mpi_scanval(bad_points_table[i][j]);
+ }
+ } else {
+ /* Allocate scratch variables. */
+ for (i = 0; i < DIM(ctx->t.scratch); i++)
+ ctx->t.scratch[i] = mpi_alloc_like(ctx->p);
+ }
+
+ ctx->addm = ec_addm;
+ ctx->subm = ec_subm;
+ ctx->mulm = ec_mulm;
+ ctx->mul2 = ec_mul2;
+ ctx->pow2 = ec_pow2;
+
+ for (i = 0; field_table[i].p; i++) {
+ MPI f_p;
+
+ f_p = mpi_scanval(field_table[i].p);
+ if (!f_p)
+ break;
+
+ if (!mpi_cmp(p, f_p)) {
+ ctx->addm = field_table[i].addm;
+ ctx->subm = field_table[i].subm;
+ ctx->mulm = field_table[i].mulm;
+ ctx->mul2 = field_table[i].mul2;
+ ctx->pow2 = field_table[i].pow2;
+ mpi_free(f_p);
+
+ mpi_resize(ctx->a, ctx->p->nlimbs);
+ ctx->a->nlimbs = ctx->p->nlimbs;
+
+ mpi_resize(ctx->b, ctx->p->nlimbs);
+ ctx->b->nlimbs = ctx->p->nlimbs;
+
+ for (i = 0; i < DIM(ctx->t.scratch) && ctx->t.scratch[i]; i++)
+ ctx->t.scratch[i]->nlimbs = ctx->p->nlimbs;
+
+ break;
+ }
+
+ mpi_free(f_p);
+ }
+}
+EXPORT_SYMBOL_GPL(mpi_ec_init);
+
+void mpi_ec_deinit(struct mpi_ec_ctx *ctx)
+{
+ int i;
+
+ mpi_barrett_free(ctx->t.p_barrett);
+
+ /* Domain parameter. */
+ mpi_free(ctx->p);
+ mpi_free(ctx->a);
+ mpi_free(ctx->b);
+ mpi_point_release(ctx->G);
+ mpi_free(ctx->n);
+
+ /* The key. */
+ mpi_point_release(ctx->Q);
+ mpi_free(ctx->d);
+
+ /* Private data of ec.c. */
+ mpi_free(ctx->t.two_inv_p);
+
+ for (i = 0; i < DIM(ctx->t.scratch); i++)
+ mpi_free(ctx->t.scratch[i]);
+}
+EXPORT_SYMBOL_GPL(mpi_ec_deinit);
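+
+/*
+ * Illustrative pairing of mpi_ec_init() and mpi_ec_deinit() (editorial
+ * sketch; the helper name is hypothetical, the parameter MPIs stand in
+ * for real curve constants, and ECC_DIALECT_STANDARD is assumed from the
+ * matching header change).
+ */
+static void ec_ctx_sketch(MPI p, MPI a, MPI b)
+{
+	struct mpi_ec_ctx ctx = {};
+
+	mpi_ec_init(&ctx, MPI_EC_WEIERSTRASS, ECC_DIALECT_STANDARD, 0,
+		    p, a, b);
+	/* ... point operations using &ctx ... */
+	mpi_ec_deinit(&ctx);	/* releases the copies taken by mpi_ec_init() */
+}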
+
+/* Compute the affine coordinates from the projective coordinates in
+ * POINT. Set them into X and Y. If one coordinate is not required,
+ * X or Y may be passed as NULL. CTX is the usual context. Returns: 0
+ * on success or !0 if POINT is at infinity.
+ */
+int mpi_ec_get_affine(MPI x, MPI y, MPI_POINT point, struct mpi_ec_ctx *ctx)
+{
+ if (!mpi_cmp_ui(point->z, 0))
+ return -1;
+
+ switch (ctx->model) {
+ case MPI_EC_WEIERSTRASS: /* Using Jacobian coordinates. */
+ {
+ MPI z1, z2, z3;
+
+ z1 = mpi_new(0);
+ z2 = mpi_new(0);
+ ec_invm(z1, point->z, ctx); /* z1 = z^(-1) mod p */
+ ec_mulm(z2, z1, z1, ctx); /* z2 = z^(-2) mod p */
+
+ if (x)
+ ec_mulm(x, point->x, z2, ctx);
+
+ if (y) {
+ z3 = mpi_new(0);
+ ec_mulm(z3, z2, z1, ctx); /* z3 = z^(-3) mod p */
+ ec_mulm(y, point->y, z3, ctx);
+ mpi_free(z3);
+ }
+
+ mpi_free(z2);
+ mpi_free(z1);
+ }
+ return 0;
+
+ case MPI_EC_MONTGOMERY:
+ {
+ if (x)
+ mpi_set(x, point->x);
+
+ if (y) {
+ log_fatal("%s: Getting Y-coordinate on %s is not supported\n",
+ "mpi_ec_get_affine", "Montgomery");
+ return -1;
+ }
+ }
+ return 0;
+
+ case MPI_EC_EDWARDS:
+ {
+ MPI z;
+
+ z = mpi_new(0);
+ ec_invm(z, point->z, ctx);
+
+ mpi_resize(z, ctx->p->nlimbs);
+ z->nlimbs = ctx->p->nlimbs;
+
+ if (x) {
+ mpi_resize(x, ctx->p->nlimbs);
+ x->nlimbs = ctx->p->nlimbs;
+ ctx->mulm(x, point->x, z, ctx);
+ }
+ if (y) {
+ mpi_resize(y, ctx->p->nlimbs);
+ y->nlimbs = ctx->p->nlimbs;
+ ctx->mulm(y, point->y, z, ctx);
+ }
+
+ mpi_free(z);
+ }
+ return 0;
+
+ default:
+ return -1;
+ }
+}
+EXPORT_SYMBOL_GPL(mpi_ec_get_affine);
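+
+/*
+ * Editorial note: in the Weierstrass case above the points are held in
+ * Jacobian coordinates, so the mapping is (X : Y : Z) -> (X/Z^2, Y/Z^3)
+ * mod p, hence the single inversion of Z followed by multiplications
+ * with z^-2 and z^-3.
+ */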
+
+/* RESULT = 2 * POINT (Weierstrass version). */
+static void dup_point_weierstrass(MPI_POINT result,
+ MPI_POINT point, struct mpi_ec_ctx *ctx)
+{
+#define x3 (result->x)
+#define y3 (result->y)
+#define z3 (result->z)
+#define t1 (ctx->t.scratch[0])
+#define t2 (ctx->t.scratch[1])
+#define t3 (ctx->t.scratch[2])
+#define l1 (ctx->t.scratch[3])
+#define l2 (ctx->t.scratch[4])
+#define l3 (ctx->t.scratch[5])
+
+ if (!mpi_cmp_ui(point->y, 0) || !mpi_cmp_ui(point->z, 0)) {
+ /* P_y == 0 || P_z == 0 => [1:1:0] */
+ mpi_set_ui(x3, 1);
+ mpi_set_ui(y3, 1);
+ mpi_set_ui(z3, 0);
+ } else {
+ if (ec_get_a_is_pminus3(ctx)) {
+ /* Use the faster case. */
+ /* L1 = 3(X - Z^2)(X + Z^2) */
+ /* T1: used for Z^2. */
+ /* T2: used for the right term. */
+ ec_pow2(t1, point->z, ctx);
+ ec_subm(l1, point->x, t1, ctx);
+ ec_mulm(l1, l1, mpi_const(MPI_C_THREE), ctx);
+ ec_addm(t2, point->x, t1, ctx);
+ ec_mulm(l1, l1, t2, ctx);
+ } else {
+ /* Standard case. */
+ /* L1 = 3X^2 + aZ^4 */
+ /* T1: used for aZ^4. */
+ ec_pow2(l1, point->x, ctx);
+ ec_mulm(l1, l1, mpi_const(MPI_C_THREE), ctx);
+ ec_powm(t1, point->z, mpi_const(MPI_C_FOUR), ctx);
+ ec_mulm(t1, t1, ctx->a, ctx);
+ ec_addm(l1, l1, t1, ctx);
+ }
+ /* Z3 = 2YZ */
+ ec_mulm(z3, point->y, point->z, ctx);
+ ec_mul2(z3, z3, ctx);
+
+ /* L2 = 4XY^2 */
+ /* T2: used for Y2; required later. */
+ ec_pow2(t2, point->y, ctx);
+ ec_mulm(l2, t2, point->x, ctx);
+ ec_mulm(l2, l2, mpi_const(MPI_C_FOUR), ctx);
+
+ /* X3 = L1^2 - 2L2 */
+ /* T1: used for L2^2. */
+ ec_pow2(x3, l1, ctx);
+ ec_mul2(t1, l2, ctx);
+ ec_subm(x3, x3, t1, ctx);
+
+ /* L3 = 8Y^4 */
+ /* T2: taken from above. */
+ ec_pow2(t2, t2, ctx);
+ ec_mulm(l3, t2, mpi_const(MPI_C_EIGHT), ctx);
+
+ /* Y3 = L1(L2 - X3) - L3 */
+ ec_subm(y3, l2, x3, ctx);
+ ec_mulm(y3, y3, l1, ctx);
+ ec_subm(y3, y3, l3, ctx);
+ }
+
+#undef x3
+#undef y3
+#undef z3
+#undef t1
+#undef t2
+#undef t3
+#undef l1
+#undef l2
+#undef l3
+}
+
+/* RESULT = 2 * POINT (Montgomery version). */
+static void dup_point_montgomery(MPI_POINT result,
+ MPI_POINT point, struct mpi_ec_ctx *ctx)
+{
+ (void)result;
+ (void)point;
+ (void)ctx;
+ log_fatal("%s: %s not yet supported\n",
+ "mpi_ec_dup_point", "Montgomery");
+}
+
+/* RESULT = 2 * POINT (Twisted Edwards version). */
+static void dup_point_edwards(MPI_POINT result,
+ MPI_POINT point, struct mpi_ec_ctx *ctx)
+{
+#define X1 (point->x)
+#define Y1 (point->y)
+#define Z1 (point->z)
+#define X3 (result->x)
+#define Y3 (result->y)
+#define Z3 (result->z)
+#define B (ctx->t.scratch[0])
+#define C (ctx->t.scratch[1])
+#define D (ctx->t.scratch[2])
+#define E (ctx->t.scratch[3])
+#define F (ctx->t.scratch[4])
+#define H (ctx->t.scratch[5])
+#define J (ctx->t.scratch[6])
+
+ /* Compute: (X_3 : Y_3 : Z_3) = 2( X_1 : Y_1 : Z_1 ) */
+
+ /* B = (X_1 + Y_1)^2 */
+ ctx->addm(B, X1, Y1, ctx);
+ ctx->pow2(B, B, ctx);
+
+ /* C = X_1^2 */
+ /* D = Y_1^2 */
+ ctx->pow2(C, X1, ctx);
+ ctx->pow2(D, Y1, ctx);
+
+ /* E = aC */
+ if (ctx->dialect == ECC_DIALECT_ED25519)
+ ctx->subm(E, ctx->p, C, ctx);
+ else
+ ctx->mulm(E, ctx->a, C, ctx);
+
+ /* F = E + D */
+ ctx->addm(F, E, D, ctx);
+
+ /* H = Z_1^2 */
+ ctx->pow2(H, Z1, ctx);
+
+ /* J = F - 2H */
+ ctx->mul2(J, H, ctx);
+ ctx->subm(J, F, J, ctx);
+
+ /* X_3 = (B - C - D) · J */
+ ctx->subm(X3, B, C, ctx);
+ ctx->subm(X3, X3, D, ctx);
+ ctx->mulm(X3, X3, J, ctx);
+
+ /* Y_3 = F · (E - D) */
+ ctx->subm(Y3, E, D, ctx);
+ ctx->mulm(Y3, Y3, F, ctx);
+
+ /* Z_3 = F · J */
+ ctx->mulm(Z3, F, J, ctx);
+
+#undef X1
+#undef Y1
+#undef Z1
+#undef X3
+#undef Y3
+#undef Z3
+#undef B
+#undef C
+#undef D
+#undef E
+#undef F
+#undef H
+#undef J
+}
+
+/* RESULT = 2 * POINT */
+static void
+mpi_ec_dup_point(MPI_POINT result, MPI_POINT point, struct mpi_ec_ctx *ctx)
+{
+ switch (ctx->model) {
+ case MPI_EC_WEIERSTRASS:
+ dup_point_weierstrass(result, point, ctx);
+ break;
+ case MPI_EC_MONTGOMERY:
+ dup_point_montgomery(result, point, ctx);
+ break;
+ case MPI_EC_EDWARDS:
+ dup_point_edwards(result, point, ctx);
+ break;
+ }
+}
+
+/* RESULT = P1 + P2 (Weierstrass version).*/
+static void add_points_weierstrass(MPI_POINT result,
+ MPI_POINT p1, MPI_POINT p2,
+ struct mpi_ec_ctx *ctx)
+{
+#define x1 (p1->x)
+#define y1 (p1->y)
+#define z1 (p1->z)
+#define x2 (p2->x)
+#define y2 (p2->y)
+#define z2 (p2->z)
+#define x3 (result->x)
+#define y3 (result->y)
+#define z3 (result->z)
+#define l1 (ctx->t.scratch[0])
+#define l2 (ctx->t.scratch[1])
+#define l3 (ctx->t.scratch[2])
+#define l4 (ctx->t.scratch[3])
+#define l5 (ctx->t.scratch[4])
+#define l6 (ctx->t.scratch[5])
+#define l7 (ctx->t.scratch[6])
+#define l8 (ctx->t.scratch[7])
+#define l9 (ctx->t.scratch[8])
+#define t1 (ctx->t.scratch[9])
+#define t2 (ctx->t.scratch[10])
+
+ if ((!mpi_cmp(x1, x2)) && (!mpi_cmp(y1, y2)) && (!mpi_cmp(z1, z2))) {
+ /* Same point; need to call the duplicate function. */
+ mpi_ec_dup_point(result, p1, ctx);
+ } else if (!mpi_cmp_ui(z1, 0)) {
+ /* P1 is at infinity. */
+ mpi_set(x3, p2->x);
+ mpi_set(y3, p2->y);
+ mpi_set(z3, p2->z);
+ } else if (!mpi_cmp_ui(z2, 0)) {
+ /* P2 is at infinity. */
+ mpi_set(x3, p1->x);
+ mpi_set(y3, p1->y);
+ mpi_set(z3, p1->z);
+ } else {
+ int z1_is_one = !mpi_cmp_ui(z1, 1);
+ int z2_is_one = !mpi_cmp_ui(z2, 1);
+
+ /* l1 = x1 z2^2 */
+ /* l2 = x2 z1^2 */
+ if (z2_is_one)
+ mpi_set(l1, x1);
+ else {
+ ec_pow2(l1, z2, ctx);
+ ec_mulm(l1, l1, x1, ctx);
+ }
+ if (z1_is_one)
+ mpi_set(l2, x2);
+ else {
+ ec_pow2(l2, z1, ctx);
+ ec_mulm(l2, l2, x2, ctx);
+ }
+ /* l3 = l1 - l2 */
+ ec_subm(l3, l1, l2, ctx);
+ /* l4 = y1 z2^3 */
+ ec_powm(l4, z2, mpi_const(MPI_C_THREE), ctx);
+ ec_mulm(l4, l4, y1, ctx);
+ /* l5 = y2 z1^3 */
+ ec_powm(l5, z1, mpi_const(MPI_C_THREE), ctx);
+ ec_mulm(l5, l5, y2, ctx);
+ /* l6 = l4 - l5 */
+ ec_subm(l6, l4, l5, ctx);
+
+ if (!mpi_cmp_ui(l3, 0)) {
+ if (!mpi_cmp_ui(l6, 0)) {
+ /* P1 and P2 are the same - use duplicate function. */
+ mpi_ec_dup_point(result, p1, ctx);
+ } else {
+ /* P1 is the inverse of P2. */
+ mpi_set_ui(x3, 1);
+ mpi_set_ui(y3, 1);
+ mpi_set_ui(z3, 0);
+ }
+ } else {
+ /* l7 = l1 + l2 */
+ ec_addm(l7, l1, l2, ctx);
+ /* l8 = l4 + l5 */
+ ec_addm(l8, l4, l5, ctx);
+ /* z3 = z1 z2 l3 */
+ ec_mulm(z3, z1, z2, ctx);
+ ec_mulm(z3, z3, l3, ctx);
+ /* x3 = l6^2 - l7 l3^2 */
+ ec_pow2(t1, l6, ctx);
+ ec_pow2(t2, l3, ctx);
+ ec_mulm(t2, t2, l7, ctx);
+ ec_subm(x3, t1, t2, ctx);
+ /* l9 = l7 l3^2 - 2 x3 */
+ ec_mul2(t1, x3, ctx);
+ ec_subm(l9, t2, t1, ctx);
+ /* y3 = (l9 l6 - l8 l3^3)/2 */
+ ec_mulm(l9, l9, l6, ctx);
+ ec_powm(t1, l3, mpi_const(MPI_C_THREE), ctx); /* fixme: Use saved value*/
+ ec_mulm(t1, t1, l8, ctx);
+ ec_subm(y3, l9, t1, ctx);
+ ec_mulm(y3, y3, ec_get_two_inv_p(ctx), ctx);
+ }
+ }
+
+#undef x1
+#undef y1
+#undef z1
+#undef x2
+#undef y2
+#undef z2
+#undef x3
+#undef y3
+#undef z3
+#undef l1
+#undef l2
+#undef l3
+#undef l4
+#undef l5
+#undef l6
+#undef l7
+#undef l8
+#undef l9
+#undef t1
+#undef t2
+}
+
+/* RESULT = P1 + P2 (Montgomery version).*/
+static void add_points_montgomery(MPI_POINT result,
+ MPI_POINT p1, MPI_POINT p2,
+ struct mpi_ec_ctx *ctx)
+{
+ (void)result;
+ (void)p1;
+ (void)p2;
+ (void)ctx;
+ log_fatal("%s: %s not yet supported\n",
+ "mpi_ec_add_points", "Montgomery");
+}
+
+/* RESULT = P1 + P2 (Twisted Edwards version).*/
+static void add_points_edwards(MPI_POINT result,
+ MPI_POINT p1, MPI_POINT p2,
+ struct mpi_ec_ctx *ctx)
+{
+#define X1 (p1->x)
+#define Y1 (p1->y)
+#define Z1 (p1->z)
+#define X2 (p2->x)
+#define Y2 (p2->y)
+#define Z2 (p2->z)
+#define X3 (result->x)
+#define Y3 (result->y)
+#define Z3 (result->z)
+#define A (ctx->t.scratch[0])
+#define B (ctx->t.scratch[1])
+#define C (ctx->t.scratch[2])
+#define D (ctx->t.scratch[3])
+#define E (ctx->t.scratch[4])
+#define F (ctx->t.scratch[5])
+#define G (ctx->t.scratch[6])
+#define tmp (ctx->t.scratch[7])
+
+ point_resize(result, ctx);
+
+ /* Compute: (X_3 : Y_3 : Z_3) = (X_1 : Y_1 : Z_1) + (X_2 : Y_2 : Z_2) */
+
+ /* A = Z1 · Z2 */
+ ctx->mulm(A, Z1, Z2, ctx);
+
+ /* B = A^2 */
+ ctx->pow2(B, A, ctx);
+
+ /* C = X1 · X2 */
+ ctx->mulm(C, X1, X2, ctx);
+
+ /* D = Y1 · Y2 */
+ ctx->mulm(D, Y1, Y2, ctx);
+
+ /* E = d · C · D */
+ ctx->mulm(E, ctx->b, C, ctx);
+ ctx->mulm(E, E, D, ctx);
+
+ /* F = B - E */
+ ctx->subm(F, B, E, ctx);
+
+ /* G = B + E */
+ ctx->addm(G, B, E, ctx);
+
+ /* X_3 = A · F · ((X_1 + Y_1) · (X_2 + Y_2) - C - D) */
+ ctx->addm(tmp, X1, Y1, ctx);
+ ctx->addm(X3, X2, Y2, ctx);
+ ctx->mulm(X3, X3, tmp, ctx);
+ ctx->subm(X3, X3, C, ctx);
+ ctx->subm(X3, X3, D, ctx);
+ ctx->mulm(X3, X3, F, ctx);
+ ctx->mulm(X3, X3, A, ctx);
+
+ /* Y_3 = A · G · (D - aC) */
+ if (ctx->dialect == ECC_DIALECT_ED25519) {
+ ctx->addm(Y3, D, C, ctx);
+ } else {
+ ctx->mulm(Y3, ctx->a, C, ctx);
+ ctx->subm(Y3, D, Y3, ctx);
+ }
+ ctx->mulm(Y3, Y3, G, ctx);
+ ctx->mulm(Y3, Y3, A, ctx);
+
+ /* Z_3 = F · G */
+ ctx->mulm(Z3, F, G, ctx);
+
+
+#undef X1
+#undef Y1
+#undef Z1
+#undef X2
+#undef Y2
+#undef Z2
+#undef X3
+#undef Y3
+#undef Z3
+#undef A
+#undef B
+#undef C
+#undef D
+#undef E
+#undef F
+#undef G
+#undef tmp
+}
+
+/* Compute a step of Montgomery Ladder (only use X and Z in the point).
+ * Inputs: P1, P2, and x-coordinate of DIF = P1 - P2.
+ * Outputs: PRD = 2 * P1 and SUM = P1 + P2.
+ */
+static void montgomery_ladder(MPI_POINT prd, MPI_POINT sum,
+ MPI_POINT p1, MPI_POINT p2, MPI dif_x,
+ struct mpi_ec_ctx *ctx)
+{
+ ctx->addm(sum->x, p2->x, p2->z, ctx);
+ ctx->subm(p2->z, p2->x, p2->z, ctx);
+ ctx->addm(prd->x, p1->x, p1->z, ctx);
+ ctx->subm(p1->z, p1->x, p1->z, ctx);
+ ctx->mulm(p2->x, p1->z, sum->x, ctx);
+ ctx->mulm(p2->z, prd->x, p2->z, ctx);
+ ctx->pow2(p1->x, prd->x, ctx);
+ ctx->pow2(p1->z, p1->z, ctx);
+ ctx->addm(sum->x, p2->x, p2->z, ctx);
+ ctx->subm(p2->z, p2->x, p2->z, ctx);
+ ctx->mulm(prd->x, p1->x, p1->z, ctx);
+ ctx->subm(p1->z, p1->x, p1->z, ctx);
+ ctx->pow2(sum->x, sum->x, ctx);
+ ctx->pow2(sum->z, p2->z, ctx);
+ ctx->mulm(prd->z, p1->z, ctx->a, ctx); /* CTX->A: (a-2)/4 */
+ ctx->mulm(sum->z, sum->z, dif_x, ctx);
+ ctx->addm(prd->z, p1->x, prd->z, ctx);
+ ctx->mulm(prd->z, prd->z, p1->z, ctx);
+}
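+
+/*
+ * Editorial note: each ladder step maps (P1, P2) to (2*P1, P1 + P2), so
+ * the difference P1 - P2 is invariant across steps; DIF_X supplies the
+ * x-coordinate of that fixed difference, which is all the x/z-only
+ * differential addition needs.
+ */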
+
+/* RESULT = P1 + P2 */
+void mpi_ec_add_points(MPI_POINT result,
+ MPI_POINT p1, MPI_POINT p2,
+ struct mpi_ec_ctx *ctx)
+{
+ switch (ctx->model) {
+ case MPI_EC_WEIERSTRASS:
+ add_points_weierstrass(result, p1, p2, ctx);
+ break;
+ case MPI_EC_MONTGOMERY:
+ add_points_montgomery(result, p1, p2, ctx);
+ break;
+ case MPI_EC_EDWARDS:
+ add_points_edwards(result, p1, p2, ctx);
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(mpi_ec_add_points);
+
+/* Scalar point multiplication - the main function for ECC. It takes
+ * an integer SCALAR and a POINT as well as the usual context CTX.
+ * RESULT will be set to the resulting point.
+ */
+void mpi_ec_mul_point(MPI_POINT result,
+ MPI scalar, MPI_POINT point,
+ struct mpi_ec_ctx *ctx)
+{
+ MPI x1, y1, z1, k, h, yy;
+ unsigned int i, loops;
+ struct gcry_mpi_point p1, p2, p1inv;
+
+ if (ctx->model == MPI_EC_EDWARDS) {
+ /* Simple left to right binary method. Algorithm 3.27 from
+ * {author={Hankerson, Darrel and Menezes, Alfred J. and Vanstone, Scott},
+ * title = {Guide to Elliptic Curve Cryptography},
+ * year = {2003}, isbn = {038795273X},
+ * url = {http://www.cacr.math.uwaterloo.ca/ecc/},
+ * publisher = {Springer-Verlag New York, Inc.}}
+ */
+ unsigned int nbits;
+ int j;
+
+ if (mpi_cmp(scalar, ctx->p) >= 0)
+ nbits = mpi_get_nbits(scalar);
+ else
+ nbits = mpi_get_nbits(ctx->p);
+
+ mpi_set_ui(result->x, 0);
+ mpi_set_ui(result->y, 1);
+ mpi_set_ui(result->z, 1);
+ point_resize(point, ctx);
+
+ point_resize(result, ctx);
+
+ for (j = nbits-1; j >= 0; j--) {
+ mpi_ec_dup_point(result, result, ctx);
+ if (mpi_test_bit(scalar, j))
+ mpi_ec_add_points(result, result, point, ctx);
+ }
+ return;
+ } else if (ctx->model == MPI_EC_MONTGOMERY) {
+ unsigned int nbits;
+ int j;
+ struct gcry_mpi_point p1_, p2_;
+ MPI_POINT q1, q2, prd, sum;
+ unsigned long sw;
+ mpi_size_t rsize;
+ int scalar_copied = 0;
+
+ /* Compute scalar point multiplication with Montgomery Ladder.
+ * Note that we don't use the Y-coordinate in the points at all.
+ * RESULT->Y will be filled with zero.
+ */
+
+ nbits = mpi_get_nbits(scalar);
+ point_init(&p1);
+ point_init(&p2);
+ point_init(&p1_);
+ point_init(&p2_);
+ mpi_set_ui(p1.x, 1);
+ mpi_free(p2.x);
+ p2.x = mpi_copy(point->x);
+ mpi_set_ui(p2.z, 1);
+
+ point_resize(&p1, ctx);
+ point_resize(&p2, ctx);
+ point_resize(&p1_, ctx);
+ point_resize(&p2_, ctx);
+
+ mpi_resize(point->x, ctx->p->nlimbs);
+ point->x->nlimbs = ctx->p->nlimbs;
+
+ q1 = &p1;
+ q2 = &p2;
+ prd = &p1_;
+ sum = &p2_;
+
+ for (j = nbits-1; j >= 0; j--) {
+ MPI_POINT t;
+
+ sw = mpi_test_bit(scalar, j);
+ point_swap_cond(q1, q2, sw, ctx);
+ montgomery_ladder(prd, sum, q1, q2, point->x, ctx);
+ point_swap_cond(prd, sum, sw, ctx);
+ t = q1; q1 = prd; prd = t;
+ t = q2; q2 = sum; sum = t;
+ }
+
+ mpi_clear(result->y);
+ sw = (nbits & 1);
+ point_swap_cond(&p1, &p1_, sw, ctx);
+
+ rsize = p1.z->nlimbs;
+ MPN_NORMALIZE(p1.z->d, rsize);
+ if (rsize == 0) {
+ mpi_set_ui(result->x, 1);
+ mpi_set_ui(result->z, 0);
+ } else {
+ z1 = mpi_new(0);
+ ec_invm(z1, p1.z, ctx);
+ ec_mulm(result->x, p1.x, z1, ctx);
+ mpi_set_ui(result->z, 1);
+ mpi_free(z1);
+ }
+
+ point_free(&p1);
+ point_free(&p2);
+ point_free(&p1_);
+ point_free(&p2_);
+ if (scalar_copied)
+ mpi_free(scalar);
+ return;
+ }
+
+ x1 = mpi_alloc_like(ctx->p);
+ y1 = mpi_alloc_like(ctx->p);
+ h = mpi_alloc_like(ctx->p);
+ k = mpi_copy(scalar);
+ yy = mpi_copy(point->y);
+
+ if (mpi_has_sign(k)) {
+ k->sign = 0;
+ ec_invm(yy, yy, ctx);
+ }
+
+ if (!mpi_cmp_ui(point->z, 1)) {
+ mpi_set(x1, point->x);
+ mpi_set(y1, yy);
+ } else {
+ MPI z2, z3;
+
+ z2 = mpi_alloc_like(ctx->p);
+ z3 = mpi_alloc_like(ctx->p);
+ ec_mulm(z2, point->z, point->z, ctx);
+ ec_mulm(z3, point->z, z2, ctx);
+ ec_invm(z2, z2, ctx);
+ ec_mulm(x1, point->x, z2, ctx);
+ ec_invm(z3, z3, ctx);
+ ec_mulm(y1, yy, z3, ctx);
+ mpi_free(z2);
+ mpi_free(z3);
+ }
+ z1 = mpi_copy(mpi_const(MPI_C_ONE));
+
+ mpi_mul(h, k, mpi_const(MPI_C_THREE)); /* h = 3k */
+ loops = mpi_get_nbits(h);
+ if (loops < 2) {
+ /* If SCALAR is zero, the above mpi_mul sets H to zero and thus
+ * LOOPS will be zero. To avoid an underflow of I in the main
+ * loop we set LOOPS to 2 and the result to (0,0,0).
+ */
+ loops = 2;
+ mpi_clear(result->x);
+ mpi_clear(result->y);
+ mpi_clear(result->z);
+ } else {
+ mpi_set(result->x, point->x);
+ mpi_set(result->y, yy);
+ mpi_set(result->z, point->z);
+ }
+ mpi_free(yy); yy = NULL;
+
+ p1.x = x1; x1 = NULL;
+ p1.y = y1; y1 = NULL;
+ p1.z = z1; z1 = NULL;
+ point_init(&p2);
+ point_init(&p1inv);
+
+ /* Invert point: y = p - y mod p */
+ point_set(&p1inv, &p1);
+ ec_subm(p1inv.y, ctx->p, p1inv.y, ctx);
+
+ for (i = loops-2; i > 0; i--) {
+ mpi_ec_dup_point(result, result, ctx);
+ if (mpi_test_bit(h, i) == 1 && mpi_test_bit(k, i) == 0) {
+ point_set(&p2, result);
+ mpi_ec_add_points(result, &p2, &p1, ctx);
+ }
+ if (mpi_test_bit(h, i) == 0 && mpi_test_bit(k, i) == 1) {
+ point_set(&p2, result);
+ mpi_ec_add_points(result, &p2, &p1inv, ctx);
+ }
+ }
+
+ point_free(&p1);
+ point_free(&p2);
+ point_free(&p1inv);
+ mpi_free(h);
+ mpi_free(k);
+}
+EXPORT_SYMBOL_GPL(mpi_ec_mul_point);
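+
+/*
+ * Illustrative sketch of computing Q = k*P and reading back affine
+ * coordinates (editorial; the helper name and error convention are
+ * hypothetical, CTX is assumed to come from mpi_ec_init()).
+ */
+static int ec_mul_sketch(struct mpi_ec_ctx *ctx, MPI k, MPI_POINT p)
+{
+	MPI_POINT q = mpi_point_new(0);
+	MPI qx, qy;
+	int rc;
+
+	if (!q)
+		return -ENOMEM;
+	qx = mpi_new(0);
+	qy = mpi_new(0);
+	mpi_ec_mul_point(q, k, p, ctx);
+	/* mpi_ec_get_affine() fails only if Q ended up at infinity. */
+	rc = mpi_ec_get_affine(qx, qy, q, ctx) ? -EINVAL : 0;
+	mpi_free(qy);
+	mpi_free(qx);
+	mpi_point_release(q);
+	return rc;
+}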
+
+/* Return true if POINT is on the curve described by CTX. */
+int mpi_ec_curve_point(MPI_POINT point, struct mpi_ec_ctx *ctx)
+{
+ int res = 0;
+ MPI x, y, w;
+
+ x = mpi_new(0);
+ y = mpi_new(0);
+ w = mpi_new(0);
+
+ /* Check that the point is in range. This needs to be done here and
+ * not after conversion to affine coordinates.
+ */
+ if (mpi_cmpabs(point->x, ctx->p) >= 0)
+ goto leave;
+ if (mpi_cmpabs(point->y, ctx->p) >= 0)
+ goto leave;
+ if (mpi_cmpabs(point->z, ctx->p) >= 0)
+ goto leave;
+
+ switch (ctx->model) {
+ case MPI_EC_WEIERSTRASS:
+ {
+ MPI xxx;
+
+ if (mpi_ec_get_affine(x, y, point, ctx))
+ goto leave;
+
+ xxx = mpi_new(0);
+
+ /* y^2 == x^3 + a·x + b */
+ ec_pow2(y, y, ctx);
+
+ ec_pow3(xxx, x, ctx);
+ ec_mulm(w, ctx->a, x, ctx);
+ ec_addm(w, w, ctx->b, ctx);
+ ec_addm(w, w, xxx, ctx);
+
+ if (!mpi_cmp(y, w))
+ res = 1;
+
+ mpi_free(xxx);
+ }
+ break;
+
+ case MPI_EC_MONTGOMERY:
+ {
+#define xx y
+ /* With Montgomery curve, only X-coordinate is valid. */
+ if (mpi_ec_get_affine(x, NULL, point, ctx))
+ goto leave;
+
+ /* The equation is: b * y^2 == x^3 + a · x^2 + x */
+ /* We check if right hand is quadratic residue or not by
+ * Euler's criterion.
+ */
+ /* CTX->A has (a-2)/4 and CTX->B has b^-1 */
+ ec_mulm(w, ctx->a, mpi_const(MPI_C_FOUR), ctx);
+ ec_addm(w, w, mpi_const(MPI_C_TWO), ctx);
+ ec_mulm(w, w, x, ctx);
+ ec_pow2(xx, x, ctx);
+ ec_addm(w, w, xx, ctx);
+ ec_addm(w, w, mpi_const(MPI_C_ONE), ctx);
+ ec_mulm(w, w, x, ctx);
+ ec_mulm(w, w, ctx->b, ctx);
+#undef xx
+ /* Compute Euler's criterion: w^((p-1)/2) */
+#define p_minus1 y
+ ec_subm(p_minus1, ctx->p, mpi_const(MPI_C_ONE), ctx);
+ mpi_rshift(p_minus1, p_minus1, 1);
+ ec_powm(w, w, p_minus1, ctx);
+
+ res = !mpi_cmp_ui(w, 1);
+#undef p_minus1
+ }
+ break;
+
+ case MPI_EC_EDWARDS:
+ {
+ if (mpi_ec_get_affine(x, y, point, ctx))
+ goto leave;
+
+ mpi_resize(w, ctx->p->nlimbs);
+ w->nlimbs = ctx->p->nlimbs;
+
+ /* a · x^2 + y^2 - 1 - b · x^2 · y^2 == 0 */
+ ctx->pow2(x, x, ctx);
+ ctx->pow2(y, y, ctx);
+ if (ctx->dialect == ECC_DIALECT_ED25519)
+ ctx->subm(w, ctx->p, x, ctx);
+ else
+ ctx->mulm(w, ctx->a, x, ctx);
+ ctx->addm(w, w, y, ctx);
+ ctx->mulm(x, x, y, ctx);
+ ctx->mulm(x, x, ctx->b, ctx);
+ ctx->subm(w, w, x, ctx);
+ if (!mpi_cmp_ui(w, 1))
+ res = 1;
+ }
+ break;
+ }
+
+leave:
+ mpi_free(w);
+ mpi_free(x);
+ mpi_free(y);
+
+ return res;
+}
+EXPORT_SYMBOL_GPL(mpi_ec_curve_point);
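+
+/*
+ * Editorial note: the Montgomery branch relies on Euler's criterion: for
+ * an odd prime p and w not divisible by p, w is a quadratic residue mod p
+ * iff w^((p-1)/2) == 1 (mod p), i.e. iff some y with y^2 == w exists,
+ * which is exactly the condition for X to be the x-coordinate of a
+ * curve point.
+ */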
diff --git a/lib/mpi/mpi-add.c b/lib/mpi/mpi-add.c
new file mode 100644
index 000000000000..2cdae54c1bd0
--- /dev/null
+++ b/lib/mpi/mpi-add.c
@@ -0,0 +1,155 @@
+/* mpi-add.c - MPI functions
+ * Copyright (C) 1994, 1996, 1998, 2001, 2002,
+ * 2003 Free Software Foundation, Inc.
+ *
+ * This file is part of Libgcrypt.
+ *
+ * Note: This code is heavily based on the GNU MP Library.
+ * Actually it's the same code with only minor changes in the
+ * way the data is stored; this is to support the abstraction
+ * of an optional secure memory allocation which may be used
+ * to avoid revealing of sensitive data due to paging etc.
+ */
+
+#include "mpi-internal.h"
+
+/****************
+ * Add the unsigned integer V to the mpi-integer U and store the
+ * result in W. U and V may be the same.
+ */
+void mpi_add_ui(MPI w, MPI u, unsigned long v)
+{
+ mpi_ptr_t wp, up;
+ mpi_size_t usize, wsize;
+ int usign, wsign;
+
+ usize = u->nlimbs;
+ usign = u->sign;
+ wsign = 0;
+
+ /* If not space for W (and possible carry), increase space. */
+ wsize = usize + 1;
+ if (w->alloced < wsize)
+ mpi_resize(w, wsize);
+
+ /* These must be after realloc (U may be the same as W). */
+ up = u->d;
+ wp = w->d;
+
+ if (!usize) { /* simple */
+ wp[0] = v;
+ wsize = v ? 1:0;
+ } else if (!usign) { /* mpi is not negative */
+ mpi_limb_t cy;
+ cy = mpihelp_add_1(wp, up, usize, v);
+ wp[usize] = cy;
+ wsize = usize + cy;
+ } else {
+ /* The signs are different. Need exact comparison to determine
+ * which operand to subtract from which.
+ */
+ if (usize == 1 && up[0] < v) {
+ wp[0] = v - up[0];
+ wsize = 1;
+ } else {
+ mpihelp_sub_1(wp, up, usize, v);
+ /* Size can decrease with at most one limb. */
+ wsize = usize - (wp[usize-1] == 0);
+ wsign = 1;
+ }
+ }
+
+ w->nlimbs = wsize;
+ w->sign = wsign;
+}
+
+
+void mpi_add(MPI w, MPI u, MPI v)
+{
+ mpi_ptr_t wp, up, vp;
+ mpi_size_t usize, vsize, wsize;
+ int usign, vsign, wsign;
+
+ if (u->nlimbs < v->nlimbs) { /* Swap U and V. */
+ usize = v->nlimbs;
+ usign = v->sign;
+ vsize = u->nlimbs;
+ vsign = u->sign;
+ wsize = usize + 1;
+ RESIZE_IF_NEEDED(w, wsize);
+ /* These must be after realloc (u or v may be the same as w). */
+ up = v->d;
+ vp = u->d;
+ } else {
+ usize = u->nlimbs;
+ usign = u->sign;
+ vsize = v->nlimbs;
+ vsign = v->sign;
+ wsize = usize + 1;
+ RESIZE_IF_NEEDED(w, wsize);
+ /* These must be after realloc (u or v may be the same as w). */
+ up = u->d;
+ vp = v->d;
+ }
+ wp = w->d;
+ wsign = 0;
+
+ if (!vsize) { /* simple */
+ MPN_COPY(wp, up, usize);
+ wsize = usize;
+ wsign = usign;
+ } else if (usign != vsign) { /* different sign */
+ /* This test is right since USIZE >= VSIZE */
+ if (usize != vsize) {
+ mpihelp_sub(wp, up, usize, vp, vsize);
+ wsize = usize;
+ MPN_NORMALIZE(wp, wsize);
+ wsign = usign;
+ } else if (mpihelp_cmp(up, vp, usize) < 0) {
+ mpihelp_sub_n(wp, vp, up, usize);
+ wsize = usize;
+ MPN_NORMALIZE(wp, wsize);
+ if (!usign)
+ wsign = 1;
+ } else {
+ mpihelp_sub_n(wp, up, vp, usize);
+ wsize = usize;
+ MPN_NORMALIZE(wp, wsize);
+ if (usign)
+ wsign = 1;
+ }
+ } else { /* U and V have same sign. Add them. */
+ mpi_limb_t cy = mpihelp_add(wp, up, usize, vp, vsize);
+ wp[usize] = cy;
+ wsize = usize + cy;
+ if (usign)
+ wsign = 1;
+ }
+
+ w->nlimbs = wsize;
+ w->sign = wsign;
+}
+EXPORT_SYMBOL_GPL(mpi_add);
+
+void mpi_sub(MPI w, MPI u, MPI v)
+{
+ MPI vv = mpi_copy(v);
+ vv->sign = !vv->sign;
+ mpi_add(w, u, vv);
+ mpi_free(vv);
+}
+
+
+void mpi_addm(MPI w, MPI u, MPI v, MPI m)
+{
+ mpi_add(w, u, v);
+ mpi_mod(w, w, m);
+}
+EXPORT_SYMBOL_GPL(mpi_addm);
+
+void mpi_subm(MPI w, MPI u, MPI v, MPI m)
+{
+ mpi_sub(w, u, v);
+ mpi_mod(w, w, m);
+}
+EXPORT_SYMBOL_GPL(mpi_subm);
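+
+/*
+ * Illustrative sketch (editorial; the helper name is hypothetical):
+ * plain and modular addition with the functions above.
+ */
+static void mpi_add_sketch(MPI m)
+{
+	MPI u = mpi_alloc(0);
+	MPI v = mpi_alloc(0);
+	MPI w = mpi_alloc(0);
+
+	mpi_set_ui(u, 5);
+	mpi_set_ui(v, 7);
+	mpi_add(w, u, v);	/* w = 12 */
+	mpi_addm(w, u, v, m);	/* w = (5 + 7) mod m */
+	mpi_free(w);
+	mpi_free(v);
+	mpi_free(u);
+}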
diff --git a/lib/mpi/mpi-bit.c b/lib/mpi/mpi-bit.c
index 503537e08436..142b680835df 100644
--- a/lib/mpi/mpi-bit.c
+++ b/lib/mpi/mpi-bit.c
@@ -1,4 +1,4 @@
-/* mpi-bit.c - MPI bit level fucntions
+/* mpi-bit.c - MPI bit level functions
* Copyright (C) 1998, 1999 Free Software Foundation, Inc.
*
* This file is part of GnuPG.
@@ -32,6 +32,7 @@ void mpi_normalize(MPI a)
for (; a->nlimbs && !a->d[a->nlimbs - 1]; a->nlimbs--)
;
}
+EXPORT_SYMBOL_GPL(mpi_normalize);
/****************
* Return the number of bits in A.
@@ -54,3 +55,253 @@ unsigned mpi_get_nbits(MPI a)
return n;
}
EXPORT_SYMBOL_GPL(mpi_get_nbits);
+
+/****************
+ * Test whether bit N is set.
+ */
+int mpi_test_bit(MPI a, unsigned int n)
+{
+ unsigned int limbno, bitno;
+ mpi_limb_t limb;
+
+ limbno = n / BITS_PER_MPI_LIMB;
+ bitno = n % BITS_PER_MPI_LIMB;
+
+ if (limbno >= a->nlimbs)
+ return 0; /* too far left: this is a 0 */
+ limb = a->d[limbno];
+ return (limb & (A_LIMB_1 << bitno)) ? 1 : 0;
+}
+EXPORT_SYMBOL_GPL(mpi_test_bit);
+
+/****************
+ * Set bit N of A.
+ */
+void mpi_set_bit(MPI a, unsigned int n)
+{
+ unsigned int i, limbno, bitno;
+
+ limbno = n / BITS_PER_MPI_LIMB;
+ bitno = n % BITS_PER_MPI_LIMB;
+
+ if (limbno >= a->nlimbs) {
+ for (i = a->nlimbs; i < a->alloced; i++)
+ a->d[i] = 0;
+ mpi_resize(a, limbno+1);
+ a->nlimbs = limbno+1;
+ }
+ a->d[limbno] |= (A_LIMB_1<<bitno);
+}
+
+/****************
+ * Set bit N of A and clear all bits above.
+ */
+void mpi_set_highbit(MPI a, unsigned int n)
+{
+ unsigned int i, limbno, bitno;
+
+ limbno = n / BITS_PER_MPI_LIMB;
+ bitno = n % BITS_PER_MPI_LIMB;
+
+ if (limbno >= a->nlimbs) {
+ for (i = a->nlimbs; i < a->alloced; i++)
+ a->d[i] = 0;
+ mpi_resize(a, limbno+1);
+ a->nlimbs = limbno+1;
+ }
+ a->d[limbno] |= (A_LIMB_1<<bitno);
+ for (bitno++; bitno < BITS_PER_MPI_LIMB; bitno++)
+ a->d[limbno] &= ~(A_LIMB_1 << bitno);
+ a->nlimbs = limbno+1;
+}
+EXPORT_SYMBOL_GPL(mpi_set_highbit);
+
+/****************
+ * clear bit N of A and all bits above
+ */
+void mpi_clear_highbit(MPI a, unsigned int n)
+{
+ unsigned int limbno, bitno;
+
+ limbno = n / BITS_PER_MPI_LIMB;
+ bitno = n % BITS_PER_MPI_LIMB;
+
+ if (limbno >= a->nlimbs)
+ return; /* not allocated, therefore no need to clear bits :-) */
+
+ for ( ; bitno < BITS_PER_MPI_LIMB; bitno++)
+ a->d[limbno] &= ~(A_LIMB_1 << bitno);
+ a->nlimbs = limbno+1;
+}
+
+/****************
+ * Clear bit N of A.
+ */
+void mpi_clear_bit(MPI a, unsigned int n)
+{
+ unsigned int limbno, bitno;
+
+ limbno = n / BITS_PER_MPI_LIMB;
+ bitno = n % BITS_PER_MPI_LIMB;
+
+ if (limbno >= a->nlimbs)
+ return; /* Don't need to clear this bit, it's far too left. */
+ a->d[limbno] &= ~(A_LIMB_1 << bitno);
+}
+EXPORT_SYMBOL_GPL(mpi_clear_bit);
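+
+/*
+ * Illustrative sketch (editorial; the helper name is hypothetical):
+ * setting, testing and clearing a single bit with the helpers above.
+ */
+static void mpi_bit_sketch(MPI a)
+{
+	mpi_set_bit(a, 100);		/* resizes A if bit 100 lies beyond it */
+	if (mpi_test_bit(a, 100))
+		mpi_clear_bit(a, 100);	/* undo the set again */
+}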
+
+
+/****************
+ * Shift A by COUNT limbs to the right
+ * This is used only within the MPI library
+ */
+void mpi_rshift_limbs(MPI a, unsigned int count)
+{
+ mpi_ptr_t ap = a->d;
+ mpi_size_t n = a->nlimbs;
+ unsigned int i;
+
+ if (count >= n) {
+ a->nlimbs = 0;
+ return;
+ }
+
+ for (i = 0; i < n - count; i++)
+ ap[i] = ap[i+count];
+ ap[i] = 0;
+ a->nlimbs -= count;
+}
+
+/*
+ * Shift A by N bits to the right.
+ */
+void mpi_rshift(MPI x, MPI a, unsigned int n)
+{
+ mpi_size_t xsize;
+ unsigned int i;
+ unsigned int nlimbs = (n/BITS_PER_MPI_LIMB);
+ unsigned int nbits = (n%BITS_PER_MPI_LIMB);
+
+ if (x == a) {
+ /* In-place operation. */
+ if (nlimbs >= x->nlimbs) {
+ x->nlimbs = 0;
+ return;
+ }
+
+ if (nlimbs) {
+ for (i = 0; i < x->nlimbs - nlimbs; i++)
+ x->d[i] = x->d[i+nlimbs];
+ x->d[i] = 0;
+ x->nlimbs -= nlimbs;
+ }
+ if (x->nlimbs && nbits)
+ mpihelp_rshift(x->d, x->d, x->nlimbs, nbits);
+ } else if (nlimbs) {
+ /* Copy and shift by more or equal bits than in a limb. */
+ xsize = a->nlimbs;
+ x->sign = a->sign;
+ RESIZE_IF_NEEDED(x, xsize);
+ x->nlimbs = xsize;
+ for (i = 0; i < a->nlimbs; i++)
+ x->d[i] = a->d[i];
+ x->nlimbs = i;
+
+ if (nlimbs >= x->nlimbs) {
+ x->nlimbs = 0;
+ return;
+ }
+
+ if (nlimbs) {
+ for (i = 0; i < x->nlimbs - nlimbs; i++)
+ x->d[i] = x->d[i+nlimbs];
+ x->d[i] = 0;
+ x->nlimbs -= nlimbs;
+ }
+
+ if (x->nlimbs && nbits)
+ mpihelp_rshift(x->d, x->d, x->nlimbs, nbits);
+ } else {
+ /* Copy and shift by less than bits in a limb. */
+ xsize = a->nlimbs;
+ x->sign = a->sign;
+ RESIZE_IF_NEEDED(x, xsize);
+ x->nlimbs = xsize;
+
+ if (xsize) {
+ if (nbits)
+ mpihelp_rshift(x->d, a->d, x->nlimbs, nbits);
+ else {
+ /* The rshift helper function is not specified for
+ * NBITS==0, thus we do a plain copy here.
+ */
+ for (i = 0; i < x->nlimbs; i++)
+ x->d[i] = a->d[i];
+ }
+ }
+ }
+ MPN_NORMALIZE(x->d, x->nlimbs);
+}
+
+/****************
+ * Shift A by COUNT limbs to the left
+ * This is used only within the MPI library
+ */
+void mpi_lshift_limbs(MPI a, unsigned int count)
+{
+ mpi_ptr_t ap;
+ int n = a->nlimbs;
+ int i;
+
+ if (!count || !n)
+ return;
+
+ RESIZE_IF_NEEDED(a, n+count);
+
+ ap = a->d;
+ for (i = n-1; i >= 0; i--)
+ ap[i+count] = ap[i];
+ for (i = 0; i < count; i++)
+ ap[i] = 0;
+ a->nlimbs += count;
+}
+
+/*
+ * Shift A by N bits to the left.
+ */
+void mpi_lshift(MPI x, MPI a, unsigned int n)
+{
+ unsigned int nlimbs = (n/BITS_PER_MPI_LIMB);
+ unsigned int nbits = (n%BITS_PER_MPI_LIMB);
+
+ if (x == a && !n)
+ return; /* In-place shift with an amount of zero. */
+
+ if (x != a) {
+ /* Copy A to X. */
+ unsigned int alimbs = a->nlimbs;
+ int asign = a->sign;
+ mpi_ptr_t xp, ap;
+
+ RESIZE_IF_NEEDED(x, alimbs+nlimbs+1);
+ xp = x->d;
+ ap = a->d;
+ MPN_COPY(xp, ap, alimbs);
+ x->nlimbs = alimbs;
+ x->flags = a->flags;
+ x->sign = asign;
+ }
+
+ if (nlimbs && !nbits) {
+ /* Shift a full number of limbs. */
+ mpi_lshift_limbs(x, nlimbs);
+ } else if (n) {
+ /* We use a very dumb approach: shift left by the number of
+ * limbs plus one and then fix it up with an rshift.
+ */
+ mpi_lshift_limbs(x, nlimbs+1);
+ mpi_rshift(x, x, BITS_PER_MPI_LIMB - nbits);
+ }
+
+ MPN_NORMALIZE(x->d, x->nlimbs);
+}
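+
+/*
+ * Editorial note: for a shift of n = q*BITS_PER_MPI_LIMB + r bits with
+ * r != 0, shifting left by q+1 whole limbs and then right by
+ * BITS_PER_MPI_LIMB - r bits is equivalent, since
+ * (q+1)*BITS_PER_MPI_LIMB - (BITS_PER_MPI_LIMB - r) == n.
+ */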
diff --git a/lib/mpi/mpi-cmp.c b/lib/mpi/mpi-cmp.c
index d25e9e96c310..c4cfa3ff0581 100644
--- a/lib/mpi/mpi-cmp.c
+++ b/lib/mpi/mpi-cmp.c
@@ -41,28 +41,54 @@ int mpi_cmp_ui(MPI u, unsigned long v)
}
EXPORT_SYMBOL_GPL(mpi_cmp_ui);
-int mpi_cmp(MPI u, MPI v)
+static int do_mpi_cmp(MPI u, MPI v, int absmode)
{
- mpi_size_t usize, vsize;
+ mpi_size_t usize;
+ mpi_size_t vsize;
+ int usign;
+ int vsign;
int cmp;
mpi_normalize(u);
mpi_normalize(v);
+
usize = u->nlimbs;
vsize = v->nlimbs;
- if (!u->sign && v->sign)
+ usign = absmode ? 0 : u->sign;
+ vsign = absmode ? 0 : v->sign;
+
+ /* Compare sign bits. */
+
+ if (!usign && vsign)
return 1;
- if (u->sign && !v->sign)
+ if (usign && !vsign)
return -1;
- if (usize != vsize && !u->sign && !v->sign)
+
+ /* U and V are either both positive or both negative. */
+
+ if (usize != vsize && !usign && !vsign)
return usize - vsize;
- if (usize != vsize && u->sign && v->sign)
- return vsize - usize;
+ if (usize != vsize && usign && vsign)
+ return vsize - usize;
if (!usize)
return 0;
cmp = mpihelp_cmp(u->d, v->d, usize);
- if (u->sign)
- return -cmp;
- return cmp;
+ if (!cmp)
+ return 0;
+ if ((cmp < 0?1:0) == (usign?1:0))
+ return 1;
+
+ return -1;
+}
+
+int mpi_cmp(MPI u, MPI v)
+{
+ return do_mpi_cmp(u, v, 0);
}
EXPORT_SYMBOL_GPL(mpi_cmp);
+
+int mpi_cmpabs(MPI u, MPI v)
+{
+ return do_mpi_cmp(u, v, 1);
+}
+EXPORT_SYMBOL_GPL(mpi_cmpabs);
diff --git a/lib/mpi/mpi-div.c b/lib/mpi/mpi-div.c
new file mode 100644
index 000000000000..45beab8b9e9e
--- /dev/null
+++ b/lib/mpi/mpi-div.c
@@ -0,0 +1,234 @@
+/* mpi-div.c - MPI functions
+ * Copyright (C) 1994, 1996, 1998, 2001, 2002,
+ * 2003 Free Software Foundation, Inc.
+ *
+ * This file is part of Libgcrypt.
+ *
+ * Note: This code is heavily based on the GNU MP Library.
+ * Actually it's the same code with only minor changes in the
+ * way the data is stored; this is to support the abstraction
+ * of an optional secure memory allocation which may be used
+ * to avoid revealing of sensitive data due to paging etc.
+ */
+
+#include "mpi-internal.h"
+#include "longlong.h"
+
+void mpi_tdiv_qr(MPI quot, MPI rem, MPI num, MPI den);
+void mpi_fdiv_qr(MPI quot, MPI rem, MPI dividend, MPI divisor);
+
+void mpi_fdiv_r(MPI rem, MPI dividend, MPI divisor)
+{
+ int divisor_sign = divisor->sign;
+ MPI temp_divisor = NULL;
+
+ /* We need the original value of the divisor after the remainder has been
+ * preliminarily calculated. We have to copy it to temporary space if it's
+ * the same variable as REM.
+ */
+ if (rem == divisor) {
+ temp_divisor = mpi_copy(divisor);
+ divisor = temp_divisor;
+ }
+
+ mpi_tdiv_r(rem, dividend, divisor);
+
+ if (((divisor_sign?1:0) ^ (dividend->sign?1:0)) && rem->nlimbs)
+ mpi_add(rem, rem, divisor);
+
+ if (temp_divisor)
+ mpi_free(temp_divisor);
+}
+
+void mpi_fdiv_q(MPI quot, MPI dividend, MPI divisor)
+{
+ MPI tmp = mpi_alloc(mpi_get_nlimbs(quot));
+ mpi_fdiv_qr(quot, tmp, dividend, divisor);
+ mpi_free(tmp);
+}
+
+void mpi_fdiv_qr(MPI quot, MPI rem, MPI dividend, MPI divisor)
+{
+ int divisor_sign = divisor->sign;
+ MPI temp_divisor = NULL;
+
+ if (quot == divisor || rem == divisor) {
+ temp_divisor = mpi_copy(divisor);
+ divisor = temp_divisor;
+ }
+
+ mpi_tdiv_qr(quot, rem, dividend, divisor);
+
+ if ((divisor_sign ^ dividend->sign) && rem->nlimbs) {
+ mpi_sub_ui(quot, quot, 1);
+ mpi_add(rem, rem, divisor);
+ }
+
+ if (temp_divisor)
+ mpi_free(temp_divisor);
+}
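+
+/*
+ * Editorial note, a worked example of the sign fix-up above: truncated
+ * division of -7 by 3 gives q == -2, r == -1, while floor division
+ * requires q == -3, r == 2. The mpi_sub_ui()/mpi_add() adjustment
+ * produces exactly that whenever the operand signs differ and the
+ * remainder is non-zero.
+ */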
+
+/* If den == quot, den needs temporary storage.
+ * If den == rem, den needs temporary storage.
+ * If num == quot, num needs temporary storage.
+ * If den has temporary storage, it can be normalized while being copied,
+ * i.e no extra storage should be allocated.
+ */
+
+void mpi_tdiv_r(MPI rem, MPI num, MPI den)
+{
+ mpi_tdiv_qr(NULL, rem, num, den);
+}
+
+void mpi_tdiv_qr(MPI quot, MPI rem, MPI num, MPI den)
+{
+ mpi_ptr_t np, dp;
+ mpi_ptr_t qp, rp;
+ mpi_size_t nsize = num->nlimbs;
+ mpi_size_t dsize = den->nlimbs;
+ mpi_size_t qsize, rsize;
+ mpi_size_t sign_remainder = num->sign;
+ mpi_size_t sign_quotient = num->sign ^ den->sign;
+ unsigned int normalization_steps;
+ mpi_limb_t q_limb;
+ mpi_ptr_t marker[5];
+ int markidx = 0;
+
+ /* Ensure space is enough for quotient and remainder.
+ * We need space for an extra limb in the remainder, because it's
+ * up-shifted (normalized) below.
+ */
+ rsize = nsize + 1;
+ mpi_resize(rem, rsize);
+
+ qsize = rsize - dsize; /* qsize cannot be bigger than this. */
+ if (qsize <= 0) {
+ if (num != rem) {
+ rem->nlimbs = num->nlimbs;
+ rem->sign = num->sign;
+ MPN_COPY(rem->d, num->d, nsize);
+ }
+ if (quot) {
+ /* This needs to follow the assignment to rem, in case the
+ * numerator and quotient are the same.
+ */
+ quot->nlimbs = 0;
+ quot->sign = 0;
+ }
+ return;
+ }
+
+ if (quot)
+ mpi_resize(quot, qsize);
+
+ /* Read pointers here, when reallocation is finished. */
+ np = num->d;
+ dp = den->d;
+ rp = rem->d;
+
+ /* Optimize division by a single-limb divisor. */
+ if (dsize == 1) {
+ mpi_limb_t rlimb;
+ if (quot) {
+ qp = quot->d;
+ rlimb = mpihelp_divmod_1(qp, np, nsize, dp[0]);
+ qsize -= qp[qsize - 1] == 0;
+ quot->nlimbs = qsize;
+ quot->sign = sign_quotient;
+ } else
+ rlimb = mpihelp_mod_1(np, nsize, dp[0]);
+ rp[0] = rlimb;
+ rsize = rlimb != 0?1:0;
+ rem->nlimbs = rsize;
+ rem->sign = sign_remainder;
+ return;
+ }
+
+
+ if (quot) {
+ qp = quot->d;
+ /* Make sure QP and NP point to different objects. Otherwise the
+ * numerator would be gradually overwritten by the quotient limbs.
+ */
+ if (qp == np) { /* Copy NP object to temporary space. */
+ np = marker[markidx++] = mpi_alloc_limb_space(nsize);
+ MPN_COPY(np, qp, nsize);
+ }
+ } else /* Put quotient at top of remainder. */
+ qp = rp + dsize;
+
+ normalization_steps = count_leading_zeros(dp[dsize - 1]);
+
+ /* Normalize the denominator, i.e. make its most significant bit set by
+ * shifting it NORMALIZATION_STEPS bits to the left. Also shift the
+ * numerator the same number of steps (to keep the quotient the same!).
+ */
+ if (normalization_steps) {
+ mpi_ptr_t tp;
+ mpi_limb_t nlimb;
+
+ /* Shift up the denominator setting the most significant bit of
+ * the most significant word. Use temporary storage not to clobber
+ * the original contents of the denominator.
+ */
+ tp = marker[markidx++] = mpi_alloc_limb_space(dsize);
+ mpihelp_lshift(tp, dp, dsize, normalization_steps);
+ dp = tp;
+
+ /* Shift up the numerator, possibly introducing a new most
+ * significant word. Move the shifted numerator in the remainder
+ * meanwhile.
+ */
+ nlimb = mpihelp_lshift(rp, np, nsize, normalization_steps);
+ if (nlimb) {
+ rp[nsize] = nlimb;
+ rsize = nsize + 1;
+ } else
+ rsize = nsize;
+ } else {
+ /* The denominator is already normalized, as required. Copy it to
+ * temporary space if it overlaps with the quotient or remainder.
+ */
+ if (dp == rp || (quot && (dp == qp))) {
+ mpi_ptr_t tp;
+
+ tp = marker[markidx++] = mpi_alloc_limb_space(dsize);
+ MPN_COPY(tp, dp, dsize);
+ dp = tp;
+ }
+
+ /* Move the numerator to the remainder. */
+ if (rp != np)
+ MPN_COPY(rp, np, nsize);
+
+ rsize = nsize;
+ }
+
+ q_limb = mpihelp_divrem(qp, 0, rp, rsize, dp, dsize);
+
+ if (quot) {
+ qsize = rsize - dsize;
+ if (q_limb) {
+ qp[qsize] = q_limb;
+ qsize += 1;
+ }
+
+ quot->nlimbs = qsize;
+ quot->sign = sign_quotient;
+ }
+
+ rsize = dsize;
+ MPN_NORMALIZE(rp, rsize);
+
+ if (normalization_steps && rsize) {
+ mpihelp_rshift(rp, rp, rsize, normalization_steps);
+		rsize -= rp[rsize - 1] == 0 ? 1 : 0;
+ }
+
+ rem->nlimbs = rsize;
+ rem->sign = sign_remainder;
+ while (markidx) {
+ markidx--;
+ mpi_free_limb_space(marker[markidx]);
+ }
+}
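
The normalization dance rests on a simple invariant: multiplying numerator and denominator by the same power of two leaves the quotient unchanged and merely scales the remainder, which is why the remainder can be shifted back down at the end. A standalone check of that invariant (illustrative userspace C; the shift count just stands in for a count_leading_zeros() result):

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
    	unsigned long a = 1000003, d = 7;
    	unsigned int s = 5;	/* as if count_leading_zeros() returned 5 */

    	/* The quotient is invariant under the common shift... */
    	assert((a << s) / (d << s) == a / d);
    	/* ...and the remainder merely scales by 2^s. */
    	assert(((a << s) % (d << s)) >> s == a % d);
    	printf("q=%lu r=%lu\n", a / d, a % d);
    	return 0;
    }
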
diff --git a/lib/mpi/mpi-internal.h b/lib/mpi/mpi-internal.h
index 91df5f0b70f2..554002182db1 100644
--- a/lib/mpi/mpi-internal.h
+++ b/lib/mpi/mpi-internal.h
@@ -52,6 +52,12 @@
typedef mpi_limb_t *mpi_ptr_t; /* pointer to a limb */
typedef int mpi_size_t; /* (must be a signed type) */
+#define RESIZE_IF_NEEDED(a, b) \
+ do { \
+ if ((a)->alloced < (b)) \
+ mpi_resize((a), (b)); \
+ } while (0)
+
/* Copy N limbs from S to D. */
#define MPN_COPY(d, s, n) \
do { \
@@ -60,6 +66,14 @@ typedef int mpi_size_t; /* (must be a signed type) */
(d)[_i] = (s)[_i]; \
} while (0)
+#define MPN_COPY_INCR(d, s, n) \
+ do { \
+ mpi_size_t _i; \
+ for (_i = 0; _i < (n); _i++) \
+ (d)[_i] = (s)[_i]; \
+ } while (0)
+
+
#define MPN_COPY_DECR(d, s, n) \
do { \
mpi_size_t _i; \
@@ -92,6 +106,38 @@ typedef int mpi_size_t; /* (must be a signed type) */
mul_n(prodp, up, vp, size, tspace); \
} while (0);
+/* Divide the two-limb number in (NH,,NL) by D, with DI being the largest
+ * limb not larger than (2**(2*BITS_PER_MP_LIMB))/D - (2**BITS_PER_MP_LIMB).
+ * If this would yield overflow, DI should be the largest possible number
+ * (i.e., only ones). For correct operation, the most significant bit of D
+ * has to be set. Put the quotient in Q and the remainder in R.
+ */
+#define UDIV_QRNND_PREINV(q, r, nh, nl, d, di) \
+ do { \
+ mpi_limb_t _ql __maybe_unused; \
+ mpi_limb_t _q, _r; \
+ mpi_limb_t _xh, _xl; \
+ umul_ppmm(_q, _ql, (nh), (di)); \
+ _q += (nh); /* DI is 2**BITS_PER_MPI_LIMB too small */ \
+ umul_ppmm(_xh, _xl, _q, (d)); \
+ sub_ddmmss(_xh, _r, (nh), (nl), _xh, _xl); \
+ if (_xh) { \
+ sub_ddmmss(_xh, _r, _xh, _r, 0, (d)); \
+ _q++; \
+ if (_xh) { \
+ sub_ddmmss(_xh, _r, _xh, _r, 0, (d)); \
+ _q++; \
+ } \
+ } \
+ if (_r >= (d)) { \
+ _r -= (d); \
+ _q++; \
+ } \
+ (r) = _r; \
+ (q) = _q; \
+ } while (0)
+
+
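
The macro is easier to follow at a toy limb width. The standalone sketch below uses a 16-bit "limb" so ordinary 64-bit arithmetic can stand in for umul_ppmm/sub_ddmmss (illustrative only; a short correction loop replaces the macro's two unrolled fix-ups). The quotient guess derived from the precomputed inverse is never too large, so a few subtractions of D reach the exact quotient and remainder:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	uint32_t d = 0x9000;	/* normalized: top bit of the 16-bit limb set */
    	uint32_t di = (uint32_t)(0x100000000ULL / d) - 0x10000;
    	uint32_t nh = 0x1234, nl = 0x5678;	/* two-limb dividend, nh < d */
    	uint64_t n = ((uint64_t)nh << 16) | nl;

    	/* First-guess quotient from the inverse, then fix up. */
    	uint32_t q = (uint32_t)(((uint64_t)nh * di) >> 16) + nh;
    	uint64_t r = n - (uint64_t)q * d;

    	while (r >= d) {	/* at most a couple of iterations */
    		r -= d;
    		q++;
    	}
    	assert(q == n / d && r == n % d);
    	printf("q=0x%x r=0x%llx\n", q, (unsigned long long)r);
    	return 0;
    }
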
/*-- mpiutil.c --*/
mpi_ptr_t mpi_alloc_limb_space(unsigned nlimbs);
void mpi_free_limb_space(mpi_ptr_t a);
@@ -135,6 +181,8 @@ int mpihelp_mul(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t usize,
void mpih_sqr_n_basecase(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t size);
void mpih_sqr_n(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t size,
mpi_ptr_t tspace);
+void mpihelp_mul_n(mpi_ptr_t prodp,
+ mpi_ptr_t up, mpi_ptr_t vp, mpi_size_t size);
int mpihelp_mul_karatsuba_case(mpi_ptr_t prodp,
mpi_ptr_t up, mpi_size_t usize,
@@ -146,9 +194,14 @@ mpi_limb_t mpihelp_mul_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
mpi_size_t s1_size, mpi_limb_t s2_limb);
/*-- mpih-div.c --*/
+mpi_limb_t mpihelp_mod_1(mpi_ptr_t dividend_ptr, mpi_size_t dividend_size,
+ mpi_limb_t divisor_limb);
mpi_limb_t mpihelp_divrem(mpi_ptr_t qp, mpi_size_t qextra_limbs,
mpi_ptr_t np, mpi_size_t nsize,
mpi_ptr_t dp, mpi_size_t dsize);
+mpi_limb_t mpihelp_divmod_1(mpi_ptr_t quot_ptr,
+ mpi_ptr_t dividend_ptr, mpi_size_t dividend_size,
+ mpi_limb_t divisor_limb);
/*-- generic_mpih-[lr]shift.c --*/
mpi_limb_t mpihelp_lshift(mpi_ptr_t wp, mpi_ptr_t up, mpi_size_t usize,
diff --git a/lib/mpi/mpi-inv.c b/lib/mpi/mpi-inv.c
new file mode 100644
index 000000000000..61e37d18f793
--- /dev/null
+++ b/lib/mpi/mpi-inv.c
@@ -0,0 +1,143 @@
+/* mpi-inv.c - MPI functions
+ * Copyright (C) 1998, 2001, 2002, 2003 Free Software Foundation, Inc.
+ *
+ * This file is part of Libgcrypt.
+ *
+ * Libgcrypt is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * Libgcrypt is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "mpi-internal.h"
+
+/****************
+ * Calculate the multiplicative inverse X of A mod N
+ * That is: Find the solution x for
+ * 1 = (a*x) mod n
+ */
+int mpi_invm(MPI x, MPI a, MPI n)
+{
+ /* Extended Euclid's algorithm (See TAOCP Vol II, 4.5.2, Alg X)
+ * modified according to Michael Penk's solution for Exercise 35
+ * with further enhancement
+ */
+ MPI u, v, u1, u2 = NULL, u3, v1, v2 = NULL, v3, t1, t2 = NULL, t3;
+ unsigned int k;
+ int sign;
+ int odd;
+
+	if (!mpi_cmp_ui(a, 0))
+		return 0; /* Inverse does not exist. */
+	if (!mpi_cmp_ui(n, 1))
+		return 0; /* Inverse does not exist. */
+
+ u = mpi_copy(a);
+ v = mpi_copy(n);
+
+ for (k = 0; !mpi_test_bit(u, 0) && !mpi_test_bit(v, 0); k++) {
+ mpi_rshift(u, u, 1);
+ mpi_rshift(v, v, 1);
+ }
+ odd = mpi_test_bit(v, 0);
+
+ u1 = mpi_alloc_set_ui(1);
+ if (!odd)
+ u2 = mpi_alloc_set_ui(0);
+ u3 = mpi_copy(u);
+ v1 = mpi_copy(v);
+ if (!odd) {
+ v2 = mpi_alloc(mpi_get_nlimbs(u));
+ mpi_sub(v2, u1, u); /* U is used as const 1 */
+ }
+ v3 = mpi_copy(v);
+ if (mpi_test_bit(u, 0)) { /* u is odd */
+ t1 = mpi_alloc_set_ui(0);
+ if (!odd) {
+ t2 = mpi_alloc_set_ui(1);
+ t2->sign = 1;
+ }
+ t3 = mpi_copy(v);
+ t3->sign = !t3->sign;
+ goto Y4;
+ } else {
+ t1 = mpi_alloc_set_ui(1);
+ if (!odd)
+ t2 = mpi_alloc_set_ui(0);
+ t3 = mpi_copy(u);
+ }
+
+ do {
+ do {
+ if (!odd) {
+ if (mpi_test_bit(t1, 0) || mpi_test_bit(t2, 0)) {
+ /* one is odd */
+ mpi_add(t1, t1, v);
+ mpi_sub(t2, t2, u);
+ }
+ mpi_rshift(t1, t1, 1);
+ mpi_rshift(t2, t2, 1);
+ mpi_rshift(t3, t3, 1);
+ } else {
+ if (mpi_test_bit(t1, 0))
+ mpi_add(t1, t1, v);
+ mpi_rshift(t1, t1, 1);
+ mpi_rshift(t3, t3, 1);
+ }
+Y4:
+ ;
+ } while (!mpi_test_bit(t3, 0)); /* while t3 is even */
+
+ if (!t3->sign) {
+ mpi_set(u1, t1);
+ if (!odd)
+ mpi_set(u2, t2);
+ mpi_set(u3, t3);
+ } else {
+ mpi_sub(v1, v, t1);
+ sign = u->sign; u->sign = !u->sign;
+ if (!odd)
+ mpi_sub(v2, u, t2);
+ u->sign = sign;
+ sign = t3->sign; t3->sign = !t3->sign;
+ mpi_set(v3, t3);
+ t3->sign = sign;
+ }
+ mpi_sub(t1, u1, v1);
+ if (!odd)
+ mpi_sub(t2, u2, v2);
+ mpi_sub(t3, u3, v3);
+ if (t1->sign) {
+ mpi_add(t1, t1, v);
+ if (!odd)
+ mpi_sub(t2, t2, u);
+ }
+ } while (mpi_cmp_ui(t3, 0)); /* while t3 != 0 */
+ /* mpi_lshift( u3, k ); */
+ mpi_set(x, u1);
+
+ mpi_free(u1);
+ mpi_free(v1);
+ mpi_free(t1);
+ if (!odd) {
+ mpi_free(u2);
+ mpi_free(v2);
+ mpi_free(t2);
+ }
+ mpi_free(u3);
+ mpi_free(v3);
+ mpi_free(t3);
+
+ mpi_free(u);
+ mpi_free(v);
+ return 1;
+}
+EXPORT_SYMBOL_GPL(mpi_invm);
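
The property the routine establishes is the usual extended-Euclid identity. A compact standalone version on C longs (illustrative only; the kernel code uses Penk's binary variant so the multi-limb work stays in shifts and adds) verifies the same contract, 1 = (a*x) mod n:

    #include <assert.h>
    #include <stdio.h>

    static long invm(long a, long n)
    {
    	long r0 = n, r1 = a % n, t0 = 0, t1 = 1;

    	while (r1 != 0) {	/* extended Euclid on (n, a) */
    		long q = r0 / r1, tmp;

    		tmp = r0 - q * r1; r0 = r1; r1 = tmp;
    		tmp = t0 - q * t1; t0 = t1; t1 = tmp;
    	}
    	if (r0 != 1)
    		return -1;	/* no inverse: gcd(a, n) != 1 */
    	return t0 < 0 ? t0 + n : t0;
    }

    int main(void)
    {
    	long x = invm(17, 3120);	/* textbook RSA pair */

    	assert(x == 2753 && (17 * x) % 3120 == 1);
    	printf("17^-1 mod 3120 = %ld\n", x);
    	return 0;
    }
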
diff --git a/lib/mpi/mpi-mod.c b/lib/mpi/mpi-mod.c
new file mode 100644
index 000000000000..47bc59edd4ff
--- /dev/null
+++ b/lib/mpi/mpi-mod.c
@@ -0,0 +1,155 @@
+/* mpi-mod.c - Modular reduction
+ * Copyright (C) 1998, 1999, 2001, 2002, 2003,
+ * 2007 Free Software Foundation, Inc.
+ *
+ * This file is part of Libgcrypt.
+ */
+
+
+#include "mpi-internal.h"
+#include "longlong.h"
+
+/* Context used with Barrett reduction. */
+struct barrett_ctx_s {
+ MPI m; /* The modulus - may not be modified. */
+ int m_copied; /* If true, M needs to be released. */
+ int k;
+ MPI y;
+ MPI r1; /* Helper MPI. */
+ MPI r2; /* Helper MPI. */
+ MPI r3; /* Helper MPI allocated on demand. */
+};
+
+
+
+void mpi_mod(MPI rem, MPI dividend, MPI divisor)
+{
+ mpi_fdiv_r(rem, dividend, divisor);
+}
+
+/* This function returns a new context for Barrett-based operations on
+ * the modulus M. This context needs to be released using
+ * mpi_barrett_free. If COPY is true, M is duplicated into the
+ * context and the caller may change M afterwards. If COPY is false,
+ * M may not be changed until mpi_barrett_free has been called.
+ */
+mpi_barrett_t mpi_barrett_init(MPI m, int copy)
+{
+ mpi_barrett_t ctx;
+ MPI tmp;
+
+ mpi_normalize(m);
+	ctx = kcalloc(1, sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return NULL;
+
+ if (copy) {
+ ctx->m = mpi_copy(m);
+ ctx->m_copied = 1;
+ } else
+ ctx->m = m;
+
+ ctx->k = mpi_get_nlimbs(m);
+ tmp = mpi_alloc(ctx->k + 1);
+
+ /* Barrett precalculation: y = floor(b^(2k) / m). */
+ mpi_set_ui(tmp, 1);
+ mpi_lshift_limbs(tmp, 2 * ctx->k);
+ mpi_fdiv_q(tmp, tmp, m);
+
+ ctx->y = tmp;
+ ctx->r1 = mpi_alloc(2 * ctx->k + 1);
+ ctx->r2 = mpi_alloc(2 * ctx->k + 1);
+
+ return ctx;
+}
+
+void mpi_barrett_free(mpi_barrett_t ctx)
+{
+ if (ctx) {
+ mpi_free(ctx->y);
+ mpi_free(ctx->r1);
+ mpi_free(ctx->r2);
+ if (ctx->r3)
+ mpi_free(ctx->r3);
+ if (ctx->m_copied)
+ mpi_free(ctx->m);
+ kfree(ctx);
+ }
+}
+
+
+/* R = X mod M
+ *
+ * Using Barrett reduction. Before using this function
+ * mpi_barrett_init must have been called to do the
+ * precalculations. CTX is the context created by that precalculation
+ * and also conveys M. If the Barrett reduction cannot be done, a
+ * straightforward reduction method is used instead.
+ *
+ * We assume that these conditions are met:
+ * Input:  x = (x_{2k-1} ... x_0)_b
+ *         m = (m_{k-1} ... m_0)_b with m_{k-1} != 0
+ * Output: r = x mod m
+ */
+void mpi_mod_barrett(MPI r, MPI x, mpi_barrett_t ctx)
+{
+ MPI m = ctx->m;
+ int k = ctx->k;
+ MPI y = ctx->y;
+ MPI r1 = ctx->r1;
+ MPI r2 = ctx->r2;
+ int sign;
+
+ mpi_normalize(x);
+ if (mpi_get_nlimbs(x) > 2*k) {
+ mpi_mod(r, x, m);
+ return;
+ }
+
+ sign = x->sign;
+ x->sign = 0;
+
+	/* 1. q1 = floor( x / b^(k-1) )
+	 *    q2 = q1 * y
+	 *    q3 = floor( q2 / b^(k+1) )
+	 * Actually, we don't need q1..q3; we can work directly on r2.
+	 */
+ mpi_set(r2, x);
+ mpi_rshift_limbs(r2, k-1);
+ mpi_mul(r2, r2, y);
+ mpi_rshift_limbs(r2, k+1);
+
+	/* 2. r1 = x mod b^(k+1)
+	 *    r2 = q3 * m mod b^(k+1)
+	 *    r  = r1 - r2
+	 * 3. if r < 0 then r = r + b^(k+1)
+	 */
+ mpi_set(r1, x);
+ if (r1->nlimbs > k+1) /* Quick modulo operation. */
+ r1->nlimbs = k+1;
+ mpi_mul(r2, r2, m);
+ if (r2->nlimbs > k+1) /* Quick modulo operation. */
+ r2->nlimbs = k+1;
+ mpi_sub(r, r1, r2);
+
+ if (mpi_has_sign(r)) {
+ if (!ctx->r3) {
+ ctx->r3 = mpi_alloc(k + 2);
+ mpi_set_ui(ctx->r3, 1);
+ mpi_lshift_limbs(ctx->r3, k + 1);
+ }
+ mpi_add(r, r, ctx->r3);
+ }
+
+ /* 4. while r >= m do r = r - m */
+ while (mpi_cmp(r, m) >= 0)
+ mpi_sub(r, r, m);
+
+ x->sign = sign;
+}
+
+
+void mpi_mul_barrett(MPI w, MPI u, MPI v, mpi_barrett_t ctx)
+{
+ mpi_mul(w, u, v);
+ mpi_mod_barrett(w, w, ctx);
+}
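
Barrett reduction is clearest with small numbers. The standalone sketch below (userspace C with a toy 8-bit limb, so b = 2^8; illustrative, not the kernel code) walks the same three steps as mpi_mod_barrett: quotient estimate via the precomputed y, subtraction modulo b^(k+1), and at most a couple of final corrections:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	const uint64_t b = 1 << 8;		/* toy limb base; m has k = 2 limbs */
    	const uint64_t m = 0x89AB;
    	const uint64_t bk1 = b * b * b;		/* b^(k+1) */
    	const uint64_t y = (bk1 * b) / m;	/* y = floor(b^(2k) / m), precomputed */
    	uint64_t x = 0xDEADBEEF;		/* x < b^(2k) */
    	uint64_t q, r;

    	/* 1. q3 = floor(floor(x / b^(k-1)) * y / b^(k+1)) */
    	q = ((x / b) * y) / bk1;
    	/* 2. r = (x mod b^(k+1)) - (q3*m mod b^(k+1)), kept non-negative */
    	r = ((x % bk1) + bk1 - (q * m) % bk1) % bk1;
    	/* 3. while r >= m: r -= m (bounded by a small constant) */
    	while (r >= m)
    		r -= m;
    	assert(r == x % m);
    	printf("0x%llx mod 0x%llx = 0x%llx\n", (unsigned long long)x,
    	       (unsigned long long)m, (unsigned long long)r);
    	return 0;
    }
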
diff --git a/lib/mpi/mpi-mul.c b/lib/mpi/mpi-mul.c
new file mode 100644
index 000000000000..8f5fa200f297
--- /dev/null
+++ b/lib/mpi/mpi-mul.c
@@ -0,0 +1,91 @@
+/* mpi-mul.c - MPI functions
+ * Copyright (C) 1994, 1996, 1998, 2001, 2002,
+ * 2003 Free Software Foundation, Inc.
+ *
+ * This file is part of Libgcrypt.
+ *
+ * Note: This code is heavily based on the GNU MP Library.
+ * Actually it's the same code with only minor changes in the
+ * way the data is stored; this is to support the abstraction
+ * of an optional secure memory allocation which may be used
+ * to avoid revealing of sensitive data due to paging etc.
+ */
+
+#include "mpi-internal.h"
+
+void mpi_mul(MPI w, MPI u, MPI v)
+{
+ mpi_size_t usize, vsize, wsize;
+ mpi_ptr_t up, vp, wp;
+ mpi_limb_t cy;
+ int usign, vsign, sign_product;
+ int assign_wp = 0;
+ mpi_ptr_t tmp_limb = NULL;
+
+ if (u->nlimbs < v->nlimbs) {
+ /* Swap U and V. */
+ usize = v->nlimbs;
+ usign = v->sign;
+ up = v->d;
+ vsize = u->nlimbs;
+ vsign = u->sign;
+ vp = u->d;
+ } else {
+ usize = u->nlimbs;
+ usign = u->sign;
+ up = u->d;
+ vsize = v->nlimbs;
+ vsign = v->sign;
+ vp = v->d;
+ }
+ sign_product = usign ^ vsign;
+ wp = w->d;
+
+ /* Ensure W has space enough to store the result. */
+ wsize = usize + vsize;
+ if (w->alloced < wsize) {
+ if (wp == up || wp == vp) {
+ wp = mpi_alloc_limb_space(wsize);
+ assign_wp = 1;
+ } else {
+ mpi_resize(w, wsize);
+ wp = w->d;
+ }
+ } else { /* Make U and V not overlap with W. */
+ if (wp == up) {
+ /* W and U are identical. Allocate temporary space for U. */
+ up = tmp_limb = mpi_alloc_limb_space(usize);
+ /* Is V identical too? Keep it identical with U. */
+ if (wp == vp)
+ vp = up;
+ /* Copy to the temporary space. */
+ MPN_COPY(up, wp, usize);
+ } else if (wp == vp) {
+ /* W and V are identical. Allocate temporary space for V. */
+ vp = tmp_limb = mpi_alloc_limb_space(vsize);
+ /* Copy to the temporary space. */
+ MPN_COPY(vp, wp, vsize);
+ }
+ }
+
+ if (!vsize)
+ wsize = 0;
+ else {
+ mpihelp_mul(wp, up, usize, vp, vsize, &cy);
+		wsize -= cy ? 0 : 1;
+ }
+
+ if (assign_wp)
+ mpi_assign_limb_space(w, wp, wsize);
+ w->nlimbs = wsize;
+ w->sign = sign_product;
+ if (tmp_limb)
+ mpi_free_limb_space(tmp_limb);
+}
+
+void mpi_mulm(MPI w, MPI u, MPI v, MPI m)
+{
+ mpi_mul(w, u, v);
+ mpi_tdiv_r(w, w, m);
+}
+EXPORT_SYMBOL_GPL(mpi_mulm);
diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c
index eead4b339466..7ea225b2204f 100644
--- a/lib/mpi/mpicoder.c
+++ b/lib/mpi/mpicoder.c
@@ -25,6 +25,7 @@
#include <linux/string.h>
#include "mpi-internal.h"
+#define MAX_EXTERN_SCAN_BYTES (16*1024*1024)
#define MAX_EXTERN_MPI_BITS 16384
/**
@@ -109,6 +110,112 @@ MPI mpi_read_from_buffer(const void *xbuffer, unsigned *ret_nread)
}
EXPORT_SYMBOL_GPL(mpi_read_from_buffer);
+/****************
+ * Fill the mpi VAL from the hex string in STR.
+ */
+int mpi_fromstr(MPI val, const char *str)
+{
+ int sign = 0;
+ int prepend_zero = 0;
+ int i, j, c, c1, c2;
+ unsigned int nbits, nbytes, nlimbs;
+ mpi_limb_t a;
+
+ if (*str == '-') {
+ sign = 1;
+ str++;
+ }
+
+ /* Skip optional hex prefix. */
+ if (*str == '0' && str[1] == 'x')
+ str += 2;
+
+ nbits = strlen(str);
+ if (nbits > MAX_EXTERN_SCAN_BYTES) {
+ mpi_clear(val);
+ return -EINVAL;
+ }
+ nbits *= 4;
+ if ((nbits % 8))
+ prepend_zero = 1;
+
+ nbytes = (nbits+7) / 8;
+ nlimbs = (nbytes+BYTES_PER_MPI_LIMB-1) / BYTES_PER_MPI_LIMB;
+
+ if (val->alloced < nlimbs)
+ mpi_resize(val, nlimbs);
+
+ i = BYTES_PER_MPI_LIMB - (nbytes % BYTES_PER_MPI_LIMB);
+ i %= BYTES_PER_MPI_LIMB;
+ j = val->nlimbs = nlimbs;
+ val->sign = sign;
+ for (; j > 0; j--) {
+ a = 0;
+ for (; i < BYTES_PER_MPI_LIMB; i++) {
+ if (prepend_zero) {
+ c1 = '0';
+ prepend_zero = 0;
+ } else
+ c1 = *str++;
+
+ if (!c1) {
+ mpi_clear(val);
+ return -EINVAL;
+ }
+ c2 = *str++;
+ if (!c2) {
+ mpi_clear(val);
+ return -EINVAL;
+ }
+ if (c1 >= '0' && c1 <= '9')
+ c = c1 - '0';
+ else if (c1 >= 'a' && c1 <= 'f')
+ c = c1 - 'a' + 10;
+ else if (c1 >= 'A' && c1 <= 'F')
+ c = c1 - 'A' + 10;
+ else {
+ mpi_clear(val);
+ return -EINVAL;
+ }
+ c <<= 4;
+ if (c2 >= '0' && c2 <= '9')
+ c |= c2 - '0';
+ else if (c2 >= 'a' && c2 <= 'f')
+ c |= c2 - 'a' + 10;
+ else if (c2 >= 'A' && c2 <= 'F')
+ c |= c2 - 'A' + 10;
+ else {
+ mpi_clear(val);
+ return -EINVAL;
+ }
+ a <<= 8;
+ a |= c;
+ }
+ i = 0;
+ val->d[j-1] = a;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mpi_fromstr);
+
+MPI mpi_scanval(const char *string)
+{
+ MPI a;
+
+ a = mpi_alloc(0);
+ if (!a)
+ return NULL;
+
+ if (mpi_fromstr(a, string)) {
+ mpi_free(a);
+ return NULL;
+ }
+ mpi_normalize(a);
+ return a;
+}
+EXPORT_SYMBOL_GPL(mpi_scanval);
+
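
The only subtle point in the parser is odd-length input: a string with an odd number of hex digits is read as if a '0' were prepended, so the digit pairs line up on byte boundaries. A simplified standalone illustration of that trick (userspace C with an invented helper name, one nibble per step instead of the function's byte-at-a-time limb packing, and without the error handling):

    #include <assert.h>
    #include <stdio.h>
    #include <string.h>

    static unsigned long parse_hex(const char *s)
    {
    	unsigned long v = 0;
    	int prepend_zero = strlen(s) & 1;	/* odd length? act as if "0" led */

    	while (*s) {
    		int c = prepend_zero ? '0' : *s++;

    		prepend_zero = 0;
    		v = (v << 4) | (unsigned long)(c <= '9' ? c - '0'
    					       : (c | 0x20) - 'a' + 10);
    	}
    	return v;
    }

    int main(void)
    {
    	assert(parse_hex("ABC") == 0xABC && parse_hex("ABC") == parse_hex("0ABC"));
    	printf("0x%lx\n", parse_hex("DEADBEEF"));
    	return 0;
    }
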
static int count_lzeros(MPI a)
{
mpi_limb_t alimb;
@@ -413,3 +520,232 @@ MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int nbytes)
return val;
}
EXPORT_SYMBOL_GPL(mpi_read_raw_from_sgl);
+
+/* Perform a two's complement operation on buffer P of size N bytes. */
+static void twocompl(unsigned char *p, unsigned int n)
+{
+ int i;
+
+ for (i = n-1; i >= 0 && !p[i]; i--)
+ ;
+ if (i >= 0) {
+ if ((p[i] & 0x01))
+ p[i] = (((p[i] ^ 0xfe) | 0x01) & 0xff);
+ else if ((p[i] & 0x02))
+ p[i] = (((p[i] ^ 0xfc) | 0x02) & 0xfe);
+ else if ((p[i] & 0x04))
+ p[i] = (((p[i] ^ 0xf8) | 0x04) & 0xfc);
+ else if ((p[i] & 0x08))
+ p[i] = (((p[i] ^ 0xf0) | 0x08) & 0xf8);
+ else if ((p[i] & 0x10))
+ p[i] = (((p[i] ^ 0xe0) | 0x10) & 0xf0);
+ else if ((p[i] & 0x20))
+ p[i] = (((p[i] ^ 0xc0) | 0x20) & 0xe0);
+ else if ((p[i] & 0x40))
+ p[i] = (((p[i] ^ 0x80) | 0x40) & 0xc0);
+ else
+ p[i] = 0x80;
+
+ for (i--; i >= 0; i--)
+ p[i] ^= 0xff;
+ }
+}
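
The branch ladder is a byte-wise way of computing -x = ~x + 1 over a big-endian buffer: trailing zero bytes absorb nothing, the first non-zero byte from the little end takes the "+1" (its trailing zero bits survive, the rest is negated), and every higher byte is simply complemented. A standalone check against ordinary two's complement arithmetic (illustrative userspace C):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	uint16_t x = 0x12C0;			/* ends in zero bits */
    	uint8_t buf[2] = { x >> 8, x & 0xff };	/* big-endian, as mpi_get_buffer() */
    	int i = 1;

    	while (i >= 0 && !buf[i])		/* skip trailing zero bytes */
    		i--;
    	if (i >= 0) {
    		buf[i] = -buf[i];		/* two's complement of that byte */
    		for (i--; i >= 0; i--)
    			buf[i] ^= 0xff;		/* ones' complement of the rest */
    	}
    	assert(((buf[0] << 8) | buf[1]) == (uint16_t)-x);	/* 0xED40 */
    	printf("-0x%04x = 0x%02x%02x\n", x, buf[0], buf[1]);
    	return 0;
    }
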
+
+int mpi_print(enum gcry_mpi_format format, unsigned char *buffer,
+ size_t buflen, size_t *nwritten, MPI a)
+{
+ unsigned int nbits = mpi_get_nbits(a);
+ size_t len;
+ size_t dummy_nwritten;
+ int negative;
+
+ if (!nwritten)
+ nwritten = &dummy_nwritten;
+
+	/* Libgcrypt does not always take care to clear the sign if the
+	 * value is 0. For printing this is a bit of a surprise, in
+	 * particular because some of the formats don't support negative
+	 * numbers but should still be able to print a zero. Thus we need
+	 * this extra test for a negative number.
+	 */
+ if (a->sign && mpi_cmp_ui(a, 0))
+ negative = 1;
+ else
+ negative = 0;
+
+ len = buflen;
+ *nwritten = 0;
+ if (format == GCRYMPI_FMT_STD) {
+ unsigned char *tmp;
+ int extra = 0;
+ unsigned int n;
+
+ tmp = mpi_get_buffer(a, &n, NULL);
+ if (!tmp)
+ return -EINVAL;
+
+ if (negative) {
+ twocompl(tmp, n);
+ if (!(*tmp & 0x80)) {
+ /* Need to extend the sign. */
+ n++;
+ extra = 2;
+ }
+ } else if (n && (*tmp & 0x80)) {
+ /* Positive but the high bit of the returned buffer is set.
+ * Thus we need to print an extra leading 0x00 so that the
+ * output is interpreted as a positive number.
+ */
+ n++;
+ extra = 1;
+ }
+
+ if (buffer && n > len) {
+ /* The provided buffer is too short. */
+ kfree(tmp);
+ return -E2BIG;
+ }
+ if (buffer) {
+ unsigned char *s = buffer;
+
+ if (extra == 1)
+ *s++ = 0;
+ else if (extra)
+ *s++ = 0xff;
+ memcpy(s, tmp, n-!!extra);
+ }
+ kfree(tmp);
+ *nwritten = n;
+ return 0;
+ } else if (format == GCRYMPI_FMT_USG) {
+ unsigned int n = (nbits + 7)/8;
+
+ /* Note: We ignore the sign for this format. */
+ /* FIXME: for performance reasons we should put this into
+ * mpi_aprint because we can then use the buffer directly.
+ */
+
+ if (buffer && n > len)
+ return -E2BIG;
+ if (buffer) {
+ unsigned char *tmp;
+
+ tmp = mpi_get_buffer(a, &n, NULL);
+ if (!tmp)
+ return -EINVAL;
+ memcpy(buffer, tmp, n);
+ kfree(tmp);
+ }
+ *nwritten = n;
+ return 0;
+ } else if (format == GCRYMPI_FMT_PGP) {
+ unsigned int n = (nbits + 7)/8;
+
+ /* The PGP format can only handle unsigned integers. */
+ if (negative)
+ return -EINVAL;
+
+ if (buffer && n+2 > len)
+ return -E2BIG;
+
+ if (buffer) {
+ unsigned char *tmp;
+ unsigned char *s = buffer;
+
+ s[0] = nbits >> 8;
+ s[1] = nbits;
+
+ tmp = mpi_get_buffer(a, &n, NULL);
+ if (!tmp)
+ return -EINVAL;
+ memcpy(s+2, tmp, n);
+ kfree(tmp);
+ }
+ *nwritten = n+2;
+ return 0;
+ } else if (format == GCRYMPI_FMT_SSH) {
+ unsigned char *tmp;
+ int extra = 0;
+ unsigned int n;
+
+ tmp = mpi_get_buffer(a, &n, NULL);
+ if (!tmp)
+ return -EINVAL;
+
+ if (negative) {
+ twocompl(tmp, n);
+ if (!(*tmp & 0x80)) {
+ /* Need to extend the sign. */
+ n++;
+ extra = 2;
+ }
+ } else if (n && (*tmp & 0x80)) {
+ n++;
+ extra = 1;
+ }
+
+ if (buffer && n+4 > len) {
+ kfree(tmp);
+ return -E2BIG;
+ }
+
+ if (buffer) {
+ unsigned char *s = buffer;
+
+ *s++ = n >> 24;
+ *s++ = n >> 16;
+ *s++ = n >> 8;
+ *s++ = n;
+ if (extra == 1)
+ *s++ = 0;
+ else if (extra)
+ *s++ = 0xff;
+ memcpy(s, tmp, n-!!extra);
+ }
+ kfree(tmp);
+ *nwritten = 4+n;
+ return 0;
+ } else if (format == GCRYMPI_FMT_HEX) {
+ unsigned char *tmp;
+ int i;
+ int extra = 0;
+ unsigned int n = 0;
+
+ tmp = mpi_get_buffer(a, &n, NULL);
+ if (!tmp)
+ return -EINVAL;
+ if (!n || (*tmp & 0x80))
+ extra = 2;
+
+ if (buffer && 2*n + extra + negative + 1 > len) {
+ kfree(tmp);
+ return -E2BIG;
+ }
+ if (buffer) {
+ unsigned char *s = buffer;
+
+ if (negative)
+ *s++ = '-';
+ if (extra) {
+ *s++ = '0';
+ *s++ = '0';
+ }
+
+ for (i = 0; i < n; i++) {
+ unsigned int c = tmp[i];
+
+ *s++ = (c >> 4) < 10 ? '0'+(c>>4) : 'A'+(c>>4)-10;
+ c &= 15;
+ *s++ = c < 10 ? '0'+c : 'A'+c-10;
+ }
+ *s++ = 0;
+ *nwritten = s - buffer;
+ } else {
+ *nwritten = 2*n + extra + negative + 1;
+ }
+ kfree(tmp);
+ return 0;
+ } else
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(mpi_print);
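
The SSH branch implements the RFC 4251 mpint framing: a 4-byte big-endian length, then the magnitude, with an extra 0x00 when a positive value's top bit is set (or 0xff sign extension in the negative case). Encoding the single byte 0x80 makes that "extra" byte visible; the expected output below is the RFC's own example (standalone illustration):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
    	uint8_t out[8], expect[] = { 0, 0, 0, 2, 0x00, 0x80 };
    	uint8_t val = 0x80;
    	unsigned int n = 1 + 1;		/* payload + sign byte ("extra = 1") */

    	out[0] = n >> 24; out[1] = n >> 16; out[2] = n >> 8; out[3] = n;
    	out[4] = 0x00;			/* sign extension for a positive value */
    	out[5] = val;
    	assert(memcmp(out, expect, sizeof(expect)) == 0);
    	printf("ssh mpint of 0x80: 00 00 00 02 00 80\n");
    	return 0;
    }
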
diff --git a/lib/mpi/mpih-div.c b/lib/mpi/mpih-div.c
index 913a519eb005..be70ee2e42d3 100644
--- a/lib/mpi/mpih-div.c
+++ b/lib/mpi/mpih-div.c
@@ -24,6 +24,150 @@
#define UDIV_TIME UMUL_TIME
#endif
+
+mpi_limb_t
+mpihelp_mod_1(mpi_ptr_t dividend_ptr, mpi_size_t dividend_size,
+ mpi_limb_t divisor_limb)
+{
+ mpi_size_t i;
+ mpi_limb_t n1, n0, r;
+ mpi_limb_t dummy __maybe_unused;
+
+ /* Botch: Should this be handled at all? Rely on callers? */
+ if (!dividend_size)
+ return 0;
+
+ /* If multiplication is much faster than division, and the
+ * dividend is large, pre-invert the divisor, and use
+ * only multiplications in the inner loop.
+ *
+ * This test should be read:
+ * Does it ever help to use udiv_qrnnd_preinv?
+ * && Does what we save compensate for the inversion overhead?
+ */
+ if (UDIV_TIME > (2 * UMUL_TIME + 6)
+ && (UDIV_TIME - (2 * UMUL_TIME + 6)) * dividend_size > UDIV_TIME) {
+ int normalization_steps;
+
+ normalization_steps = count_leading_zeros(divisor_limb);
+ if (normalization_steps) {
+ mpi_limb_t divisor_limb_inverted;
+
+ divisor_limb <<= normalization_steps;
+
+ /* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB. The
+ * result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the
+ * most significant bit (with weight 2**N) implicit.
+ *
+ * Special case for DIVISOR_LIMB == 100...000.
+ */
+ if (!(divisor_limb << 1))
+ divisor_limb_inverted = ~(mpi_limb_t)0;
+ else
+ udiv_qrnnd(divisor_limb_inverted, dummy,
+ -divisor_limb, 0, divisor_limb);
+
+ n1 = dividend_ptr[dividend_size - 1];
+ r = n1 >> (BITS_PER_MPI_LIMB - normalization_steps);
+
+ /* Possible optimization:
+ * if (r == 0
+ * && divisor_limb > ((n1 << normalization_steps)
+ * | (dividend_ptr[dividend_size - 2] >> ...)))
+ * ...one division less...
+ */
+ for (i = dividend_size - 2; i >= 0; i--) {
+ n0 = dividend_ptr[i];
+ UDIV_QRNND_PREINV(dummy, r, r,
+ ((n1 << normalization_steps)
+ | (n0 >> (BITS_PER_MPI_LIMB - normalization_steps))),
+ divisor_limb, divisor_limb_inverted);
+ n1 = n0;
+ }
+ UDIV_QRNND_PREINV(dummy, r, r,
+ n1 << normalization_steps,
+ divisor_limb, divisor_limb_inverted);
+ return r >> normalization_steps;
+ } else {
+ mpi_limb_t divisor_limb_inverted;
+
+ /* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB. The
+ * result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the
+ * most significant bit (with weight 2**N) implicit.
+ *
+ * Special case for DIVISOR_LIMB == 100...000.
+ */
+ if (!(divisor_limb << 1))
+ divisor_limb_inverted = ~(mpi_limb_t)0;
+ else
+ udiv_qrnnd(divisor_limb_inverted, dummy,
+ -divisor_limb, 0, divisor_limb);
+
+ i = dividend_size - 1;
+ r = dividend_ptr[i];
+
+ if (r >= divisor_limb)
+ r = 0;
+ else
+ i--;
+
+ for ( ; i >= 0; i--) {
+ n0 = dividend_ptr[i];
+ UDIV_QRNND_PREINV(dummy, r, r,
+ n0, divisor_limb, divisor_limb_inverted);
+ }
+ return r;
+ }
+ } else {
+ if (UDIV_NEEDS_NORMALIZATION) {
+ int normalization_steps;
+
+ normalization_steps = count_leading_zeros(divisor_limb);
+ if (normalization_steps) {
+ divisor_limb <<= normalization_steps;
+
+ n1 = dividend_ptr[dividend_size - 1];
+ r = n1 >> (BITS_PER_MPI_LIMB - normalization_steps);
+
+ /* Possible optimization:
+ * if (r == 0
+ * && divisor_limb > ((n1 << normalization_steps)
+ * | (dividend_ptr[dividend_size - 2] >> ...)))
+ * ...one division less...
+ */
+ for (i = dividend_size - 2; i >= 0; i--) {
+ n0 = dividend_ptr[i];
+ udiv_qrnnd(dummy, r, r,
+ ((n1 << normalization_steps)
+ | (n0 >> (BITS_PER_MPI_LIMB - normalization_steps))),
+ divisor_limb);
+ n1 = n0;
+ }
+ udiv_qrnnd(dummy, r, r,
+ n1 << normalization_steps,
+ divisor_limb);
+ return r >> normalization_steps;
+ }
+ }
+ /* No normalization needed, either because udiv_qrnnd doesn't require
+ * it, or because DIVISOR_LIMB is already normalized.
+ */
+ i = dividend_size - 1;
+ r = dividend_ptr[i];
+
+ if (r >= divisor_limb)
+ r = 0;
+ else
+ i--;
+
+ for (; i >= 0; i--) {
+ n0 = dividend_ptr[i];
+ udiv_qrnnd(dummy, r, r, n0, divisor_limb);
+ }
+ return r;
+ }
+}
+
/* Divide num (NP/NSIZE) by den (DP/DSIZE) and write
* the NSIZE-DSIZE least significant quotient limbs at QP
* and the DSIZE long remainder at NP. If QEXTRA_LIMBS is
@@ -221,3 +365,153 @@ q_test:
return most_significant_q_limb;
}
+
+/****************
+ * Divide (DIVIDEND_PTR,,DIVIDEND_SIZE) by DIVISOR_LIMB.
+ * Write DIVIDEND_SIZE limbs of quotient at QUOT_PTR.
+ * Return the single-limb remainder.
+ * There are no constraints on the value of the divisor.
+ *
+ * QUOT_PTR and DIVIDEND_PTR might point to the same limb.
+ */
+
+mpi_limb_t
+mpihelp_divmod_1(mpi_ptr_t quot_ptr,
+ mpi_ptr_t dividend_ptr, mpi_size_t dividend_size,
+ mpi_limb_t divisor_limb)
+{
+ mpi_size_t i;
+ mpi_limb_t n1, n0, r;
+ mpi_limb_t dummy __maybe_unused;
+
+ if (!dividend_size)
+ return 0;
+
+ /* If multiplication is much faster than division, and the
+ * dividend is large, pre-invert the divisor, and use
+ * only multiplications in the inner loop.
+ *
+ * This test should be read:
+ * Does it ever help to use udiv_qrnnd_preinv?
+ * && Does what we save compensate for the inversion overhead?
+ */
+ if (UDIV_TIME > (2 * UMUL_TIME + 6)
+ && (UDIV_TIME - (2 * UMUL_TIME + 6)) * dividend_size > UDIV_TIME) {
+ int normalization_steps;
+
+ normalization_steps = count_leading_zeros(divisor_limb);
+ if (normalization_steps) {
+ mpi_limb_t divisor_limb_inverted;
+
+ divisor_limb <<= normalization_steps;
+
+ /* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB. The
+ * result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the
+ * most significant bit (with weight 2**N) implicit.
+ */
+ /* Special case for DIVISOR_LIMB == 100...000. */
+ if (!(divisor_limb << 1))
+ divisor_limb_inverted = ~(mpi_limb_t)0;
+ else
+ udiv_qrnnd(divisor_limb_inverted, dummy,
+ -divisor_limb, 0, divisor_limb);
+
+ n1 = dividend_ptr[dividend_size - 1];
+ r = n1 >> (BITS_PER_MPI_LIMB - normalization_steps);
+
+ /* Possible optimization:
+ * if (r == 0
+ * && divisor_limb > ((n1 << normalization_steps)
+ * | (dividend_ptr[dividend_size - 2] >> ...)))
+ * ...one division less...
+ */
+ for (i = dividend_size - 2; i >= 0; i--) {
+ n0 = dividend_ptr[i];
+ UDIV_QRNND_PREINV(quot_ptr[i + 1], r, r,
+ ((n1 << normalization_steps)
+ | (n0 >> (BITS_PER_MPI_LIMB - normalization_steps))),
+ divisor_limb, divisor_limb_inverted);
+ n1 = n0;
+ }
+ UDIV_QRNND_PREINV(quot_ptr[0], r, r,
+ n1 << normalization_steps,
+ divisor_limb, divisor_limb_inverted);
+ return r >> normalization_steps;
+ } else {
+ mpi_limb_t divisor_limb_inverted;
+
+ /* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB. The
+ * result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the
+ * most significant bit (with weight 2**N) implicit.
+ */
+ /* Special case for DIVISOR_LIMB == 100...000. */
+ if (!(divisor_limb << 1))
+ divisor_limb_inverted = ~(mpi_limb_t) 0;
+ else
+ udiv_qrnnd(divisor_limb_inverted, dummy,
+ -divisor_limb, 0, divisor_limb);
+
+ i = dividend_size - 1;
+ r = dividend_ptr[i];
+
+ if (r >= divisor_limb)
+ r = 0;
+ else
+ quot_ptr[i--] = 0;
+
+ for ( ; i >= 0; i--) {
+ n0 = dividend_ptr[i];
+ UDIV_QRNND_PREINV(quot_ptr[i], r, r,
+ n0, divisor_limb, divisor_limb_inverted);
+ }
+ return r;
+ }
+ } else {
+ if (UDIV_NEEDS_NORMALIZATION) {
+ int normalization_steps;
+
+ normalization_steps = count_leading_zeros(divisor_limb);
+ if (normalization_steps) {
+ divisor_limb <<= normalization_steps;
+
+ n1 = dividend_ptr[dividend_size - 1];
+ r = n1 >> (BITS_PER_MPI_LIMB - normalization_steps);
+
+ /* Possible optimization:
+ * if (r == 0
+ * && divisor_limb > ((n1 << normalization_steps)
+ * | (dividend_ptr[dividend_size - 2] >> ...)))
+ * ...one division less...
+ */
+ for (i = dividend_size - 2; i >= 0; i--) {
+ n0 = dividend_ptr[i];
+ udiv_qrnnd(quot_ptr[i + 1], r, r,
+ ((n1 << normalization_steps)
+ | (n0 >> (BITS_PER_MPI_LIMB - normalization_steps))),
+ divisor_limb);
+ n1 = n0;
+ }
+ udiv_qrnnd(quot_ptr[0], r, r,
+ n1 << normalization_steps,
+ divisor_limb);
+ return r >> normalization_steps;
+ }
+ }
+ /* No normalization needed, either because udiv_qrnnd doesn't require
+ * it, or because DIVISOR_LIMB is already normalized.
+ */
+ i = dividend_size - 1;
+ r = dividend_ptr[i];
+
+ if (r >= divisor_limb)
+ r = 0;
+ else
+ quot_ptr[i--] = 0;
+
+ for (; i >= 0; i--) {
+ n0 = dividend_ptr[i];
+ udiv_qrnnd(quot_ptr[i], r, r, n0, divisor_limb);
+ }
+ return r;
+ }
+}
diff --git a/lib/mpi/mpih-mul.c b/lib/mpi/mpih-mul.c
index a93647564054..e5f1c84e3c48 100644
--- a/lib/mpi/mpih-mul.c
+++ b/lib/mpi/mpih-mul.c
@@ -317,6 +317,31 @@ mpih_sqr_n(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t size, mpi_ptr_t tspace)
}
}
+
+void mpihelp_mul_n(mpi_ptr_t prodp,
+ mpi_ptr_t up, mpi_ptr_t vp, mpi_size_t size)
+{
+ if (up == vp) {
+ if (size < KARATSUBA_THRESHOLD)
+ mpih_sqr_n_basecase(prodp, up, size);
+ else {
+ mpi_ptr_t tspace;
+ tspace = mpi_alloc_limb_space(2 * size);
+ mpih_sqr_n(prodp, up, size, tspace);
+ mpi_free_limb_space(tspace);
+ }
+ } else {
+ if (size < KARATSUBA_THRESHOLD)
+ mul_n_basecase(prodp, up, vp, size);
+ else {
+ mpi_ptr_t tspace;
+ tspace = mpi_alloc_limb_space(2 * size);
+ mul_n(prodp, up, vp, size, tspace);
+ mpi_free_limb_space(tspace);
+ }
+ }
+}
+
int
mpihelp_mul_karatsuba_case(mpi_ptr_t prodp,
mpi_ptr_t up, mpi_size_t usize,
diff --git a/lib/mpi/mpiutil.c b/lib/mpi/mpiutil.c
index 4cd2b335cb7f..3c63710c20c6 100644
--- a/lib/mpi/mpiutil.c
+++ b/lib/mpi/mpiutil.c
@@ -20,6 +20,63 @@
#include "mpi-internal.h"
+/* Constants allocated right away at startup. */
+static MPI constants[MPI_NUMBER_OF_CONSTANTS];
+
+/* Initialize the MPI subsystem. This is called early and allows us to
+ * do some initialization without having to worry about threading issues.
+ */
+static int __init mpi_init(void)
+{
+ int idx;
+ unsigned long value;
+
+ for (idx = 0; idx < MPI_NUMBER_OF_CONSTANTS; idx++) {
+ switch (idx) {
+ case MPI_C_ZERO:
+ value = 0;
+ break;
+ case MPI_C_ONE:
+ value = 1;
+ break;
+ case MPI_C_TWO:
+ value = 2;
+ break;
+ case MPI_C_THREE:
+ value = 3;
+ break;
+ case MPI_C_FOUR:
+ value = 4;
+ break;
+ case MPI_C_EIGHT:
+ value = 8;
+ break;
+ default:
+ pr_err("MPI: invalid mpi_const selector %d\n", idx);
+ return -EFAULT;
+ }
+ constants[idx] = mpi_alloc_set_ui(value);
+		constants[idx]->flags = (16|32); /* immutable and constant */
+ }
+
+ return 0;
+}
+postcore_initcall(mpi_init);
+
+/* Return a constant MPI described by NO which is one of the
+ * MPI_C_xxx macros. There is no need to copy this returned value; it
+ * may be used directly.
+ */
+MPI mpi_const(enum gcry_mpi_constants no)
+{
+	if ((int)no < 0 || no >= MPI_NUMBER_OF_CONSTANTS) {
+		pr_err("MPI: invalid mpi_const selector %d\n", no);
+		return NULL;
+	}
+ if (!constants[no])
+ pr_err("MPI: MPI subsystem not initialized\n");
+ return constants[no];
+}
+EXPORT_SYMBOL_GPL(mpi_const);
+
/****************
* Note: It was a bad idea to use the number of limbs to allocate
* because on a alpha the limbs are large but we normally need
@@ -106,6 +163,15 @@ int mpi_resize(MPI a, unsigned nlimbs)
return 0;
}
+void mpi_clear(MPI a)
+{
+ if (!a)
+ return;
+ a->nlimbs = 0;
+ a->flags = 0;
+}
+EXPORT_SYMBOL_GPL(mpi_clear);
+
void mpi_free(MPI a)
{
if (!a)
@@ -122,5 +188,143 @@ void mpi_free(MPI a)
}
EXPORT_SYMBOL_GPL(mpi_free);
+/****************
+ * Note: This copy function should not interpret the MPI
+ * but copy it transparently.
+ */
+MPI mpi_copy(MPI a)
+{
+ int i;
+ MPI b;
+
+ if (a) {
+ b = mpi_alloc(a->nlimbs);
+ b->nlimbs = a->nlimbs;
+ b->sign = a->sign;
+ b->flags = a->flags;
+ b->flags &= ~(16|32); /* Reset the immutable and constant flags. */
+ for (i = 0; i < b->nlimbs; i++)
+ b->d[i] = a->d[i];
+ } else
+ b = NULL;
+ return b;
+}
+
+/****************
+ * This function allocates an MPI which is optimized to hold
+ * a value as large as the one given in the argument and allocates it
+ * with the same flags as A.
+ */
+MPI mpi_alloc_like(MPI a)
+{
+ MPI b;
+
+ if (a) {
+ b = mpi_alloc(a->nlimbs);
+ b->nlimbs = 0;
+ b->sign = 0;
+ b->flags = a->flags;
+ } else
+ b = NULL;
+
+ return b;
+}
+
+
+/* Set U into W and release U. If W is NULL only U will be released. */
+void mpi_snatch(MPI w, MPI u)
+{
+ if (w) {
+ mpi_assign_limb_space(w, u->d, u->alloced);
+ w->nlimbs = u->nlimbs;
+ w->sign = u->sign;
+ w->flags = u->flags;
+ u->alloced = 0;
+ u->nlimbs = 0;
+ u->d = NULL;
+ }
+ mpi_free(u);
+}
+
+
+MPI mpi_set(MPI w, MPI u)
+{
+ mpi_ptr_t wp, up;
+ mpi_size_t usize = u->nlimbs;
+ int usign = u->sign;
+
+ if (!w)
+ w = mpi_alloc(mpi_get_nlimbs(u));
+ RESIZE_IF_NEEDED(w, usize);
+ wp = w->d;
+ up = u->d;
+ MPN_COPY(wp, up, usize);
+ w->nlimbs = usize;
+ w->flags = u->flags;
+ w->flags &= ~(16|32); /* Reset the immutable and constant flags. */
+ w->sign = usign;
+ return w;
+}
+EXPORT_SYMBOL_GPL(mpi_set);
+
+MPI mpi_set_ui(MPI w, unsigned long u)
+{
+ if (!w)
+ w = mpi_alloc(1);
+	/* FIXME: If U is 0 there is no need to resize and thus possibly
+	 * allocate the limbs.
+	 */
+ RESIZE_IF_NEEDED(w, 1);
+ w->d[0] = u;
+ w->nlimbs = u ? 1 : 0;
+ w->sign = 0;
+ w->flags = 0;
+ return w;
+}
+EXPORT_SYMBOL_GPL(mpi_set_ui);
+
+MPI mpi_alloc_set_ui(unsigned long u)
+{
+ MPI w = mpi_alloc(1);
+ w->d[0] = u;
+ w->nlimbs = u ? 1 : 0;
+ w->sign = 0;
+ return w;
+}
+
+/****************
+ * Swap the values of A and B when SWAP is 1; leave them unchanged
+ * when SWAP is 0. This implementation is intended to be
+ * constant-time regardless of SWAP.
+ */
+void mpi_swap_cond(MPI a, MPI b, unsigned long swap)
+{
+ mpi_size_t i;
+ mpi_size_t nlimbs;
+ mpi_limb_t mask = ((mpi_limb_t)0) - swap;
+ mpi_limb_t x;
+
+ if (a->alloced > b->alloced)
+ nlimbs = b->alloced;
+ else
+ nlimbs = a->alloced;
+ if (a->nlimbs > nlimbs || b->nlimbs > nlimbs)
+ return;
+
+ for (i = 0; i < nlimbs; i++) {
+ x = mask & (a->d[i] ^ b->d[i]);
+ a->d[i] = a->d[i] ^ x;
+ b->d[i] = b->d[i] ^ x;
+ }
+
+ x = mask & (a->nlimbs ^ b->nlimbs);
+ a->nlimbs = a->nlimbs ^ x;
+ b->nlimbs = b->nlimbs ^ x;
+
+ x = mask & (a->sign ^ b->sign);
+ a->sign = a->sign ^ x;
+ b->sign = b->sign ^ x;
+}
+
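
The all-ones/all-zeroes mask is what makes this constant-time: the swap and no-swap cases execute the identical XOR sequence, so no branch or memory access depends on the secret condition. A standalone reduction of the same idea to single words (illustrative userspace C with an invented helper name):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    static void swap_cond(uint64_t *a, uint64_t *b, unsigned long swap)
    {
    	uint64_t mask = 0 - (uint64_t)swap;	/* 0x0 or all-ones */
    	uint64_t x = mask & (*a ^ *b);

    	*a ^= x;	/* same instructions run either way */
    	*b ^= x;
    }

    int main(void)
    {
    	uint64_t a = 1, b = 2;

    	swap_cond(&a, &b, 0);
    	assert(a == 1 && b == 2);	/* unchanged */
    	swap_cond(&a, &b, 1);
    	assert(a == 2 && b == 1);	/* swapped */
    	printf("a=%llu b=%llu\n", (unsigned long long)a, (unsigned long long)b);
    	return 0;
    }
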
MODULE_DESCRIPTION("Multiprecision maths library");
MODULE_LICENSE("GPL");
diff --git a/lib/nlattr.c b/lib/nlattr.c
index bc5b5cf608c4..74019c8ebf6b 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -96,8 +96,8 @@ static int nla_validate_array(const struct nlattr *head, int len, int maxtype,
continue;
if (nla_len(entry) < NLA_HDRLEN) {
- NL_SET_ERR_MSG_ATTR(extack, entry,
- "Array element too short");
+ NL_SET_ERR_MSG_ATTR_POL(extack, entry, policy,
+ "Array element too short");
return -ERANGE;
}
@@ -124,6 +124,7 @@ void nla_get_range_unsigned(const struct nla_policy *pt,
range->max = U8_MAX;
break;
case NLA_U16:
+ case NLA_BINARY:
range->max = U16_MAX;
break;
case NLA_U32:
@@ -140,6 +141,7 @@ void nla_get_range_unsigned(const struct nla_policy *pt,
switch (pt->validation_type) {
case NLA_VALIDATE_RANGE:
+ case NLA_VALIDATE_RANGE_WARN_TOO_LONG:
range->min = pt->min;
range->max = pt->max;
break;
@@ -157,9 +159,10 @@ void nla_get_range_unsigned(const struct nla_policy *pt,
}
}
-static int nla_validate_int_range_unsigned(const struct nla_policy *pt,
- const struct nlattr *nla,
- struct netlink_ext_ack *extack)
+static int nla_validate_range_unsigned(const struct nla_policy *pt,
+ const struct nlattr *nla,
+ struct netlink_ext_ack *extack,
+ unsigned int validate)
{
struct netlink_range_validation range;
u64 value;
@@ -178,15 +181,39 @@ static int nla_validate_int_range_unsigned(const struct nla_policy *pt,
case NLA_MSECS:
value = nla_get_u64(nla);
break;
+ case NLA_BINARY:
+ value = nla_len(nla);
+ break;
default:
return -EINVAL;
}
nla_get_range_unsigned(pt, &range);
+ if (pt->validation_type == NLA_VALIDATE_RANGE_WARN_TOO_LONG &&
+ pt->type == NLA_BINARY && value > range.max) {
+ pr_warn_ratelimited("netlink: '%s': attribute type %d has an invalid length.\n",
+ current->comm, pt->type);
+ if (validate & NL_VALIDATE_STRICT_ATTRS) {
+ NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
+ "invalid attribute length");
+ return -EINVAL;
+ }
+
+ /* this assumes min <= max (don't validate against min) */
+ return 0;
+ }
+
if (value < range.min || value > range.max) {
- NL_SET_ERR_MSG_ATTR(extack, nla,
- "integer out of range");
+ bool binary = pt->type == NLA_BINARY;
+
+ if (binary)
+ NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
+ "binary attribute size out of range");
+ else
+ NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
+ "integer out of range");
+
return -ERANGE;
}
@@ -264,8 +291,8 @@ static int nla_validate_int_range_signed(const struct nla_policy *pt,
nla_get_range_signed(pt, &range);
if (value < range.min || value > range.max) {
- NL_SET_ERR_MSG_ATTR(extack, nla,
- "integer out of range");
+ NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
+ "integer out of range");
return -ERANGE;
}
@@ -274,7 +301,8 @@ static int nla_validate_int_range_signed(const struct nla_policy *pt,
static int nla_validate_int_range(const struct nla_policy *pt,
const struct nlattr *nla,
- struct netlink_ext_ack *extack)
+ struct netlink_ext_ack *extack,
+ unsigned int validate)
{
switch (pt->type) {
case NLA_U8:
@@ -282,7 +310,8 @@ static int nla_validate_int_range(const struct nla_policy *pt,
case NLA_U32:
case NLA_U64:
case NLA_MSECS:
- return nla_validate_int_range_unsigned(pt, nla, extack);
+ case NLA_BINARY:
+ return nla_validate_range_unsigned(pt, nla, extack, validate);
case NLA_S8:
case NLA_S16:
case NLA_S32:
@@ -294,6 +323,37 @@ static int nla_validate_int_range(const struct nla_policy *pt,
}
}
+static int nla_validate_mask(const struct nla_policy *pt,
+ const struct nlattr *nla,
+ struct netlink_ext_ack *extack)
+{
+ u64 value;
+
+ switch (pt->type) {
+ case NLA_U8:
+ value = nla_get_u8(nla);
+ break;
+ case NLA_U16:
+ value = nla_get_u16(nla);
+ break;
+ case NLA_U32:
+ value = nla_get_u32(nla);
+ break;
+ case NLA_U64:
+ value = nla_get_u64(nla);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (value & ~(u64)pt->mask) {
+ NL_SET_ERR_MSG_ATTR(extack, nla, "reserved bit set");
+ return -EINVAL;
+ }
+
+ return 0;
+}
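
The check itself is one AND against the complement of the policy mask: any bit set outside the advertised mask is a reserved bit and the attribute is rejected, which keeps those bits available for future extensions without breaking old binaries. A standalone sketch of the rule (illustrative userspace C; -22 stands in for -EINVAL):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    static int validate_mask(uint64_t value, uint64_t mask)
    {
    	return (value & ~mask) ? -22 /* -EINVAL */ : 0;
    }

    int main(void)
    {
    	const uint64_t policy_mask = 0x7;	/* only bits 0-2 defined */

    	assert(validate_mask(0x5, policy_mask) == 0);
    	assert(validate_mask(0x9, policy_mask) == -22);	/* bit 3 reserved */
    	printf("mask validation ok\n");
    	return 0;
    }
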
+
static int validate_nla(const struct nlattr *nla, int maxtype,
const struct nla_policy *policy, unsigned int validate,
struct netlink_ext_ack *extack, unsigned int depth)
@@ -313,15 +373,12 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
BUG_ON(pt->type > NLA_TYPE_MAX);
- if ((nla_attr_len[pt->type] && attrlen != nla_attr_len[pt->type]) ||
- (pt->type == NLA_EXACT_LEN &&
- pt->validation_type == NLA_VALIDATE_WARN_TOO_LONG &&
- attrlen != pt->len)) {
+ if (nla_attr_len[pt->type] && attrlen != nla_attr_len[pt->type]) {
pr_warn_ratelimited("netlink: '%s': attribute type %d has an invalid length.\n",
current->comm, type);
if (validate & NL_VALIDATE_STRICT_ATTRS) {
- NL_SET_ERR_MSG_ATTR(extack, nla,
- "invalid attribute length");
+ NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
+ "invalid attribute length");
return -EINVAL;
}
}
@@ -329,14 +386,14 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
if (validate & NL_VALIDATE_NESTED) {
if ((pt->type == NLA_NESTED || pt->type == NLA_NESTED_ARRAY) &&
!(nla->nla_type & NLA_F_NESTED)) {
- NL_SET_ERR_MSG_ATTR(extack, nla,
- "NLA_F_NESTED is missing");
+ NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
+ "NLA_F_NESTED is missing");
return -EINVAL;
}
if (pt->type != NLA_NESTED && pt->type != NLA_NESTED_ARRAY &&
pt->type != NLA_UNSPEC && (nla->nla_type & NLA_F_NESTED)) {
- NL_SET_ERR_MSG_ATTR(extack, nla,
- "NLA_F_NESTED not expected");
+ NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
+ "NLA_F_NESTED not expected");
return -EINVAL;
}
}
@@ -449,19 +506,10 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
"Unsupported attribute");
return -EINVAL;
}
- /* fall through */
- case NLA_MIN_LEN:
if (attrlen < pt->len)
goto out_err;
break;
- case NLA_EXACT_LEN:
- if (pt->validation_type != NLA_VALIDATE_WARN_TOO_LONG) {
- if (attrlen != pt->len)
- goto out_err;
- break;
- }
- /* fall through */
default:
if (pt->len)
minlen = pt->len;
@@ -479,9 +527,15 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
break;
case NLA_VALIDATE_RANGE_PTR:
case NLA_VALIDATE_RANGE:
+ case NLA_VALIDATE_RANGE_WARN_TOO_LONG:
case NLA_VALIDATE_MIN:
case NLA_VALIDATE_MAX:
- err = nla_validate_int_range(pt, nla, extack);
+ err = nla_validate_int_range(pt, nla, extack, validate);
+ if (err)
+ return err;
+ break;
+ case NLA_VALIDATE_MASK:
+ err = nla_validate_mask(pt, nla, extack);
if (err)
return err;
break;
@@ -496,7 +550,8 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
return 0;
out_err:
- NL_SET_ERR_MSG_ATTR(extack, nla, "Attribute failed policy validation");
+ NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
+ "Attribute failed policy validation");
return err;
}
@@ -816,8 +871,7 @@ EXPORT_SYMBOL(__nla_reserve);
struct nlattr *__nla_reserve_64bit(struct sk_buff *skb, int attrtype,
int attrlen, int padattr)
{
- if (nla_need_padding_for_64bit(skb))
- nla_align_64bit(skb, padattr);
+ nla_align_64bit(skb, padattr);
return __nla_reserve(skb, attrtype, attrlen);
}
diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c
index 15ca78e1c7d4..8abe1870dba4 100644
--- a/lib/nmi_backtrace.c
+++ b/lib/nmi_backtrace.c
@@ -85,12 +85,16 @@ void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
put_cpu();
}
+// Dump stacks even for idle CPUs.
+static bool backtrace_idle;
+module_param(backtrace_idle, bool, 0644);
+
bool nmi_cpu_backtrace(struct pt_regs *regs)
{
int cpu = smp_processor_id();
if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
- if (regs && cpu_in_idle(instruction_pointer(regs))) {
+ if (!READ_ONCE(backtrace_idle) && regs && cpu_in_idle(instruction_pointer(regs))) {
pr_warn("NMI backtrace for cpu %d skipped: idling at %pS\n",
cpu, (void *)instruction_pointer(regs));
} else {
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 0ba686b8fe57..e59eda07305e 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -4,6 +4,7 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
+#include <linux/slab.h>
#include <linux/percpu-refcount.h>
/*
@@ -64,18 +65,25 @@ int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
size_t align = max_t(size_t, 1 << __PERCPU_REF_FLAG_BITS,
__alignof__(unsigned long));
unsigned long start_count = 0;
+ struct percpu_ref_data *data;
ref->percpu_count_ptr = (unsigned long)
__alloc_percpu_gfp(sizeof(unsigned long), align, gfp);
if (!ref->percpu_count_ptr)
return -ENOMEM;
- ref->force_atomic = flags & PERCPU_REF_INIT_ATOMIC;
- ref->allow_reinit = flags & PERCPU_REF_ALLOW_REINIT;
+ data = kzalloc(sizeof(*ref->data), gfp);
+ if (!data) {
+ free_percpu((void __percpu *)ref->percpu_count_ptr);
+ return -ENOMEM;
+ }
+
+ data->force_atomic = flags & PERCPU_REF_INIT_ATOMIC;
+ data->allow_reinit = flags & PERCPU_REF_ALLOW_REINIT;
if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD)) {
ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
- ref->allow_reinit = true;
+ data->allow_reinit = true;
} else {
start_count += PERCPU_COUNT_BIAS;
}
@@ -85,14 +93,28 @@ int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
else
start_count++;
- atomic_long_set(&ref->count, start_count);
+ atomic_long_set(&data->count, start_count);
- ref->release = release;
- ref->confirm_switch = NULL;
+ data->release = release;
+ data->confirm_switch = NULL;
+ data->ref = ref;
+ ref->data = data;
return 0;
}
EXPORT_SYMBOL_GPL(percpu_ref_init);
+static void __percpu_ref_exit(struct percpu_ref *ref)
+{
+ unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
+
+ if (percpu_count) {
+ /* non-NULL confirm_switch indicates switching in progress */
+ WARN_ON_ONCE(ref->data && ref->data->confirm_switch);
+ free_percpu(percpu_count);
+ ref->percpu_count_ptr = __PERCPU_REF_ATOMIC_DEAD;
+ }
+}
+
/**
* percpu_ref_exit - undo percpu_ref_init()
* @ref: percpu_ref to exit
@@ -105,27 +127,36 @@ EXPORT_SYMBOL_GPL(percpu_ref_init);
*/
void percpu_ref_exit(struct percpu_ref *ref)
{
- unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
+ struct percpu_ref_data *data = ref->data;
+ unsigned long flags;
- if (percpu_count) {
- /* non-NULL confirm_switch indicates switching in progress */
- WARN_ON_ONCE(ref->confirm_switch);
- free_percpu(percpu_count);
- ref->percpu_count_ptr = __PERCPU_REF_ATOMIC_DEAD;
- }
+ __percpu_ref_exit(ref);
+
+ if (!data)
+ return;
+
+ spin_lock_irqsave(&percpu_ref_switch_lock, flags);
+ ref->percpu_count_ptr |= atomic_long_read(&ref->data->count) <<
+ __PERCPU_REF_FLAG_BITS;
+ ref->data = NULL;
+ spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
+
+ kfree(data);
}
EXPORT_SYMBOL_GPL(percpu_ref_exit);
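
The shift into percpu_count_ptr is a small trick: once the percpu allocation is gone, only the low flag bits of that word are meaningful, so the final count can be stashed in the remaining bits and read back later by percpu_ref_is_zero(). A standalone illustration (userspace C; the two-bit flag width mirrors the ATOMIC and DEAD flags but is an assumption here):

    #include <assert.h>
    #include <stdio.h>

    #define FLAG_BITS	2UL	/* assumed stand-in for __PERCPU_REF_FLAG_BITS */
    #define FLAG_MASK	((1UL << FLAG_BITS) - 1)

    int main(void)
    {
    	unsigned long ptr_word = 0x3;	/* ATOMIC|DEAD, no pointer left */
    	unsigned long count = 42;

    	ptr_word |= count << FLAG_BITS;			/* stash on exit */
    	assert((ptr_word >> FLAG_BITS) == count);	/* recover in is_zero */
    	assert((ptr_word & FLAG_MASK) == 0x3);		/* flags intact */
    	printf("stashed count = %lu\n", ptr_word >> FLAG_BITS);
    	return 0;
    }
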
static void percpu_ref_call_confirm_rcu(struct rcu_head *rcu)
{
- struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
+ struct percpu_ref_data *data = container_of(rcu,
+ struct percpu_ref_data, rcu);
+ struct percpu_ref *ref = data->ref;
- ref->confirm_switch(ref);
- ref->confirm_switch = NULL;
+ data->confirm_switch(ref);
+ data->confirm_switch = NULL;
wake_up_all(&percpu_ref_switch_waitq);
- if (!ref->allow_reinit)
- percpu_ref_exit(ref);
+ if (!data->allow_reinit)
+ __percpu_ref_exit(ref);
/* drop ref from percpu_ref_switch_to_atomic() */
percpu_ref_put(ref);
@@ -133,7 +164,9 @@ static void percpu_ref_call_confirm_rcu(struct rcu_head *rcu)
static void percpu_ref_switch_to_atomic_rcu(struct rcu_head *rcu)
{
- struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
+ struct percpu_ref_data *data = container_of(rcu,
+ struct percpu_ref_data, rcu);
+ struct percpu_ref *ref = data->ref;
unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
unsigned long count = 0;
int cpu;
@@ -142,7 +175,7 @@ static void percpu_ref_switch_to_atomic_rcu(struct rcu_head *rcu)
count += *per_cpu_ptr(percpu_count, cpu);
pr_debug("global %lu percpu %lu\n",
- atomic_long_read(&ref->count), count);
+ atomic_long_read(&data->count), count);
/*
* It's crucial that we sum the percpu counters _before_ adding the sum
@@ -156,11 +189,11 @@ static void percpu_ref_switch_to_atomic_rcu(struct rcu_head *rcu)
* reaching 0 before we add the percpu counts. But doing it at the same
* time is equivalent and saves us atomic operations:
*/
- atomic_long_add((long)count - PERCPU_COUNT_BIAS, &ref->count);
+ atomic_long_add((long)count - PERCPU_COUNT_BIAS, &data->count);
- WARN_ONCE(atomic_long_read(&ref->count) <= 0,
+ WARN_ONCE(atomic_long_read(&data->count) <= 0,
"percpu ref (%ps) <= 0 (%ld) after switching to atomic",
- ref->release, atomic_long_read(&ref->count));
+ data->release, atomic_long_read(&data->count));
/* @ref is viewed as dead on all CPUs, send out switch confirmation */
percpu_ref_call_confirm_rcu(rcu);
@@ -186,10 +219,11 @@ static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref,
* Non-NULL ->confirm_switch is used to indicate that switching is
* in progress. Use noop one if unspecified.
*/
- ref->confirm_switch = confirm_switch ?: percpu_ref_noop_confirm_switch;
+ ref->data->confirm_switch = confirm_switch ?:
+ percpu_ref_noop_confirm_switch;
percpu_ref_get(ref); /* put after confirmation */
- call_rcu(&ref->rcu, percpu_ref_switch_to_atomic_rcu);
+ call_rcu(&ref->data->rcu, percpu_ref_switch_to_atomic_rcu);
}
static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
@@ -202,10 +236,10 @@ static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC))
return;
- if (WARN_ON_ONCE(!ref->allow_reinit))
+ if (WARN_ON_ONCE(!ref->data->allow_reinit))
return;
- atomic_long_add(PERCPU_COUNT_BIAS, &ref->count);
+ atomic_long_add(PERCPU_COUNT_BIAS, &ref->data->count);
/*
* Restore per-cpu operation. smp_store_release() is paired
@@ -223,6 +257,8 @@ static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
static void __percpu_ref_switch_mode(struct percpu_ref *ref,
percpu_ref_func_t *confirm_switch)
{
+ struct percpu_ref_data *data = ref->data;
+
lockdep_assert_held(&percpu_ref_switch_lock);
/*
@@ -230,10 +266,10 @@ static void __percpu_ref_switch_mode(struct percpu_ref *ref,
* its completion. If the caller ensures that ATOMIC switching
* isn't in progress, this function can be called from any context.
*/
- wait_event_lock_irq(percpu_ref_switch_waitq, !ref->confirm_switch,
+ wait_event_lock_irq(percpu_ref_switch_waitq, !data->confirm_switch,
percpu_ref_switch_lock);
- if (ref->force_atomic || (ref->percpu_count_ptr & __PERCPU_REF_DEAD))
+ if (data->force_atomic || (ref->percpu_count_ptr & __PERCPU_REF_DEAD))
__percpu_ref_switch_to_atomic(ref, confirm_switch);
else
__percpu_ref_switch_to_percpu(ref);
@@ -266,7 +302,7 @@ void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
spin_lock_irqsave(&percpu_ref_switch_lock, flags);
- ref->force_atomic = true;
+ ref->data->force_atomic = true;
__percpu_ref_switch_mode(ref, confirm_switch);
spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
@@ -284,7 +320,7 @@ EXPORT_SYMBOL_GPL(percpu_ref_switch_to_atomic);
void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref)
{
percpu_ref_switch_to_atomic(ref, NULL);
- wait_event(percpu_ref_switch_waitq, !ref->confirm_switch);
+ wait_event(percpu_ref_switch_waitq, !ref->data->confirm_switch);
}
EXPORT_SYMBOL_GPL(percpu_ref_switch_to_atomic_sync);
@@ -312,7 +348,7 @@ void percpu_ref_switch_to_percpu(struct percpu_ref *ref)
spin_lock_irqsave(&percpu_ref_switch_lock, flags);
- ref->force_atomic = false;
+ ref->data->force_atomic = false;
__percpu_ref_switch_mode(ref, NULL);
spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
@@ -344,7 +380,8 @@ void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
spin_lock_irqsave(&percpu_ref_switch_lock, flags);
WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_DEAD,
- "%s called more than once on %ps!", __func__, ref->release);
+ "%s called more than once on %ps!", __func__,
+ ref->data->release);
ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
__percpu_ref_switch_mode(ref, confirm_kill);
@@ -355,6 +392,34 @@ void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
/**
+ * percpu_ref_is_zero - test whether a percpu refcount reached zero
+ * @ref: percpu_ref to test
+ *
+ * Returns %true if @ref reached zero.
+ *
+ * This function is safe to call as long as @ref is between init and exit.
+ */
+bool percpu_ref_is_zero(struct percpu_ref *ref)
+{
+ unsigned long __percpu *percpu_count;
+ unsigned long count, flags;
+
+ if (__ref_is_percpu(ref, &percpu_count))
+ return false;
+
+ /* protect us from being destroyed */
+ spin_lock_irqsave(&percpu_ref_switch_lock, flags);
+ if (ref->data)
+ count = atomic_long_read(&ref->data->count);
+ else
+ count = ref->percpu_count_ptr >> __PERCPU_REF_FLAG_BITS;
+ spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
+
+ return count == 0;
+}
+EXPORT_SYMBOL_GPL(percpu_ref_is_zero);
+
+/**
* percpu_ref_reinit - re-initialize a percpu refcount
 * @ref: percpu_ref to re-initialize
*
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index a2345de90e93..00f666d94486 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -17,7 +17,7 @@ static DEFINE_SPINLOCK(percpu_counters_lock);
#ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER
-static struct debug_obj_descr percpu_counter_debug_descr;
+static const struct debug_obj_descr percpu_counter_debug_descr;
static bool percpu_counter_fixup_free(void *addr, enum debug_obj_state state)
{
@@ -33,7 +33,7 @@ static bool percpu_counter_fixup_free(void *addr, enum debug_obj_state state)
}
}
-static struct debug_obj_descr percpu_counter_debug_descr = {
+static const struct debug_obj_descr percpu_counter_debug_descr = {
.name = "percpu_counter",
.fixup_free = percpu_counter_fixup_free,
};
@@ -85,7 +85,7 @@ void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
preempt_disable();
count = __this_cpu_read(*fbc->counters) + amount;
- if (count >= batch || count <= -batch) {
+ if (abs(count) >= batch) {
unsigned long flags;
raw_spin_lock_irqsave(&fbc->lock, flags);
fbc->count += count;
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 8e4a3a4397f2..3a4da11b804d 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -20,7 +20,6 @@
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/percpu.h>
-#include <linux/local_lock.h>
#include <linux/preempt.h> /* in_interrupt() */
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
@@ -325,7 +324,7 @@ static __must_check int __radix_tree_preload(gfp_t gfp_mask, unsigned nr)
int ret = -ENOMEM;
/*
- * Nodes preloaded by one cgroup can be be used by another cgroup, so
+ * Nodes preloaded by one cgroup can be used by another cgroup, so
* they should never be accounted to any particular memory cgroup.
*/
gfp_mask &= ~__GFP_ACCOUNT;
diff --git a/lib/random32.c b/lib/random32.c
index dfb9981ab798..4d0e05e471d7 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -38,19 +38,10 @@
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/sched.h>
+#include <linux/bitops.h>
#include <asm/unaligned.h>
#include <trace/events/random.h>
-#ifdef CONFIG_RANDOM32_SELFTEST
-static void __init prandom_state_selftest(void);
-#else
-static inline void prandom_state_selftest(void)
-{
-}
-#endif
-
-DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
-
/**
* prandom_u32_state - seeded pseudo-random number generator.
* @state: pointer to state structure holding seeded state.
@@ -71,26 +62,6 @@ u32 prandom_u32_state(struct rnd_state *state)
EXPORT_SYMBOL(prandom_u32_state);
/**
- * prandom_u32 - pseudo random number generator
- *
- * A 32 bit pseudo-random number is generated using a fast
- * algorithm suitable for simulation. This algorithm is NOT
- * considered safe for cryptographic use.
- */
-u32 prandom_u32(void)
-{
- struct rnd_state *state = &get_cpu_var(net_rand_state);
- u32 res;
-
- res = prandom_u32_state(state);
- trace_prandom_u32(res);
- put_cpu_var(net_rand_state);
-
- return res;
-}
-EXPORT_SYMBOL(prandom_u32);
-
-/**
* prandom_bytes_state - get the requested number of pseudo-random bytes
*
* @state: pointer to state structure holding seeded state.
@@ -121,20 +92,6 @@ void prandom_bytes_state(struct rnd_state *state, void *buf, size_t bytes)
}
EXPORT_SYMBOL(prandom_bytes_state);
-/**
- * prandom_bytes - get the requested number of pseudo-random bytes
- * @buf: where to copy the pseudo-random bytes to
- * @bytes: the requested number of bytes
- */
-void prandom_bytes(void *buf, size_t bytes)
-{
- struct rnd_state *state = &get_cpu_var(net_rand_state);
-
- prandom_bytes_state(state, buf, bytes);
- put_cpu_var(net_rand_state);
-}
-EXPORT_SYMBOL(prandom_bytes);
-
static void prandom_warmup(struct rnd_state *state)
{
/* Calling RNG ten times to satisfy recurrence condition */
@@ -150,96 +107,6 @@ static void prandom_warmup(struct rnd_state *state)
prandom_u32_state(state);
}
-static u32 __extract_hwseed(void)
-{
- unsigned int val = 0;
-
- (void)(arch_get_random_seed_int(&val) ||
- arch_get_random_int(&val));
-
- return val;
-}
-
-static void prandom_seed_early(struct rnd_state *state, u32 seed,
- bool mix_with_hwseed)
-{
-#define LCG(x) ((x) * 69069U) /* super-duper LCG */
-#define HWSEED() (mix_with_hwseed ? __extract_hwseed() : 0)
- state->s1 = __seed(HWSEED() ^ LCG(seed), 2U);
- state->s2 = __seed(HWSEED() ^ LCG(state->s1), 8U);
- state->s3 = __seed(HWSEED() ^ LCG(state->s2), 16U);
- state->s4 = __seed(HWSEED() ^ LCG(state->s3), 128U);
-}
-
-/**
- * prandom_seed - add entropy to pseudo random number generator
- * @entropy: entropy value
- *
- * Add some additional entropy to the prandom pool.
- */
-void prandom_seed(u32 entropy)
-{
- int i;
- /*
- * No locking on the CPUs, but then somewhat random results are, well,
- * expected.
- */
- for_each_possible_cpu(i) {
- struct rnd_state *state = &per_cpu(net_rand_state, i);
-
- state->s1 = __seed(state->s1 ^ entropy, 2U);
- prandom_warmup(state);
- }
-}
-EXPORT_SYMBOL(prandom_seed);
-
-/*
- * Generate some initially weak seeding values to allow
- * to start the prandom_u32() engine.
- */
-static int __init prandom_init(void)
-{
- int i;
-
- prandom_state_selftest();
-
- for_each_possible_cpu(i) {
- struct rnd_state *state = &per_cpu(net_rand_state, i);
- u32 weak_seed = (i + jiffies) ^ random_get_entropy();
-
- prandom_seed_early(state, weak_seed, true);
- prandom_warmup(state);
- }
-
- return 0;
-}
-core_initcall(prandom_init);
-
-static void __prandom_timer(struct timer_list *unused);
-
-static DEFINE_TIMER(seed_timer, __prandom_timer);
-
-static void __prandom_timer(struct timer_list *unused)
-{
- u32 entropy;
- unsigned long expires;
-
- get_random_bytes(&entropy, sizeof(entropy));
- prandom_seed(entropy);
-
- /* reseed every ~60 seconds, in [40 .. 80) interval with slack */
- expires = 40 + prandom_u32_max(40);
- seed_timer.expires = jiffies + msecs_to_jiffies(expires * MSEC_PER_SEC);
-
- add_timer(&seed_timer);
-}
-
-static void __init __prandom_start_seed_timer(void)
-{
- seed_timer.expires = jiffies + msecs_to_jiffies(40 * MSEC_PER_SEC);
- add_timer(&seed_timer);
-}
-
void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state)
{
int i;
@@ -259,51 +126,6 @@ void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state)
}
EXPORT_SYMBOL(prandom_seed_full_state);
-/*
- * Generate better values after random number generator
- * is fully initialized.
- */
-static void __prandom_reseed(bool late)
-{
- unsigned long flags;
- static bool latch = false;
- static DEFINE_SPINLOCK(lock);
-
- /* Asking for random bytes might result in bytes getting
- * moved into the nonblocking pool and thus marking it
- * as initialized. In this case we would double back into
- * this function and attempt to do a late reseed.
- * Ignore the pointless attempt to reseed again if we're
- * already waiting for bytes when the nonblocking pool
- * got initialized.
- */
-
- /* only allow initial seeding (late == false) once */
- if (!spin_trylock_irqsave(&lock, flags))
- return;
-
- if (latch && !late)
- goto out;
-
- latch = true;
- prandom_seed_full_state(&net_rand_state);
-out:
- spin_unlock_irqrestore(&lock, flags);
-}
-
-void prandom_reseed_late(void)
-{
- __prandom_reseed(true);
-}
-
-static int __init prandom_reseed(void)
-{
- __prandom_reseed(false);
- __prandom_start_seed_timer();
- return 0;
-}
-late_initcall(prandom_reseed);
-
#ifdef CONFIG_RANDOM32_SELFTEST
static struct prandom_test1 {
u32 seed;
@@ -423,7 +245,28 @@ static struct prandom_test2 {
{ 407983964U, 921U, 728767059U },
};
-static void __init prandom_state_selftest(void)
+static u32 __extract_hwseed(void)
+{
+ unsigned int val = 0;
+
+ (void)(arch_get_random_seed_int(&val) ||
+ arch_get_random_int(&val));
+
+ return val;
+}
+
+static void prandom_seed_early(struct rnd_state *state, u32 seed,
+ bool mix_with_hwseed)
+{
+#define LCG(x) ((x) * 69069U) /* super-duper LCG */
+#define HWSEED() (mix_with_hwseed ? __extract_hwseed() : 0)
+ state->s1 = __seed(HWSEED() ^ LCG(seed), 2U);
+ state->s2 = __seed(HWSEED() ^ LCG(state->s1), 8U);
+ state->s3 = __seed(HWSEED() ^ LCG(state->s2), 16U);
+ state->s4 = __seed(HWSEED() ^ LCG(state->s3), 128U);
+}
+
+static int __init prandom_state_selftest(void)
{
int i, j, errors = 0, runs = 0;
bool error = false;
@@ -463,5 +306,327 @@ static void __init prandom_state_selftest(void)
pr_warn("prandom: %d/%d self tests failed\n", errors, runs);
else
pr_info("prandom: %d self tests passed\n", runs);
+ return 0;
}
+core_initcall(prandom_state_selftest);
#endif
+
+/*
+ * The prandom_u32() implementation is now completely separate from the
+ * prandom_state() functions, which are retained (for now) for compatibility.
+ *
+ * Because of (ab)use in the networking code for choosing random TCP/UDP port
+ * numbers, which open DoS possibilities if guessable, we want something
+ * stronger than a standard PRNG. But the performance requirements of
+ * the network code do not allow robust crypto for this application.
+ *
+ * So this is a homebrew Junior Spaceman implementation, based on the
+ * lowest-latency trustworthy crypto primitive available, SipHash.
+ * (The authors of SipHash have not been consulted about this abuse of
+ * their work.)
+ *
+ * Standard SipHash-2-4 uses 2n+4 rounds to hash n words of input to
+ * one word of output. This abbreviated version uses 2 rounds per word
+ * of output.
+ */
+
+struct siprand_state {
+ unsigned long v0;
+ unsigned long v1;
+ unsigned long v2;
+ unsigned long v3;
+};
+
+static DEFINE_PER_CPU(struct siprand_state, net_rand_state) __latent_entropy;
+DEFINE_PER_CPU(unsigned long, net_rand_noise);
+EXPORT_PER_CPU_SYMBOL(net_rand_noise);
+
+/*
+ * This is the core CPRNG function. As "pseudorandom", this is not used
+ * for truly valuable things, just intended to be a PITA to guess.
+ * For maximum speed, we do just two SipHash rounds per word. This is
+ * the same rate as 4 rounds per 64 bits that SipHash normally uses,
+ * so hopefully it's reasonably secure.
+ *
+ * There are two changes from the official SipHash finalization:
+ * - We omit some constants XORed with v2 in the SipHash spec as irrelevant;
+ * they are there only to make the output rounds distinct from the input
+ * rounds, and this application has no input rounds.
+ * - Rather than returning v0^v1^v2^v3, return v1+v3.
+ * If you look at the SipHash round, the last operation on v3 is
+ * "v3 ^= v0", so "v0 ^ v3" just undoes that, a waste of time.
+ * Likewise "v1 ^= v2". (The rotate of v2 makes a difference, but
+ * it still cancels out half of the bits in v2 for no benefit.)
+ * Second, since the last combining operation was xor, continue the
+ * pattern of alternating xor/add for a tiny bit of extra non-linearity.
+ */
+static inline u32 siprand_u32(struct siprand_state *s)
+{
+ unsigned long v0 = s->v0, v1 = s->v1, v2 = s->v2, v3 = s->v3;
+ unsigned long n = raw_cpu_read(net_rand_noise);
+
+ v3 ^= n;
+ PRND_SIPROUND(v0, v1, v2, v3);
+ PRND_SIPROUND(v0, v1, v2, v3);
+ v0 ^= n;
+ s->v0 = v0; s->v1 = v1; s->v2 = v2; s->v3 = v3;
+ return v1 + v3;
+}
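
PRND_SIPROUND and the PRND_K0/PRND_K1 constants come from the prandom header, not this file. For reference only: on 64-bit builds the round is expected to be the standard SipHash round from <linux/siphash.h>; a minimal sketch, assuming the rol64() helper from <linux/bitops.h> (the macro name below is illustrative, not the one the patch uses):

	/* Illustrative expansion of one SipHash round (an assumption, not
	 * copied from this patch); rol64() is the kernel rotate helper. */
	#define EXAMPLE_SIPROUND(v0, v1, v2, v3) do {			\
		v0 += v1; v1 = rol64(v1, 13); v1 ^= v0; v0 = rol64(v0, 32); \
		v2 += v3; v3 = rol64(v3, 16); v3 ^= v2;			\
		v0 += v3; v3 = rol64(v3, 21); v3 ^= v0;			\
		v2 += v1; v1 = rol64(v1, 17); v1 ^= v2; v2 = rol64(v2, 32); \
	} while (0)
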
+
+
+/**
+ * prandom_u32 - pseudo random number generator
+ *
+ * A 32 bit pseudo-random number is generated using a fast
+ * algorithm suitable for simulation. This algorithm is NOT
+ * considered safe for cryptographic use.
+ */
+u32 prandom_u32(void)
+{
+ struct siprand_state *state = get_cpu_ptr(&net_rand_state);
+ u32 res = siprand_u32(state);
+
+ trace_prandom_u32(res);
+ put_cpu_ptr(&net_rand_state);
+ return res;
+}
+EXPORT_SYMBOL(prandom_u32);
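
A usage sketch for callers (the port range is an illustrative assumption, not part of this patch); prandom_u32_max() from <linux/prandom.h> maps the output into a bounded range:

	/* Hypothetical usage: pick a pseudo-random source port in [49152, 65536). */
	static u16 example_pick_port(void)
	{
		return 49152 + prandom_u32_max(16384);
	}
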
+
+/**
+ * prandom_bytes - get the requested number of pseudo-random bytes
+ * @buf: where to copy the pseudo-random bytes to
+ * @bytes: the requested number of bytes
+ */
+void prandom_bytes(void *buf, size_t bytes)
+{
+ struct siprand_state *state = get_cpu_ptr(&net_rand_state);
+ u8 *ptr = buf;
+
+ while (bytes >= sizeof(u32)) {
+ put_unaligned(siprand_u32(state), (u32 *)ptr);
+ ptr += sizeof(u32);
+ bytes -= sizeof(u32);
+ }
+
+ if (bytes > 0) {
+ u32 rem = siprand_u32(state);
+
+ do {
+ *ptr++ = (u8)rem;
+ rem >>= BITS_PER_BYTE;
+ } while (--bytes > 0);
+ }
+ put_cpu_ptr(&net_rand_state);
+}
+EXPORT_SYMBOL(prandom_bytes);
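
And a minimal sketch of the byte-filling variant (the buffer is hypothetical):

	/* Hypothetical usage: fill a small opaque cookie with pseudo-random bytes. */
	u8 cookie[8];

	prandom_bytes(cookie, sizeof(cookie));
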
+
+/**
+ * prandom_seed - add entropy to pseudo random number generator
+ * @entropy: entropy value
+ *
+ * Add some additional seed material to the prandom pool.
+ * The "entropy" is actually our IP address (the only caller is
+ * the network code), not for unpredictability, but to ensure that
+ * different machines are initialized differently.
+ */
+void prandom_seed(u32 entropy)
+{
+ int i;
+
+ add_device_randomness(&entropy, sizeof(entropy));
+
+ for_each_possible_cpu(i) {
+ struct siprand_state *state = per_cpu_ptr(&net_rand_state, i);
+ unsigned long v0 = state->v0, v1 = state->v1;
+ unsigned long v2 = state->v2, v3 = state->v3;
+
+ do {
+ v3 ^= entropy;
+ PRND_SIPROUND(v0, v1, v2, v3);
+ PRND_SIPROUND(v0, v1, v2, v3);
+ v0 ^= entropy;
+ } while (unlikely(!v0 || !v1 || !v2 || !v3));
+
+ WRITE_ONCE(state->v0, v0);
+ WRITE_ONCE(state->v1, v1);
+ WRITE_ONCE(state->v2, v2);
+ WRITE_ONCE(state->v3, v3);
+ }
+}
+EXPORT_SYMBOL(prandom_seed);
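
A hedged sketch of the caller pattern the kernel-doc above describes (the real caller lives in the networking stack; the helper name here is hypothetical):

	/* Hypothetical caller: mix a local IPv4 address into the per-CPU state
	 * so that otherwise-identical machines end up initialized differently. */
	static void example_seed_from_addr(__be32 addr)
	{
		prandom_seed((__force u32)addr);
	}
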
+
+/*
+ * Generate some initially weak seeding values to allow
+ * the prandom_u32() engine to be started.
+ */
+static int __init prandom_init_early(void)
+{
+ int i;
+ unsigned long v0, v1, v2, v3;
+
+ if (!arch_get_random_long(&v0))
+ v0 = jiffies;
+ if (!arch_get_random_long(&v1))
+ v1 = random_get_entropy();
+ v2 = v0 ^ PRND_K0;
+ v3 = v1 ^ PRND_K1;
+
+ for_each_possible_cpu(i) {
+ struct siprand_state *state;
+
+ v3 ^= i;
+ PRND_SIPROUND(v0, v1, v2, v3);
+ PRND_SIPROUND(v0, v1, v2, v3);
+ v0 ^= i;
+
+ state = per_cpu_ptr(&net_rand_state, i);
+ state->v0 = v0; state->v1 = v1;
+ state->v2 = v2; state->v3 = v3;
+ }
+
+ return 0;
+}
+core_initcall(prandom_init_early);
+
+
+/* Stronger reseeding when available, and periodically thereafter. */
+static void prandom_reseed(struct timer_list *unused);
+
+static DEFINE_TIMER(seed_timer, prandom_reseed);
+
+static void prandom_reseed(struct timer_list *unused)
+{
+ unsigned long expires;
+ int i;
+
+ /*
+ * Reinitialize each CPU's PRNG with 128 bits of key.
+ * No locking on the CPUs, but then somewhat random results are,
+ * well, expected.
+ */
+ for_each_possible_cpu(i) {
+ struct siprand_state *state;
+ unsigned long v0 = get_random_long(), v2 = v0 ^ PRND_K0;
+ unsigned long v1 = get_random_long(), v3 = v1 ^ PRND_K1;
+#if BITS_PER_LONG == 32
+ int j;
+
+ /*
+ * On 32-bit machines, hash in two extra words to
+ * approximate 128-bit key length. Not that the hash
+ * has that much security, but this prevents a trivial
+ * 64-bit brute force.
+ */
+ for (j = 0; j < 2; j++) {
+ unsigned long m = get_random_long();
+
+ v3 ^= m;
+ PRND_SIPROUND(v0, v1, v2, v3);
+ PRND_SIPROUND(v0, v1, v2, v3);
+ v0 ^= m;
+ }
+#endif
+ /*
+ * Probably impossible in practice, but there is a
+ * theoretical risk that a race between this reseeding
+ * and the target CPU writing its state back could
+ * create the all-zero SipHash fixed point.
+ *
+ * To ensure that never happens, ensure the state
+ * we write contains no zero words.
+ */
+ state = per_cpu_ptr(&net_rand_state, i);
+ WRITE_ONCE(state->v0, v0 ? v0 : -1ul);
+ WRITE_ONCE(state->v1, v1 ? v1 : -1ul);
+ WRITE_ONCE(state->v2, v2 ? v2 : -1ul);
+ WRITE_ONCE(state->v3, v3 ? v3 : -1ul);
+ }
+
+ /* reseed every ~60 seconds, in [40 .. 80) interval with slack */
+ expires = round_jiffies(jiffies + 40 * HZ + prandom_u32_max(40 * HZ));
+ mod_timer(&seed_timer, expires);
+}
+
+/*
+ * The random ready callback can be called from almost any interrupt.
+ * To avoid worrying about whether it's safe to delay that interrupt
+ * long enough to seed all CPUs, just schedule an immediate timer event.
+ */
+static void prandom_timer_start(struct random_ready_callback *unused)
+{
+ mod_timer(&seed_timer, jiffies);
+}
+
+#ifdef CONFIG_RANDOM32_SELFTEST
+/* Principle: Any two true 32-bit random numbers will differ in 16 bits on
+ * average. For each 32-bit number, there are 601M numbers differing by 16
+ * bits, and 89% of the numbers differ by at least 12 bits. Note that more
+ * than 16 differing bits also implies a correlation with inverted bits. Thus
+ * we take 1024 random numbers and compare each of them to the other ones,
+ * counting the deviation of correlated bits to 16. Constants report 32,
+ * counters report 32-log2(TEST_SIZE), and pure randoms around 6 or lower.
+ * With the u32 total, TEST_SIZE may be as large as 4096 samples.
+ */
+#define TEST_SIZE 1024
+static int __init prandom32_state_selftest(void)
+{
+ unsigned int x, y, bits, samples;
+ u32 xor, flip;
+ u32 total;
+ u32 *data;
+
+ data = kmalloc(sizeof(*data) * TEST_SIZE, GFP_KERNEL);
+ if (!data)
+ return 0;
+
+ for (samples = 0; samples < TEST_SIZE; samples++)
+ data[samples] = prandom_u32();
+
+ flip = total = 0;
+ for (x = 0; x < samples; x++) {
+ for (y = 0; y < samples; y++) {
+ if (x == y)
+ continue;
+ xor = data[x] ^ data[y];
+ flip |= xor;
+ bits = hweight32(xor);
+ total += (bits - 16) * (bits - 16);
+ }
+ }
+
+ /* We'll return the average deviation as 2*sqrt(corr/samples), i.e.
+ * sqrt(4*corr/samples), which gives better integer resolution.
+ */
+ bits = int_sqrt(total / (samples * (samples - 1)) * 4);
+ if (bits > 6)
+ pr_warn("prandom32: self test failed (at least %u bits"
+ " correlated, fixed_mask=%#x fixed_value=%#x\n",
+ bits, ~flip, data[0] & ~flip);
+ else
+ pr_info("prandom32: self test passed (less than %u bits"
+ " correlated)\n",
+ bits+1);
+ kfree(data);
+ return 0;
+}
+core_initcall(prandom32_state_selftest);
+#endif /* CONFIG_RANDOM32_SELFTEST */
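
As an editorial sanity check on the threshold of 6 (this arithmetic is not in the patch): for independent uniform u32 samples x and y, hweight32(x ^ y) follows Binomial(32, 1/2), so its mean is 16 and its variance is 32/4 = 8. The accumulated total therefore averages about 8 per ordered pair, and the reported figure is int_sqrt(8 * 4) = int_sqrt(32) = 5, comfortably below the warning threshold. A constant stream instead contributes (0 - 16)^2 = 256 per pair and reports int_sqrt(1024) = 32, matching the "Constants report 32" note above.
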
+
+/*
+ * Start periodic full reseeding as soon as strong
+ * random numbers are available.
+ */
+static int __init prandom_init_late(void)
+{
+ static struct random_ready_callback random_ready = {
+ .func = prandom_timer_start
+ };
+ int ret = add_random_ready_callback(&random_ready);
+
+ if (ret == -EALREADY) {
+ prandom_timer_start(&random_ready);
+ ret = 0;
+ }
+ return ret;
+}
+late_initcall(prandom_init_late);
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 5d63a8857f36..0a482ef988e5 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -365,6 +365,37 @@ int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
}
EXPORT_SYMBOL(sg_alloc_table);
+static struct scatterlist *get_next_sg(struct sg_table *table,
+ struct scatterlist *cur,
+ unsigned long needed_sges,
+ gfp_t gfp_mask)
+{
+ struct scatterlist *new_sg, *next_sg;
+ unsigned int alloc_size;
+
+ if (cur) {
+ next_sg = sg_next(cur);
+ /* Check if the last entry should be kept for chaining */
+ if (!sg_is_last(next_sg) || needed_sges == 1)
+ return next_sg;
+ }
+
+ alloc_size = min_t(unsigned long, needed_sges, SG_MAX_SINGLE_ALLOC);
+ new_sg = sg_kmalloc(alloc_size, gfp_mask);
+ if (!new_sg)
+ return ERR_PTR(-ENOMEM);
+ sg_init_table(new_sg, alloc_size);
+ if (cur) {
+ __sg_chain(next_sg, new_sg);
+ table->orig_nents += alloc_size - 1;
+ } else {
+ table->sgl = new_sg;
+ table->orig_nents = alloc_size;
+ table->nents = 0;
+ }
+ return new_sg;
+}
+
/**
* __sg_alloc_table_from_pages - Allocate and initialize an sg table from
* an array of pages
@@ -373,30 +404,69 @@ EXPORT_SYMBOL(sg_alloc_table);
* @n_pages: Number of pages in the pages array
* @offset: Offset from start of the first page to the start of a buffer
* @size: Number of valid bytes in the buffer (after offset)
- * @max_segment: Maximum size of a scatterlist node in bytes (page aligned)
+ * @max_segment: Maximum size of a scatterlist element in bytes
+ * @prv: Last populated SGE in sgt, or NULL to build a new table
+ * @left_pages: Number of pages the caller still has to append after this call
* @gfp_mask: GFP allocation mask
*
- * Description:
- * Allocate and initialize an sg table from a list of pages. Contiguous
- * ranges of the pages are squashed into a single scatterlist node up to the
- * maximum size specified in @max_segment. An user may provide an offset at a
- * start and a size of valid data in a buffer specified by the page array.
- * The returned sg table is released by sg_free_table.
+ * Description:
+ * If @prv is NULL, allocate and initialize an sg table from a list of pages,
+ * else reuse the scatterlist passed in at @prv.
+ * Contiguous ranges of the pages are squashed into a single scatterlist
+ * entry up to the maximum size specified in @max_segment. A user may
+ * provide an offset at a start and a size of valid data in a buffer
+ * specified by the page array.
*
* Returns:
- * 0 on success, negative error on failure
+ * Last SGE in sgt on success, an ERR_PTR() on failure.
+ * The allocation in @sgt must be released by sg_free_table.
+ *
+ * Notes:
+ * If this function returns an ERR_PTR() (i.e. on failure), the caller must
+ * call sg_free_table() to clean up any leftover allocations.
*/
-int __sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
- unsigned int n_pages, unsigned int offset,
- unsigned long size, unsigned int max_segment,
- gfp_t gfp_mask)
+struct scatterlist *__sg_alloc_table_from_pages(struct sg_table *sgt,
+ struct page **pages, unsigned int n_pages, unsigned int offset,
+ unsigned long size, unsigned int max_segment,
+ struct scatterlist *prv, unsigned int left_pages,
+ gfp_t gfp_mask)
{
- unsigned int chunks, cur_page, seg_len, i;
- int ret;
- struct scatterlist *s;
+ unsigned int chunks, cur_page, seg_len, i, prv_len = 0;
+ unsigned int added_nents = 0;
+ struct scatterlist *s = prv;
- if (WARN_ON(!max_segment || offset_in_page(max_segment)))
- return -EINVAL;
+ /*
+ * The algorithm below requires max_segment to be aligned to PAGE_SIZE
+ * otherwise it can overshoot.
+ */
+ max_segment = ALIGN_DOWN(max_segment, PAGE_SIZE);
+ if (WARN_ON(max_segment < PAGE_SIZE))
+ return ERR_PTR(-EINVAL);
+
+ if (IS_ENABLED(CONFIG_ARCH_NO_SG_CHAIN) && prv)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ if (prv) {
+ unsigned long paddr = (page_to_pfn(sg_page(prv)) * PAGE_SIZE +
+ prv->offset + prv->length) /
+ PAGE_SIZE;
+
+ if (WARN_ON(offset))
+ return ERR_PTR(-EINVAL);
+
+ /* Merge contiguous pages into the last SG */
+ prv_len = prv->length;
+ while (n_pages && page_to_pfn(pages[0]) == paddr) {
+ if (prv->length + PAGE_SIZE > max_segment)
+ break;
+ prv->length += PAGE_SIZE;
+ paddr++;
+ pages++;
+ n_pages--;
+ }
+ if (!n_pages)
+ goto out;
+ }
/* compute number of contiguous chunks */
chunks = 1;
@@ -410,13 +480,9 @@ int __sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
}
}
- ret = sg_alloc_table(sgt, chunks, gfp_mask);
- if (unlikely(ret))
- return ret;
-
/* merging chunks and putting them into the scatterlist */
cur_page = 0;
- for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
+ for (i = 0; i < chunks; i++) {
unsigned int j, chunk_size;
/* look for the end of the current chunk */
@@ -429,15 +495,30 @@ int __sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
break;
}
+ /* Pass how many chunks might be left */
+ s = get_next_sg(sgt, s, chunks - i + left_pages, gfp_mask);
+ if (IS_ERR(s)) {
+ /*
+ * Restore the entry length to its value before
+ * this function was called.
+ */
+ if (prv)
+ prv->length = prv_len;
+ return s;
+ }
chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
sg_set_page(s, pages[cur_page],
min_t(unsigned long, size, chunk_size), offset);
+ added_nents++;
size -= chunk_size;
offset = 0;
cur_page = j;
}
-
- return 0;
+ sgt->nents += added_nents;
+out:
+ if (!left_pages)
+ sg_mark_end(s);
+ return s;
}
EXPORT_SYMBOL(__sg_alloc_table_from_pages);
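
A hedged usage sketch of the new append-capable API (names, batch sizes, and the surrounding error handling are illustrative assumptions, not from this patch):

	/* Hypothetical two-batch build: chain a second run of pages onto the
	 * table created by the first call, per the signature above. */
	struct sg_table sgt = {};
	struct scatterlist *last;

	last = __sg_alloc_table_from_pages(&sgt, pages, batch1, 0,
					   batch1 * PAGE_SIZE, UINT_MAX,
					   NULL, total - batch1, GFP_KERNEL);
	if (IS_ERR(last))
		return PTR_ERR(last);

	last = __sg_alloc_table_from_pages(&sgt, pages + batch1, total - batch1,
					   0, (total - batch1) * PAGE_SIZE,
					   UINT_MAX, last, 0, GFP_KERNEL);
	if (IS_ERR(last)) {
		sg_free_table(&sgt);	/* required on failure, per the Notes */
		return PTR_ERR(last);
	}
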
@@ -465,8 +546,8 @@ int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
unsigned int n_pages, unsigned int offset,
unsigned long size, gfp_t gfp_mask)
{
- return __sg_alloc_table_from_pages(sgt, pages, n_pages, offset, size,
- SCATTERLIST_MAX_SEGMENT, gfp_mask);
+ return PTR_ERR_OR_ZERO(__sg_alloc_table_from_pages(sgt, pages, n_pages,
+ offset, size, UINT_MAX, NULL, 0, gfp_mask));
}
EXPORT_SYMBOL(sg_alloc_table_from_pages);
@@ -504,7 +585,7 @@ struct scatterlist *sgl_alloc_order(unsigned long long length,
nalloc++;
}
sgl = kmalloc_array(nalloc, sizeof(struct scatterlist),
- (gfp & ~GFP_DMA) | __GFP_ZERO);
+ gfp & ~GFP_DMA);
if (!sgl)
return NULL;
@@ -514,7 +595,7 @@ struct scatterlist *sgl_alloc_order(unsigned long long length,
elem_len = min_t(u64, length, PAGE_SIZE << order);
page = alloc_pages(gfp, order);
if (!page) {
- sgl_free(sgl);
+ sgl_free_order(sgl, order);
return NULL;
}
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
index 963050c0283e..7f2d5fbaf243 100644
--- a/lib/string_helpers.c
+++ b/lib/string_helpers.c
@@ -649,3 +649,26 @@ char *kstrdup_quotable_file(struct file *file, gfp_t gfp)
return pathname;
}
EXPORT_SYMBOL_GPL(kstrdup_quotable_file);
+
+/**
+ * kfree_strarray - free a number of dynamically allocated strings contained
+ * in an array and the array itself
+ *
+ * @array: Dynamically allocated array of strings to free.
+ * @n: Number of strings (starting from the beginning of the array) to free.
+ *
+ * Passing a non-NULL @array with @n == 0, as well as a NULL @array, are valid
+ * use-cases. If @array is NULL, the function does nothing.
+ */
+void kfree_strarray(char **array, size_t n)
+{
+ unsigned int i;
+
+ if (!array)
+ return;
+
+ for (i = 0; i < n; i++)
+ kfree(array[i]);
+ kfree(array);
+}
+EXPORT_SYMBOL_GPL(kfree_strarray);
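
A short usage sketch (the allocation pattern is hypothetical; kfree() tolerates NULL entries, so partial kstrdup() failures are safe here):

	/* Hypothetical usage: duplicate a few strings, then release both the
	 * strings and the array in one call. */
	char **names = kcalloc(3, sizeof(*names), GFP_KERNEL);

	if (names) {
		names[0] = kstrdup("foo", GFP_KERNEL);
		names[1] = kstrdup("bar", GFP_KERNEL);
		names[2] = kstrdup("baz", GFP_KERNEL);
		kfree_strarray(names, 3);
	}
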
diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
index 34696a348864..e6d5fcc2cdf3 100644
--- a/lib/strncpy_from_user.c
+++ b/lib/strncpy_from_user.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/compiler.h>
#include <linux/export.h>
+#include <linux/fault-inject-usercopy.h>
#include <linux/kasan-checks.h>
#include <linux/thread_info.h>
#include <linux/uaccess.h>
@@ -99,6 +100,8 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
unsigned long max_addr, src_addr;
might_fault();
+ if (should_fail_usercopy())
+ return -EFAULT;
if (unlikely(count <= 0))
return 0;
diff --git a/lib/syscall.c b/lib/syscall.c
index fb328e7ccb08..8533d2fea2d7 100644
--- a/lib/syscall.c
+++ b/lib/syscall.c
@@ -44,7 +44,7 @@ static int collect_syscall(struct task_struct *target, struct syscall_info *info
* .data.instruction_pointer - filled with user PC
*
* If @target is blocked in a system call, returns zero with @info.data.nr
- * set to the the call's number and @info.data.args filled in with its
+ * set to the call's number and @info.data.args filled in with its
* arguments. Registers not used for system call arguments may not be available
* and it is not kosher to use &struct user_regset calls while the system
* call is still in progress. Note we may get this result if @target
diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
index df903c53952b..4425a1dd4ef1 100644
--- a/lib/test_bitmap.c
+++ b/lib/test_bitmap.c
@@ -354,50 +354,37 @@ static const struct test_bitmap_parselist parselist_tests[] __initconst = {
};
-static void __init __test_bitmap_parselist(int is_user)
+static void __init test_bitmap_parselist(void)
{
int i;
int err;
ktime_t time;
DECLARE_BITMAP(bmap, 2048);
- char *mode = is_user ? "_user" : "";
for (i = 0; i < ARRAY_SIZE(parselist_tests); i++) {
#define ptest parselist_tests[i]
- if (is_user) {
- mm_segment_t orig_fs = get_fs();
- size_t len = strlen(ptest.in);
-
- set_fs(KERNEL_DS);
- time = ktime_get();
- err = bitmap_parselist_user((__force const char __user *)ptest.in, len,
- bmap, ptest.nbits);
- time = ktime_get() - time;
- set_fs(orig_fs);
- } else {
- time = ktime_get();
- err = bitmap_parselist(ptest.in, bmap, ptest.nbits);
- time = ktime_get() - time;
- }
+ time = ktime_get();
+ err = bitmap_parselist(ptest.in, bmap, ptest.nbits);
+ time = ktime_get() - time;
if (err != ptest.errno) {
- pr_err("parselist%s: %d: input is %s, errno is %d, expected %d\n",
- mode, i, ptest.in, err, ptest.errno);
+ pr_err("parselist: %d: input is %s, errno is %d, expected %d\n",
+ i, ptest.in, err, ptest.errno);
continue;
}
if (!err && ptest.expected
&& !__bitmap_equal(bmap, ptest.expected, ptest.nbits)) {
- pr_err("parselist%s: %d: input is %s, result is 0x%lx, expected 0x%lx\n",
- mode, i, ptest.in, bmap[0],
+ pr_err("parselist: %d: input is %s, result is 0x%lx, expected 0x%lx\n",
+ i, ptest.in, bmap[0],
*ptest.expected);
continue;
}
if (ptest.flags & PARSE_TIME)
- pr_err("parselist%s: %d: input is '%s' OK, Time: %llu\n",
- mode, i, ptest.in, time);
+ pr_err("parselist: %d: input is '%s' OK, Time: %llu\n",
+ i, ptest.in, time);
#undef ptest
}
@@ -443,75 +430,41 @@ static const struct test_bitmap_parselist parse_tests[] __initconst = {
#undef step
};
-static void __init __test_bitmap_parse(int is_user)
+static void __init test_bitmap_parse(void)
{
int i;
int err;
ktime_t time;
DECLARE_BITMAP(bmap, 2048);
- char *mode = is_user ? "_user" : "";
for (i = 0; i < ARRAY_SIZE(parse_tests); i++) {
struct test_bitmap_parselist test = parse_tests[i];
+ size_t len = test.flags & NO_LEN ? UINT_MAX : strlen(test.in);
- if (is_user) {
- size_t len = strlen(test.in);
- mm_segment_t orig_fs = get_fs();
-
- set_fs(KERNEL_DS);
- time = ktime_get();
- err = bitmap_parse_user((__force const char __user *)test.in, len,
- bmap, test.nbits);
- time = ktime_get() - time;
- set_fs(orig_fs);
- } else {
- size_t len = test.flags & NO_LEN ?
- UINT_MAX : strlen(test.in);
- time = ktime_get();
- err = bitmap_parse(test.in, len, bmap, test.nbits);
- time = ktime_get() - time;
- }
+ time = ktime_get();
+ err = bitmap_parse(test.in, len, bmap, test.nbits);
+ time = ktime_get() - time;
if (err != test.errno) {
- pr_err("parse%s: %d: input is %s, errno is %d, expected %d\n",
- mode, i, test.in, err, test.errno);
+ pr_err("parse: %d: input is %s, errno is %d, expected %d\n",
+ i, test.in, err, test.errno);
continue;
}
if (!err && test.expected
&& !__bitmap_equal(bmap, test.expected, test.nbits)) {
- pr_err("parse%s: %d: input is %s, result is 0x%lx, expected 0x%lx\n",
- mode, i, test.in, bmap[0],
+ pr_err("parse: %d: input is %s, result is 0x%lx, expected 0x%lx\n",
+ i, test.in, bmap[0],
*test.expected);
continue;
}
if (test.flags & PARSE_TIME)
- pr_err("parse%s: %d: input is '%s' OK, Time: %llu\n",
- mode, i, test.in, time);
+ pr_err("parse: %d: input is '%s' OK, Time: %llu\n",
+ i, test.in, time);
}
}
-static void __init test_bitmap_parselist(void)
-{
- __test_bitmap_parselist(0);
-}
-
-static void __init test_bitmap_parselist_user(void)
-{
- __test_bitmap_parselist(1);
-}
-
-static void __init test_bitmap_parse(void)
-{
- __test_bitmap_parse(0);
-}
-
-static void __init test_bitmap_parse_user(void)
-{
- __test_bitmap_parse(1);
-}
-
#define EXP1_IN_BITS (sizeof(exp1) * 8)
static void __init test_bitmap_arr32(void)
@@ -675,9 +628,7 @@ static void __init selftest(void)
test_replace();
test_bitmap_arr32();
test_bitmap_parse();
- test_bitmap_parse_user();
test_bitmap_parselist();
- test_bitmap_parselist_user();
test_mem_optimisations();
test_for_each_set_clump8();
test_bitmap_cut();
diff --git a/lib/test_firmware.c b/lib/test_firmware.c
index 06c955057756..2baa275a6ddf 100644
--- a/lib/test_firmware.c
+++ b/lib/test_firmware.c
@@ -52,6 +52,9 @@ struct test_batched_req {
* @name: the name of the firmware file to look for
* @into_buf: when the into_buf is used if this is true
* request_firmware_into_buf() will be used instead.
+ * @buf_size: size of buf to allocate when into_buf is true
+ * @file_offset: file offset to request when calling request_firmware_into_buf
+ * @partial: whether to request a partial read when calling request_firmware_into_buf
* @sync_direct: when the sync trigger is used if this is true
* request_firmware_direct() will be used instead.
* @send_uevent: whether or not to send a uevent for async requests
@@ -91,6 +94,9 @@ struct test_batched_req {
struct test_config {
char *name;
bool into_buf;
+ size_t buf_size;
+ size_t file_offset;
+ bool partial;
bool sync_direct;
bool send_uevent;
u8 num_requests;
@@ -185,6 +191,9 @@ static int __test_firmware_config_init(void)
test_fw_config->num_requests = TEST_FIRMWARE_NUM_REQS;
test_fw_config->send_uevent = true;
test_fw_config->into_buf = false;
+ test_fw_config->buf_size = TEST_FIRMWARE_BUF_SIZE;
+ test_fw_config->file_offset = 0;
+ test_fw_config->partial = false;
test_fw_config->sync_direct = false;
test_fw_config->req_firmware = request_firmware;
test_fw_config->test_result = 0;
@@ -238,28 +247,35 @@ static ssize_t config_show(struct device *dev,
dev_name(dev));
if (test_fw_config->name)
- len += scnprintf(buf+len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
"name:\t%s\n",
test_fw_config->name);
else
- len += scnprintf(buf+len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
"name:\tEMTPY\n");
- len += scnprintf(buf+len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
"num_requests:\t%u\n", test_fw_config->num_requests);
- len += scnprintf(buf+len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
"send_uevent:\t\t%s\n",
test_fw_config->send_uevent ?
"FW_ACTION_HOTPLUG" :
"FW_ACTION_NOHOTPLUG");
- len += scnprintf(buf+len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
"into_buf:\t\t%s\n",
test_fw_config->into_buf ? "true" : "false");
- len += scnprintf(buf+len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "buf_size:\t%zu\n", test_fw_config->buf_size);
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "file_offset:\t%zu\n", test_fw_config->file_offset);
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "partial:\t\t%s\n",
+ test_fw_config->partial ? "true" : "false");
+ len += scnprintf(buf + len, PAGE_SIZE - len,
"sync_direct:\t\t%s\n",
test_fw_config->sync_direct ? "true" : "false");
- len += scnprintf(buf+len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
"read_fw_idx:\t%u\n", test_fw_config->read_fw_idx);
mutex_unlock(&test_fw_mutex);
@@ -317,6 +333,30 @@ static ssize_t test_dev_config_show_bool(char *buf, bool val)
return snprintf(buf, PAGE_SIZE, "%d\n", val);
}
+static int test_dev_config_update_size_t(const char *buf,
+ size_t size,
+ size_t *cfg)
+{
+ int ret;
+ long new;
+
+ ret = kstrtol(buf, 10, &new);
+ if (ret)
+ return ret;
+
+ mutex_lock(&test_fw_mutex);
+ *(size_t *)cfg = new;
+ mutex_unlock(&test_fw_mutex);
+
+ /* Always return full write size even if we didn't consume all */
+ return size;
+}
+
+static ssize_t test_dev_config_show_size_t(char *buf, size_t val)
+{
+ return snprintf(buf, PAGE_SIZE, "%zu\n", val);
+}
+
static ssize_t test_dev_config_show_int(char *buf, int val)
{
return snprintf(buf, PAGE_SIZE, "%d\n", val);
@@ -402,6 +442,83 @@ static ssize_t config_into_buf_show(struct device *dev,
}
static DEVICE_ATTR_RW(config_into_buf);
+static ssize_t config_buf_size_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int rc;
+
+ mutex_lock(&test_fw_mutex);
+ if (test_fw_config->reqs) {
+ pr_err("Must call release_all_firmware prior to changing config\n");
+ rc = -EINVAL;
+ mutex_unlock(&test_fw_mutex);
+ goto out;
+ }
+ mutex_unlock(&test_fw_mutex);
+
+ rc = test_dev_config_update_size_t(buf, count,
+ &test_fw_config->buf_size);
+
+out:
+ return rc;
+}
+
+static ssize_t config_buf_size_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return test_dev_config_show_size_t(buf, test_fw_config->buf_size);
+}
+static DEVICE_ATTR_RW(config_buf_size);
+
+static ssize_t config_file_offset_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int rc;
+
+ mutex_lock(&test_fw_mutex);
+ if (test_fw_config->reqs) {
+ pr_err("Must call release_all_firmware prior to changing config\n");
+ rc = -EINVAL;
+ mutex_unlock(&test_fw_mutex);
+ goto out;
+ }
+ mutex_unlock(&test_fw_mutex);
+
+ rc = test_dev_config_update_size_t(buf, count,
+ &test_fw_config->file_offset);
+
+out:
+ return rc;
+}
+
+static ssize_t config_file_offset_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return test_dev_config_show_size_t(buf, test_fw_config->file_offset);
+}
+static DEVICE_ATTR_RW(config_file_offset);
+
+static ssize_t config_partial_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return test_dev_config_update_bool(buf,
+ count,
+ &test_fw_config->partial);
+}
+
+static ssize_t config_partial_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return test_dev_config_show_bool(buf, test_fw_config->partial);
+}
+static DEVICE_ATTR_RW(config_partial);
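
These three knobs drive the partial-read branch added below in test_fw_run_batch_request(); a condensed sketch of the request they configure (the dev and buf variables are illustrative):

	/* Hypothetical sketch of the partial read these attributes control. */
	const struct firmware *fw;
	void *buf = kzalloc(test_fw_config->buf_size, GFP_KERNEL);
	int rc;

	if (buf)
		rc = request_partial_firmware_into_buf(&fw, test_fw_config->name,
						       dev, buf,
						       test_fw_config->buf_size,
						       test_fw_config->file_offset);
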
+
static ssize_t config_sync_direct_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
@@ -659,11 +776,21 @@ static int test_fw_run_batch_request(void *data)
if (!test_buf)
return -ENOSPC;
- req->rc = request_firmware_into_buf(&req->fw,
- req->name,
- req->dev,
- test_buf,
- TEST_FIRMWARE_BUF_SIZE);
+ if (test_fw_config->partial)
+ req->rc = request_partial_firmware_into_buf
+ (&req->fw,
+ req->name,
+ req->dev,
+ test_buf,
+ test_fw_config->buf_size,
+ test_fw_config->file_offset);
+ else
+ req->rc = request_firmware_into_buf
+ (&req->fw,
+ req->name,
+ req->dev,
+ test_buf,
+ test_fw_config->buf_size);
if (!req->fw)
kfree(test_buf);
} else {
@@ -936,6 +1063,9 @@ static struct attribute *test_dev_attrs[] = {
TEST_FW_DEV_ATTR(config_name),
TEST_FW_DEV_ATTR(config_num_requests),
TEST_FW_DEV_ATTR(config_into_buf),
+ TEST_FW_DEV_ATTR(config_buf_size),
+ TEST_FW_DEV_ATTR(config_file_offset),
+ TEST_FW_DEV_ATTR(config_partial),
TEST_FW_DEV_ATTR(config_sync_direct),
TEST_FW_DEV_ATTR(config_send_uevent),
TEST_FW_DEV_ATTR(config_read_fw_idx),
diff --git a/lib/test_free_pages.c b/lib/test_free_pages.c
new file mode 100644
index 000000000000..074e76bd76b2
--- /dev/null
+++ b/lib/test_free_pages.c
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * test_free_pages.c: Check that free_pages() doesn't leak memory
+ * Copyright (c) 2020 Oracle
+ * Author: Matthew Wilcox <willy@infradead.org>
+ */
+
+#include <linux/gfp.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+
+static void test_free_pages(gfp_t gfp)
+{
+ unsigned int i;
+
+ for (i = 0; i < 1000 * 1000; i++) {
+ unsigned long addr = __get_free_pages(gfp, 3);
+ struct page *page = virt_to_page(addr);
+
+ /* Simulate page cache getting a speculative reference */
+ get_page(page);
+ free_pages(addr, 3);
+ put_page(page);
+ }
+}
+
+static int m_in(void)
+{
+ test_free_pages(GFP_KERNEL);
+ test_free_pages(GFP_KERNEL | __GFP_COMP);
+
+ return 0;
+}
+
+static void m_ex(void)
+{
+}
+
+module_init(m_in);
+module_exit(m_ex);
+MODULE_AUTHOR("Matthew Wilcox <willy@infradead.org>");
+MODULE_LICENSE("GPL");
diff --git a/lib/test_hmm.c b/lib/test_hmm.c
index e7dc3de355b7..80a78877bd93 100644
--- a/lib/test_hmm.c
+++ b/lib/test_hmm.c
@@ -36,7 +36,6 @@
static const struct dev_pagemap_ops dmirror_devmem_ops;
static const struct mmu_interval_notifier_ops dmirror_min_ops;
static dev_t dmirror_dev;
-static struct page *dmirror_zero_page;
struct dmirror_device;
@@ -460,6 +459,22 @@ static bool dmirror_allocate_chunk(struct dmirror_device *mdevice,
unsigned long pfn_last;
void *ptr;
+ devmem = kzalloc(sizeof(*devmem), GFP_KERNEL);
+ if (!devmem)
+ return false;
+
+ res = request_free_mem_region(&iomem_resource, DEVMEM_CHUNK_SIZE,
+ "hmm_dmirror");
+ if (IS_ERR(res))
+ goto err_devmem;
+
+ devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
+ devmem->pagemap.range.start = res->start;
+ devmem->pagemap.range.end = res->end;
+ devmem->pagemap.nr_range = 1;
+ devmem->pagemap.ops = &dmirror_devmem_ops;
+ devmem->pagemap.owner = mdevice;
+
mutex_lock(&mdevice->devmem_lock);
if (mdevice->devmem_count == mdevice->devmem_capacity) {
@@ -472,33 +487,18 @@ static bool dmirror_allocate_chunk(struct dmirror_device *mdevice,
sizeof(new_chunks[0]) * new_capacity,
GFP_KERNEL);
if (!new_chunks)
- goto err;
+ goto err_release;
mdevice->devmem_capacity = new_capacity;
mdevice->devmem_chunks = new_chunks;
}
- res = request_free_mem_region(&iomem_resource, DEVMEM_CHUNK_SIZE,
- "hmm_dmirror");
- if (IS_ERR(res))
- goto err;
-
- devmem = kzalloc(sizeof(*devmem), GFP_KERNEL);
- if (!devmem)
- goto err_release;
-
- devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
- devmem->pagemap.res = *res;
- devmem->pagemap.ops = &dmirror_devmem_ops;
- devmem->pagemap.owner = mdevice;
-
ptr = memremap_pages(&devmem->pagemap, numa_node_id());
if (IS_ERR(ptr))
- goto err_free;
+ goto err_release;
devmem->mdevice = mdevice;
- pfn_first = devmem->pagemap.res.start >> PAGE_SHIFT;
- pfn_last = pfn_first +
- (resource_size(&devmem->pagemap.res) >> PAGE_SHIFT);
+ pfn_first = devmem->pagemap.range.start >> PAGE_SHIFT;
+ pfn_last = pfn_first + (range_len(&devmem->pagemap.range) >> PAGE_SHIFT);
mdevice->devmem_chunks[mdevice->devmem_count++] = devmem;
mutex_unlock(&mdevice->devmem_lock);
@@ -525,12 +525,12 @@ static bool dmirror_allocate_chunk(struct dmirror_device *mdevice,
return true;
-err_free:
- kfree(devmem);
err_release:
- release_mem_region(res->start, resource_size(res));
-err:
mutex_unlock(&mdevice->devmem_lock);
+ release_mem_region(devmem->pagemap.range.start, range_len(&devmem->pagemap.range));
+err_devmem:
+ kfree(devmem);
+
return false;
}
@@ -1100,8 +1100,8 @@ static void dmirror_device_remove(struct dmirror_device *mdevice)
mdevice->devmem_chunks[i];
memunmap_pages(&devmem->pagemap);
- release_mem_region(devmem->pagemap.res.start,
- resource_size(&devmem->pagemap.res));
+ release_mem_region(devmem->pagemap.range.start,
+ range_len(&devmem->pagemap.range));
kfree(devmem);
}
kfree(mdevice->devmem_chunks);
@@ -1126,17 +1126,6 @@ static int __init hmm_dmirror_init(void)
goto err_chrdev;
}
- /*
- * Allocate a zero page to simulate a reserved page of device private
- * memory which is always zero. The zero_pfn page isn't used just to
- * make the code here simpler (i.e., we need a struct page for it).
- */
- dmirror_zero_page = alloc_page(GFP_HIGHUSER | __GFP_ZERO);
- if (!dmirror_zero_page) {
- ret = -ENOMEM;
- goto err_chrdev;
- }
-
pr_info("HMM test module loaded. This is only for testing HMM.\n");
return 0;
@@ -1152,8 +1141,6 @@ static void __exit hmm_dmirror_exit(void)
{
int id;
- if (dmirror_zero_page)
- __free_page(dmirror_zero_page);
for (id = 0; id < DMIRROR_NDEVICES; id++)
dmirror_device_remove(dmirror_devices + id);
unregister_chrdev_region(dmirror_dev, DMIRROR_NDEVICES);
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index 53e953bb1d1d..63c26171a791 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -5,8 +5,6 @@
* Author: Andrey Ryabinin <a.ryabinin@samsung.com>
*/
-#define pr_fmt(fmt) "kasan test: %s " fmt, __func__
-
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/kasan.h>
@@ -23,6 +21,8 @@
#include <asm/page.h>
+#include <kunit/test.h>
+
#include "../mm/kasan/kasan.h"
#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_SHADOW_SCALE_SIZE)
@@ -32,418 +32,370 @@
* are not eliminated as dead code.
*/
-int kasan_int_result;
void *kasan_ptr_result;
+int kasan_int_result;
-/*
- * Note: test functions are marked noinline so that their names appear in
- * reports.
- */
+static struct kunit_resource resource;
+static struct kunit_kasan_expectation fail_data;
+static bool multishot;
-static noinline void __init kmalloc_oob_right(void)
+static int kasan_test_init(struct kunit *test)
+{
+ /*
+ * Temporarily enable multi-shot mode; otherwise, we'd only get a
+ * report for the first failing case.
+ */
+ multishot = kasan_save_enable_multi_shot();
+
+ return 0;
+}
+
+static void kasan_test_exit(struct kunit *test)
+{
+ kasan_restore_multi_shot(multishot);
+}
+
+/**
+ * KUNIT_EXPECT_KASAN_FAIL() - Causes a test failure when the expression does
+ * not cause a KASAN error. This uses a KUnit resource named "kasan_data". Do
+ * not use this name for a KUnit resource outside here.
+ */
+#define KUNIT_EXPECT_KASAN_FAIL(test, condition) do { \
+ fail_data.report_expected = true; \
+ fail_data.report_found = false; \
+ kunit_add_named_resource(test, \
+ NULL, \
+ NULL, \
+ &resource, \
+ "kasan_data", &fail_data); \
+ condition; \
+ KUNIT_EXPECT_EQ(test, \
+ fail_data.report_expected, \
+ fail_data.report_found); \
+} while (0)
+
+static void kmalloc_oob_right(struct kunit *test)
{
char *ptr;
size_t size = 123;
- pr_info("out-of-bounds to right\n");
ptr = kmalloc(size, GFP_KERNEL);
- if (!ptr) {
- pr_err("Allocation failed\n");
- return;
- }
-
- ptr[size + OOB_TAG_OFF] = 'x';
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+ KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 'x');
kfree(ptr);
}
-static noinline void __init kmalloc_oob_left(void)
+static void kmalloc_oob_left(struct kunit *test)
{
char *ptr;
size_t size = 15;
- pr_info("out-of-bounds to left\n");
ptr = kmalloc(size, GFP_KERNEL);
- if (!ptr) {
- pr_err("Allocation failed\n");
- return;
- }
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
- *ptr = *(ptr - 1);
+ KUNIT_EXPECT_KASAN_FAIL(test, *ptr = *(ptr - 1));
kfree(ptr);
}
-static noinline void __init kmalloc_node_oob_right(void)
+static void kmalloc_node_oob_right(struct kunit *test)
{
char *ptr;
size_t size = 4096;
- pr_info("kmalloc_node(): out-of-bounds to right\n");
ptr = kmalloc_node(size, GFP_KERNEL, 0);
- if (!ptr) {
- pr_err("Allocation failed\n");
- return;
- }
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
- ptr[size] = 0;
+ KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
kfree(ptr);
}
-#ifdef CONFIG_SLUB
-static noinline void __init kmalloc_pagealloc_oob_right(void)
+static void kmalloc_pagealloc_oob_right(struct kunit *test)
{
char *ptr;
size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
+ if (!IS_ENABLED(CONFIG_SLUB)) {
+ kunit_info(test, "CONFIG_SLUB is not enabled.");
+ return;
+ }
+
/* Allocate a chunk that does not fit into a SLUB cache to trigger
* the page allocator fallback.
*/
- pr_info("kmalloc pagealloc allocation: out-of-bounds to right\n");
ptr = kmalloc(size, GFP_KERNEL);
- if (!ptr) {
- pr_err("Allocation failed\n");
- return;
- }
-
- ptr[size + OOB_TAG_OFF] = 0;
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+ KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 0);
kfree(ptr);
}
-static noinline void __init kmalloc_pagealloc_uaf(void)
+static void kmalloc_pagealloc_uaf(struct kunit *test)
{
char *ptr;
size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
- pr_info("kmalloc pagealloc allocation: use-after-free\n");
- ptr = kmalloc(size, GFP_KERNEL);
- if (!ptr) {
- pr_err("Allocation failed\n");
+ if (!IS_ENABLED(CONFIG_SLUB)) {
+ kunit_info(test, "CONFIG_SLUB is not enabled.");
return;
}
+ ptr = kmalloc(size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+
kfree(ptr);
- ptr[0] = 0;
+ KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = 0);
}
-static noinline void __init kmalloc_pagealloc_invalid_free(void)
+static void kmalloc_pagealloc_invalid_free(struct kunit *test)
{
char *ptr;
size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
- pr_info("kmalloc pagealloc allocation: invalid-free\n");
- ptr = kmalloc(size, GFP_KERNEL);
- if (!ptr) {
- pr_err("Allocation failed\n");
+ if (!IS_ENABLED(CONFIG_SLUB)) {
+ kunit_info(test, "CONFIG_SLUB is not enabled.");
return;
}
- kfree(ptr + 1);
+ ptr = kmalloc(size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+
+ KUNIT_EXPECT_KASAN_FAIL(test, kfree(ptr + 1));
}
-#endif
-static noinline void __init kmalloc_large_oob_right(void)
+static void kmalloc_large_oob_right(struct kunit *test)
{
char *ptr;
size_t size = KMALLOC_MAX_CACHE_SIZE - 256;
/* Allocate a chunk that is large enough, but still fits into a slab
* and does not trigger the page allocator fallback in SLUB.
*/
- pr_info("kmalloc large allocation: out-of-bounds to right\n");
ptr = kmalloc(size, GFP_KERNEL);
- if (!ptr) {
- pr_err("Allocation failed\n");
- return;
- }
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
- ptr[size] = 0;
+ KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
kfree(ptr);
}
-static noinline void __init kmalloc_oob_krealloc_more(void)
+static void kmalloc_oob_krealloc_more(struct kunit *test)
{
char *ptr1, *ptr2;
size_t size1 = 17;
size_t size2 = 19;
- pr_info("out-of-bounds after krealloc more\n");
ptr1 = kmalloc(size1, GFP_KERNEL);
- ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
- if (!ptr1 || !ptr2) {
- pr_err("Allocation failed\n");
- kfree(ptr1);
- kfree(ptr2);
- return;
- }
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
- ptr2[size2 + OOB_TAG_OFF] = 'x';
+ ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
+ KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2 + OOB_TAG_OFF] = 'x');
kfree(ptr2);
}
-static noinline void __init kmalloc_oob_krealloc_less(void)
+static void kmalloc_oob_krealloc_less(struct kunit *test)
{
char *ptr1, *ptr2;
size_t size1 = 17;
size_t size2 = 15;
- pr_info("out-of-bounds after krealloc less\n");
ptr1 = kmalloc(size1, GFP_KERNEL);
- ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
- if (!ptr1 || !ptr2) {
- pr_err("Allocation failed\n");
- kfree(ptr1);
- return;
- }
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
- ptr2[size2 + OOB_TAG_OFF] = 'x';
+ ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
+ KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2 + OOB_TAG_OFF] = 'x');
kfree(ptr2);
}
-static noinline void __init kmalloc_oob_16(void)
+static void kmalloc_oob_16(struct kunit *test)
{
struct {
u64 words[2];
} *ptr1, *ptr2;
- pr_info("kmalloc out-of-bounds for 16-bytes access\n");
ptr1 = kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
+
ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
- if (!ptr1 || !ptr2) {
- pr_err("Allocation failed\n");
- kfree(ptr1);
- kfree(ptr2);
- return;
- }
- *ptr1 = *ptr2;
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
+
+ KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
kfree(ptr1);
kfree(ptr2);
}
-static noinline void __init kmalloc_oob_memset_2(void)
+static void kmalloc_oob_memset_2(struct kunit *test)
{
char *ptr;
size_t size = 8;
- pr_info("out-of-bounds in memset2\n");
ptr = kmalloc(size, GFP_KERNEL);
- if (!ptr) {
- pr_err("Allocation failed\n");
- return;
- }
-
- memset(ptr + 7 + OOB_TAG_OFF, 0, 2);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+ KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 7 + OOB_TAG_OFF, 0, 2));
kfree(ptr);
}
-static noinline void __init kmalloc_oob_memset_4(void)
+static void kmalloc_oob_memset_4(struct kunit *test)
{
char *ptr;
size_t size = 8;
- pr_info("out-of-bounds in memset4\n");
ptr = kmalloc(size, GFP_KERNEL);
- if (!ptr) {
- pr_err("Allocation failed\n");
- return;
- }
-
- memset(ptr + 5 + OOB_TAG_OFF, 0, 4);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+ KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 5 + OOB_TAG_OFF, 0, 4));
kfree(ptr);
}
-static noinline void __init kmalloc_oob_memset_8(void)
+static void kmalloc_oob_memset_8(struct kunit *test)
{
char *ptr;
size_t size = 8;
- pr_info("out-of-bounds in memset8\n");
ptr = kmalloc(size, GFP_KERNEL);
- if (!ptr) {
- pr_err("Allocation failed\n");
- return;
- }
-
- memset(ptr + 1 + OOB_TAG_OFF, 0, 8);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+ KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 1 + OOB_TAG_OFF, 0, 8));
kfree(ptr);
}
-static noinline void __init kmalloc_oob_memset_16(void)
+static void kmalloc_oob_memset_16(struct kunit *test)
{
char *ptr;
size_t size = 16;
- pr_info("out-of-bounds in memset16\n");
ptr = kmalloc(size, GFP_KERNEL);
- if (!ptr) {
- pr_err("Allocation failed\n");
- return;
- }
-
- memset(ptr + 1 + OOB_TAG_OFF, 0, 16);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+ KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 1 + OOB_TAG_OFF, 0, 16));
kfree(ptr);
}
-static noinline void __init kmalloc_oob_in_memset(void)
+static void kmalloc_oob_in_memset(struct kunit *test)
{
char *ptr;
size_t size = 666;
- pr_info("out-of-bounds in memset\n");
ptr = kmalloc(size, GFP_KERNEL);
- if (!ptr) {
- pr_err("Allocation failed\n");
- return;
- }
-
- memset(ptr, 0, size + 5 + OOB_TAG_OFF);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+ KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr, 0, size + 5 + OOB_TAG_OFF));
kfree(ptr);
}
-static noinline void __init kmalloc_memmove_invalid_size(void)
+static void kmalloc_memmove_invalid_size(struct kunit *test)
{
char *ptr;
size_t size = 64;
volatile size_t invalid_size = -2;
- pr_info("invalid size in memmove\n");
ptr = kmalloc(size, GFP_KERNEL);
- if (!ptr) {
- pr_err("Allocation failed\n");
- return;
- }
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
memset((char *)ptr, 0, 64);
- memmove((char *)ptr, (char *)ptr + 4, invalid_size);
+
+ KUNIT_EXPECT_KASAN_FAIL(test,
+ memmove((char *)ptr, (char *)ptr + 4, invalid_size));
kfree(ptr);
}
-static noinline void __init kmalloc_uaf(void)
+static void kmalloc_uaf(struct kunit *test)
{
char *ptr;
size_t size = 10;
- pr_info("use-after-free\n");
ptr = kmalloc(size, GFP_KERNEL);
- if (!ptr) {
- pr_err("Allocation failed\n");
- return;
- }
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
kfree(ptr);
- *(ptr + 8) = 'x';
+ KUNIT_EXPECT_KASAN_FAIL(test, *(ptr + 8) = 'x');
}
-static noinline void __init kmalloc_uaf_memset(void)
+static void kmalloc_uaf_memset(struct kunit *test)
{
char *ptr;
size_t size = 33;
- pr_info("use-after-free in memset\n");
ptr = kmalloc(size, GFP_KERNEL);
- if (!ptr) {
- pr_err("Allocation failed\n");
- return;
- }
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
kfree(ptr);
- memset(ptr, 0, size);
+ KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr, 0, size));
}
-static noinline void __init kmalloc_uaf2(void)
+static void kmalloc_uaf2(struct kunit *test)
{
char *ptr1, *ptr2;
size_t size = 43;
- pr_info("use-after-free after another kmalloc\n");
ptr1 = kmalloc(size, GFP_KERNEL);
- if (!ptr1) {
- pr_err("Allocation failed\n");
- return;
- }
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
kfree(ptr1);
+
ptr2 = kmalloc(size, GFP_KERNEL);
- if (!ptr2) {
- pr_err("Allocation failed\n");
- return;
- }
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
+
+ KUNIT_EXPECT_KASAN_FAIL(test, ptr1[40] = 'x');
+ KUNIT_EXPECT_PTR_NE(test, ptr1, ptr2);
- ptr1[40] = 'x';
- if (ptr1 == ptr2)
- pr_err("Could not detect use-after-free: ptr1 == ptr2\n");
kfree(ptr2);
}
-static noinline void __init kfree_via_page(void)
+static void kfree_via_page(struct kunit *test)
{
char *ptr;
size_t size = 8;
struct page *page;
unsigned long offset;
- pr_info("invalid-free false positive (via page)\n");
ptr = kmalloc(size, GFP_KERNEL);
- if (!ptr) {
- pr_err("Allocation failed\n");
- return;
- }
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
page = virt_to_page(ptr);
offset = offset_in_page(ptr);
kfree(page_address(page) + offset);
}
-static noinline void __init kfree_via_phys(void)
+static void kfree_via_phys(struct kunit *test)
{
char *ptr;
size_t size = 8;
phys_addr_t phys;
- pr_info("invalid-free false positive (via phys)\n");
ptr = kmalloc(size, GFP_KERNEL);
- if (!ptr) {
- pr_err("Allocation failed\n");
- return;
- }
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
phys = virt_to_phys(ptr);
kfree(phys_to_virt(phys));
}
-static noinline void __init kmem_cache_oob(void)
+static void kmem_cache_oob(struct kunit *test)
{
char *p;
size_t size = 200;
struct kmem_cache *cache = kmem_cache_create("test_cache",
size, 0,
0, NULL);
- if (!cache) {
- pr_err("Cache allocation failed\n");
- return;
- }
- pr_info("out-of-bounds in kmem_cache_alloc\n");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
p = kmem_cache_alloc(cache, GFP_KERNEL);
if (!p) {
- pr_err("Allocation failed\n");
+ kunit_err(test, "Allocation failed: %s\n", __func__);
kmem_cache_destroy(cache);
return;
}
- *p = p[size + OOB_TAG_OFF];
-
+ KUNIT_EXPECT_KASAN_FAIL(test, *p = p[size + OOB_TAG_OFF]);
kmem_cache_free(cache, p);
kmem_cache_destroy(cache);
}
-static noinline void __init memcg_accounted_kmem_cache(void)
+static void memcg_accounted_kmem_cache(struct kunit *test)
{
int i;
char *p;
@@ -451,12 +403,8 @@ static noinline void __init memcg_accounted_kmem_cache(void)
struct kmem_cache *cache;
cache = kmem_cache_create("test_cache", size, 0, SLAB_ACCOUNT, NULL);
- if (!cache) {
- pr_err("Cache allocation failed\n");
- return;
- }
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
- pr_info("allocate memcg accounted object\n");
/*
* Several allocations with a delay to allow for lazy per memcg kmem
* cache creation.
@@ -476,134 +424,93 @@ free_cache:
static char global_array[10];
-static noinline void __init kasan_global_oob(void)
+static void kasan_global_oob(struct kunit *test)
{
volatile int i = 3;
char *p = &global_array[ARRAY_SIZE(global_array) + i];
- pr_info("out-of-bounds global variable\n");
- *(volatile char *)p;
+ KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
-static noinline void __init kasan_stack_oob(void)
-{
- char stack_array[10];
- volatile int i = OOB_TAG_OFF;
- char *p = &stack_array[ARRAY_SIZE(stack_array) + i];
-
- pr_info("out-of-bounds on stack\n");
- *(volatile char *)p;
-}
-
-static noinline void __init ksize_unpoisons_memory(void)
+static void ksize_unpoisons_memory(struct kunit *test)
{
char *ptr;
size_t size = 123, real_size;
- pr_info("ksize() unpoisons the whole allocated chunk\n");
ptr = kmalloc(size, GFP_KERNEL);
- if (!ptr) {
- pr_err("Allocation failed\n");
- return;
- }
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
real_size = ksize(ptr);
/* This access doesn't trigger an error. */
ptr[size] = 'x';
/* This one does. */
- ptr[real_size] = 'y';
+ KUNIT_EXPECT_KASAN_FAIL(test, ptr[real_size] = 'y');
kfree(ptr);
}
-static noinline void __init copy_user_test(void)
+static void kasan_stack_oob(struct kunit *test)
{
- char *kmem;
- char __user *usermem;
- size_t size = 10;
- int unused;
-
- kmem = kmalloc(size, GFP_KERNEL);
- if (!kmem)
- return;
+ char stack_array[10];
+ volatile int i = OOB_TAG_OFF;
+ char *p = &stack_array[ARRAY_SIZE(stack_array) + i];
- usermem = (char __user *)vm_mmap(NULL, 0, PAGE_SIZE,
- PROT_READ | PROT_WRITE | PROT_EXEC,
- MAP_ANONYMOUS | MAP_PRIVATE, 0);
- if (IS_ERR(usermem)) {
- pr_err("Failed to allocate user memory\n");
- kfree(kmem);
+ if (!IS_ENABLED(CONFIG_KASAN_STACK)) {
+ kunit_info(test, "CONFIG_KASAN_STACK is not enabled");
return;
}
- pr_info("out-of-bounds in copy_from_user()\n");
- unused = copy_from_user(kmem, usermem, size + 1 + OOB_TAG_OFF);
-
- pr_info("out-of-bounds in copy_to_user()\n");
- unused = copy_to_user(usermem, kmem, size + 1 + OOB_TAG_OFF);
-
- pr_info("out-of-bounds in __copy_from_user()\n");
- unused = __copy_from_user(kmem, usermem, size + 1 + OOB_TAG_OFF);
-
- pr_info("out-of-bounds in __copy_to_user()\n");
- unused = __copy_to_user(usermem, kmem, size + 1 + OOB_TAG_OFF);
-
- pr_info("out-of-bounds in __copy_from_user_inatomic()\n");
- unused = __copy_from_user_inatomic(kmem, usermem, size + 1 + OOB_TAG_OFF);
-
- pr_info("out-of-bounds in __copy_to_user_inatomic()\n");
- unused = __copy_to_user_inatomic(usermem, kmem, size + 1 + OOB_TAG_OFF);
-
- pr_info("out-of-bounds in strncpy_from_user()\n");
- unused = strncpy_from_user(kmem, usermem, size + 1 + OOB_TAG_OFF);
-
- vm_munmap((unsigned long)usermem, PAGE_SIZE);
- kfree(kmem);
+ KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
-static noinline void __init kasan_alloca_oob_left(void)
+static void kasan_alloca_oob_left(struct kunit *test)
{
volatile int i = 10;
char alloca_array[i];
char *p = alloca_array - 1;
- pr_info("out-of-bounds to left on alloca\n");
- *(volatile char *)p;
+ if (!IS_ENABLED(CONFIG_KASAN_STACK)) {
+ kunit_info(test, "CONFIG_KASAN_STACK is not enabled");
+ return;
+ }
+
+ KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
-static noinline void __init kasan_alloca_oob_right(void)
+static void kasan_alloca_oob_right(struct kunit *test)
{
volatile int i = 10;
char alloca_array[i];
char *p = alloca_array + i;
- pr_info("out-of-bounds to right on alloca\n");
- *(volatile char *)p;
+ if (!IS_ENABLED(CONFIG_KASAN_STACK)) {
+ kunit_info(test, "CONFIG_KASAN_STACK is not enabled");
+ return;
+ }
+
+ KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
-static noinline void __init kmem_cache_double_free(void)
+static void kmem_cache_double_free(struct kunit *test)
{
char *p;
size_t size = 200;
struct kmem_cache *cache;
cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
- if (!cache) {
- pr_err("Cache allocation failed\n");
- return;
- }
- pr_info("double-free on heap object\n");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
+
p = kmem_cache_alloc(cache, GFP_KERNEL);
if (!p) {
- pr_err("Allocation failed\n");
+ kunit_err(test, "Allocation failed: %s\n", __func__);
kmem_cache_destroy(cache);
return;
}
kmem_cache_free(cache, p);
- kmem_cache_free(cache, p);
+ KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p));
kmem_cache_destroy(cache);
}
-static noinline void __init kmem_cache_invalid_free(void)
+static void kmem_cache_invalid_free(struct kunit *test)
{
char *p;
size_t size = 200;
@@ -611,20 +518,17 @@ static noinline void __init kmem_cache_invalid_free(void)
cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
NULL);
- if (!cache) {
- pr_err("Cache allocation failed\n");
- return;
- }
- pr_info("invalid-free of heap object\n");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
+
p = kmem_cache_alloc(cache, GFP_KERNEL);
if (!p) {
- pr_err("Allocation failed\n");
+ kunit_err(test, "Allocation failed: %s\n", __func__);
kmem_cache_destroy(cache);
return;
}
/* Trigger invalid free, the object doesn't get freed */
- kmem_cache_free(cache, p + 1);
+ KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p + 1));
/*
* Properly free the object to prevent the "Objects remaining in
@@ -635,45 +539,63 @@ static noinline void __init kmem_cache_invalid_free(void)
kmem_cache_destroy(cache);
}
-static noinline void __init kasan_memchr(void)
+static void kasan_memchr(struct kunit *test)
{
char *ptr;
size_t size = 24;
- pr_info("out-of-bounds in memchr\n");
- ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
- if (!ptr)
+ /* See https://bugzilla.kernel.org/show_bug.cgi?id=206337 */
+ if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) {
+ kunit_info(test,
+ "str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT");
return;
+ }
+
+ ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+
+ KUNIT_EXPECT_KASAN_FAIL(test,
+ kasan_ptr_result = memchr(ptr, '1', size + 1));
- kasan_ptr_result = memchr(ptr, '1', size + 1);
kfree(ptr);
}
-static noinline void __init kasan_memcmp(void)
+static void kasan_memcmp(struct kunit *test)
{
char *ptr;
size_t size = 24;
int arr[9];
- pr_info("out-of-bounds in memcmp\n");
- ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
- if (!ptr)
+ /* See https://bugzilla.kernel.org/show_bug.cgi?id=206337 */
+ if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) {
+ kunit_info(test,
+ "str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT");
return;
+ }
+ ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
memset(arr, 0, sizeof(arr));
- kasan_int_result = memcmp(ptr, arr, size + 1);
+
+ KUNIT_EXPECT_KASAN_FAIL(test,
+		kasan_int_result = memcmp(ptr, arr, size + 1));
kfree(ptr);
}
-static noinline void __init kasan_strings(void)
+static void kasan_strings(struct kunit *test)
{
char *ptr;
size_t size = 24;
- pr_info("use-after-free in strchr\n");
- ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
- if (!ptr)
+ /* See https://bugzilla.kernel.org/show_bug.cgi?id=206337 */
+ if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) {
+ kunit_info(test,
+ "str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT");
return;
+ }
+
+ ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
kfree(ptr);
@@ -684,220 +606,164 @@ static noinline void __init kasan_strings(void)
 * will likely point to a zeroed byte.
*/
ptr += 16;
- kasan_ptr_result = strchr(ptr, '1');
+ KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strchr(ptr, '1'));
- pr_info("use-after-free in strrchr\n");
- kasan_ptr_result = strrchr(ptr, '1');
+ KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strrchr(ptr, '1'));
- pr_info("use-after-free in strcmp\n");
- kasan_int_result = strcmp(ptr, "2");
+ KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strcmp(ptr, "2"));
- pr_info("use-after-free in strncmp\n");
- kasan_int_result = strncmp(ptr, "2", 1);
+ KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strncmp(ptr, "2", 1));
- pr_info("use-after-free in strlen\n");
- kasan_int_result = strlen(ptr);
+ KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strlen(ptr));
- pr_info("use-after-free in strnlen\n");
- kasan_int_result = strnlen(ptr, 1);
+ KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strnlen(ptr, 1));
}
-static noinline void __init kasan_bitops(void)
+static void kasan_bitops(struct kunit *test)
{
/*
 * Allocate 1 more byte, which causes kzalloc to round up to 16 bytes;
* this way we do not actually corrupt other memory.
*/
long *bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
- if (!bits)
- return;
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);
/*
* Below calls try to access bit within allocated memory; however, the
* below accesses are still out-of-bounds, since bitops are defined to
* operate on the whole long the bit is in.
*/
- pr_info("out-of-bounds in set_bit\n");
- set_bit(BITS_PER_LONG, bits);
+ KUNIT_EXPECT_KASAN_FAIL(test, set_bit(BITS_PER_LONG, bits));
- pr_info("out-of-bounds in __set_bit\n");
- __set_bit(BITS_PER_LONG, bits);
+ KUNIT_EXPECT_KASAN_FAIL(test, __set_bit(BITS_PER_LONG, bits));
- pr_info("out-of-bounds in clear_bit\n");
- clear_bit(BITS_PER_LONG, bits);
+ KUNIT_EXPECT_KASAN_FAIL(test, clear_bit(BITS_PER_LONG, bits));
- pr_info("out-of-bounds in __clear_bit\n");
- __clear_bit(BITS_PER_LONG, bits);
+ KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit(BITS_PER_LONG, bits));
- pr_info("out-of-bounds in clear_bit_unlock\n");
- clear_bit_unlock(BITS_PER_LONG, bits);
+ KUNIT_EXPECT_KASAN_FAIL(test, clear_bit_unlock(BITS_PER_LONG, bits));
- pr_info("out-of-bounds in __clear_bit_unlock\n");
- __clear_bit_unlock(BITS_PER_LONG, bits);
+ KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit_unlock(BITS_PER_LONG, bits));
- pr_info("out-of-bounds in change_bit\n");
- change_bit(BITS_PER_LONG, bits);
+ KUNIT_EXPECT_KASAN_FAIL(test, change_bit(BITS_PER_LONG, bits));
- pr_info("out-of-bounds in __change_bit\n");
- __change_bit(BITS_PER_LONG, bits);
+ KUNIT_EXPECT_KASAN_FAIL(test, __change_bit(BITS_PER_LONG, bits));
/*
* Below calls try to access bit beyond allocated memory.
*/
- pr_info("out-of-bounds in test_and_set_bit\n");
- test_and_set_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
+ KUNIT_EXPECT_KASAN_FAIL(test,
+ test_and_set_bit(BITS_PER_LONG + BITS_PER_BYTE, bits));
- pr_info("out-of-bounds in __test_and_set_bit\n");
- __test_and_set_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
+ KUNIT_EXPECT_KASAN_FAIL(test,
+ __test_and_set_bit(BITS_PER_LONG + BITS_PER_BYTE, bits));
- pr_info("out-of-bounds in test_and_set_bit_lock\n");
- test_and_set_bit_lock(BITS_PER_LONG + BITS_PER_BYTE, bits);
+ KUNIT_EXPECT_KASAN_FAIL(test,
+ test_and_set_bit_lock(BITS_PER_LONG + BITS_PER_BYTE, bits));
- pr_info("out-of-bounds in test_and_clear_bit\n");
- test_and_clear_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
+ KUNIT_EXPECT_KASAN_FAIL(test,
+ test_and_clear_bit(BITS_PER_LONG + BITS_PER_BYTE, bits));
- pr_info("out-of-bounds in __test_and_clear_bit\n");
- __test_and_clear_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
+ KUNIT_EXPECT_KASAN_FAIL(test,
+ __test_and_clear_bit(BITS_PER_LONG + BITS_PER_BYTE, bits));
- pr_info("out-of-bounds in test_and_change_bit\n");
- test_and_change_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
+ KUNIT_EXPECT_KASAN_FAIL(test,
+ test_and_change_bit(BITS_PER_LONG + BITS_PER_BYTE, bits));
- pr_info("out-of-bounds in __test_and_change_bit\n");
- __test_and_change_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
+ KUNIT_EXPECT_KASAN_FAIL(test,
+ __test_and_change_bit(BITS_PER_LONG + BITS_PER_BYTE, bits));
- pr_info("out-of-bounds in test_bit\n");
- kasan_int_result = test_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
+ KUNIT_EXPECT_KASAN_FAIL(test,
+ kasan_int_result =
+ test_bit(BITS_PER_LONG + BITS_PER_BYTE, bits));
#if defined(clear_bit_unlock_is_negative_byte)
- pr_info("out-of-bounds in clear_bit_unlock_is_negative_byte\n");
- kasan_int_result = clear_bit_unlock_is_negative_byte(BITS_PER_LONG +
- BITS_PER_BYTE, bits);
+ KUNIT_EXPECT_KASAN_FAIL(test,
+ kasan_int_result = clear_bit_unlock_is_negative_byte(
+ BITS_PER_LONG + BITS_PER_BYTE, bits));
#endif
kfree(bits);
}
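
Why these accesses are out of bounds even though the bit index looks only one past the end: bitops address bit @nr within an array of longs. A hedged, non-atomic sketch of the addressing (__set_bit_sketch is hypothetical):

/* set_bit(BITS_PER_LONG, bits) addresses bit 0 of bits[1]; the
 * read-modify-write touches all eight bytes of bits[1], but only the
 * first of those bytes lies inside the 9-byte allocation above. */
static void __set_bit_sketch(long nr, unsigned long *addr)
{
	addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}
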
-static noinline void __init kmalloc_double_kzfree(void)
+static void kmalloc_double_kzfree(struct kunit *test)
{
char *ptr;
size_t size = 16;
- pr_info("double-free (kfree_sensitive)\n");
ptr = kmalloc(size, GFP_KERNEL);
- if (!ptr) {
- pr_err("Allocation failed\n");
- return;
- }
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
kfree_sensitive(ptr);
- kfree_sensitive(ptr);
+ KUNIT_EXPECT_KASAN_FAIL(test, kfree_sensitive(ptr));
}
-#ifdef CONFIG_KASAN_VMALLOC
-static noinline void __init vmalloc_oob(void)
+static void vmalloc_oob(struct kunit *test)
{
void *area;
- pr_info("vmalloc out-of-bounds\n");
+ if (!IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
+		kunit_info(test, "CONFIG_KASAN_VMALLOC is not enabled");
+ return;
+ }
/*
* We have to be careful not to hit the guard page.
* The MMU will catch that and crash us.
*/
area = vmalloc(3000);
- if (!area) {
- pr_err("Allocation failed\n");
- return;
- }
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, area);
- ((volatile char *)area)[3100];
+ KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)area)[3100]);
vfree(area);
}
-#else
-static void __init vmalloc_oob(void) {}
-#endif
-static struct kasan_rcu_info {
- int i;
- struct rcu_head rcu;
-} *global_rcu_ptr;
-
-static noinline void __init kasan_rcu_reclaim(struct rcu_head *rp)
-{
- struct kasan_rcu_info *fp = container_of(rp,
- struct kasan_rcu_info, rcu);
-
- kfree(fp);
- fp->i = 1;
-}
-
-static noinline void __init kasan_rcu_uaf(void)
-{
- struct kasan_rcu_info *ptr;
-
- pr_info("use-after-free in kasan_rcu_reclaim\n");
- ptr = kmalloc(sizeof(struct kasan_rcu_info), GFP_KERNEL);
- if (!ptr) {
- pr_err("Allocation failed\n");
- return;
- }
-
- global_rcu_ptr = rcu_dereference_protected(ptr, NULL);
- call_rcu(&global_rcu_ptr->rcu, kasan_rcu_reclaim);
-}
-
-static int __init kmalloc_tests_init(void)
-{
- /*
- * Temporarily enable multi-shot mode. Otherwise, we'd only get a
- * report for the first case.
- */
- bool multishot = kasan_save_enable_multi_shot();
-
- kmalloc_oob_right();
- kmalloc_oob_left();
- kmalloc_node_oob_right();
-#ifdef CONFIG_SLUB
- kmalloc_pagealloc_oob_right();
- kmalloc_pagealloc_uaf();
- kmalloc_pagealloc_invalid_free();
-#endif
- kmalloc_large_oob_right();
- kmalloc_oob_krealloc_more();
- kmalloc_oob_krealloc_less();
- kmalloc_oob_16();
- kmalloc_oob_in_memset();
- kmalloc_oob_memset_2();
- kmalloc_oob_memset_4();
- kmalloc_oob_memset_8();
- kmalloc_oob_memset_16();
- kmalloc_memmove_invalid_size();
- kmalloc_uaf();
- kmalloc_uaf_memset();
- kmalloc_uaf2();
- kfree_via_page();
- kfree_via_phys();
- kmem_cache_oob();
- memcg_accounted_kmem_cache();
- kasan_stack_oob();
- kasan_global_oob();
- kasan_alloca_oob_left();
- kasan_alloca_oob_right();
- ksize_unpoisons_memory();
- copy_user_test();
- kmem_cache_double_free();
- kmem_cache_invalid_free();
- kasan_memchr();
- kasan_memcmp();
- kasan_strings();
- kasan_bitops();
- kmalloc_double_kzfree();
- vmalloc_oob();
- kasan_rcu_uaf();
-
- kasan_restore_multi_shot(multishot);
-
- return -EAGAIN;
-}
+static struct kunit_case kasan_kunit_test_cases[] = {
+ KUNIT_CASE(kmalloc_oob_right),
+ KUNIT_CASE(kmalloc_oob_left),
+ KUNIT_CASE(kmalloc_node_oob_right),
+ KUNIT_CASE(kmalloc_pagealloc_oob_right),
+ KUNIT_CASE(kmalloc_pagealloc_uaf),
+ KUNIT_CASE(kmalloc_pagealloc_invalid_free),
+ KUNIT_CASE(kmalloc_large_oob_right),
+ KUNIT_CASE(kmalloc_oob_krealloc_more),
+ KUNIT_CASE(kmalloc_oob_krealloc_less),
+ KUNIT_CASE(kmalloc_oob_16),
+ KUNIT_CASE(kmalloc_oob_in_memset),
+ KUNIT_CASE(kmalloc_oob_memset_2),
+ KUNIT_CASE(kmalloc_oob_memset_4),
+ KUNIT_CASE(kmalloc_oob_memset_8),
+ KUNIT_CASE(kmalloc_oob_memset_16),
+ KUNIT_CASE(kmalloc_memmove_invalid_size),
+ KUNIT_CASE(kmalloc_uaf),
+ KUNIT_CASE(kmalloc_uaf_memset),
+ KUNIT_CASE(kmalloc_uaf2),
+ KUNIT_CASE(kfree_via_page),
+ KUNIT_CASE(kfree_via_phys),
+ KUNIT_CASE(kmem_cache_oob),
+ KUNIT_CASE(memcg_accounted_kmem_cache),
+ KUNIT_CASE(kasan_global_oob),
+ KUNIT_CASE(kasan_stack_oob),
+ KUNIT_CASE(kasan_alloca_oob_left),
+ KUNIT_CASE(kasan_alloca_oob_right),
+ KUNIT_CASE(ksize_unpoisons_memory),
+ KUNIT_CASE(kmem_cache_double_free),
+ KUNIT_CASE(kmem_cache_invalid_free),
+ KUNIT_CASE(kasan_memchr),
+ KUNIT_CASE(kasan_memcmp),
+ KUNIT_CASE(kasan_strings),
+ KUNIT_CASE(kasan_bitops),
+ KUNIT_CASE(kmalloc_double_kzfree),
+ KUNIT_CASE(vmalloc_oob),
+ {}
+};
+
+static struct kunit_suite kasan_kunit_test_suite = {
+ .name = "kasan",
+ .init = kasan_test_init,
+ .test_cases = kasan_kunit_test_cases,
+ .exit = kasan_test_exit,
+};
+
+kunit_test_suite(kasan_kunit_test_suite);
-module_init(kmalloc_tests_init);
MODULE_LICENSE("GPL");
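
kunit_test_suite() replaces the module_init() boilerplate the old harness needed, including the -EAGAIN return that faked a failed module load. A rough, hedged sketch of the mechanism (the real macro lives in <kunit/test.h>; kasan_suite_entry is hypothetical): the macro records a pointer to the suite in a dedicated section, which the KUnit executor (see lib/kunit/executor.c in this series) walks to run every registered suite.

/* Hedged sketch only, not the actual macro expansion. */
static struct kunit_suite *const kasan_suite_entry
	__used __section(".kunit_test_suites") = &kasan_kunit_test_suite;
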
diff --git a/lib/test_kasan_module.c b/lib/test_kasan_module.c
new file mode 100644
index 000000000000..2d68db6ae67b
--- /dev/null
+++ b/lib/test_kasan_module.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
+ */
+
+#define pr_fmt(fmt) "kasan test: %s " fmt, __func__
+
+#include <linux/mman.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include "../mm/kasan/kasan.h"
+
+#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_SHADOW_SCALE_SIZE)
+
+static noinline void __init copy_user_test(void)
+{
+ char *kmem;
+ char __user *usermem;
+ size_t size = 10;
+ int unused;
+
+ kmem = kmalloc(size, GFP_KERNEL);
+ if (!kmem)
+ return;
+
+ usermem = (char __user *)vm_mmap(NULL, 0, PAGE_SIZE,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_ANONYMOUS | MAP_PRIVATE, 0);
+ if (IS_ERR(usermem)) {
+ pr_err("Failed to allocate user memory\n");
+ kfree(kmem);
+ return;
+ }
+
+ pr_info("out-of-bounds in copy_from_user()\n");
+ unused = copy_from_user(kmem, usermem, size + 1 + OOB_TAG_OFF);
+
+ pr_info("out-of-bounds in copy_to_user()\n");
+ unused = copy_to_user(usermem, kmem, size + 1 + OOB_TAG_OFF);
+
+ pr_info("out-of-bounds in __copy_from_user()\n");
+ unused = __copy_from_user(kmem, usermem, size + 1 + OOB_TAG_OFF);
+
+ pr_info("out-of-bounds in __copy_to_user()\n");
+ unused = __copy_to_user(usermem, kmem, size + 1 + OOB_TAG_OFF);
+
+ pr_info("out-of-bounds in __copy_from_user_inatomic()\n");
+ unused = __copy_from_user_inatomic(kmem, usermem, size + 1 + OOB_TAG_OFF);
+
+ pr_info("out-of-bounds in __copy_to_user_inatomic()\n");
+ unused = __copy_to_user_inatomic(usermem, kmem, size + 1 + OOB_TAG_OFF);
+
+ pr_info("out-of-bounds in strncpy_from_user()\n");
+ unused = strncpy_from_user(kmem, usermem, size + 1 + OOB_TAG_OFF);
+
+ vm_munmap((unsigned long)usermem, PAGE_SIZE);
+ kfree(kmem);
+}
+
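
A note on OOB_TAG_OFF above (hedged reasoning): generic KASAN tracks validity at byte granularity, so copying size + 1 bytes out of a 10-byte buffer is already reported. Tag-based KASAN assigns one tag per granule of KASAN_SHADOW_SCALE_SIZE bytes (16 under arm64 SW tags), so a 10-byte kmalloc() shares a tag across its whole 16-byte granule and the access must reach past it:

/* Illustration only: effective copy lengths for size == 10. */
size_t len = size + 1 + OOB_TAG_OFF;
/* generic KASAN: len == 11, byte 10 already hits the redzone        */
/* SW tags:       len == 27, byte 26 lands in a differently-tagged   */
/*                granule, so the copy is still reported             */
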
+static struct kasan_rcu_info {
+ int i;
+ struct rcu_head rcu;
+} *global_rcu_ptr;
+
+static noinline void __init kasan_rcu_reclaim(struct rcu_head *rp)
+{
+ struct kasan_rcu_info *fp = container_of(rp,
+ struct kasan_rcu_info, rcu);
+
+ kfree(fp);
+ fp->i = 1;
+}
+
+static noinline void __init kasan_rcu_uaf(void)
+{
+ struct kasan_rcu_info *ptr;
+
+ pr_info("use-after-free in kasan_rcu_reclaim\n");
+ ptr = kmalloc(sizeof(struct kasan_rcu_info), GFP_KERNEL);
+ if (!ptr) {
+ pr_err("Allocation failed\n");
+ return;
+ }
+
+ global_rcu_ptr = rcu_dereference_protected(ptr, NULL);
+ call_rcu(&global_rcu_ptr->rcu, kasan_rcu_reclaim);
+}
+
+static int __init test_kasan_module_init(void)
+{
+ /*
+ * Temporarily enable multi-shot mode. Otherwise, we'd only get a
+ * report for the first case.
+ */
+ bool multishot = kasan_save_enable_multi_shot();
+
+ copy_user_test();
+ kasan_rcu_uaf();
+
+ kasan_restore_multi_shot(multishot);
+ return -EAGAIN;
+}
+
+module_init(test_kasan_module_init);
+MODULE_LICENSE("GPL");
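
Why copy_user_test() and kasan_rcu_uaf() stay behind in a module instead of moving to KUnit (hedged reasoning): the user-copy checks need a live userspace mapping, and the RCU use-after-free is reported asynchronously from the RCU callback, so there is no single expression for KUNIT_EXPECT_KASAN_FAIL() to wrap. A naive conversion (kasan_rcu_uaf_wrong is hypothetical) would check the report flag before the callback has run:

/* Hypothetical: shows why a direct conversion does not work. */
static void kasan_rcu_uaf_wrong(struct kunit *test)
{
	struct kasan_rcu_info *ptr = kmalloc(sizeof(*ptr), GFP_KERNEL);

	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	global_rcu_ptr = rcu_dereference_protected(ptr, NULL);
	/* The report fires later, in kasan_rcu_reclaim(); the expectation
	 * below is evaluated immediately and would fail spuriously. */
	KUNIT_EXPECT_KASAN_FAIL(test,
		call_rcu(&global_rcu_ptr->rcu, kasan_rcu_reclaim));
}
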
diff --git a/lib/test_sysctl.c b/lib/test_sysctl.c
index 98bc92a91662..3750323973f4 100644
--- a/lib/test_sysctl.c
+++ b/lib/test_sysctl.c
@@ -16,7 +16,7 @@
*/
/*
- * This module provides an interface to the the proc sysctl interfaces. This
+ * This module provides an interface to the proc sysctl interfaces. This
* driver requires CONFIG_PROC_SYSCTL. It will not normally be loaded by the
* system unless explicitly requested by name. You can also build this driver
* into your kernel.
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index d4f97925dbd8..8294f43f4981 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -289,6 +289,27 @@ static noinline void check_xa_mark_2(struct xarray *xa)
xa_destroy(xa);
}
+static noinline void check_xa_mark_3(struct xarray *xa)
+{
+#ifdef CONFIG_XARRAY_MULTI
+ XA_STATE(xas, xa, 0x41);
+ void *entry;
+ int count = 0;
+
+ xa_store_order(xa, 0x40, 2, xa_mk_index(0x40), GFP_KERNEL);
+ xa_set_mark(xa, 0x41, XA_MARK_0);
+
+ rcu_read_lock();
+ xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0) {
+ count++;
+ XA_BUG_ON(xa, entry != xa_mk_index(0x40));
+ }
+ XA_BUG_ON(xa, count != 1);
+ rcu_read_unlock();
+ xa_destroy(xa);
+#endif
+}
+
static noinline void check_xa_mark(struct xarray *xa)
{
unsigned long index;
@@ -297,6 +318,7 @@ static noinline void check_xa_mark(struct xarray *xa)
check_xa_mark_1(xa, index);
check_xa_mark_2(xa);
+ check_xa_mark_3(xa);
}
static noinline void check_xa_shrink(struct xarray *xa)
@@ -393,6 +415,9 @@ static noinline void check_cmpxchg(struct xarray *xa)
XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, FIVE, LOTS, GFP_KERNEL) != FIVE);
XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != NULL);
XA_BUG_ON(xa, xa_cmpxchg(xa, 5, NULL, FIVE, GFP_KERNEL) != NULL);
+ XA_BUG_ON(xa, xa_insert(xa, 5, FIVE, GFP_KERNEL) != -EBUSY);
+ XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != FIVE);
+ XA_BUG_ON(xa, xa_insert(xa, 5, FIVE, GFP_KERNEL) == -EBUSY);
xa_erase_index(xa, 12345678);
xa_erase_index(xa, 5);
XA_BUG_ON(xa, !xa_empty(xa));
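
The three added assertions pin down the insert/cmpxchg contract: xa_insert() refuses to overwrite an occupied slot with -EBUSY, and once xa_cmpxchg() has emptied the slot the insert succeeds. A hedged usage sketch (replace_if_current is hypothetical):

static int replace_if_current(struct xarray *xa, unsigned long index,
			      void *expected, void *new)
{
	int err = xa_insert(xa, index, new, GFP_KERNEL);

	if (err != -EBUSY)
		return err;	/* stored into an empty slot, or -ENOMEM */
	if (xa_cmpxchg(xa, index, expected, new, GFP_KERNEL) != expected)
		return -EAGAIN;	/* entry changed under us */
	return 0;
}
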
@@ -1503,6 +1528,49 @@ static noinline void check_store_range(struct xarray *xa)
}
}
+#ifdef CONFIG_XARRAY_MULTI
+static void check_split_1(struct xarray *xa, unsigned long index,
+ unsigned int order)
+{
+ XA_STATE(xas, xa, index);
+ void *entry;
+ unsigned int i = 0;
+
+ xa_store_order(xa, index, order, xa, GFP_KERNEL);
+
+ xas_split_alloc(&xas, xa, order, GFP_KERNEL);
+ xas_lock(&xas);
+ xas_split(&xas, xa, order);
+ xas_unlock(&xas);
+
+ xa_for_each(xa, index, entry) {
+ XA_BUG_ON(xa, entry != xa);
+ i++;
+ }
+ XA_BUG_ON(xa, i != 1 << order);
+
+ xa_set_mark(xa, index, XA_MARK_0);
+ XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));
+
+ xa_destroy(xa);
+}
+
+static noinline void check_split(struct xarray *xa)
+{
+ unsigned int order;
+
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ for (order = 1; order < 2 * XA_CHUNK_SHIFT; order++) {
+ check_split_1(xa, 0, order);
+ check_split_1(xa, 1UL << order, order);
+ check_split_1(xa, 3UL << order, order);
+ }
+}
+#else
+static void check_split(struct xarray *xa) { }
+#endif
+
static void check_align_1(struct xarray *xa, char *name)
{
int i;
@@ -1575,14 +1643,9 @@ static noinline void shadow_remove(struct xarray *xa)
xa_lock(xa);
while ((node = list_first_entry_or_null(&shadow_nodes,
struct xa_node, private_list))) {
- XA_STATE(xas, node->array, 0);
XA_BUG_ON(xa, node->array != xa);
list_del_init(&node->private_list);
- xas.xa_node = xa_parent_locked(node->array, node);
- xas.xa_offset = node->offset;
- xas.xa_shift = node->shift + XA_CHUNK_SHIFT;
- xas_set_update(&xas, test_update_node);
- xas_store(&xas, NULL);
+ xa_delete_node(node, test_update_node);
}
xa_unlock(xa);
}
@@ -1649,6 +1712,26 @@ static noinline void check_account(struct xarray *xa)
#endif
}
+static noinline void check_get_order(struct xarray *xa)
+{
+ unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
+ unsigned int order;
+ unsigned long i, j;
+
+ for (i = 0; i < 3; i++)
+ XA_BUG_ON(xa, xa_get_order(xa, i) != 0);
+
+ for (order = 0; order < max_order; order++) {
+ for (i = 0; i < 10; i++) {
+ xa_store_order(xa, i << order, order,
+ xa_mk_index(i << order), GFP_KERNEL);
+ for (j = i << order; j < (i + 1) << order; j++)
+ XA_BUG_ON(xa, xa_get_order(xa, j) != order);
+ xa_erase(xa, i << order);
+ }
+ }
+}
+
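
One iteration of the nested loop, spelled out (hedged illustration; get_order_example is hypothetical, and xa_get_order() itself is added to lib/xarray.c later in this patch):

static void get_order_example(struct xarray *xa)
{
	/* An order-4 entry covers indices 0..15. */
	xa_store_order(xa, 0, 4, xa_mk_index(0), GFP_KERNEL);
	XA_BUG_ON(xa, xa_get_order(xa, 0) != 4);	/* canonical index */
	XA_BUG_ON(xa, xa_get_order(xa, 15) != 4);	/* sibling index */
	xa_erase(xa, 0);
}
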
static noinline void check_destroy(struct xarray *xa)
{
unsigned long index;
@@ -1697,6 +1780,7 @@ static int xarray_checks(void)
check_reserve(&array);
check_reserve(&xa0);
check_multi_store(&array);
+ check_get_order(&array);
check_xa_alloc();
check_find(&array);
check_find_entry(&array);
@@ -1708,6 +1792,7 @@ static int xarray_checks(void)
check_store_range(&array);
check_store_iter(&array);
check_align(&xa0);
+ check_split(&array);
check_workingset(&array, 0);
check_workingset(&array, 64);
diff --git a/lib/usercopy.c b/lib/usercopy.c
index b26509f112f9..7413dd300516 100644
--- a/lib/usercopy.c
+++ b/lib/usercopy.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/bitops.h>
+#include <linux/fault-inject-usercopy.h>
#include <linux/instrumented.h>
#include <linux/uaccess.h>
@@ -10,7 +11,7 @@ unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n
{
unsigned long res = n;
might_fault();
- if (likely(access_ok(from, n))) {
+ if (!should_fail_usercopy() && likely(access_ok(from, n))) {
instrument_copy_from_user(to, from, n);
res = raw_copy_from_user(to, from, n);
}
@@ -25,6 +26,8 @@ EXPORT_SYMBOL(_copy_from_user);
unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_fault();
+ if (should_fail_usercopy())
+ return n;
if (likely(access_ok(to, n))) {
instrument_copy_to_user(to, from, n);
n = raw_copy_to_user(to, from, n);
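
The injected failure makes _copy_from_user()/_copy_to_user() behave as if the whole copy faulted, which is exactly the path hardened callers must handle. A hedged sketch of the caller-side pattern being exercised (struct my_arg and read_arg() are hypothetical):

struct my_arg { u32 flags; u64 addr; };

static int read_arg(struct my_arg *dst, const void __user *uarg)
{
	/* copy_from_user() returns the number of bytes NOT copied. */
	if (copy_from_user(dst, uarg, sizeof(*dst)))
		return -EFAULT;
	return 0;
}
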
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index afb9521ddf91..14c9a6af1b23 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -940,13 +940,13 @@ char *bdev_name(char *buf, char *end, struct block_device *bdev,
hd = bdev->bd_disk;
buf = string(buf, end, hd->disk_name, spec);
- if (bdev->bd_part->partno) {
+ if (bdev->bd_partno) {
if (isdigit(hd->disk_name[strlen(hd->disk_name)-1])) {
if (buf < end)
*buf = 'p';
buf++;
}
- buf = number(buf, end, bdev->bd_part->partno, spec);
+ buf = number(buf, end, bdev->bd_partno, spec);
}
return buf;
}
diff --git a/lib/xarray.c b/lib/xarray.c
index e9e641d3c0c3..5fa51614802a 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -266,13 +266,14 @@ static void xa_node_free(struct xa_node *node)
*/
static void xas_destroy(struct xa_state *xas)
{
- struct xa_node *node = xas->xa_alloc;
+ struct xa_node *next, *node = xas->xa_alloc;
- if (!node)
- return;
- XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
- kmem_cache_free(radix_tree_node_cachep, node);
- xas->xa_alloc = NULL;
+ while (node) {
+ XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
+ next = rcu_dereference_raw(node->parent);
+ radix_tree_node_rcu_free(&node->rcu_head);
+ xas->xa_alloc = node = next;
+ }
}
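
The reworked xas_destroy() relies on preallocated nodes being chained through node->parent, newest first (xas_split_alloc() below builds exactly such a chain). For intuition, popping one node off that chain looks like this (pop_alloc is hypothetical):

static struct xa_node *pop_alloc(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_alloc;

	if (node)	/* chain: xa_alloc -> ... -> NULL, via ->parent */
		xas->xa_alloc = rcu_dereference_raw(node->parent);
	return node;
}
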
/**
@@ -304,6 +305,7 @@ bool xas_nomem(struct xa_state *xas, gfp_t gfp)
xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
if (!xas->xa_alloc)
return false;
+ xas->xa_alloc->parent = NULL;
XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
xas->xa_node = XAS_RESTART;
return true;
@@ -339,6 +341,7 @@ static bool __xas_nomem(struct xa_state *xas, gfp_t gfp)
}
if (!xas->xa_alloc)
return false;
+ xas->xa_alloc->parent = NULL;
XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
xas->xa_node = XAS_RESTART;
return true;
@@ -403,7 +406,7 @@ static unsigned long xas_size(const struct xa_state *xas)
/*
* Use this to calculate the maximum index that will need to be created
* in order to add the entry described by @xas. Because we cannot store a
- * multiple-index entry at index 0, the calculation is a little more complex
+ * multi-index entry at index 0, the calculation is a little more complex
* than you might expect.
*/
static unsigned long xas_max(struct xa_state *xas)
@@ -703,7 +706,7 @@ void xas_create_range(struct xa_state *xas)
unsigned char shift = xas->xa_shift;
unsigned char sibs = xas->xa_sibs;
- xas->xa_index |= ((sibs + 1) << shift) - 1;
+ xas->xa_index |= ((sibs + 1UL) << shift) - 1;
if (xas_is_node(xas) && xas->xa_node->shift == xas->xa_shift)
xas->xa_offset |= sibs;
xas->xa_shift = 0;
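
The 1UL promotion matters because sibs and shift are unsigned char, so the old expression shifted a plain int. A hedged illustration:

unsigned char shift = 36, sibs = 1;
unsigned long bad  = ((sibs + 1)   << shift) - 1; /* UB: int shifted past bit 31 */
unsigned long good = ((sibs + 1UL) << shift) - 1; /* 0x1fffffffff, as intended */
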
@@ -946,6 +949,153 @@ void xas_init_marks(const struct xa_state *xas)
}
EXPORT_SYMBOL_GPL(xas_init_marks);
+#ifdef CONFIG_XARRAY_MULTI
+static unsigned int node_get_marks(struct xa_node *node, unsigned int offset)
+{
+ unsigned int marks = 0;
+ xa_mark_t mark = XA_MARK_0;
+
+ for (;;) {
+ if (node_get_mark(node, offset, mark))
+ marks |= 1 << (__force unsigned int)mark;
+ if (mark == XA_MARK_MAX)
+ break;
+ mark_inc(mark);
+ }
+
+ return marks;
+}
+
+static void node_set_marks(struct xa_node *node, unsigned int offset,
+ struct xa_node *child, unsigned int marks)
+{
+ xa_mark_t mark = XA_MARK_0;
+
+ for (;;) {
+ if (marks & (1 << (__force unsigned int)mark)) {
+ node_set_mark(node, offset, mark);
+ if (child)
+ node_mark_all(child, mark);
+ }
+ if (mark == XA_MARK_MAX)
+ break;
+ mark_inc(mark);
+ }
+}
+
+/**
+ * xas_split_alloc() - Allocate memory for splitting an entry.
+ * @xas: XArray operation state.
+ * @entry: New entry which will be stored in the array.
+ * @order: Current entry order.
+ * @gfp: Memory allocation flags.
+ *
+ * This function should be called before calling xas_split().
+ * If necessary, it will allocate new nodes (and fill them with @entry)
+ * to prepare for the upcoming split of an entry of @order size into
+ * entries of the order stored in the @xas.
+ *
+ * Context: May sleep if @gfp flags permit.
+ */
+void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order,
+ gfp_t gfp)
+{
+ unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
+ unsigned int mask = xas->xa_sibs;
+
+ /* XXX: no support for splitting really large entries yet */
+ if (WARN_ON(xas->xa_shift + 2 * XA_CHUNK_SHIFT < order))
+ goto nomem;
+ if (xas->xa_shift + XA_CHUNK_SHIFT > order)
+ return;
+
+ do {
+ unsigned int i;
+ void *sibling;
+ struct xa_node *node;
+
+ node = kmem_cache_alloc(radix_tree_node_cachep, gfp);
+ if (!node)
+ goto nomem;
+ node->array = xas->xa;
+ for (i = 0; i < XA_CHUNK_SIZE; i++) {
+ if ((i & mask) == 0) {
+ RCU_INIT_POINTER(node->slots[i], entry);
+				sibling = xa_mk_sibling(i);
+ } else {
+ RCU_INIT_POINTER(node->slots[i], sibling);
+ }
+ }
+ RCU_INIT_POINTER(node->parent, xas->xa_alloc);
+ xas->xa_alloc = node;
+ } while (sibs-- > 0);
+
+ return;
+nomem:
+ xas_destroy(xas);
+ xas_set_err(xas, -ENOMEM);
+}
+EXPORT_SYMBOL_GPL(xas_split_alloc);
+
+/**
+ * xas_split() - Split a multi-index entry into smaller entries.
+ * @xas: XArray operation state.
+ * @entry: New entry to store in the array.
+ * @order: Current entry order.
+ *
+ * The value in the entry is copied to all the replacement entries.
+ *
+ * Context: Any context. The caller should hold the xa_lock.
+ */
+void xas_split(struct xa_state *xas, void *entry, unsigned int order)
+{
+ unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
+ unsigned int offset, marks;
+ struct xa_node *node;
+ void *curr = xas_load(xas);
+ int values = 0;
+
+ node = xas->xa_node;
+ if (xas_top(node))
+ return;
+
+ marks = node_get_marks(node, xas->xa_offset);
+
+ offset = xas->xa_offset + sibs;
+ do {
+ if (xas->xa_shift < node->shift) {
+ struct xa_node *child = xas->xa_alloc;
+
+ xas->xa_alloc = rcu_dereference_raw(child->parent);
+ child->shift = node->shift - XA_CHUNK_SHIFT;
+ child->offset = offset;
+ child->count = XA_CHUNK_SIZE;
+ child->nr_values = xa_is_value(entry) ?
+ XA_CHUNK_SIZE : 0;
+ RCU_INIT_POINTER(child->parent, node);
+ node_set_marks(node, offset, child, marks);
+ rcu_assign_pointer(node->slots[offset],
+ xa_mk_node(child));
+ if (xa_is_value(curr))
+ values--;
+ } else {
+ unsigned int canon = offset - xas->xa_sibs;
+
+ node_set_marks(node, canon, NULL, marks);
+ rcu_assign_pointer(node->slots[canon], entry);
+ while (offset > canon)
+ rcu_assign_pointer(node->slots[offset--],
+ xa_mk_sibling(canon));
+ values += (xa_is_value(entry) - xa_is_value(curr)) *
+ (xas->xa_sibs + 1);
+ }
+ } while (offset-- > xas->xa_offset);
+
+ node->nr_values += values;
+}
+EXPORT_SYMBOL_GPL(xas_split);
+#endif
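
A hedged usage sketch of the pair, mirroring check_split_1() in lib/test_xarray.c: split the entry at @index, currently of order @order, into order-0 entries (split_example is hypothetical):

static int split_example(struct xarray *xa, unsigned long index,
			 unsigned int order)
{
	XA_STATE(xas, xa, index);	/* xa_shift == 0: target order 0 */
	void *entry = xa_load(xa, index);

	xas_split_alloc(&xas, entry, order, GFP_KERNEL);
	if (xas_error(&xas))
		return xas_error(&xas);

	xas_lock(&xas);
	xas_split(&xas, entry, order);
	xas_unlock(&xas);
	return 0;
}
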
+
/**
* xas_pause() - Pause a walk to drop a lock.
* @xas: XArray operation state.
@@ -1407,7 +1557,7 @@ EXPORT_SYMBOL(__xa_store);
* @gfp: Memory allocation flags.
*
* After this function returns, loads from this index will return @entry.
- * Storing into an existing multislot entry updates the entry of every index.
+ * Storing into an existing multi-index entry updates the entry of every index.
* The marks associated with @index are unaffected unless @entry is %NULL.
*
* Context: Any context. Takes and releases the xa_lock.
@@ -1549,7 +1699,7 @@ static void xas_set_range(struct xa_state *xas, unsigned long first,
*
* After this function returns, loads from any index between @first and @last,
* inclusive will return @entry.
- * Storing into an existing multislot entry updates the entry of every index.
+ * Storing into an existing multi-index entry updates the entry of every index.
* The marks associated with @index are unaffected unless @entry is %NULL.
*
* Context: Process context. Takes and releases the xa_lock. May sleep
@@ -1592,6 +1742,46 @@ unlock:
return xas_result(&xas, NULL);
}
EXPORT_SYMBOL(xa_store_range);
+
+/**
+ * xa_get_order() - Get the order of an entry.
+ * @xa: XArray.
+ * @index: Index of the entry.
+ *
+ * Return: A number between 0 and 63 indicating the order of the entry.
+ */
+int xa_get_order(struct xarray *xa, unsigned long index)
+{
+ XA_STATE(xas, xa, index);
+ void *entry;
+ int order = 0;
+
+ rcu_read_lock();
+ entry = xas_load(&xas);
+
+ if (!entry)
+ goto unlock;
+
+ if (!xas.xa_node)
+ goto unlock;
+
+ for (;;) {
+ unsigned int slot = xas.xa_offset + (1 << order);
+
+ if (slot >= XA_CHUNK_SIZE)
+ break;
+ if (!xa_is_sibling(xas.xa_node->slots[slot]))
+ break;
+ order++;
+ }
+
+ order += xas.xa_node->shift;
+unlock:
+ rcu_read_unlock();
+
+ return order;
+}
+EXPORT_SYMBOL(xa_get_order);
#endif /* CONFIG_XARRAY_MULTI */
/**
@@ -1974,6 +2164,29 @@ unsigned int xa_extract(struct xarray *xa, void **dst, unsigned long start,
EXPORT_SYMBOL(xa_extract);
/**
+ * xa_delete_node() - Private interface for workingset code.
+ * @node: Node to be removed from the tree.
+ * @update: Function to call to update ancestor nodes.
+ *
+ * Context: xa_lock must be held on entry and will not be released.
+ */
+void xa_delete_node(struct xa_node *node, xa_update_node_t update)
+{
+ struct xa_state xas = {
+ .xa = node->array,
+ .xa_index = (unsigned long)node->offset <<
+ (node->shift + XA_CHUNK_SHIFT),
+ .xa_shift = node->shift + XA_CHUNK_SHIFT,
+ .xa_offset = node->offset,
+ .xa_node = xa_parent_locked(node->array, node),
+ .xa_update = update,
+ };
+
+ xas_store(&xas, NULL);
+}
+EXPORT_SYMBOL_GPL(xa_delete_node); /* For the benefit of the test suite */
+
+/**
* xa_destroy() - Free all internal data structures.
* @xa: XArray.
*