author	Michal Marek <mmarek@suse.cz>	2010-08-04 13:59:13 +0200
committer	Michal Marek <mmarek@suse.cz>	2010-08-04 13:59:13 +0200
commit	772320e84588dcbe1600ffb83e5f328f2209ac2a
tree	a7de21b79340aeaa17c58126f6b801b82c77b53a /lib
parent	1ce53adf13a54375d2a5c7cdbe341b2558389615
parent	9fe6206f400646a2322096b56c59891d530e8d51
Merge commit 'v2.6.35' into kbuild/kbuild
Conflicts:
arch/powerpc/Makefile
Diffstat (limited to 'lib'): 46 files changed, 2160 insertions(+), 983 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index 97b136ff117e..5b916bc0fbae 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -160,6 +160,9 @@ config TEXTSEARCH_BM
 config TEXTSEARCH_FSM
 	tristate
 
+config BTREE
+	boolean
+
 config HAS_IOMEM
 	boolean
 	depends on !NO_IOMEM
@@ -178,9 +181,6 @@ config HAS_DMA
 config CHECK_SIGNATURE
 	bool
 
-config HAVE_LMB
-	boolean
-
 config CPUMASK_OFFSTACK
 	bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS
 	help
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 2af5d84ec824..083b23d211d6 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -103,7 +103,8 @@ config HEADERS_CHECK
 
 config DEBUG_SECTION_MISMATCH
 	bool "Enable full Section mismatch analysis"
-	depends on UNDEFINED
+	depends on UNDEFINED || (BLACKFIN)
+	default y
 	# This option is on purpose disabled for now.
 	# It will be enabled when we are down to a reasonable number
 	# of section mismatch warnings (< 10 for an allyesconfig build)
@@ -355,7 +356,7 @@ config SLUB_STATS
 config DEBUG_KMEMLEAK
 	bool "Kernel memory leak detector"
 	depends on DEBUG_KERNEL && EXPERIMENTAL && !MEMORY_HOTPLUG && \
-		(X86 || ARM || PPC || S390)
+		(X86 || ARM || PPC || S390 || SPARC64 || SUPERH || MICROBLAZE)
 
 	select DEBUG_FS if SYSFS
 	select STACKTRACE if STACKTRACE_SUPPORT
@@ -499,6 +500,30 @@ config PROVE_LOCKING
 
 	 For more details, see Documentation/lockdep-design.txt.
 
+config PROVE_RCU
+	bool "RCU debugging: prove RCU correctness"
+	depends on PROVE_LOCKING
+	default n
+	help
+	 This feature enables lockdep extensions that check for correct
+	 use of RCU APIs.  This is currently under development.  Say Y
+	 if you want to debug RCU usage or help work on the PROVE_RCU
+	 feature.
+
+	 Say N if you are unsure.
+
+config PROVE_RCU_REPEATEDLY
+	bool "RCU debugging: don't disable PROVE_RCU on first splat"
+	depends on PROVE_RCU
+	default n
+	help
+	 By itself, PROVE_RCU will disable checking upon issuing the
+	 first warning (or "splat").  This feature prevents such
+	 disabling, allowing multiple RCU-lockdep warnings to be printed
+	 on a single reboot.
+
+	 Say N if you are unsure.
+
 config LOCKDEP
 	bool
 	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
@@ -520,6 +545,14 @@ config LOCK_STAT
 
 	 For more details, see Documentation/lockstat.txt
 
+	 This also enables lock events required by "perf lock",
+	 subcommand of perf.
+	 If you want to use "perf lock", you also need to turn on
+	 CONFIG_EVENT_TRACING.
+
+	 CONFIG_LOCK_STAT defines "contended" and "acquired" lock events.
+	 (CONFIG_LOCKDEP defines "acquire" and "release" events.)
+
 config DEBUG_LOCKDEP
 	bool "Lock dependency engine debugging"
 	depends on DEBUG_KERNEL && LOCKDEP
@@ -778,10 +811,22 @@ config RCU_CPU_STALL_DETECTOR
 	 CPUs are delaying the current grace period, but only when
 	 the grace period extends for excessive time periods.
 
-	 Say Y if you want RCU to perform such checks.
+	 Say N if you want to disable such checks.
+
+	 Say Y if you are unsure.
+
+config RCU_CPU_STALL_VERBOSE
+	bool "Print additional per-task information for RCU_CPU_STALL_DETECTOR"
+	depends on RCU_CPU_STALL_DETECTOR && TREE_PREEMPT_RCU
+	default y
+	help
+	 This option causes RCU to printk detailed per-task information
+	 for any tasks that are stalling the current RCU grace period.
 
 	 Say N if you are unsure.
 
+	 Say Y if you want to enable such checks.
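
[Editor's aside: the PROVE_RCU option added above wires RCU API checking into lockdep. The minimal sketch below (the foo struct and global_foo pointer are hypothetical, not part of this patch) shows the pattern it enforces: rcu_dereference() must be reached between rcu_read_lock() and rcu_read_unlock(), otherwise RCU-lockdep emits a splat, and keeps emitting them if PROVE_RCU_REPEATEDLY is also set.]

/*
 * Illustrative sketch only; foo/global_foo are hypothetical names.
 * With CONFIG_PROVE_RCU, lockdep complains when rcu_dereference()
 * runs outside an RCU read-side critical section.
 */
#include <linux/rcupdate.h>

struct foo {
	int val;
};

static struct foo *global_foo;

static int read_foo_val(void)
{
	struct foo *p;
	int val = -1;

	rcu_read_lock();			/* enter read-side critical section */
	p = rcu_dereference(global_foo);	/* checked: legal only under the lock */
	if (p)
		val = p->val;
	rcu_read_unlock();
	return val;
}
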
+
 config KPROBES_SANITY_TEST
 	bool "Kprobes sanity tests"
 	depends on DEBUG_KERNEL
@@ -853,8 +898,7 @@ config DEBUG_FORCE_WEAK_PER_CPU
 
 config LKDTM
 	tristate "Linux Kernel Dump Test Tool Module"
-	depends on DEBUG_KERNEL
-	depends on KPROBES
+	depends on DEBUG_FS
 	depends on BLOCK
 	default n
 	help
@@ -865,7 +909,19 @@ config LKDTM
 	called lkdtm.
 
 	Documentation on how to use the module can be found in
-	drivers/misc/lkdtm.c
+	Documentation/fault-injection/provoke-crashes.txt
+
+config CPU_NOTIFIER_ERROR_INJECT
+	tristate "CPU notifier error injection module"
+	depends on HOTPLUG_CPU && DEBUG_KERNEL
+	help
+	  This option provides a kernel module that can be used to test
+	  the error handling of the cpu notifiers
+
+	  To compile this code as a module, choose M here: the module will
+	  be called cpu-notifier-error-inject.
+
+	  If unsure, say N.
 
 config FAULT_INJECTION
 	bool "Fault-injection framework"
@@ -1008,10 +1064,10 @@ config DYNAMIC_DEBUG
 
 	  Usage:
 
-	  Dynamic debugging is controlled via the 'dynamic_debug/ddebug' file,
+	  Dynamic debugging is controlled via the 'dynamic_debug/control' file,
 	  which is contained in the 'debugfs' filesystem. Thus, the debugfs
 	  filesystem must first be mounted before making use of this feature.
-	  We refer the control file as: <debugfs>/dynamic_debug/ddebug. This
+	  We refer the control file as: <debugfs>/dynamic_debug/control. This
 	  file contains a list of the debug statements that can be enabled. The
 	  format for each line of the file is:
@@ -1026,7 +1082,7 @@ config DYNAMIC_DEBUG
 
 	  From a live system:
 
-		nullarbor:~ # cat <debugfs>/dynamic_debug/ddebug
+		nullarbor:~ # cat <debugfs>/dynamic_debug/control
 		# filename:lineno [module]function flags format
 		fs/aio.c:222 [aio]__put_ioctx - "__put_ioctx:\040freeing\040%p\012"
 		fs/aio.c:248 [aio]ioctx_alloc - "ENOMEM:\040nr_events\040too\040high\012"
@@ -1036,23 +1092,23 @@ config DYNAMIC_DEBUG
 
 		// enable the message at line 1603 of file svcsock.c
 		nullarbor:~ # echo -n 'file svcsock.c line 1603 +p' >
-						<debugfs>/dynamic_debug/ddebug
+						<debugfs>/dynamic_debug/control
 
 		// enable all the messages in file svcsock.c
 		nullarbor:~ # echo -n 'file svcsock.c +p' >
-						<debugfs>/dynamic_debug/ddebug
+						<debugfs>/dynamic_debug/control
 
 		// enable all the messages in the NFS server module
 		nullarbor:~ # echo -n 'module nfsd +p' >
-						<debugfs>/dynamic_debug/ddebug
+						<debugfs>/dynamic_debug/control
 
 		// enable all 12 messages in the function svc_process()
 		nullarbor:~ # echo -n 'func svc_process +p' >
-						<debugfs>/dynamic_debug/ddebug
+						<debugfs>/dynamic_debug/control
 
 		// disable all 12 messages in the function svc_process()
 		nullarbor:~ # echo -n 'func svc_process -p' >
-						<debugfs>/dynamic_debug/ddebug
+						<debugfs>/dynamic_debug/control
 
 	  See Documentation/dynamic-debug-howto.txt for additional information.
@@ -1067,6 +1123,13 @@ config DMA_API_DEBUG
 	  This option causes a performance degredation.  Use only if you want
 	  to debug device drivers. If unsure, say N.
 
+config ATOMIC64_SELFTEST
+	bool "Perform an atomic64_t self-test at boot"
+	help
+	  Enable this option to test the atomic64_t functions at boot.
+
+	  If unsure, say N.
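
[Editor's aside: the 'dynamic_debug/ddebug' to 'dynamic_debug/control' rename above applies to every pr_debug() callsite in the kernel. A hypothetical module sketch, with the "demo" name and messages purely illustrative and not from this patch: under CONFIG_DYNAMIC_DEBUG, each pr_debug() below is registered in <debugfs>/dynamic_debug/control and can be toggled at runtime, e.g. echo 'module demo +p' > <debugfs>/dynamic_debug/control.]

/*
 * Hypothetical example module showing dynamic_debug-controlled output.
 * Both pr_debug() callsites start disabled and are switched on per
 * file/module/function/line via the control file.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>

static int __init demo_init(void)
{
	pr_debug("demo loaded\n");	/* silent until '+p' is set for this site */
	return 0;
}

static void __exit demo_exit(void)
{
	pr_debug("demo unloaded\n");
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
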
+ source "samples/Kconfig" source "lib/Kconfig.kgdb" diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb index 9b5d1d7f2ef7..43cb93fa2651 100644 --- a/lib/Kconfig.kgdb +++ b/lib/Kconfig.kgdb @@ -3,7 +3,7 @@ config HAVE_ARCH_KGDB bool menuconfig KGDB - bool "KGDB: kernel debugging with remote gdb" + bool "KGDB: kernel debugger" depends on HAVE_ARCH_KGDB depends on DEBUG_KERNEL && EXPERIMENTAL help @@ -57,4 +57,26 @@ config KGDB_TESTS_BOOT_STRING information about other strings you could use beyond the default of V1F100. +config KGDB_LOW_LEVEL_TRAP + bool "KGDB: Allow debugging with traps in notifiers" + depends on X86 || MIPS + default n + help + This will add an extra call back to kgdb for the breakpoint + exception handler on which will will allow kgdb to step + through a notify handler. + +config KGDB_KDB + bool "KGDB_KDB: include kdb frontend for kgdb" + default n + help + KDB frontend for kernel + +config KDB_KEYBOARD + bool "KGDB_KDB: keyboard as input device" + depends on VT && KGDB_KDB + default n + help + KDB can use a PS/2 type keyboard for an input device + endif # KGDB diff --git a/lib/Makefile b/lib/Makefile index 3b0b4a696db9..0bfabba1bb32 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -21,7 +21,7 @@ lib-y += kobject.o kref.o klist.o obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ - string_helpers.o gcd.o list_sort.o + string_helpers.o gcd.o lcm.o list_sort.o uuid.o ifeq ($(CONFIG_DEBUG_KOBJECT),y) CFLAGS_kobject.o += -DDEBUG @@ -39,8 +39,12 @@ lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o lib-$(CONFIG_GENERIC_FIND_FIRST_BIT) += find_next_bit.o lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o obj-$(CONFIG_GENERIC_FIND_LAST_BIT) += find_last_bit.o + +CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS)) obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o + obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o +obj-$(CONFIG_BTREE) += btree.o obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o obj-$(CONFIG_DEBUG_LIST) += list_debug.o obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o @@ -81,11 +85,10 @@ obj-$(CONFIG_AUDIT_GENERIC) += audit.o obj-$(CONFIG_SWIOTLB) += swiotlb.o obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o +obj-$(CONFIG_CPU_NOTIFIER_ERROR_INJECT) += cpu-notifier-error-inject.o lib-$(CONFIG_GENERIC_BUG) += bug.o -obj-$(CONFIG_HAVE_LMB) += lmb.o - obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o obj-$(CONFIG_DYNAMIC_DEBUG) += dynamic_debug.o @@ -100,6 +103,8 @@ obj-$(CONFIG_GENERIC_CSUM) += checksum.o obj-$(CONFIG_GENERIC_ATOMIC64) += atomic64.o +obj-$(CONFIG_ATOMIC64_SELFTEST) += atomic64_test.o + hostprogs-y := gen_crc32table clean-files := crc32table.h diff --git a/lib/atomic64.c b/lib/atomic64.c index 8bee16ec7524..a21c12bc727c 100644 --- a/lib/atomic64.c +++ b/lib/atomic64.c @@ -162,12 +162,12 @@ int atomic64_add_unless(atomic64_t *v, long long a, long long u) { unsigned long flags; spinlock_t *lock = lock_addr(v); - int ret = 1; + int ret = 0; spin_lock_irqsave(lock, flags); if (v->counter != u) { v->counter += a; - ret = 0; + ret = 1; } spin_unlock_irqrestore(lock, flags); return ret; diff --git a/lib/atomic64_test.c b/lib/atomic64_test.c new file mode 100644 index 000000000000..250ed11d3ed2 --- /dev/null +++ b/lib/atomic64_test.c @@ -0,0 +1,166 @@ +/* + * Testsuite for atomic64_t functions + * + * Copyright © 2010 Luca Barbieri + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of 
the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <asm/atomic.h> + +#define INIT(c) do { atomic64_set(&v, c); r = c; } while (0) +static __init int test_atomic64(void) +{ + long long v0 = 0xaaa31337c001d00dLL; + long long v1 = 0xdeadbeefdeafcafeLL; + long long v2 = 0xfaceabadf00df001LL; + long long onestwos = 0x1111111122222222LL; + long long one = 1LL; + + atomic64_t v = ATOMIC64_INIT(v0); + long long r = v0; + BUG_ON(v.counter != r); + + atomic64_set(&v, v1); + r = v1; + BUG_ON(v.counter != r); + BUG_ON(atomic64_read(&v) != r); + + INIT(v0); + atomic64_add(onestwos, &v); + r += onestwos; + BUG_ON(v.counter != r); + + INIT(v0); + atomic64_add(-one, &v); + r += -one; + BUG_ON(v.counter != r); + + INIT(v0); + r += onestwos; + BUG_ON(atomic64_add_return(onestwos, &v) != r); + BUG_ON(v.counter != r); + + INIT(v0); + r += -one; + BUG_ON(atomic64_add_return(-one, &v) != r); + BUG_ON(v.counter != r); + + INIT(v0); + atomic64_sub(onestwos, &v); + r -= onestwos; + BUG_ON(v.counter != r); + + INIT(v0); + atomic64_sub(-one, &v); + r -= -one; + BUG_ON(v.counter != r); + + INIT(v0); + r -= onestwos; + BUG_ON(atomic64_sub_return(onestwos, &v) != r); + BUG_ON(v.counter != r); + + INIT(v0); + r -= -one; + BUG_ON(atomic64_sub_return(-one, &v) != r); + BUG_ON(v.counter != r); + + INIT(v0); + atomic64_inc(&v); + r += one; + BUG_ON(v.counter != r); + + INIT(v0); + r += one; + BUG_ON(atomic64_inc_return(&v) != r); + BUG_ON(v.counter != r); + + INIT(v0); + atomic64_dec(&v); + r -= one; + BUG_ON(v.counter != r); + + INIT(v0); + r -= one; + BUG_ON(atomic64_dec_return(&v) != r); + BUG_ON(v.counter != r); + + INIT(v0); + BUG_ON(atomic64_xchg(&v, v1) != v0); + r = v1; + BUG_ON(v.counter != r); + + INIT(v0); + BUG_ON(atomic64_cmpxchg(&v, v0, v1) != v0); + r = v1; + BUG_ON(v.counter != r); + + INIT(v0); + BUG_ON(atomic64_cmpxchg(&v, v2, v1) != v0); + BUG_ON(v.counter != r); + + INIT(v0); + BUG_ON(atomic64_add_unless(&v, one, v0)); + BUG_ON(v.counter != r); + + INIT(v0); + BUG_ON(!atomic64_add_unless(&v, one, v1)); + r += one; + BUG_ON(v.counter != r); + +#if defined(CONFIG_X86) || defined(CONFIG_MIPS) || defined(CONFIG_PPC) || \ + defined(CONFIG_S390) || defined(_ASM_GENERIC_ATOMIC64_H) + INIT(onestwos); + BUG_ON(atomic64_dec_if_positive(&v) != (onestwos - 1)); + r -= one; + BUG_ON(v.counter != r); + + INIT(0); + BUG_ON(atomic64_dec_if_positive(&v) != -one); + BUG_ON(v.counter != r); + + INIT(-one); + BUG_ON(atomic64_dec_if_positive(&v) != (-one - one)); + BUG_ON(v.counter != r); +#else +#warning Please implement atomic64_dec_if_positive for your architecture, and add it to the IF above +#endif + + INIT(onestwos); + BUG_ON(!atomic64_inc_not_zero(&v)); + r += one; + BUG_ON(v.counter != r); + + INIT(0); + BUG_ON(atomic64_inc_not_zero(&v)); + BUG_ON(v.counter != r); + + INIT(-one); + BUG_ON(!atomic64_inc_not_zero(&v)); + r += one; + BUG_ON(v.counter != r); + +#ifdef CONFIG_X86 + printk(KERN_INFO "atomic64 test passed for %s platform %s CX8 and %s SSE\n", +#ifdef CONFIG_X86_64 + "x86-64", +#elif defined(CONFIG_X86_CMPXCHG64) + "i586+", +#else + "i386+", +#endif + boot_cpu_has(X86_FEATURE_CX8) ? "with" : "without", + boot_cpu_has(X86_FEATURE_XMM) ? 
"with" : "without"); +#else + printk(KERN_INFO "atomic64 test passed\n"); +#endif + + return 0; +} + +core_initcall(test_atomic64); diff --git a/lib/bitmap.c b/lib/bitmap.c index 11bf49750583..ffb78c916ccd 100644 --- a/lib/bitmap.c +++ b/lib/bitmap.c @@ -487,7 +487,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen, EXPORT_SYMBOL(__bitmap_parse); /** - * bitmap_parse_user() + * bitmap_parse_user - convert an ASCII hex string in a user buffer into a bitmap * * @ubuf: pointer to user buffer containing string. * @ulen: buffer size in bytes. If string is smaller than this @@ -619,7 +619,7 @@ int bitmap_parselist(const char *bp, unsigned long *maskp, int nmaskbits) EXPORT_SYMBOL(bitmap_parselist); /** - * bitmap_pos_to_ord(buf, pos, bits) + * bitmap_pos_to_ord - find ordinal of set bit at given position in bitmap * @buf: pointer to a bitmap * @pos: a bit position in @buf (0 <= @pos < @bits) * @bits: number of valid bit positions in @buf @@ -655,7 +655,7 @@ static int bitmap_pos_to_ord(const unsigned long *buf, int pos, int bits) } /** - * bitmap_ord_to_pos(buf, ord, bits) + * bitmap_ord_to_pos - find position of n-th set bit in bitmap * @buf: pointer to bitmap * @ord: ordinal bit position (n-th set bit, n >= 0) * @bits: number of valid bit positions in @buf @@ -733,10 +733,9 @@ void bitmap_remap(unsigned long *dst, const unsigned long *src, bitmap_zero(dst, bits); w = bitmap_weight(new, bits); - for (oldbit = find_first_bit(src, bits); - oldbit < bits; - oldbit = find_next_bit(src, bits, oldbit + 1)) { + for_each_set_bit(oldbit, src, bits) { int n = bitmap_pos_to_ord(old, oldbit, bits); + if (n < 0 || w == 0) set_bit(oldbit, dst); /* identity map */ else @@ -903,9 +902,7 @@ void bitmap_onto(unsigned long *dst, const unsigned long *orig, */ m = 0; - for (n = find_first_bit(relmap, bits); - n < bits; - n = find_next_bit(relmap, bits, n + 1)) { + for_each_set_bit(n, relmap, bits) { /* m == bitmap_pos_to_ord(relmap, n, bits) */ if (test_bit(m, orig)) set_bit(n, dst); @@ -934,9 +931,7 @@ void bitmap_fold(unsigned long *dst, const unsigned long *orig, return; bitmap_zero(dst, bits); - for (oldbit = find_first_bit(orig, bits); - oldbit < bits; - oldbit = find_next_bit(orig, bits, oldbit + 1)) + for_each_set_bit(oldbit, orig, bits) set_bit(oldbit % sz, dst); } EXPORT_SYMBOL(bitmap_fold); diff --git a/lib/btree.c b/lib/btree.c new file mode 100644 index 000000000000..c9c6f0351526 --- /dev/null +++ b/lib/btree.c @@ -0,0 +1,798 @@ +/* + * lib/btree.c - Simple In-memory B+Tree + * + * As should be obvious for Linux kernel code, license is GPLv2 + * + * Copyright (c) 2007-2008 Joern Engel <joern@logfs.org> + * Bits and pieces stolen from Peter Zijlstra's code, which is + * Copyright 2007, Red Hat Inc. Peter Zijlstra <pzijlstr@redhat.com> + * GPLv2 + * + * see http://programming.kicks-ass.net/kernel-patches/vma_lookup/btree.patch + * + * A relatively simple B+Tree implementation. I have written it as a learning + * excercise to understand how B+Trees work. Turned out to be useful as well. + * + * B+Trees can be used similar to Linux radix trees (which don't have anything + * in common with textbook radix trees, beware). Prerequisite for them working + * well is that access to a random tree node is much faster than a large number + * of operations within each node. + * + * Disks have fulfilled the prerequisite for a long time. More recently DRAM + * has gained similar properties, as memory access times, when measured in cpu + * cycles, have increased. 
Cacheline sizes have increased as well, which also + * helps B+Trees. + * + * Compared to radix trees, B+Trees are more efficient when dealing with a + * sparsely populated address space. Between 25% and 50% of the memory is + * occupied with valid pointers. When densely populated, radix trees contain + * ~98% pointers - hard to beat. Very sparse radix trees contain only ~2% + * pointers. + * + * This particular implementation stores pointers identified by a long value. + * Storing NULL pointers is illegal, lookup will return NULL when no entry + * was found. + * + * A tricks was used that is not commonly found in textbooks. The lowest + * values are to the right, not to the left. All used slots within a node + * are on the left, all unused slots contain NUL values. Most operations + * simply loop once over all slots and terminate on the first NUL. + */ + +#include <linux/btree.h> +#include <linux/cache.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/module.h> + +#define MAX(a, b) ((a) > (b) ? (a) : (b)) +#define NODESIZE MAX(L1_CACHE_BYTES, 128) + +struct btree_geo { + int keylen; + int no_pairs; + int no_longs; +}; + +struct btree_geo btree_geo32 = { + .keylen = 1, + .no_pairs = NODESIZE / sizeof(long) / 2, + .no_longs = NODESIZE / sizeof(long) / 2, +}; +EXPORT_SYMBOL_GPL(btree_geo32); + +#define LONG_PER_U64 (64 / BITS_PER_LONG) +struct btree_geo btree_geo64 = { + .keylen = LONG_PER_U64, + .no_pairs = NODESIZE / sizeof(long) / (1 + LONG_PER_U64), + .no_longs = LONG_PER_U64 * (NODESIZE / sizeof(long) / (1 + LONG_PER_U64)), +}; +EXPORT_SYMBOL_GPL(btree_geo64); + +struct btree_geo btree_geo128 = { + .keylen = 2 * LONG_PER_U64, + .no_pairs = NODESIZE / sizeof(long) / (1 + 2 * LONG_PER_U64), + .no_longs = 2 * LONG_PER_U64 * (NODESIZE / sizeof(long) / (1 + 2 * LONG_PER_U64)), +}; +EXPORT_SYMBOL_GPL(btree_geo128); + +static struct kmem_cache *btree_cachep; + +void *btree_alloc(gfp_t gfp_mask, void *pool_data) +{ + return kmem_cache_alloc(btree_cachep, gfp_mask); +} +EXPORT_SYMBOL_GPL(btree_alloc); + +void btree_free(void *element, void *pool_data) +{ + kmem_cache_free(btree_cachep, element); +} +EXPORT_SYMBOL_GPL(btree_free); + +static unsigned long *btree_node_alloc(struct btree_head *head, gfp_t gfp) +{ + unsigned long *node; + + node = mempool_alloc(head->mempool, gfp); + if (likely(node)) + memset(node, 0, NODESIZE); + return node; +} + +static int longcmp(const unsigned long *l1, const unsigned long *l2, size_t n) +{ + size_t i; + + for (i = 0; i < n; i++) { + if (l1[i] < l2[i]) + return -1; + if (l1[i] > l2[i]) + return 1; + } + return 0; +} + +static unsigned long *longcpy(unsigned long *dest, const unsigned long *src, + size_t n) +{ + size_t i; + + for (i = 0; i < n; i++) + dest[i] = src[i]; + return dest; +} + +static unsigned long *longset(unsigned long *s, unsigned long c, size_t n) +{ + size_t i; + + for (i = 0; i < n; i++) + s[i] = c; + return s; +} + +static void dec_key(struct btree_geo *geo, unsigned long *key) +{ + unsigned long val; + int i; + + for (i = geo->keylen - 1; i >= 0; i--) { + val = key[i]; + key[i] = val - 1; + if (val) + break; + } +} + +static unsigned long *bkey(struct btree_geo *geo, unsigned long *node, int n) +{ + return &node[n * geo->keylen]; +} + +static void *bval(struct btree_geo *geo, unsigned long *node, int n) +{ + return (void *)node[geo->no_longs + n]; +} + +static void setkey(struct btree_geo *geo, unsigned long *node, int n, + unsigned long *key) +{ + longcpy(bkey(geo, node, n), key, geo->keylen); +} + +static void 
setval(struct btree_geo *geo, unsigned long *node, int n, + void *val) +{ + node[geo->no_longs + n] = (unsigned long) val; +} + +static void clearpair(struct btree_geo *geo, unsigned long *node, int n) +{ + longset(bkey(geo, node, n), 0, geo->keylen); + node[geo->no_longs + n] = 0; +} + +static inline void __btree_init(struct btree_head *head) +{ + head->node = NULL; + head->height = 0; +} + +void btree_init_mempool(struct btree_head *head, mempool_t *mempool) +{ + __btree_init(head); + head->mempool = mempool; +} +EXPORT_SYMBOL_GPL(btree_init_mempool); + +int btree_init(struct btree_head *head) +{ + __btree_init(head); + head->mempool = mempool_create(0, btree_alloc, btree_free, NULL); + if (!head->mempool) + return -ENOMEM; + return 0; +} +EXPORT_SYMBOL_GPL(btree_init); + +void btree_destroy(struct btree_head *head) +{ + mempool_destroy(head->mempool); + head->mempool = NULL; +} +EXPORT_SYMBOL_GPL(btree_destroy); + +void *btree_last(struct btree_head *head, struct btree_geo *geo, + unsigned long *key) +{ + int height = head->height; + unsigned long *node = head->node; + + if (height == 0) + return NULL; + + for ( ; height > 1; height--) + node = bval(geo, node, 0); + + longcpy(key, bkey(geo, node, 0), geo->keylen); + return bval(geo, node, 0); +} +EXPORT_SYMBOL_GPL(btree_last); + +static int keycmp(struct btree_geo *geo, unsigned long *node, int pos, + unsigned long *key) +{ + return longcmp(bkey(geo, node, pos), key, geo->keylen); +} + +static int keyzero(struct btree_geo *geo, unsigned long *key) +{ + int i; + + for (i = 0; i < geo->keylen; i++) + if (key[i]) + return 0; + + return 1; +} + +void *btree_lookup(struct btree_head *head, struct btree_geo *geo, + unsigned long *key) +{ + int i, height = head->height; + unsigned long *node = head->node; + + if (height == 0) + return NULL; + + for ( ; height > 1; height--) { + for (i = 0; i < geo->no_pairs; i++) + if (keycmp(geo, node, i, key) <= 0) + break; + if (i == geo->no_pairs) + return NULL; + node = bval(geo, node, i); + if (!node) + return NULL; + } + + if (!node) + return NULL; + + for (i = 0; i < geo->no_pairs; i++) + if (keycmp(geo, node, i, key) == 0) + return bval(geo, node, i); + return NULL; +} +EXPORT_SYMBOL_GPL(btree_lookup); + +int btree_update(struct btree_head *head, struct btree_geo *geo, + unsigned long *key, void *val) +{ + int i, height = head->height; + unsigned long *node = head->node; + + if (height == 0) + return -ENOENT; + + for ( ; height > 1; height--) { + for (i = 0; i < geo->no_pairs; i++) + if (keycmp(geo, node, i, key) <= 0) + break; + if (i == geo->no_pairs) + return -ENOENT; + node = bval(geo, node, i); + if (!node) + return -ENOENT; + } + + if (!node) + return -ENOENT; + + for (i = 0; i < geo->no_pairs; i++) + if (keycmp(geo, node, i, key) == 0) { + setval(geo, node, i, val); + return 0; + } + return -ENOENT; +} +EXPORT_SYMBOL_GPL(btree_update); + +/* + * Usually this function is quite similar to normal lookup. But the key of + * a parent node may be smaller than the smallest key of all its siblings. + * In such a case we cannot just return NULL, as we have only proven that no + * key smaller than __key, but larger than this parent key exists. + * So we set __key to the parent key and retry. We have to use the smallest + * such parent key, which is the last parent key we encountered. 
+ */ +void *btree_get_prev(struct btree_head *head, struct btree_geo *geo, + unsigned long *__key) +{ + int i, height; + unsigned long *node, *oldnode; + unsigned long *retry_key = NULL, key[geo->keylen]; + + if (keyzero(geo, __key)) + return NULL; + + if (head->height == 0) + return NULL; +retry: + longcpy(key, __key, geo->keylen); + dec_key(geo, key); + + node = head->node; + for (height = head->height ; height > 1; height--) { + for (i = 0; i < geo->no_pairs; i++) + if (keycmp(geo, node, i, key) <= 0) + break; + if (i == geo->no_pairs) + goto miss; + oldnode = node; + node = bval(geo, node, i); + if (!node) + goto miss; + retry_key = bkey(geo, oldnode, i); + } + + if (!node) + goto miss; + + for (i = 0; i < geo->no_pairs; i++) { + if (keycmp(geo, node, i, key) <= 0) { + if (bval(geo, node, i)) { + longcpy(__key, bkey(geo, node, i), geo->keylen); + return bval(geo, node, i); + } else + goto miss; + } + } +miss: + if (retry_key) { + __key = retry_key; + retry_key = NULL; + goto retry; + } + return NULL; +} + +static int getpos(struct btree_geo *geo, unsigned long *node, + unsigned long *key) +{ + int i; + + for (i = 0; i < geo->no_pairs; i++) { + if (keycmp(geo, node, i, key) <= 0) + break; + } + return i; +} + +static int getfill(struct btree_geo *geo, unsigned long *node, int start) +{ + int i; + + for (i = start; i < geo->no_pairs; i++) + if (!bval(geo, node, i)) + break; + return i; +} + +/* + * locate the correct leaf node in the btree + */ +static unsigned long *find_level(struct btree_head *head, struct btree_geo *geo, + unsigned long *key, int level) +{ + unsigned long *node = head->node; + int i, height; + + for (height = head->height; height > level; height--) { + for (i = 0; i < geo->no_pairs; i++) + if (keycmp(geo, node, i, key) <= 0) + break; + + if ((i == geo->no_pairs) || !bval(geo, node, i)) { + /* right-most key is too large, update it */ + /* FIXME: If the right-most key on higher levels is + * always zero, this wouldn't be necessary. 
*/ + i--; + setkey(geo, node, i, key); + } + BUG_ON(i < 0); + node = bval(geo, node, i); + } + BUG_ON(!node); + return node; +} + +static int btree_grow(struct btree_head *head, struct btree_geo *geo, + gfp_t gfp) +{ + unsigned long *node; + int fill; + + node = btree_node_alloc(head, gfp); + if (!node) + return -ENOMEM; + if (head->node) { + fill = getfill(geo, head->node, 0); + setkey(geo, node, 0, bkey(geo, head->node, fill - 1)); + setval(geo, node, 0, head->node); + } + head->node = node; + head->height++; + return 0; +} + +static void btree_shrink(struct btree_head *head, struct btree_geo *geo) +{ + unsigned long *node; + int fill; + + if (head->height <= 1) + return; + + node = head->node; + fill = getfill(geo, node, 0); + BUG_ON(fill > 1); + head->node = bval(geo, node, 0); + head->height--; + mempool_free(node, head->mempool); +} + +static int btree_insert_level(struct btree_head *head, struct btree_geo *geo, + unsigned long *key, void *val, int level, + gfp_t gfp) +{ + unsigned long *node; + int i, pos, fill, err; + + BUG_ON(!val); + if (head->height < level) { + err = btree_grow(head, geo, gfp); + if (err) + return err; + } + +retry: + node = find_level(head, geo, key, level); + pos = getpos(geo, node, key); + fill = getfill(geo, node, pos); + /* two identical keys are not allowed */ + BUG_ON(pos < fill && keycmp(geo, node, pos, key) == 0); + + if (fill == geo->no_pairs) { + /* need to split node */ + unsigned long *new; + + new = btree_node_alloc(head, gfp); + if (!new) + return -ENOMEM; + err = btree_insert_level(head, geo, + bkey(geo, node, fill / 2 - 1), + new, level + 1, gfp); + if (err) { + mempool_free(new, head->mempool); + return err; + } + for (i = 0; i < fill / 2; i++) { + setkey(geo, new, i, bkey(geo, node, i)); + setval(geo, new, i, bval(geo, node, i)); + setkey(geo, node, i, bkey(geo, node, i + fill / 2)); + setval(geo, node, i, bval(geo, node, i + fill / 2)); + clearpair(geo, node, i + fill / 2); + } + if (fill & 1) { + setkey(geo, node, i, bkey(geo, node, fill - 1)); + setval(geo, node, i, bval(geo, node, fill - 1)); + clearpair(geo, node, fill - 1); + } + goto retry; + } + BUG_ON(fill >= geo->no_pairs); + + /* shift and insert */ + for (i = fill; i > pos; i--) { + setkey(geo, node, i, bkey(geo, node, i - 1)); + setval(geo, node, i, bval(geo, node, i - 1)); + } + setkey(geo, node, pos, key); + setval(geo, node, pos, val); + + return 0; +} + +int btree_insert(struct btree_head *head, struct btree_geo *geo, + unsigned long *key, void *val, gfp_t gfp) +{ + return btree_insert_level(head, geo, key, val, 1, gfp); +} +EXPORT_SYMBOL_GPL(btree_insert); + +static void *btree_remove_level(struct btree_head *head, struct btree_geo *geo, + unsigned long *key, int level); +static void merge(struct btree_head *head, struct btree_geo *geo, int level, + unsigned long *left, int lfill, + unsigned long *right, int rfill, + unsigned long *parent, int lpos) +{ + int i; + + for (i = 0; i < rfill; i++) { + /* Move all keys to the left */ + setkey(geo, left, lfill + i, bkey(geo, right, i)); + setval(geo, left, lfill + i, bval(geo, right, i)); + } + /* Exchange left and right child in parent */ + setval(geo, parent, lpos, right); + setval(geo, parent, lpos + 1, left); + /* Remove left (formerly right) child from parent */ + btree_remove_level(head, geo, bkey(geo, parent, lpos), level + 1); + mempool_free(right, head->mempool); +} + +static void rebalance(struct btree_head *head, struct btree_geo *geo, + unsigned long *key, int level, unsigned long *child, int fill) +{ + unsigned long 
*parent, *left = NULL, *right = NULL; + int i, no_left, no_right; + + if (fill == 0) { + /* Because we don't steal entries from a neigbour, this case + * can happen. Parent node contains a single child, this + * node, so merging with a sibling never happens. + */ + btree_remove_level(head, geo, key, level + 1); + mempool_free(child, head->mempool); + return; + } + + parent = find_level(head, geo, key, level + 1); + i = getpos(geo, parent, key); + BUG_ON(bval(geo, parent, i) != child); + + if (i > 0) { + left = bval(geo, parent, i - 1); + no_left = getfill(geo, left, 0); + if (fill + no_left <= geo->no_pairs) { + merge(head, geo, level, + left, no_left, + child, fill, + parent, i - 1); + return; + } + } + if (i + 1 < getfill(geo, parent, i)) { + right = bval(geo, parent, i + 1); + no_right = getfill(geo, right, 0); + if (fill + no_right <= geo->no_pairs) { + merge(head, geo, level, + child, fill, + right, no_right, + parent, i); + return; + } + } + /* + * We could also try to steal one entry from the left or right + * neighbor. By not doing so we changed the invariant from + * "all nodes are at least half full" to "no two neighboring + * nodes can be merged". Which means that the average fill of + * all nodes is still half or better. + */ +} + +static void *btree_remove_level(struct btree_head *head, struct btree_geo *geo, + unsigned long *key, int level) +{ + unsigned long *node; + int i, pos, fill; + void *ret; + + if (level > head->height) { + /* we recursed all the way up */ + head->height = 0; + head->node = NULL; + return NULL; + } + + node = find_level(head, geo, key, level); + pos = getpos(geo, node, key); + fill = getfill(geo, node, pos); + if ((level == 1) && (keycmp(geo, node, pos, key) != 0)) + return NULL; + ret = bval(geo, node, pos); + + /* remove and shift */ + for (i = pos; i < fill - 1; i++) { + setkey(geo, node, i, bkey(geo, node, i + 1)); + setval(geo, node, i, bval(geo, node, i + 1)); + } + clearpair(geo, node, fill - 1); + + if (fill - 1 < geo->no_pairs / 2) { + if (level < head->height) + rebalance(head, geo, key, level, node, fill - 1); + else if (fill - 1 == 1) + btree_shrink(head, geo); + } + + return ret; +} + +void *btree_remove(struct btree_head *head, struct btree_geo *geo, + unsigned long *key) +{ + if (head->height == 0) + return NULL; + + return btree_remove_level(head, geo, key, 1); +} +EXPORT_SYMBOL_GPL(btree_remove); + +int btree_merge(struct btree_head *target, struct btree_head *victim, + struct btree_geo *geo, gfp_t gfp) +{ + unsigned long key[geo->keylen]; + unsigned long dup[geo->keylen]; + void *val; + int err; + + BUG_ON(target == victim); + + if (!(target->node)) { + /* target is empty, just copy fields over */ + target->node = victim->node; + target->height = victim->height; + __btree_init(victim); + return 0; + } + + /* TODO: This needs some optimizations. Currently we do three tree + * walks to remove a single object from the victim. + */ + for (;;) { + if (!btree_last(victim, geo, key)) + break; + val = btree_lookup(victim, geo, key); + err = btree_insert(target, geo, key, val, gfp); + if (err) + return err; + /* We must make a copy of the key, as the original will get + * mangled inside btree_remove. 
*/ + longcpy(dup, key, geo->keylen); + btree_remove(victim, geo, dup); + } + return 0; +} +EXPORT_SYMBOL_GPL(btree_merge); + +static size_t __btree_for_each(struct btree_head *head, struct btree_geo *geo, + unsigned long *node, unsigned long opaque, + void (*func)(void *elem, unsigned long opaque, + unsigned long *key, size_t index, + void *func2), + void *func2, int reap, int height, size_t count) +{ + int i; + unsigned long *child; + + for (i = 0; i < geo->no_pairs; i++) { + child = bval(geo, node, i); + if (!child) + break; + if (height > 1) + count = __btree_for_each(head, geo, child, opaque, + func, func2, reap, height - 1, count); + else + func(child, opaque, bkey(geo, node, i), count++, + func2); + } + if (reap) + mempool_free(node, head->mempool); + return count; +} + +static void empty(void *elem, unsigned long opaque, unsigned long *key, + size_t index, void *func2) +{ +} + +void visitorl(void *elem, unsigned long opaque, unsigned long *key, + size_t index, void *__func) +{ + visitorl_t func = __func; + + func(elem, opaque, *key, index); +} +EXPORT_SYMBOL_GPL(visitorl); + +void visitor32(void *elem, unsigned long opaque, unsigned long *__key, + size_t index, void *__func) +{ + visitor32_t func = __func; + u32 *key = (void *)__key; + + func(elem, opaque, *key, index); +} +EXPORT_SYMBOL_GPL(visitor32); + +void visitor64(void *elem, unsigned long opaque, unsigned long *__key, + size_t index, void *__func) +{ + visitor64_t func = __func; + u64 *key = (void *)__key; + + func(elem, opaque, *key, index); +} +EXPORT_SYMBOL_GPL(visitor64); + +void visitor128(void *elem, unsigned long opaque, unsigned long *__key, + size_t index, void *__func) +{ + visitor128_t func = __func; + u64 *key = (void *)__key; + + func(elem, opaque, key[0], key[1], index); +} +EXPORT_SYMBOL_GPL(visitor128); + +size_t btree_visitor(struct btree_head *head, struct btree_geo *geo, + unsigned long opaque, + void (*func)(void *elem, unsigned long opaque, + unsigned long *key, + size_t index, void *func2), + void *func2) +{ + size_t count = 0; + + if (!func2) + func = empty; + if (head->node) + count = __btree_for_each(head, geo, head->node, opaque, func, + func2, 0, head->height, 0); + return count; +} +EXPORT_SYMBOL_GPL(btree_visitor); + +size_t btree_grim_visitor(struct btree_head *head, struct btree_geo *geo, + unsigned long opaque, + void (*func)(void *elem, unsigned long opaque, + unsigned long *key, + size_t index, void *func2), + void *func2) +{ + size_t count = 0; + + if (!func2) + func = empty; + if (head->node) + count = __btree_for_each(head, geo, head->node, opaque, func, + func2, 1, head->height, 0); + __btree_init(head); + return count; +} +EXPORT_SYMBOL_GPL(btree_grim_visitor); + +static int __init btree_module_init(void) +{ + btree_cachep = kmem_cache_create("btree_node", NODESIZE, 0, + SLAB_HWCACHE_ALIGN, NULL); + return 0; +} + +static void __exit btree_module_exit(void) +{ + kmem_cache_destroy(btree_cachep); +} + +/* If core code starts using btree, initialization should happen even earlier */ +module_init(btree_module_init); +module_exit(btree_module_exit); + +MODULE_AUTHOR("Joern Engel <joern@logfs.org>"); +MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>"); +MODULE_LICENSE("GPL"); diff --git a/lib/bug.c b/lib/bug.c index 300e41afbf97..f13daf435211 100644 --- a/lib/bug.c +++ b/lib/bug.c @@ -165,7 +165,7 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs) (void *)bugaddr); show_regs(regs); - add_taint(TAINT_WARN); + add_taint(BUG_GET_TAINT(bug)); return 
BUG_TRAP_TYPE_WARN; } diff --git a/lib/cpu-notifier-error-inject.c b/lib/cpu-notifier-error-inject.c new file mode 100644 index 000000000000..4dc20321b0d5 --- /dev/null +++ b/lib/cpu-notifier-error-inject.c @@ -0,0 +1,63 @@ +#include <linux/kernel.h> +#include <linux/cpu.h> +#include <linux/module.h> +#include <linux/notifier.h> + +static int priority; +static int cpu_up_prepare_error; +static int cpu_down_prepare_error; + +module_param(priority, int, 0); +MODULE_PARM_DESC(priority, "specify cpu notifier priority"); + +module_param(cpu_up_prepare_error, int, 0644); +MODULE_PARM_DESC(cpu_up_prepare_error, + "specify error code to inject CPU_UP_PREPARE action"); + +module_param(cpu_down_prepare_error, int, 0644); +MODULE_PARM_DESC(cpu_down_prepare_error, + "specify error code to inject CPU_DOWN_PREPARE action"); + +static int err_inject_cpu_callback(struct notifier_block *nfb, + unsigned long action, void *hcpu) +{ + int err = 0; + + switch (action) { + case CPU_UP_PREPARE: + case CPU_UP_PREPARE_FROZEN: + err = cpu_up_prepare_error; + break; + case CPU_DOWN_PREPARE: + case CPU_DOWN_PREPARE_FROZEN: + err = cpu_down_prepare_error; + break; + } + if (err) + printk(KERN_INFO "Injecting error (%d) at cpu notifier\n", err); + + return notifier_from_errno(err); +} + +static struct notifier_block err_inject_cpu_notifier = { + .notifier_call = err_inject_cpu_callback, +}; + +static int err_inject_init(void) +{ + err_inject_cpu_notifier.priority = priority; + + return register_hotcpu_notifier(&err_inject_cpu_notifier); +} + +static void err_inject_exit(void) +{ + unregister_hotcpu_notifier(&err_inject_cpu_notifier); +} + +module_init(err_inject_init); +module_exit(err_inject_exit); + +MODULE_DESCRIPTION("CPU notifier error injection module"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Akinobu Mita <akinobu.mita@gmail.com>"); diff --git a/lib/cpumask.c b/lib/cpumask.c index 7bb4142a502f..05d6aca7fc19 100644 --- a/lib/cpumask.c +++ b/lib/cpumask.c @@ -1,3 +1,4 @@ +#include <linux/slab.h> #include <linux/kernel.h> #include <linux/bitops.h> #include <linux/cpumask.h> diff --git a/lib/crc32.c b/lib/crc32.c index 02e3b31b3a79..4855995fcde9 100644 --- a/lib/crc32.c +++ b/lib/crc32.c @@ -25,16 +25,19 @@ #include <linux/module.h> #include <linux/compiler.h> #include <linux/types.h> -#include <linux/slab.h> #include <linux/init.h> #include <asm/atomic.h> #include "crc32defs.h" #if CRC_LE_BITS == 8 -#define tole(x) __constant_cpu_to_le32(x) -#define tobe(x) __constant_cpu_to_be32(x) +# define tole(x) __constant_cpu_to_le32(x) #else -#define tole(x) (x) -#define tobe(x) (x) +# define tole(x) (x) +#endif + +#if CRC_BE_BITS == 8 +# define tobe(x) __constant_cpu_to_be32(x) +#else +# define tobe(x) (x) #endif #include "crc32table.h" @@ -45,33 +48,37 @@ MODULE_LICENSE("GPL"); #if CRC_LE_BITS == 8 || CRC_BE_BITS == 8 static inline u32 -crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 *tab) +crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256]) { # ifdef __LITTLE_ENDIAN -# define DO_CRC(x) crc = tab[(crc ^ (x)) & 255 ] ^ (crc >> 8) +# define DO_CRC(x) crc = tab[0][(crc ^ (x)) & 255] ^ (crc >> 8) +# define DO_CRC4 crc = tab[3][(crc) & 255] ^ \ + tab[2][(crc >> 8) & 255] ^ \ + tab[1][(crc >> 16) & 255] ^ \ + tab[0][(crc >> 24) & 255] # else -# define DO_CRC(x) crc = tab[((crc >> 24) ^ (x)) & 255] ^ (crc << 8) +# define DO_CRC(x) crc = tab[0][((crc >> 24) ^ (x)) & 255] ^ (crc << 8) +# define DO_CRC4 crc = tab[0][(crc) & 255] ^ \ + tab[1][(crc >> 8) & 255] ^ \ + tab[2][(crc >> 16) 
& 255] ^ \ + tab[3][(crc >> 24) & 255] # endif - const u32 *b = (const u32 *)buf; + const u32 *b; size_t rem_len; /* Align it */ - if (unlikely((long)b & 3 && len)) { - u8 *p = (u8 *)b; + if (unlikely((long)buf & 3 && len)) { do { - DO_CRC(*p++); - } while ((--len) && ((long)p)&3); - b = (u32 *)p; + DO_CRC(*buf++); + } while ((--len) && ((long)buf)&3); } rem_len = len & 3; /* load data 32 bits wide, xor data 32 bits wide. */ len = len >> 2; + b = (const u32 *)buf; for (--b; len; --len) { crc ^= *++b; /* use pre increment for speed */ - DO_CRC(0); - DO_CRC(0); - DO_CRC(0); - DO_CRC(0); + DO_CRC4; } len = rem_len; /* And the last few bytes */ @@ -82,6 +89,8 @@ crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 *tab) } while (--len); } return crc; +#undef DO_CRC +#undef DO_CRC4 } #endif /** @@ -114,14 +123,11 @@ u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len) u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len) { # if CRC_LE_BITS == 8 - const u32 *tab = crc32table_le; + const u32 (*tab)[] = crc32table_le; crc = __cpu_to_le32(crc); crc = crc32_body(crc, p, len, tab); return __le32_to_cpu(crc); -#undef ENDIAN_SHIFT -#undef DO_CRC - # elif CRC_LE_BITS == 4 while (len--) { crc ^= *p++; @@ -174,14 +180,11 @@ u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len) u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len) { # if CRC_BE_BITS == 8 - const u32 *tab = crc32table_be; + const u32 (*tab)[] = crc32table_be; crc = __cpu_to_be32(crc); crc = crc32_body(crc, p, len, tab); return __be32_to_cpu(crc); -#undef ENDIAN_SHIFT -#undef DO_CRC - # elif CRC_BE_BITS == 4 while (len--) { crc ^= *p++ << 24; diff --git a/lib/debug_locks.c b/lib/debug_locks.c index bc3b11731b9c..5bf0020b9248 100644 --- a/lib/debug_locks.c +++ b/lib/debug_locks.c @@ -23,6 +23,7 @@ * shut up after that. 
*/ int debug_locks = 1; +EXPORT_SYMBOL_GPL(debug_locks); /* * The locking-testsuite uses <debug_locks_silent> to get a diff --git a/lib/debugobjects.c b/lib/debugobjects.c index a9a8996d286a..deebcc57d4e6 100644 --- a/lib/debugobjects.c +++ b/lib/debugobjects.c @@ -12,6 +12,7 @@ #include <linux/sched.h> #include <linux/seq_file.h> #include <linux/debugfs.h> +#include <linux/slab.h> #include <linux/hash.h> #define ODEBUG_HASH_BITS 14 @@ -140,6 +141,7 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr) obj->object = addr; obj->descr = descr; obj->state = ODEBUG_STATE_NONE; + obj->astate = 0; hlist_del(&obj->node); hlist_add_head(&obj->node, &b->list); @@ -251,8 +253,10 @@ static void debug_print_object(struct debug_obj *obj, char *msg) if (limit < 5 && obj->descr != descr_test) { limit++; - WARN(1, KERN_ERR "ODEBUG: %s %s object type: %s\n", msg, - obj_states[obj->state], obj->descr->name); + WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) " + "object type: %s\n", + msg, obj_states[obj->state], obj->astate, + obj->descr->name); } debug_objects_warnings++; } @@ -446,7 +450,10 @@ void debug_object_deactivate(void *addr, struct debug_obj_descr *descr) case ODEBUG_STATE_INIT: case ODEBUG_STATE_INACTIVE: case ODEBUG_STATE_ACTIVE: - obj->state = ODEBUG_STATE_INACTIVE; + if (!obj->astate) + obj->state = ODEBUG_STATE_INACTIVE; + else + debug_print_object(obj, "deactivate"); break; case ODEBUG_STATE_DESTROYED: @@ -552,6 +559,53 @@ out_unlock: raw_spin_unlock_irqrestore(&db->lock, flags); } +/** + * debug_object_active_state - debug checks object usage state machine + * @addr: address of the object + * @descr: pointer to an object specific debug description structure + * @expect: expected state + * @next: state to move to if expected state is found + */ +void +debug_object_active_state(void *addr, struct debug_obj_descr *descr, + unsigned int expect, unsigned int next) +{ + struct debug_bucket *db; + struct debug_obj *obj; + unsigned long flags; + + if (!debug_objects_enabled) + return; + + db = get_bucket((unsigned long) addr); + + raw_spin_lock_irqsave(&db->lock, flags); + + obj = lookup_object(addr, db); + if (obj) { + switch (obj->state) { + case ODEBUG_STATE_ACTIVE: + if (obj->astate == expect) + obj->astate = next; + else + debug_print_object(obj, "active_state"); + break; + + default: + debug_print_object(obj, "active_state"); + break; + } + } else { + struct debug_obj o = { .object = addr, + .state = ODEBUG_STATE_NOTAVAILABLE, + .descr = descr }; + + debug_print_object(&o, "active_state"); + } + + raw_spin_unlock_irqrestore(&db->lock, flags); +} + #ifdef CONFIG_DEBUG_OBJECTS_FREE static void __debug_check_no_obj_freed(const void *address, unsigned long size) { @@ -773,7 +827,7 @@ static int __init fixup_free(void *addr, enum debug_obj_state state) } } -static int +static int __init check_results(void *addr, enum debug_obj_state state, int fixups, int warnings) { struct debug_bucket *db; @@ -916,7 +970,7 @@ void __init debug_objects_early_init(void) /* * Convert the statically allocated objects to dynamic ones: */ -static int debug_objects_replace_static_objects(void) +static int __init debug_objects_replace_static_objects(void) { struct debug_bucket *db = obj_hash; struct hlist_node *node, *tmp; diff --git a/lib/decompress_unlzo.c b/lib/decompress_unlzo.c index db521f45626e..bcb3a4bd68ff 100644 --- a/lib/decompress_unlzo.c +++ b/lib/decompress_unlzo.c @@ -97,7 +97,7 @@ STATIC inline int INIT unlzo(u8 *input, int in_len, u32 src_len, dst_len; size_t tmp; u8 
*in_buf, *in_buf_save, *out_buf; - int obytes_processed = 0; + int ret = -1; set_error_fn(error_fn); @@ -174,15 +174,22 @@ STATIC inline int INIT unlzo(u8 *input, int in_len, /* decompress */ tmp = dst_len; - r = lzo1x_decompress_safe((u8 *) in_buf, src_len, + + /* When the input data is not compressed at all, + * lzo1x_decompress_safe will fail, so call memcpy() + * instead */ + if (unlikely(dst_len == src_len)) + memcpy(out_buf, in_buf, src_len); + else { + r = lzo1x_decompress_safe((u8 *) in_buf, src_len, out_buf, &tmp); - if (r != LZO_E_OK || dst_len != tmp) { - error("Compressed data violation"); - goto exit_2; + if (r != LZO_E_OK || dst_len != tmp) { + error("Compressed data violation"); + goto exit_2; + } } - obytes_processed += dst_len; if (flush) flush(out_buf, dst_len); if (output) @@ -196,6 +203,7 @@ STATIC inline int INIT unlzo(u8 *input, int in_len, in_buf += src_len; } + ret = 0; exit_2: if (!input) free(in_buf); @@ -203,7 +211,7 @@ exit_1: if (!output) free(out_buf); exit: - return obytes_processed; + return ret; } #define decompress unlzo diff --git a/lib/devres.c b/lib/devres.c index 72c8909006da..49368608f988 100644 --- a/lib/devres.c +++ b/lib/devres.c @@ -1,5 +1,6 @@ #include <linux/pci.h> #include <linux/io.h> +#include <linux/gfp.h> #include <linux/module.h> void devm_ioremap_release(struct device *dev, void *res) diff --git a/lib/dma-debug.c b/lib/dma-debug.c index 7d2f0b33e5a8..01e64270e246 100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c @@ -570,7 +570,7 @@ static ssize_t filter_write(struct file *file, const char __user *userbuf, * Now parse out the first token and use it as the name for the * driver to filter for. */ - for (i = 0; i < NAME_MAX_LEN; ++i) { + for (i = 0; i < NAME_MAX_LEN - 1; ++i) { current_driver_name[i] = buf[i]; if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0) break; @@ -587,7 +587,7 @@ out_unlock: return count; } -const struct file_operations filter_fops = { +static const struct file_operations filter_fops = { .read = filter_read, .write = filter_write, }; diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c index f93502915988..02afc2533728 100644 --- a/lib/dynamic_debug.c +++ b/lib/dynamic_debug.c @@ -25,6 +25,7 @@ #include <linux/uaccess.h> #include <linux/dynamic_debug.h> #include <linux/debugfs.h> +#include <linux/slab.h> extern struct _ddebug __start___verbose[]; extern struct _ddebug __stop___verbose[]; @@ -455,7 +456,7 @@ static ssize_t ddebug_proc_write(struct file *file, const char __user *ubuf, __func__, (int)len); nwords = ddebug_tokenize(tmpbuf, words, MAXWORDS); - if (nwords < 0) + if (nwords <= 0) return -EINVAL; if (ddebug_parse_query(words, nwords-1, &query)) return -EINVAL; @@ -691,7 +692,7 @@ static void ddebug_table_free(struct ddebug_table *dt) * Called in response to a module being unloaded. Removes * any ddebug_table's which point at the module. 
*/ -int ddebug_remove_module(char *mod_name) +int ddebug_remove_module(const char *mod_name) { struct ddebug_table *dt, *nextdt; int ret = -ENOENT; diff --git a/lib/flex_array.c b/lib/flex_array.c index 66eef2e4483e..41b1804fa728 100644 --- a/lib/flex_array.c +++ b/lib/flex_array.c @@ -99,7 +99,7 @@ struct flex_array *flex_array_alloc(int element_size, unsigned int total, ret->element_size = element_size; ret->total_nr_elements = total; if (elements_fit_in_base(ret) && !(flags & __GFP_ZERO)) - memset(ret->parts[0], FLEX_ARRAY_FREE, + memset(&ret->parts[0], FLEX_ARRAY_FREE, FLEX_ARRAY_BASE_BYTES_LEFT); return ret; } diff --git a/lib/gen_crc32table.c b/lib/gen_crc32table.c index bea5d97df991..85d0e412a04f 100644 --- a/lib/gen_crc32table.c +++ b/lib/gen_crc32table.c @@ -7,8 +7,8 @@ #define LE_TABLE_SIZE (1 << CRC_LE_BITS) #define BE_TABLE_SIZE (1 << CRC_BE_BITS) -static uint32_t crc32table_le[LE_TABLE_SIZE]; -static uint32_t crc32table_be[BE_TABLE_SIZE]; +static uint32_t crc32table_le[4][LE_TABLE_SIZE]; +static uint32_t crc32table_be[4][BE_TABLE_SIZE]; /** * crc32init_le() - allocate and initialize LE table data @@ -22,12 +22,19 @@ static void crc32init_le(void) unsigned i, j; uint32_t crc = 1; - crc32table_le[0] = 0; + crc32table_le[0][0] = 0; for (i = 1 << (CRC_LE_BITS - 1); i; i >>= 1) { crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0); for (j = 0; j < LE_TABLE_SIZE; j += 2 * i) - crc32table_le[i + j] = crc ^ crc32table_le[j]; + crc32table_le[0][i + j] = crc ^ crc32table_le[0][j]; + } + for (i = 0; i < LE_TABLE_SIZE; i++) { + crc = crc32table_le[0][i]; + for (j = 1; j < 4; j++) { + crc = crc32table_le[0][crc & 0xff] ^ (crc >> 8); + crc32table_le[j][i] = crc; + } } } @@ -39,25 +46,35 @@ static void crc32init_be(void) unsigned i, j; uint32_t crc = 0x80000000; - crc32table_be[0] = 0; + crc32table_be[0][0] = 0; for (i = 1; i < BE_TABLE_SIZE; i <<= 1) { crc = (crc << 1) ^ ((crc & 0x80000000) ? CRCPOLY_BE : 0); for (j = 0; j < i; j++) - crc32table_be[i + j] = crc ^ crc32table_be[j]; + crc32table_be[0][i + j] = crc ^ crc32table_be[0][j]; + } + for (i = 0; i < BE_TABLE_SIZE; i++) { + crc = crc32table_be[0][i]; + for (j = 1; j < 4; j++) { + crc = crc32table_be[0][(crc >> 24) & 0xff] ^ (crc << 8); + crc32table_be[j][i] = crc; + } } } -static void output_table(uint32_t table[], int len, char *trans) +static void output_table(uint32_t table[4][256], int len, char *trans) { - int i; + int i, j; - for (i = 0; i < len - 1; i++) { - if (i % ENTRIES_PER_LINE == 0) - printf("\n"); - printf("%s(0x%8.8xL), ", trans, table[i]); + for (j = 0 ; j < 4; j++) { + printf("{"); + for (i = 0; i < len - 1; i++) { + if (i % ENTRIES_PER_LINE == 0) + printf("\n"); + printf("%s(0x%8.8xL), ", trans, table[j][i]); + } + printf("%s(0x%8.8xL)},\n", trans, table[j][len - 1]); } - printf("%s(0x%8.8xL)\n", trans, table[len - 1]); } int main(int argc, char** argv) @@ -66,14 +83,14 @@ int main(int argc, char** argv) if (CRC_LE_BITS > 1) { crc32init_le(); - printf("static const u32 crc32table_le[] = {"); + printf("static const u32 crc32table_le[4][256] = {"); output_table(crc32table_le, LE_TABLE_SIZE, "tole"); printf("};\n"); } if (CRC_BE_BITS > 1) { crc32init_be(); - printf("static const u32 crc32table_be[] = {"); + printf("static const u32 crc32table_be[4][256] = {"); output_table(crc32table_be, BE_TABLE_SIZE, "tobe"); printf("};\n"); } diff --git a/lib/genalloc.c b/lib/genalloc.c index e67f97495dd5..1923f1490e72 100644 --- a/lib/genalloc.c +++ b/lib/genalloc.c @@ -10,6 +10,7 @@ * Version 2. See the file COPYING for more details. 
*/ +#include <linux/slab.h> #include <linux/module.h> #include <linux/bitmap.h> #include <linux/genalloc.h> @@ -127,7 +128,6 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size) chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk); end_bit = (chunk->end_addr - chunk->start_addr) >> order; - end_bit -= nbits + 1; spin_lock_irqsave(&chunk->lock, flags); start_bit = bitmap_find_next_zero_area(chunk->bits, end_bit, 0, diff --git a/lib/hexdump.c b/lib/hexdump.c index 39af2560f765..5d7a4802c562 100644 --- a/lib/hexdump.c +++ b/lib/hexdump.c @@ -16,6 +16,24 @@ const char hex_asc[] = "0123456789abcdef"; EXPORT_SYMBOL(hex_asc); /** + * hex_to_bin - convert a hex digit to its real value + * @ch: ascii character represents hex digit + * + * hex_to_bin() converts one hex digit to its actual value or -1 in case of bad + * input. + */ +int hex_to_bin(char ch) +{ + if ((ch >= '0') && (ch <= '9')) + return ch - '0'; + ch = tolower(ch); + if ((ch >= 'a') && (ch <= 'f')) + return ch - 'a' + 10; + return -1; +} +EXPORT_SYMBOL(hex_to_bin); + +/** * hex_dump_to_buffer - convert a blob of data to "hex ASCII" in memory * @buf: data blob to dump * @len: number of bytes in the @buf @@ -34,7 +52,7 @@ EXPORT_SYMBOL(hex_asc); * * E.g.: * hex_dump_to_buffer(frame->data, frame->len, 16, 1, - * linebuf, sizeof(linebuf), 1); + * linebuf, sizeof(linebuf), true); * * example output buffer: * 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f @ABCDEFGHIJKLMNO @@ -65,8 +83,8 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize, for (j = 0; j < ngroups; j++) lx += scnprintf(linebuf + lx, linebuflen - lx, - "%s%16.16llx", j ? " " : "", - (unsigned long long)*(ptr8 + j)); + "%s%16.16llx", j ? " " : "", + (unsigned long long)*(ptr8 + j)); ascii_column = 17 * ngroups + 2; break; } @@ -77,7 +95,7 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize, for (j = 0; j < ngroups; j++) lx += scnprintf(linebuf + lx, linebuflen - lx, - "%s%8.8x", j ? " " : "", *(ptr4 + j)); + "%s%8.8x", j ? " " : "", *(ptr4 + j)); ascii_column = 9 * ngroups + 2; break; } @@ -88,7 +106,7 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize, for (j = 0; j < ngroups; j++) lx += scnprintf(linebuf + lx, linebuflen - lx, - "%s%4.4x", j ? " " : "", *(ptr2 + j)); + "%s%4.4x", j ? " " : "", *(ptr2 + j)); ascii_column = 5 * ngroups + 2; break; } @@ -111,9 +129,10 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize, while (lx < (linebuflen - 1) && lx < (ascii_column - 1)) linebuf[lx++] = ' '; - for (j = 0; (j < len) && (lx + 2) < linebuflen; j++) - linebuf[lx++] = (isascii(ptr[j]) && isprint(ptr[j])) ? ptr[j] - : '.'; + for (j = 0; (j < len) && (lx + 2) < linebuflen; j++) { + ch = ptr[j]; + linebuf[lx++] = (isascii(ch) && isprint(ch)) ? ch : '.'; + } nil: linebuf[lx++] = '\0'; } @@ -143,7 +162,7 @@ EXPORT_SYMBOL(hex_dump_to_buffer); * * E.g.: * print_hex_dump(KERN_DEBUG, "raw data: ", DUMP_PREFIX_ADDRESS, - * 16, 1, frame->data, frame->len, 1); + * 16, 1, frame->data, frame->len, true); * * Example output using %DUMP_PREFIX_OFFSET and 1-byte mode: * 0009ab42: 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f @ABCDEFGHIJKLMNO @@ -151,12 +170,12 @@ EXPORT_SYMBOL(hex_dump_to_buffer); * ffffffff88089af0: 73727170 77767574 7b7a7978 7f7e7d7c pqrstuvwxyz{|}~. 
*/ void print_hex_dump(const char *level, const char *prefix_str, int prefix_type, - int rowsize, int groupsize, - const void *buf, size_t len, bool ascii) + int rowsize, int groupsize, + const void *buf, size_t len, bool ascii) { const u8 *ptr = buf; int i, linelen, remaining = len; - unsigned char linebuf[200]; + unsigned char linebuf[32 * 3 + 2 + 32 + 1]; if (rowsize != 16 && rowsize != 32) rowsize = 16; @@ -164,13 +183,14 @@ void print_hex_dump(const char *level, const char *prefix_str, int prefix_type, for (i = 0; i < len; i += rowsize) { linelen = min(remaining, rowsize); remaining -= rowsize; + hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize, - linebuf, sizeof(linebuf), ascii); + linebuf, sizeof(linebuf), ascii); switch (prefix_type) { case DUMP_PREFIX_ADDRESS: - printk("%s%s%*p: %s\n", level, prefix_str, - (int)(2 * sizeof(void *)), ptr + i, linebuf); + printk("%s%s%p: %s\n", + level, prefix_str, ptr + i, linebuf); break; case DUMP_PREFIX_OFFSET: printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf); @@ -196,9 +216,9 @@ EXPORT_SYMBOL(print_hex_dump); * rowsize of 16, groupsize of 1, and ASCII output included. */ void print_hex_dump_bytes(const char *prefix_str, int prefix_type, - const void *buf, size_t len) + const void *buf, size_t len) { print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, 16, 1, - buf, len, 1); + buf, len, true); } EXPORT_SYMBOL(print_hex_dump_bytes); diff --git a/lib/hweight.c b/lib/hweight.c index 389424ecb129..3c79d50814cf 100644 --- a/lib/hweight.c +++ b/lib/hweight.c @@ -9,37 +9,45 @@ * The Hamming Weight of a number is the total number of bits set in it. */ -unsigned int hweight32(unsigned int w) +unsigned int __sw_hweight32(unsigned int w) { +#ifdef ARCH_HAS_FAST_MULTIPLIER + w -= (w >> 1) & 0x55555555; + w = (w & 0x33333333) + ((w >> 2) & 0x33333333); + w = (w + (w >> 4)) & 0x0f0f0f0f; + return (w * 0x01010101) >> 24; +#else unsigned int res = w - ((w >> 1) & 0x55555555); res = (res & 0x33333333) + ((res >> 2) & 0x33333333); res = (res + (res >> 4)) & 0x0F0F0F0F; res = res + (res >> 8); return (res + (res >> 16)) & 0x000000FF; +#endif } -EXPORT_SYMBOL(hweight32); +EXPORT_SYMBOL(__sw_hweight32); -unsigned int hweight16(unsigned int w) +unsigned int __sw_hweight16(unsigned int w) { unsigned int res = w - ((w >> 1) & 0x5555); res = (res & 0x3333) + ((res >> 2) & 0x3333); res = (res + (res >> 4)) & 0x0F0F; return (res + (res >> 8)) & 0x00FF; } -EXPORT_SYMBOL(hweight16); +EXPORT_SYMBOL(__sw_hweight16); -unsigned int hweight8(unsigned int w) +unsigned int __sw_hweight8(unsigned int w) { unsigned int res = w - ((w >> 1) & 0x55); res = (res & 0x33) + ((res >> 2) & 0x33); return (res + (res >> 4)) & 0x0F; } -EXPORT_SYMBOL(hweight8); +EXPORT_SYMBOL(__sw_hweight8); -unsigned long hweight64(__u64 w) +unsigned long __sw_hweight64(__u64 w) { #if BITS_PER_LONG == 32 - return hweight32((unsigned int)(w >> 32)) + hweight32((unsigned int)w); + return __sw_hweight32((unsigned int)(w >> 32)) + + __sw_hweight32((unsigned int)w); #elif BITS_PER_LONG == 64 #ifdef ARCH_HAS_FAST_MULTIPLIER w -= (w >> 1) & 0x5555555555555555ul; @@ -56,4 +64,4 @@ unsigned long hweight64(__u64 w) #endif #endif } -EXPORT_SYMBOL(hweight64); +EXPORT_SYMBOL(__sw_hweight64); diff --git a/lib/idr.c b/lib/idr.c index 1cac726c44bc..7f1a4f0acf50 100644 --- a/lib/idr.c +++ b/lib/idr.c @@ -156,10 +156,12 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa) id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1; /* if already at the top layer, we need to grow */ - if (!(p = pa[l])) 
{ + if (id >= 1 << (idp->layers * IDR_BITS)) { *starting_id = id; return IDR_NEED_TO_GROW; } + p = pa[l]; + BUG_ON(!p); /* If we need to go up one layer, continue the * loop; otherwise, restart from the top. @@ -443,6 +445,7 @@ EXPORT_SYMBOL(idr_remove); void idr_remove_all(struct idr *idp) { int n, id, max; + int bt_mask; struct idr_layer *p; struct idr_layer *pa[MAX_LEVEL]; struct idr_layer **paa = &pa[0]; @@ -460,8 +463,10 @@ void idr_remove_all(struct idr *idp) p = p->ary[(id >> n) & IDR_MASK]; } + bt_mask = id; id += 1 << n; - while (n < fls(id)) { + /* Get the highest bit that the above add changed from 0->1. */ + while (n < fls(id ^ bt_mask)) { if (p) free_layer(p); n += IDR_BITS; @@ -502,7 +507,7 @@ void *idr_find(struct idr *idp, int id) int n; struct idr_layer *p; - p = rcu_dereference(idp->top); + p = rcu_dereference_raw(idp->top); if (!p) return NULL; n = (p->layer+1) * IDR_BITS; @@ -517,7 +522,7 @@ void *idr_find(struct idr *idp, int id) while (n > 0 && p) { n -= IDR_BITS; BUG_ON(n != p->layer*IDR_BITS); - p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]); + p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]); } return((void *)p); } @@ -550,7 +555,7 @@ int idr_for_each(struct idr *idp, struct idr_layer **paa = &pa[0]; n = idp->layers * IDR_BITS; - p = rcu_dereference(idp->top); + p = rcu_dereference_raw(idp->top); max = 1 << n; id = 0; @@ -558,7 +563,7 @@ int idr_for_each(struct idr *idp, while (n > 0 && p) { n -= IDR_BITS; *paa++ = p; - p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]); + p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]); } if (p) { @@ -597,7 +602,7 @@ void *idr_get_next(struct idr *idp, int *nextidp) /* find first ent */ n = idp->layers * IDR_BITS; max = 1 << n; - p = rcu_dereference(idp->top); + p = rcu_dereference_raw(idp->top); if (!p) return NULL; @@ -605,7 +610,7 @@ void *idr_get_next(struct idr *idp, int *nextidp) while (n > 0 && p) { n -= IDR_BITS; *paa++ = p; - p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]); + p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]); } if (p) { @@ -621,7 +626,7 @@ void *idr_get_next(struct idr *idp, int *nextidp) } return NULL; } - +EXPORT_SYMBOL(idr_get_next); /** diff --git a/lib/inflate.c b/lib/inflate.c index d10255973a9f..677b738c2204 100644 --- a/lib/inflate.c +++ b/lib/inflate.c @@ -103,6 +103,7 @@ the two sets of lengths. */ #include <linux/compiler.h> +#include <linux/slab.h> #ifdef RCSID static char rcsid[] = "#Id: inflate.c,v 0.14 1993/06/10 13:27:04 jloup Exp #"; diff --git a/lib/kasprintf.c b/lib/kasprintf.c index c5ff1fd10030..9c4233b23783 100644 --- a/lib/kasprintf.c +++ b/lib/kasprintf.c @@ -6,6 +6,7 @@ #include <stdarg.h> #include <linux/module.h> +#include <linux/slab.h> #include <linux/types.h> #include <linux/string.h> diff --git a/lib/kobject.c b/lib/kobject.c index b512b746d2af..f07c57252e82 100644 --- a/lib/kobject.c +++ b/lib/kobject.c @@ -700,7 +700,7 @@ static ssize_t kobj_attr_store(struct kobject *kobj, struct attribute *attr, return ret; } -struct sysfs_ops kobj_sysfs_ops = { +const struct sysfs_ops kobj_sysfs_ops = { .show = kobj_attr_show, .store = kobj_attr_store, }; @@ -789,7 +789,7 @@ static struct kobj_type kset_ktype = { * If the kset was not able to be created, NULL will be returned. 
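The idr_remove_all() fix above frees only as many layers as the id increment actually crossed: XORing the id before and after the add exposes every bit the carry flipped, and fls() of that mask names the highest one. A small sketch of just that arithmetic (fls() reimplemented for userspace; the values are illustrative):

#include <stdio.h>

/* find last (most significant) set bit, 1-based; 0 when no bit is set */
static int fls(unsigned int x)
{
	int r = 0;
	while (x) {
		r++;
		x >>= 1;
	}
	return r;
}

int main(void)
{
	int n = 5;              /* bits per idr layer in this example */
	int id = 0x3e0;         /* about to step across a layer boundary */
	int bt_mask = id;

	id += 1 << n;           /* 0x3e0 + 0x20 = 0x400: the carry ripples up */
	printf("changed bits: %#x, highest changed bit: %d\n",
	       id ^ bt_mask, fls(id ^ bt_mask));
	/* layers whose bit position is below fls(id ^ bt_mask) were exited
	 * by the carry, so their idr_layer nodes can be freed */
	return 0;
}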
*/ static struct kset *kset_create(const char *name, - struct kset_uevent_ops *uevent_ops, + const struct kset_uevent_ops *uevent_ops, struct kobject *parent_kobj) { struct kset *kset; @@ -832,7 +832,7 @@ static struct kset *kset_create(const char *name, * If the kset was not able to be created, NULL will be returned. */ struct kset *kset_create_and_add(const char *name, - struct kset_uevent_ops *uevent_ops, + const struct kset_uevent_ops *uevent_ops, struct kobject *parent_kobj) { struct kset *kset; @@ -850,6 +850,121 @@ struct kset *kset_create_and_add(const char *name, } EXPORT_SYMBOL_GPL(kset_create_and_add); + +static DEFINE_SPINLOCK(kobj_ns_type_lock); +static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES]; + +int kobj_ns_type_register(const struct kobj_ns_type_operations *ops) +{ + enum kobj_ns_type type = ops->type; + int error; + + spin_lock(&kobj_ns_type_lock); + + error = -EINVAL; + if (type >= KOBJ_NS_TYPES) + goto out; + + error = -EINVAL; + if (type <= KOBJ_NS_TYPE_NONE) + goto out; + + error = -EBUSY; + if (kobj_ns_ops_tbl[type]) + goto out; + + error = 0; + kobj_ns_ops_tbl[type] = ops; + +out: + spin_unlock(&kobj_ns_type_lock); + return error; +} + +int kobj_ns_type_registered(enum kobj_ns_type type) +{ + int registered = 0; + + spin_lock(&kobj_ns_type_lock); + if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES)) + registered = kobj_ns_ops_tbl[type] != NULL; + spin_unlock(&kobj_ns_type_lock); + + return registered; +} + +const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent) +{ + const struct kobj_ns_type_operations *ops = NULL; + + if (parent && parent->ktype->child_ns_type) + ops = parent->ktype->child_ns_type(parent); + + return ops; +} + +const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj) +{ + return kobj_child_ns_ops(kobj->parent); +} + + +const void *kobj_ns_current(enum kobj_ns_type type) +{ + const void *ns = NULL; + + spin_lock(&kobj_ns_type_lock); + if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) && + kobj_ns_ops_tbl[type]) + ns = kobj_ns_ops_tbl[type]->current_ns(); + spin_unlock(&kobj_ns_type_lock); + + return ns; +} + +const void *kobj_ns_netlink(enum kobj_ns_type type, struct sock *sk) +{ + const void *ns = NULL; + + spin_lock(&kobj_ns_type_lock); + if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) && + kobj_ns_ops_tbl[type]) + ns = kobj_ns_ops_tbl[type]->netlink_ns(sk); + spin_unlock(&kobj_ns_type_lock); + + return ns; +} + +const void *kobj_ns_initial(enum kobj_ns_type type) +{ + const void *ns = NULL; + + spin_lock(&kobj_ns_type_lock); + if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) && + kobj_ns_ops_tbl[type]) + ns = kobj_ns_ops_tbl[type]->initial_ns(); + spin_unlock(&kobj_ns_type_lock); + + return ns; +} + +/* + * kobj_ns_exit - invalidate a namespace tag + * + * @type: the namespace type (i.e. KOBJ_NS_TYPE_NET) + * @ns: the actual namespace being invalidated + * + * This is called when a tag is no longer valid. For instance, + * when a network namespace exits, it uses this helper to + * make sure no sb's sysfs_info points to the now-invalidated + * netns. 
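kobj_ns_type_register() above is a small instance of a common kernel idiom: a fixed-size ops table indexed by an enum, with range and double-registration checks performed under a lock. A single-threaded userspace sketch of the same shape (the names here are invented for illustration; the kernel version takes kobj_ns_type_lock around the checks):

#include <errno.h>
#include <stdio.h>

enum ns_type { NS_TYPE_NONE, NS_TYPE_NET, NS_TYPES };

struct ns_ops { enum ns_type type; const char *name; };

static const struct ns_ops *ops_tbl[NS_TYPES];

static int ns_type_register(const struct ns_ops *ops)
{
	if (ops->type >= NS_TYPES || ops->type <= NS_TYPE_NONE)
		return -EINVAL;        /* out of range */
	if (ops_tbl[ops->type])
		return -EBUSY;         /* already registered */
	ops_tbl[ops->type] = ops;
	return 0;
}

int main(void)
{
	static const struct ns_ops net_ops = { NS_TYPE_NET, "net" };
	printf("first:  %d\n", ns_type_register(&net_ops));  /* 0 */
	printf("second: %d\n", ns_type_register(&net_ops));  /* -EBUSY */
	return 0;
}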
+ */ +void kobj_ns_exit(enum kobj_ns_type type, const void *ns) +{ + sysfs_exit_ns(type, ns); +} + + EXPORT_SYMBOL(kobject_get); EXPORT_SYMBOL(kobject_put); EXPORT_SYMBOL(kobject_del); diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c index 920a3ca6e259..b93579504dfa 100644 --- a/lib/kobject_uevent.c +++ b/lib/kobject_uevent.c @@ -18,18 +18,25 @@ #include <linux/string.h> #include <linux/kobject.h> #include <linux/module.h> - +#include <linux/slab.h> +#include <linux/user_namespace.h> #include <linux/socket.h> #include <linux/skbuff.h> #include <linux/netlink.h> #include <net/sock.h> +#include <net/net_namespace.h> u64 uevent_seqnum; char uevent_helper[UEVENT_HELPER_PATH_LEN] = CONFIG_UEVENT_HELPER_PATH; static DEFINE_SPINLOCK(sequence_lock); -#if defined(CONFIG_NET) -static struct sock *uevent_sock; +#ifdef CONFIG_NET +struct uevent_sock { + struct list_head list; + struct sock *sk; +}; +static LIST_HEAD(uevent_sock_list); +static DEFINE_MUTEX(uevent_sock_mutex); #endif /* the strings here must match the enum in include/linux/kobject.h */ @@ -76,6 +83,39 @@ out: return ret; } +#ifdef CONFIG_NET +static int kobj_bcast_filter(struct sock *dsk, struct sk_buff *skb, void *data) +{ + struct kobject *kobj = data; + const struct kobj_ns_type_operations *ops; + + ops = kobj_ns_ops(kobj); + if (ops) { + const void *sock_ns, *ns; + ns = kobj->ktype->namespace(kobj); + sock_ns = ops->netlink_ns(dsk); + return sock_ns != ns; + } + + return 0; +} +#endif + +static int kobj_usermode_filter(struct kobject *kobj) +{ + const struct kobj_ns_type_operations *ops; + + ops = kobj_ns_ops(kobj); + if (ops) { + const void *init_ns, *ns; + ns = kobj->ktype->namespace(kobj); + init_ns = ops->initial_ns(); + return ns != init_ns; + } + + return 0; +} + /** * kobject_uevent_env - send an uevent with environmental data * @@ -95,10 +135,13 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, const char *subsystem; struct kobject *top_kobj; struct kset *kset; - struct kset_uevent_ops *uevent_ops; + const struct kset_uevent_ops *uevent_ops; u64 seq; int i = 0; int retval = 0; +#ifdef CONFIG_NET + struct uevent_sock *ue_sk; +#endif pr_debug("kobject: '%s' (%p): %s\n", kobject_name(kobj), kobj, __func__); @@ -210,7 +253,9 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, #if defined(CONFIG_NET) /* send netlink message */ - if (uevent_sock) { + mutex_lock(&uevent_sock_mutex); + list_for_each_entry(ue_sk, &uevent_sock_list, list) { + struct sock *uevent_sock = ue_sk->sk; struct sk_buff *skb; size_t len; @@ -232,18 +277,21 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, } NETLINK_CB(skb).dst_group = 1; - retval = netlink_broadcast(uevent_sock, skb, 0, 1, - GFP_KERNEL); + retval = netlink_broadcast_filtered(uevent_sock, skb, + 0, 1, GFP_KERNEL, + kobj_bcast_filter, + kobj); /* ENOBUFS should be handled in userspace */ if (retval == -ENOBUFS) retval = 0; } else retval = -ENOMEM; } + mutex_unlock(&uevent_sock_mutex); #endif /* call uevent_helper, usually only enabled during early boot */ - if (uevent_helper[0]) { + if (uevent_helper[0] && !kobj_usermode_filter(kobj)) { char *argv [3]; argv [0] = uevent_helper; @@ -319,18 +367,59 @@ int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...) 
EXPORT_SYMBOL_GPL(add_uevent_var); #if defined(CONFIG_NET) -static int __init kobject_uevent_init(void) +static int uevent_net_init(struct net *net) { - uevent_sock = netlink_kernel_create(&init_net, NETLINK_KOBJECT_UEVENT, - 1, NULL, NULL, THIS_MODULE); - if (!uevent_sock) { + struct uevent_sock *ue_sk; + + ue_sk = kzalloc(sizeof(*ue_sk), GFP_KERNEL); + if (!ue_sk) + return -ENOMEM; + + ue_sk->sk = netlink_kernel_create(net, NETLINK_KOBJECT_UEVENT, + 1, NULL, NULL, THIS_MODULE); + if (!ue_sk->sk) { printk(KERN_ERR "kobject_uevent: unable to create netlink socket!\n"); + kfree(ue_sk); return -ENODEV; } - netlink_set_nonroot(NETLINK_KOBJECT_UEVENT, NL_NONROOT_RECV); + mutex_lock(&uevent_sock_mutex); + list_add_tail(&ue_sk->list, &uevent_sock_list); + mutex_unlock(&uevent_sock_mutex); return 0; } +static void uevent_net_exit(struct net *net) +{ + struct uevent_sock *ue_sk; + + mutex_lock(&uevent_sock_mutex); + list_for_each_entry(ue_sk, &uevent_sock_list, list) { + if (sock_net(ue_sk->sk) == net) + goto found; + } + mutex_unlock(&uevent_sock_mutex); + return; + +found: + list_del(&ue_sk->list); + mutex_unlock(&uevent_sock_mutex); + + netlink_kernel_release(ue_sk->sk); + kfree(ue_sk); +} + +static struct pernet_operations uevent_net_ops = { + .init = uevent_net_init, + .exit = uevent_net_exit, +}; + +static int __init kobject_uevent_init(void) +{ + netlink_set_nonroot(NETLINK_KOBJECT_UEVENT, NL_NONROOT_RECV); + return register_pernet_subsys(&uevent_net_ops); +} + + postcore_initcall(kobject_uevent_init); #endif diff --git a/lib/kref.c b/lib/kref.c index 9ecd6e865610..d3d227a08a4b 100644 --- a/lib/kref.c +++ b/lib/kref.c @@ -13,17 +13,7 @@ #include <linux/kref.h> #include <linux/module.h> - -/** - * kref_set - initialize object and set refcount to requested number. - * @kref: object in question. - * @num: initial reference counter - */ -void kref_set(struct kref *kref, int num) -{ - atomic_set(&kref->refcount, num); - smp_mb(); -} +#include <linux/slab.h> /** * kref_init - initialize object. @@ -31,7 +21,8 @@ void kref_set(struct kref *kref, int num) */ void kref_init(struct kref *kref) { - kref_set(kref, 1); + atomic_set(&kref->refcount, 1); + smp_mb(); } /** @@ -71,7 +62,6 @@ int kref_put(struct kref *kref, void (*release)(struct kref *kref)) return 0; } -EXPORT_SYMBOL(kref_set); EXPORT_SYMBOL(kref_init); EXPORT_SYMBOL(kref_get); EXPORT_SYMBOL(kref_put); diff --git a/lib/lcm.c b/lib/lcm.c new file mode 100644 index 000000000000..157cd88a6ffc --- /dev/null +++ b/lib/lcm.c @@ -0,0 +1,15 @@ +#include <linux/kernel.h> +#include <linux/gcd.h> +#include <linux/module.h> + +/* Lowest common multiple */ +unsigned long lcm(unsigned long a, unsigned long b) +{ + if (a && b) + return (a * b) / gcd(a, b); + else if (b) + return b; + + return a; +} +EXPORT_SYMBOL_GPL(lcm); diff --git a/lib/list_sort.c b/lib/list_sort.c index 19d11e0bb958..4b5cb794c38b 100644 --- a/lib/list_sort.c +++ b/lib/list_sort.c @@ -4,99 +4,214 @@ #include <linux/slab.h> #include <linux/list.h> +#define MAX_LIST_LENGTH_BITS 20 + +/* + * Returns a list organized in an intermediate format suited + * to chaining of merge() calls: null-terminated, no reserved or + * sentinel head node, "prev" links not maintained. 
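The new lib/lcm.c computes the lowest common multiple through the gcd identity lcm(a,b) = a*b / gcd(a,b). A standalone sketch with Euclid's gcd; note that dividing before multiplying, (a / gcd(a,b)) * b, is the usual way to postpone overflow -- the kernel version multiplies first, as shown in the diff:

#include <stdio.h>

static unsigned long gcd(unsigned long a, unsigned long b)
{
	while (b) {                  /* Euclid's algorithm */
		unsigned long t = b;
		b = a % b;
		a = t;
	}
	return a;
}

static unsigned long lcm(unsigned long a, unsigned long b)
{
	if (a && b)
		return (a / gcd(a, b)) * b;  /* divide first to limit overflow */
	return a ? a : b;                    /* lcm with 0 degenerates */
}

int main(void)
{
	printf("lcm(4, 6)      = %lu\n", lcm(4, 6));        /* 12 */
	printf("lcm(0, 5)      = %lu\n", lcm(0, 5));        /* 5, as in the kernel helper */
	printf("lcm(512, 4096) = %lu\n", lcm(512, 4096));   /* 4096 */
	return 0;
}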
+ */ +static struct list_head *merge(void *priv, + int (*cmp)(void *priv, struct list_head *a, + struct list_head *b), + struct list_head *a, struct list_head *b) +{ + struct list_head head, *tail = &head; + + while (a && b) { + /* if equal, take 'a' -- important for sort stability */ + if ((*cmp)(priv, a, b) <= 0) { + tail->next = a; + a = a->next; + } else { + tail->next = b; + b = b->next; + } + tail = tail->next; + } + tail->next = a?:b; + return head.next; +} + +/* + * Combine final list merge with restoration of standard doubly-linked + * list structure. This approach duplicates code from merge(), but + * runs faster than the tidier alternatives of either a separate final + * prev-link restoration pass, or maintaining the prev links + * throughout. + */ +static void merge_and_restore_back_links(void *priv, + int (*cmp)(void *priv, struct list_head *a, + struct list_head *b), + struct list_head *head, + struct list_head *a, struct list_head *b) +{ + struct list_head *tail = head; + + while (a && b) { + /* if equal, take 'a' -- important for sort stability */ + if ((*cmp)(priv, a, b) <= 0) { + tail->next = a; + a->prev = tail; + a = a->next; + } else { + tail->next = b; + b->prev = tail; + b = b->next; + } + tail = tail->next; + } + tail->next = a ? : b; + + do { + /* + * In worst cases this loop may run many iterations. + * Continue callbacks to the client even though no + * element comparison is needed, so the client's cmp() + * routine can invoke cond_resched() periodically. + */ + (*cmp)(priv, tail, tail); + + tail->next->prev = tail; + tail = tail->next; + } while (tail->next); + + tail->next = head; + head->prev = tail; +} + /** - * list_sort - sort a list. - * @priv: private data, passed to @cmp + * list_sort - sort a list + * @priv: private data, opaque to list_sort(), passed to @cmp * @head: the list to sort * @cmp: the elements comparison function * - * This function has been implemented by Mark J Roberts <mjr@znex.org>. It - * implements "merge sort" which has O(nlog(n)) complexity. The list is sorted - * in ascending order. + * This function implements "merge sort", which has O(nlog(n)) + * complexity. * - * The comparison function @cmp is supposed to return a negative value if @a is - * less than @b, and a positive value if @a is greater than @b. If @a and @b - * are equivalent, then it does not matter what this function returns. + * The comparison function @cmp must return a negative value if @a + * should sort before @b, and a positive value if @a should sort after + * @b. If @a and @b are equivalent, and their original relative + * ordering is to be preserved, @cmp must return 0. */ void list_sort(void *priv, struct list_head *head, - int (*cmp)(void *priv, struct list_head *a, - struct list_head *b)) + int (*cmp)(void *priv, struct list_head *a, + struct list_head *b)) { - struct list_head *p, *q, *e, *list, *tail, *oldhead; - int insize, nmerges, psize, qsize, i; + struct list_head *part[MAX_LIST_LENGTH_BITS+1]; /* sorted partial lists + -- last slot is a sentinel */ + int lev; /* index into part[] */ + int max_lev = 0; + struct list_head *list; if (list_empty(head)) return; + memset(part, 0, sizeof(part)); + + head->prev->next = NULL; list = head->next; - list_del(head); - insize = 1; - for (;;) { - p = oldhead = list; - list = tail = NULL; - nmerges = 0; - - while (p) { - nmerges++; - q = p; - psize = 0; - for (i = 0; i < insize; i++) { - psize++; - q = q->next == oldhead ? 
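merge() above keeps the sort stable by taking from 'a' on ties (cmp <= 0) and works on a cheaper intermediate shape: singly linked, NULL-terminated, prev pointers ignored until the final restoration pass. The same idea on a plain C linked list:

#include <stdio.h>
#include <stddef.h>

struct node { int val; struct node *next; };

/* stable merge of two NULL-terminated ascending lists */
static struct node *merge(struct node *a, struct node *b)
{
	struct node head, *tail = &head;

	while (a && b) {
		if (a->val <= b->val) {    /* ties take 'a': stability */
			tail->next = a;
			a = a->next;
		} else {
			tail->next = b;
			b = b->next;
		}
		tail = tail->next;
	}
	tail->next = a ? a : b;            /* splice the leftover run */
	return head.next;
}

int main(void)
{
	struct node b2 = { 8, NULL }, b1 = { 3, &b2 };
	struct node a2 = { 5, NULL }, a1 = { 1, &a2 };

	for (struct node *n = merge(&a1, &b1); n; n = n->next)
		printf("%d ", n->val);
	printf("\n");                      /* 1 3 5 8 */
	return 0;
}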
NULL : q->next; - if (!q) - break; - } - qsize = insize; - while (psize > 0 || (qsize > 0 && q)) { - if (!psize) { - e = q; - q = q->next; - qsize--; - if (q == oldhead) - q = NULL; - } else if (!qsize || !q) { - e = p; - p = p->next; - psize--; - if (p == oldhead) - p = NULL; - } else if (cmp(priv, p, q) <= 0) { - e = p; - p = p->next; - psize--; - if (p == oldhead) - p = NULL; - } else { - e = q; - q = q->next; - qsize--; - if (q == oldhead) - q = NULL; - } - if (tail) - tail->next = e; - else - list = e; - e->prev = tail; - tail = e; + while (list) { + struct list_head *cur = list; + list = list->next; + cur->next = NULL; + + for (lev = 0; part[lev]; lev++) { + cur = merge(priv, cmp, part[lev], cur); + part[lev] = NULL; + } + if (lev > max_lev) { + if (unlikely(lev >= ARRAY_SIZE(part)-1)) { + printk_once(KERN_DEBUG "list passed to" + " list_sort() too long for" + " efficiency\n"); + lev--; } - p = q; + max_lev = lev; } + part[lev] = cur; + } - tail->next = list; - list->prev = tail; + for (lev = 0; lev < max_lev; lev++) + if (part[lev]) + list = merge(priv, cmp, part[lev], list); - if (nmerges <= 1) - break; + merge_and_restore_back_links(priv, cmp, head, part[max_lev], list); +} +EXPORT_SYMBOL(list_sort); - insize *= 2; - } +#ifdef DEBUG_LIST_SORT +struct debug_el { + struct list_head l_h; + int value; + unsigned serial; +}; - head->next = list; - head->prev = list->prev; - list->prev->next = head; - list->prev = head; +static int cmp(void *priv, struct list_head *a, struct list_head *b) +{ + return container_of(a, struct debug_el, l_h)->value + - container_of(b, struct debug_el, l_h)->value; } -EXPORT_SYMBOL(list_sort); +/* + * The pattern of set bits in the list length determines which cases + * are hit in list_sort(). + */ +#define LIST_SORT_TEST_LENGTH (512+128+2) /* not including head */ + +static int __init list_sort_test(void) +{ + int i, r = 1, count; + struct list_head *head = kmalloc(sizeof(*head), GFP_KERNEL); + struct list_head *cur; + + printk(KERN_WARNING "testing list_sort()\n"); + + cur = head; + for (i = 0; i < LIST_SORT_TEST_LENGTH; i++) { + struct debug_el *el = kmalloc(sizeof(*el), GFP_KERNEL); + BUG_ON(!el); + /* force some equivalencies */ + el->value = (r = (r * 725861) % 6599) % (LIST_SORT_TEST_LENGTH/3); + el->serial = i; + + el->l_h.prev = cur; + cur->next = &el->l_h; + cur = cur->next; + } + head->prev = cur; + + list_sort(NULL, head, cmp); + + count = 1; + for (cur = head->next; cur->next != head; cur = cur->next) { + struct debug_el *el = container_of(cur, struct debug_el, l_h); + int cmp_result = cmp(NULL, cur, cur->next); + if (cur->next->prev != cur) { + printk(KERN_EMERG "list_sort() returned " + "a corrupted list!\n"); + return 1; + } else if (cmp_result > 0) { + printk(KERN_EMERG "list_sort() failed to sort!\n"); + return 1; + } else if (cmp_result == 0 && + el->serial >= container_of(cur->next, + struct debug_el, l_h)->serial) { + printk(KERN_EMERG "list_sort() failed to preserve order" + " of equivalent elements!\n"); + return 1; + } + kfree(cur->prev); + count++; + } + kfree(cur); + if (count != LIST_SORT_TEST_LENGTH) { + printk(KERN_EMERG "list_sort() returned list of" + "different length!\n"); + return 1; + } + return 0; +} +module_init(list_sort_test); +#endif diff --git a/lib/lmb.c b/lib/lmb.c deleted file mode 100644 index 9cee17142b2c..000000000000 --- a/lib/lmb.c +++ /dev/null @@ -1,532 +0,0 @@ -/* - * Procedures for maintaining information about logical memory blocks. - * - * Peter Bergner, IBM Corp. June 2001. 
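The rewritten list_sort() above replaces the old pass-doubling loop with a bottom-up scheme: part[k] holds either NULL or a sorted sublist of exactly 2^k elements, each incoming element cascades merges up the levels like binary carry propagation, and one final sweep merges the surviving levels. A compact userspace version of the same structure (int-keyed nodes instead of list_head, and the final pass folds every level rather than splitting out the back-link restoration):

#include <stdio.h>
#include <string.h>

#define MAX_BITS 20

struct node { int val; struct node *next; };

static struct node *merge(struct node *a, struct node *b)
{
	struct node head, *tail = &head;
	while (a && b) {
		if (a->val <= b->val) { tail->next = a; a = a->next; }
		else                  { tail->next = b; b = b->next; }
		tail = tail->next;
	}
	tail->next = a ? a : b;
	return head.next;
}

static struct node *sort(struct node *list)
{
	struct node *part[MAX_BITS + 1];  /* part[k]: NULL or 2^k sorted nodes */
	int lev, max_lev = 0;

	memset(part, 0, sizeof(part));
	while (list) {
		struct node *cur = list;
		list = list->next;
		cur->next = NULL;

		/* carry propagation: merge equal-sized runs upward */
		for (lev = 0; part[lev]; lev++) {
			cur = merge(part[lev], cur);
			part[lev] = NULL;
		}
		if (lev > max_lev)
			max_lev = lev;
		part[lev] = cur;
	}
	/* final sweep: fold the remaining levels together */
	list = NULL;
	for (lev = 0; lev <= max_lev; lev++)
		if (part[lev])
			list = merge(part[lev], list);
	return list;
}

int main(void)
{
	struct node n[6] = { {5}, {2}, {9}, {2}, {7}, {1} };
	for (int i = 0; i < 5; i++)
		n[i].next = &n[i + 1];
	n[5].next = NULL;

	for (struct node *p = sort(&n[0]); p; p = p->next)
		printf("%d ", p->val);
	printf("\n");   /* 1 2 2 5 7 9 */
	return 0;
}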
- * Copyright (C) 2001 Peter Bergner. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#include <linux/kernel.h> -#include <linux/init.h> -#include <linux/bitops.h> -#include <linux/lmb.h> - -#define LMB_ALLOC_ANYWHERE 0 - -struct lmb lmb; - -static int lmb_debug; - -static int __init early_lmb(char *p) -{ - if (p && strstr(p, "debug")) - lmb_debug = 1; - return 0; -} -early_param("lmb", early_lmb); - -static void lmb_dump(struct lmb_region *region, char *name) -{ - unsigned long long base, size; - int i; - - pr_info(" %s.cnt = 0x%lx\n", name, region->cnt); - - for (i = 0; i < region->cnt; i++) { - base = region->region[i].base; - size = region->region[i].size; - - pr_info(" %s[0x%x]\t0x%016llx - 0x%016llx, 0x%llx bytes\n", - name, i, base, base + size - 1, size); - } -} - -void lmb_dump_all(void) -{ - if (!lmb_debug) - return; - - pr_info("LMB configuration:\n"); - pr_info(" rmo_size = 0x%llx\n", (unsigned long long)lmb.rmo_size); - pr_info(" memory.size = 0x%llx\n", (unsigned long long)lmb.memory.size); - - lmb_dump(&lmb.memory, "memory"); - lmb_dump(&lmb.reserved, "reserved"); -} - -static unsigned long lmb_addrs_overlap(u64 base1, u64 size1, u64 base2, - u64 size2) -{ - return ((base1 < (base2 + size2)) && (base2 < (base1 + size1))); -} - -static long lmb_addrs_adjacent(u64 base1, u64 size1, u64 base2, u64 size2) -{ - if (base2 == base1 + size1) - return 1; - else if (base1 == base2 + size2) - return -1; - - return 0; -} - -static long lmb_regions_adjacent(struct lmb_region *rgn, - unsigned long r1, unsigned long r2) -{ - u64 base1 = rgn->region[r1].base; - u64 size1 = rgn->region[r1].size; - u64 base2 = rgn->region[r2].base; - u64 size2 = rgn->region[r2].size; - - return lmb_addrs_adjacent(base1, size1, base2, size2); -} - -static void lmb_remove_region(struct lmb_region *rgn, unsigned long r) -{ - unsigned long i; - - for (i = r; i < rgn->cnt - 1; i++) { - rgn->region[i].base = rgn->region[i + 1].base; - rgn->region[i].size = rgn->region[i + 1].size; - } - rgn->cnt--; -} - -/* Assumption: base addr of region 1 < base addr of region 2 */ -static void lmb_coalesce_regions(struct lmb_region *rgn, - unsigned long r1, unsigned long r2) -{ - rgn->region[r1].size += rgn->region[r2].size; - lmb_remove_region(rgn, r2); -} - -void __init lmb_init(void) -{ - /* Create a dummy zero size LMB which will get coalesced away later. - * This simplifies the lmb_add() code below... - */ - lmb.memory.region[0].base = 0; - lmb.memory.region[0].size = 0; - lmb.memory.cnt = 1; - - /* Ditto. */ - lmb.reserved.region[0].base = 0; - lmb.reserved.region[0].size = 0; - lmb.reserved.cnt = 1; -} - -void __init lmb_analyze(void) -{ - int i; - - lmb.memory.size = 0; - - for (i = 0; i < lmb.memory.cnt; i++) - lmb.memory.size += lmb.memory.region[i].size; -} - -static long lmb_add_region(struct lmb_region *rgn, u64 base, u64 size) -{ - unsigned long coalesced = 0; - long adjacent, i; - - if ((rgn->cnt == 1) && (rgn->region[0].size == 0)) { - rgn->region[0].base = base; - rgn->region[0].size = size; - return 0; - } - - /* First try and coalesce this LMB with another. 
*/ - for (i = 0; i < rgn->cnt; i++) { - u64 rgnbase = rgn->region[i].base; - u64 rgnsize = rgn->region[i].size; - - if ((rgnbase == base) && (rgnsize == size)) - /* Already have this region, so we're done */ - return 0; - - adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize); - if (adjacent > 0) { - rgn->region[i].base -= size; - rgn->region[i].size += size; - coalesced++; - break; - } else if (adjacent < 0) { - rgn->region[i].size += size; - coalesced++; - break; - } - } - - if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i+1)) { - lmb_coalesce_regions(rgn, i, i+1); - coalesced++; - } - - if (coalesced) - return coalesced; - if (rgn->cnt >= MAX_LMB_REGIONS) - return -1; - - /* Couldn't coalesce the LMB, so add it to the sorted table. */ - for (i = rgn->cnt - 1; i >= 0; i--) { - if (base < rgn->region[i].base) { - rgn->region[i+1].base = rgn->region[i].base; - rgn->region[i+1].size = rgn->region[i].size; - } else { - rgn->region[i+1].base = base; - rgn->region[i+1].size = size; - break; - } - } - - if (base < rgn->region[0].base) { - rgn->region[0].base = base; - rgn->region[0].size = size; - } - rgn->cnt++; - - return 0; -} - -long lmb_add(u64 base, u64 size) -{ - struct lmb_region *_rgn = &lmb.memory; - - /* On pSeries LPAR systems, the first LMB is our RMO region. */ - if (base == 0) - lmb.rmo_size = size; - - return lmb_add_region(_rgn, base, size); - -} - -long lmb_remove(u64 base, u64 size) -{ - struct lmb_region *rgn = &(lmb.memory); - u64 rgnbegin, rgnend; - u64 end = base + size; - int i; - - rgnbegin = rgnend = 0; /* supress gcc warnings */ - - /* Find the region where (base, size) belongs to */ - for (i=0; i < rgn->cnt; i++) { - rgnbegin = rgn->region[i].base; - rgnend = rgnbegin + rgn->region[i].size; - - if ((rgnbegin <= base) && (end <= rgnend)) - break; - } - - /* Didn't find the region */ - if (i == rgn->cnt) - return -1; - - /* Check to see if we are removing entire region */ - if ((rgnbegin == base) && (rgnend == end)) { - lmb_remove_region(rgn, i); - return 0; - } - - /* Check to see if region is matching at the front */ - if (rgnbegin == base) { - rgn->region[i].base = end; - rgn->region[i].size -= size; - return 0; - } - - /* Check to see if the region is matching at the end */ - if (rgnend == end) { - rgn->region[i].size -= size; - return 0; - } - - /* - * We need to split the entry - adjust the current one to the - * beginging of the hole and add the region after hole. - */ - rgn->region[i].size = base - rgn->region[i].base; - return lmb_add_region(rgn, end, rgnend - end); -} - -long __init lmb_reserve(u64 base, u64 size) -{ - struct lmb_region *_rgn = &lmb.reserved; - - BUG_ON(0 == size); - - return lmb_add_region(_rgn, base, size); -} - -long lmb_overlaps_region(struct lmb_region *rgn, u64 base, u64 size) -{ - unsigned long i; - - for (i = 0; i < rgn->cnt; i++) { - u64 rgnbase = rgn->region[i].base; - u64 rgnsize = rgn->region[i].size; - if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) - break; - } - - return (i < rgn->cnt) ? 
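The (now removed) lmb_add_region() grows an existing region in place whenever a new block touches it: lmb_addrs_adjacent() returns +1 when the new block ends exactly where a region starts and -1 for the mirror case. The adjacency test in isolation, with a few probes:

#include <stdio.h>

typedef unsigned long long u64;

/* +1: block1 sits immediately below block2; -1: immediately above; 0: apart */
static long addrs_adjacent(u64 base1, u64 size1, u64 base2, u64 size2)
{
	if (base2 == base1 + size1)
		return 1;
	if (base1 == base2 + size2)
		return -1;
	return 0;
}

int main(void)
{
	/* existing region [0x2000, 0x3000); candidate blocks of size 0x1000 */
	printf("%ld\n", addrs_adjacent(0x1000, 0x1000, 0x2000, 0x1000)); /*  1: extend down */
	printf("%ld\n", addrs_adjacent(0x3000, 0x1000, 0x2000, 0x1000)); /* -1: extend up */
	printf("%ld\n", addrs_adjacent(0x5000, 0x1000, 0x2000, 0x1000)); /*  0: keep separate */
	return 0;
}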
i : -1; -} - -static u64 lmb_align_down(u64 addr, u64 size) -{ - return addr & ~(size - 1); -} - -static u64 lmb_align_up(u64 addr, u64 size) -{ - return (addr + (size - 1)) & ~(size - 1); -} - -static u64 __init lmb_alloc_nid_unreserved(u64 start, u64 end, - u64 size, u64 align) -{ - u64 base, res_base; - long j; - - base = lmb_align_down((end - size), align); - while (start <= base) { - j = lmb_overlaps_region(&lmb.reserved, base, size); - if (j < 0) { - /* this area isn't reserved, take it */ - if (lmb_add_region(&lmb.reserved, base, size) < 0) - base = ~(u64)0; - return base; - } - res_base = lmb.reserved.region[j].base; - if (res_base < size) - break; - base = lmb_align_down(res_base - size, align); - } - - return ~(u64)0; -} - -static u64 __init lmb_alloc_nid_region(struct lmb_property *mp, - u64 (*nid_range)(u64, u64, int *), - u64 size, u64 align, int nid) -{ - u64 start, end; - - start = mp->base; - end = start + mp->size; - - start = lmb_align_up(start, align); - while (start < end) { - u64 this_end; - int this_nid; - - this_end = nid_range(start, end, &this_nid); - if (this_nid == nid) { - u64 ret = lmb_alloc_nid_unreserved(start, this_end, - size, align); - if (ret != ~(u64)0) - return ret; - } - start = this_end; - } - - return ~(u64)0; -} - -u64 __init lmb_alloc_nid(u64 size, u64 align, int nid, - u64 (*nid_range)(u64 start, u64 end, int *nid)) -{ - struct lmb_region *mem = &lmb.memory; - int i; - - BUG_ON(0 == size); - - size = lmb_align_up(size, align); - - for (i = 0; i < mem->cnt; i++) { - u64 ret = lmb_alloc_nid_region(&mem->region[i], - nid_range, - size, align, nid); - if (ret != ~(u64)0) - return ret; - } - - return lmb_alloc(size, align); -} - -u64 __init lmb_alloc(u64 size, u64 align) -{ - return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE); -} - -u64 __init lmb_alloc_base(u64 size, u64 align, u64 max_addr) -{ - u64 alloc; - - alloc = __lmb_alloc_base(size, align, max_addr); - - if (alloc == 0) - panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n", - (unsigned long long) size, (unsigned long long) max_addr); - - return alloc; -} - -u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr) -{ - long i, j; - u64 base = 0; - u64 res_base; - - BUG_ON(0 == size); - - size = lmb_align_up(size, align); - - /* On some platforms, make sure we allocate lowmem */ - /* Note that LMB_REAL_LIMIT may be LMB_ALLOC_ANYWHERE */ - if (max_addr == LMB_ALLOC_ANYWHERE) - max_addr = LMB_REAL_LIMIT; - - for (i = lmb.memory.cnt - 1; i >= 0; i--) { - u64 lmbbase = lmb.memory.region[i].base; - u64 lmbsize = lmb.memory.region[i].size; - - if (lmbsize < size) - continue; - if (max_addr == LMB_ALLOC_ANYWHERE) - base = lmb_align_down(lmbbase + lmbsize - size, align); - else if (lmbbase < max_addr) { - base = min(lmbbase + lmbsize, max_addr); - base = lmb_align_down(base - size, align); - } else - continue; - - while (base && lmbbase <= base) { - j = lmb_overlaps_region(&lmb.reserved, base, size); - if (j < 0) { - /* this area isn't reserved, take it */ - if (lmb_add_region(&lmb.reserved, base, size) < 0) - return 0; - return base; - } - res_base = lmb.reserved.region[j].base; - if (res_base < size) - break; - base = lmb_align_down(res_base - size, align); - } - } - return 0; -} - -/* You must call lmb_analyze() before this. */ -u64 __init lmb_phys_mem_size(void) -{ - return lmb.memory.size; -} - -u64 lmb_end_of_DRAM(void) -{ - int idx = lmb.memory.cnt - 1; - - return (lmb.memory.region[idx].base + lmb.memory.region[idx].size); -} - -/* You must call lmb_analyze() after this. 
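lmb_align_down()/lmb_align_up() above rely on the alignment being a power of two, so ~(size - 1) is a contiguous high mask. A worked demonstration -- the assert guards the power-of-two precondition the lmb code left implicit, and align_up is expressed via align_down, which is equivalent to the kernel's direct (addr + size - 1) & ~(size - 1):

#include <assert.h>
#include <stdio.h>

typedef unsigned long long u64;

static u64 align_down(u64 addr, u64 align)
{
	assert(align && (align & (align - 1)) == 0);  /* power of two only */
	return addr & ~(align - 1);
}

static u64 align_up(u64 addr, u64 align)
{
	return align_down(addr + align - 1, align);   /* round up via round down */
}

int main(void)
{
	printf("down(0x12345, 0x1000) = %#llx\n", align_down(0x12345, 0x1000)); /* 0x12000 */
	printf("up  (0x12345, 0x1000) = %#llx\n", align_up(0x12345, 0x1000));   /* 0x13000 */
	printf("up  (0x12000, 0x1000) = %#llx\n", align_up(0x12000, 0x1000));   /* unchanged */
	return 0;
}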
*/ -void __init lmb_enforce_memory_limit(u64 memory_limit) -{ - unsigned long i; - u64 limit; - struct lmb_property *p; - - if (!memory_limit) - return; - - /* Truncate the lmb regions to satisfy the memory limit. */ - limit = memory_limit; - for (i = 0; i < lmb.memory.cnt; i++) { - if (limit > lmb.memory.region[i].size) { - limit -= lmb.memory.region[i].size; - continue; - } - - lmb.memory.region[i].size = limit; - lmb.memory.cnt = i + 1; - break; - } - - if (lmb.memory.region[0].size < lmb.rmo_size) - lmb.rmo_size = lmb.memory.region[0].size; - - memory_limit = lmb_end_of_DRAM(); - - /* And truncate any reserves above the limit also. */ - for (i = 0; i < lmb.reserved.cnt; i++) { - p = &lmb.reserved.region[i]; - - if (p->base > memory_limit) - p->size = 0; - else if ((p->base + p->size) > memory_limit) - p->size = memory_limit - p->base; - - if (p->size == 0) { - lmb_remove_region(&lmb.reserved, i); - i--; - } - } -} - -int __init lmb_is_reserved(u64 addr) -{ - int i; - - for (i = 0; i < lmb.reserved.cnt; i++) { - u64 upper = lmb.reserved.region[i].base + - lmb.reserved.region[i].size - 1; - if ((addr >= lmb.reserved.region[i].base) && (addr <= upper)) - return 1; - } - return 0; -} - -int lmb_is_region_reserved(u64 base, u64 size) -{ - return lmb_overlaps_region(&lmb.reserved, base, size); -} - -/* - * Given a <base, len>, find which memory regions belong to this range. - * Adjust the request and return a contiguous chunk. - */ -int lmb_find(struct lmb_property *res) -{ - int i; - u64 rstart, rend; - - rstart = res->base; - rend = rstart + res->size - 1; - - for (i = 0; i < lmb.memory.cnt; i++) { - u64 start = lmb.memory.region[i].base; - u64 end = start + lmb.memory.region[i].size - 1; - - if (start > rend) - return -1; - - if ((end >= rstart) && (start < rend)) { - /* adjust the request */ - if (rstart < start) - rstart = start; - if (rend > end) - rend = end; - res->base = rstart; - res->size = rend - rstart + 1; - return 0; - } - } - return -1; -} diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 92cdd9936e3d..05da38bcc298 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -28,7 +28,6 @@ #include <linux/slab.h> #include <linux/notifier.h> #include <linux/cpu.h> -#include <linux/gfp.h> #include <linux/string.h> #include <linux/bitops.h> #include <linux/rcupdate.h> @@ -364,7 +363,7 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root, unsigned int height, shift; struct radix_tree_node *node, **slot; - node = rcu_dereference(root->rnode); + node = rcu_dereference_raw(root->rnode); if (node == NULL) return NULL; @@ -384,7 +383,7 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root, do { slot = (struct radix_tree_node **) (node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK)); - node = rcu_dereference(*slot); + node = rcu_dereference_raw(*slot); if (node == NULL) return NULL; @@ -556,6 +555,10 @@ EXPORT_SYMBOL(radix_tree_tag_clear); * * 0: tag not present or not set * 1: tag set + * + * Note that the return value of this function may not be relied on, even if + * the RCU lock is held, unless tag modification and node deletion are excluded + * from concurrency. 
*/ int radix_tree_tag_get(struct radix_tree_root *root, unsigned long index, unsigned int tag) @@ -568,7 +571,7 @@ int radix_tree_tag_get(struct radix_tree_root *root, if (!root_tag_get(root, tag)) return 0; - node = rcu_dereference(root->rnode); + node = rcu_dereference_raw(root->rnode); if (node == NULL) return 0; @@ -596,13 +599,9 @@ int radix_tree_tag_get(struct radix_tree_root *root, */ if (!tag_get(node, tag, offset)) saw_unset_tag = 1; - if (height == 1) { - int ret = tag_get(node, tag, offset); - - BUG_ON(ret && saw_unset_tag); - return !!ret; - } - node = rcu_dereference(node->slots[offset]); + if (height == 1) + return !!tag_get(node, tag, offset); + node = rcu_dereference_raw(node->slots[offset]); shift -= RADIX_TREE_MAP_SHIFT; height--; } @@ -657,7 +656,7 @@ EXPORT_SYMBOL(radix_tree_next_hole); * * Returns: the index of the hole if found, otherwise returns an index * outside of the set specified (in which case 'index - return >= max_scan' - * will be true). In rare cases of wrap-around, LONG_MAX will be returned. + * will be true). In rare cases of wrap-around, ULONG_MAX will be returned. * * radix_tree_next_hole may be called under rcu_read_lock. However, like * radix_tree_gang_lookup, this will not atomically search a snapshot of @@ -675,7 +674,7 @@ unsigned long radix_tree_prev_hole(struct radix_tree_root *root, if (!radix_tree_lookup(root, index)) break; index--; - if (index == LONG_MAX) + if (index == ULONG_MAX) break; } @@ -711,7 +710,7 @@ __lookup(struct radix_tree_node *slot, void ***results, unsigned long index, } shift -= RADIX_TREE_MAP_SHIFT; - slot = rcu_dereference(slot->slots[i]); + slot = rcu_dereference_raw(slot->slots[i]); if (slot == NULL) goto out; } @@ -758,7 +757,7 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results, unsigned long cur_index = first_index; unsigned int ret; - node = rcu_dereference(root->rnode); + node = rcu_dereference_raw(root->rnode); if (!node) return 0; @@ -787,7 +786,7 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results, slot = *(((void ***)results)[ret + i]); if (!slot) continue; - results[ret + nr_found] = rcu_dereference(slot); + results[ret + nr_found] = rcu_dereference_raw(slot); nr_found++; } ret += nr_found; @@ -826,7 +825,7 @@ radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results, unsigned long cur_index = first_index; unsigned int ret; - node = rcu_dereference(root->rnode); + node = rcu_dereference_raw(root->rnode); if (!node) return 0; @@ -915,7 +914,7 @@ __lookup_tag(struct radix_tree_node *slot, void ***results, unsigned long index, } } shift -= RADIX_TREE_MAP_SHIFT; - slot = rcu_dereference(slot->slots[i]); + slot = rcu_dereference_raw(slot->slots[i]); if (slot == NULL) break; } @@ -951,7 +950,7 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, if (!root_tag_get(root, tag)) return 0; - node = rcu_dereference(root->rnode); + node = rcu_dereference_raw(root->rnode); if (!node) return 0; @@ -980,7 +979,7 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, slot = *(((void ***)results)[ret + i]); if (!slot) continue; - results[ret + nr_found] = rcu_dereference(slot); + results[ret + nr_found] = rcu_dereference_raw(slot); nr_found++; } ret += nr_found; @@ -1020,7 +1019,7 @@ radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results, if (!root_tag_get(root, tag)) return 0; - node = rcu_dereference(root->rnode); + node = rcu_dereference_raw(root->rnode); if (!node) return 0; diff --git a/lib/random32.c 
b/lib/random32.c index 217d5c4b666d..870dc3fc0f0f 100644 --- a/lib/random32.c +++ b/lib/random32.c @@ -39,13 +39,16 @@ #include <linux/jiffies.h> #include <linux/random.h> -struct rnd_state { - u32 s1, s2, s3; -}; - static DEFINE_PER_CPU(struct rnd_state, net_rand_state); -static u32 __random32(struct rnd_state *state) +/** + * prandom32 - seeded pseudo-random number generator. + * @state: pointer to state structure holding seeded state. + * + * This is used for pseudo-randomness with no outside seeding. + * For more random results, use random32(). + */ +u32 prandom32(struct rnd_state *state) { #define TAUSWORTHE(s,a,b,c,d) ((s&c)<<d) ^ (((s <<a) ^ s)>>b) @@ -55,14 +58,7 @@ static u32 __random32(struct rnd_state *state) return (state->s1 ^ state->s2 ^ state->s3); } - -/* - * Handle minimum values for seeds - */ -static inline u32 __seed(u32 x, u32 m) -{ - return (x < m) ? x + m : x; -} +EXPORT_SYMBOL(prandom32); /** * random32 - pseudo random number generator @@ -75,7 +71,7 @@ u32 random32(void) { unsigned long r; struct rnd_state *state = &get_cpu_var(net_rand_state); - r = __random32(state); + r = prandom32(state); put_cpu_var(state); return r; } @@ -118,12 +114,12 @@ static int __init random32_init(void) state->s3 = __seed(LCG(state->s2), 15); /* "warm it up" */ - __random32(state); - __random32(state); - __random32(state); - __random32(state); - __random32(state); - __random32(state); + prandom32(state); + prandom32(state); + prandom32(state); + prandom32(state); + prandom32(state); + prandom32(state); } return 0; } @@ -147,7 +143,7 @@ static int __init random32_reseed(void) state->s3 = __seed(seeds[2], 15); /* mix it in */ - __random32(state); + prandom32(state); } return 0; } diff --git a/lib/ratelimit.c b/lib/ratelimit.c index 09f5ce1810dc..027a03f4c56d 100644 --- a/lib/ratelimit.c +++ b/lib/ratelimit.c @@ -16,9 +16,14 @@ /* * __ratelimit - rate limiting * @rs: ratelimit_state data + * @func: name of calling function * - * This enforces a rate limit: not more than @rs->ratelimit_burst callbacks - * in every @rs->ratelimit_jiffies + * This enforces a rate limit: not more than @rs->burst callbacks + * in every @rs->interval + * + * RETURNS: + * 0 means callbacks will be suppressed. + * 1 means go ahead and do it. 
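prandom32() above is the three-component combined Tausworthe generator: each component is a linear feedback shift step and the outputs are XORed together. A userspace rendering with the shift constants from the actual lib/random32.c (only the macro is visible in the hunk above), plus a minimum-seed fixup in the spirit of the kernel's __seed() helper -- the floors below are conservative illustrative choices, since each component degenerates for very small seeds:

#include <stdio.h>
#include <stdint.h>

struct rnd_state { uint32_t s1, s2, s3; };

#define TAUSWORTHE(s, a, b, c, d) (((s & c) << d) ^ (((s << a) ^ s) >> b))

static uint32_t seed_min(uint32_t x, uint32_t m)
{
	return (x < m) ? x + m : x;   /* bump degenerate small seeds */
}

static uint32_t prandom32(struct rnd_state *state)
{
	state->s1 = TAUSWORTHE(state->s1, 13, 19, 4294967294UL, 12);
	state->s2 = TAUSWORTHE(state->s2, 2, 25, 4294967288UL, 4);
	state->s3 = TAUSWORTHE(state->s3, 3, 11, 4294967280UL, 17);
	return state->s1 ^ state->s2 ^ state->s3;
}

int main(void)
{
	struct rnd_state st = {
		seed_min(12345, 2), seed_min(12345, 8), seed_min(12345, 16)
	};
	for (int i = 0; i < 6; i++)   /* "warm it up", as the init code does */
		prandom32(&st);
	for (int i = 0; i < 4; i++)
		printf("%08x\n", prandom32(&st));
	return 0;
}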
*/ int ___ratelimit(struct ratelimit_state *rs, const char *func) { @@ -35,7 +40,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func) * the entity that is holding the lock already: */ if (!spin_trylock_irqsave(&rs->lock, flags)) - return 1; + return 0; if (!rs->begin) rs->begin = jiffies; diff --git a/lib/rbtree.c b/lib/rbtree.c index e2aa3be29858..4693f79195d3 100644 --- a/lib/rbtree.c +++ b/lib/rbtree.c @@ -283,6 +283,74 @@ void rb_erase(struct rb_node *node, struct rb_root *root) } EXPORT_SYMBOL(rb_erase); +static void rb_augment_path(struct rb_node *node, rb_augment_f func, void *data) +{ + struct rb_node *parent; + +up: + func(node, data); + parent = rb_parent(node); + if (!parent) + return; + + if (node == parent->rb_left && parent->rb_right) + func(parent->rb_right, data); + else if (parent->rb_left) + func(parent->rb_left, data); + + node = parent; + goto up; +} + +/* + * after inserting @node into the tree, update the tree to account for + * both the new entry and any damage done by rebalance + */ +void rb_augment_insert(struct rb_node *node, rb_augment_f func, void *data) +{ + if (node->rb_left) + node = node->rb_left; + else if (node->rb_right) + node = node->rb_right; + + rb_augment_path(node, func, data); +} + +/* + * before removing the node, find the deepest node on the rebalance path + * that will still be there after @node gets removed + */ +struct rb_node *rb_augment_erase_begin(struct rb_node *node) +{ + struct rb_node *deepest; + + if (!node->rb_right && !node->rb_left) + deepest = rb_parent(node); + else if (!node->rb_right) + deepest = node->rb_left; + else if (!node->rb_left) + deepest = node->rb_right; + else { + deepest = rb_next(node); + if (deepest->rb_right) + deepest = deepest->rb_right; + else if (rb_parent(deepest) != node) + deepest = rb_parent(deepest); + } + + return deepest; +} + +/* + * after removal, update the tree to account for the removed entry + * and any rebalance damage. + */ +void rb_augment_erase_end(struct rb_node *node, rb_augment_f func, void *data) +{ + if (node) + rb_augment_path(node, func, data); +} + /* * This function returns the first node (in sort order) of the tree. 
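Besides documenting the return values, the ___ratelimit() hunk above inverts the trylock fallback: if the state lock is contended the call now reports 0 (suppress) rather than letting the caller print and risk recursing on the same lock. The core burst-per-interval accounting looks like the following single-threaded sketch, with seconds standing in for jiffies:

#include <stdio.h>
#include <time.h>

struct ratelimit_state {
	time_t begin;
	int interval;   /* window length, seconds */
	int burst;      /* callbacks allowed per window */
	int printed;
	int missed;
};

/* 1: go ahead and do it; 0: suppress the callback */
static int ratelimit(struct ratelimit_state *rs, time_t now)
{
	if (!rs->interval)
		return 1;
	if (!rs->begin)
		rs->begin = now;
	if (now - rs->begin >= rs->interval) {   /* window expired: reset */
		if (rs->missed)
			printf("%d callbacks suppressed\n", rs->missed);
		rs->begin = 0;
		rs->printed = 0;
		rs->missed = 0;
	}
	if (rs->printed < rs->burst) {
		rs->printed++;
		return 1;
	}
	rs->missed++;
	return 0;
}

int main(void)
{
	struct ratelimit_state rs = { 0, 5, 3, 0, 0 };
	for (int t = 0; t < 12; t++)
		printf("t=%2d -> %s\n", t,
		       ratelimit(&rs, (time_t)(100 + t)) ? "print" : "drop");
	return 0;
}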
*/ diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c index ccf95bff7984..ffc9fc7f3b05 100644 --- a/lib/rwsem-spinlock.c +++ b/lib/rwsem-spinlock.c @@ -143,13 +143,14 @@ void __sched __down_read(struct rw_semaphore *sem) { struct rwsem_waiter waiter; struct task_struct *tsk; + unsigned long flags; - spin_lock_irq(&sem->wait_lock); + spin_lock_irqsave(&sem->wait_lock, flags); if (sem->activity >= 0 && list_empty(&sem->wait_list)) { /* granted */ sem->activity++; - spin_unlock_irq(&sem->wait_lock); + spin_unlock_irqrestore(&sem->wait_lock, flags); goto out; } @@ -164,7 +165,7 @@ void __sched __down_read(struct rw_semaphore *sem) list_add_tail(&waiter.list, &sem->wait_list); /* we don't need to touch the semaphore struct anymore */ - spin_unlock_irq(&sem->wait_lock); + spin_unlock_irqrestore(&sem->wait_lock, flags); /* wait to be given the lock */ for (;;) { @@ -209,13 +210,14 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass) { struct rwsem_waiter waiter; struct task_struct *tsk; + unsigned long flags; - spin_lock_irq(&sem->wait_lock); + spin_lock_irqsave(&sem->wait_lock, flags); if (sem->activity == 0 && list_empty(&sem->wait_list)) { /* granted */ sem->activity = -1; - spin_unlock_irq(&sem->wait_lock); + spin_unlock_irqrestore(&sem->wait_lock, flags); goto out; } @@ -230,7 +232,7 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass) list_add_tail(&waiter.list, &sem->wait_list); /* we don't need to touch the semaphore struct anymore */ - spin_unlock_irq(&sem->wait_lock); + spin_unlock_irqrestore(&sem->wait_lock, flags); /* wait to be given the lock */ for (;;) { diff --git a/lib/rwsem.c b/lib/rwsem.c index 3e3365e5665e..ceba8e28807a 100644 --- a/lib/rwsem.c +++ b/lib/rwsem.c @@ -136,9 +136,10 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading) out: return sem; - /* undo the change to count, but check for a transition 1->0 */ + /* undo the change to the active count, but check for a transition + * 1->0 */ undo: - if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem) != 0) + if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem) & RWSEM_ACTIVE_MASK) goto out; goto try_again; } diff --git a/lib/scatterlist.c b/lib/scatterlist.c index 0d475d8167bf..9afa25b52a83 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c @@ -7,6 +7,7 @@ * Version 2. See the file COPYING for more details. 
*/ #include <linux/module.h> +#include <linux/slab.h> #include <linux/scatterlist.h> #include <linux/highmem.h> diff --git a/lib/show_mem.c b/lib/show_mem.c index 238e72a18ce1..fdc77c82f922 100644 --- a/lib/show_mem.c +++ b/lib/show_mem.c @@ -15,7 +15,7 @@ void show_mem(void) unsigned long total = 0, reserved = 0, shared = 0, nonshared = 0, highmem = 0; - printk(KERN_INFO "Mem-Info:\n"); + printk("Mem-Info:\n"); show_free_areas(); for_each_online_pgdat(pgdat) { @@ -49,15 +49,15 @@ void show_mem(void) pgdat_resize_unlock(pgdat, &flags); } - printk(KERN_INFO "%lu pages RAM\n", total); + printk("%lu pages RAM\n", total); #ifdef CONFIG_HIGHMEM - printk(KERN_INFO "%lu pages HighMem\n", highmem); + printk("%lu pages HighMem\n", highmem); #endif - printk(KERN_INFO "%lu pages reserved\n", reserved); - printk(KERN_INFO "%lu pages shared\n", shared); - printk(KERN_INFO "%lu pages non-shared\n", nonshared); + printk("%lu pages reserved\n", reserved); + printk("%lu pages shared\n", shared); + printk("%lu pages non-shared\n", nonshared); #ifdef CONFIG_QUICKLIST - printk(KERN_INFO "%lu pages in pagetable cache\n", + printk("%lu pages in pagetable cache\n", quicklist_total_size()); #endif } diff --git a/lib/string.c b/lib/string.c index a1cdcfcc42d0..f71bead1be3e 100644 --- a/lib/string.c +++ b/lib/string.c @@ -36,25 +36,21 @@ int strnicmp(const char *s1, const char *s2, size_t len) /* Yes, Virginia, it had better be unsigned */ unsigned char c1, c2; - c1 = c2 = 0; - if (len) { - do { - c1 = *s1; - c2 = *s2; - s1++; - s2++; - if (!c1) - break; - if (!c2) - break; - if (c1 == c2) - continue; - c1 = tolower(c1); - c2 = tolower(c2); - if (c1 != c2) - break; - } while (--len); - } + if (!len) + return 0; + + do { + c1 = *s1++; + c2 = *s2++; + if (!c1 || !c2) + break; + if (c1 == c2) + continue; + c1 = tolower(c1); + c2 = tolower(c2); + if (c1 != c2) + break; + } while (--len); return (int)c1 - (int)c2; } EXPORT_SYMBOL(strnicmp); @@ -693,13 +689,13 @@ EXPORT_SYMBOL(strstr); */ char *strnstr(const char *s1, const char *s2, size_t len) { - size_t l1 = len, l2; + size_t l2; l2 = strlen(s2); if (!l2) return (char *)s1; - while (l1 >= l2) { - l1--; + while (len >= l2) { + len--; if (!memcmp(s1, s2, l2)) return (char *)s1; s1++; diff --git a/lib/swiotlb.c b/lib/swiotlb.c index 437eedb5a53b..a009055140ec 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c @@ -28,6 +28,7 @@ #include <linux/types.h> #include <linux/ctype.h> #include <linux/highmem.h> +#include <linux/gfp.h> #include <asm/io.h> #include <asm/dma.h> @@ -756,37 +757,6 @@ swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr, EXPORT_SYMBOL(swiotlb_sync_single_for_device); /* - * Same as above, but for a sub-range of the mapping. 
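The reworked strnicmp() above drops the c1 = c2 = 0 pre-initialization and the outer if by returning early on len == 0; the loop still stops at the first NUL or the first pair that differs after tolower(). An equivalent userspace version with a few probes:

#include <ctype.h>
#include <stdio.h>
#include <stddef.h>

/* case-insensitive compare of at most len bytes */
static int strnicmp(const char *s1, const char *s2, size_t len)
{
	unsigned char c1, c2;

	if (!len)
		return 0;
	do {
		c1 = *s1++;
		c2 = *s2++;
		if (!c1 || !c2)          /* hit the end of either string */
			break;
		if (c1 == c2)            /* fast path: bytes match exactly */
			continue;
		c1 = tolower(c1);
		c2 = tolower(c2);
		if (c1 != c2)
			break;
	} while (--len);
	return (int)c1 - (int)c2;
}

int main(void)
{
	printf("%d\n", strnicmp("Kernel", "kernel", 6));   /* 0 */
	printf("%d\n", strnicmp("Kern", "kernel", 4));     /* 0: only 4 bytes compared */
	printf("%d\n", strnicmp("abc", "abd", 3) < 0);     /* 1 */
	return 0;
}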
- */ -static void -swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr, - unsigned long offset, size_t size, - int dir, int target) -{ - swiotlb_sync_single(hwdev, dev_addr + offset, size, dir, target); -} - -void -swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr, - unsigned long offset, size_t size, - enum dma_data_direction dir) -{ - swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir, - SYNC_FOR_CPU); -} -EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu); - -void -swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr, - unsigned long offset, size_t size, - enum dma_data_direction dir) -{ - swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir, - SYNC_FOR_DEVICE); -} -EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device); - -/* * Map a set of buffers described by scatterlist in streaming mode for DMA. * This is the scatter-gather version of the above swiotlb_map_page * interface. Here the scatter gather list elements are each tagged with the diff --git a/lib/textsearch.c b/lib/textsearch.c index 9fbcb44c554f..d608331b3e47 100644 --- a/lib/textsearch.c +++ b/lib/textsearch.c @@ -103,6 +103,7 @@ #include <linux/rcupdate.h> #include <linux/err.h> #include <linux/textsearch.h> +#include <linux/slab.h> static LIST_HEAD(ts_ops); static DEFINE_SPINLOCK(ts_mod_lock); diff --git a/lib/uuid.c b/lib/uuid.c new file mode 100644 index 000000000000..8fadd7cef46c --- /dev/null +++ b/lib/uuid.c @@ -0,0 +1,53 @@ +/* + * Unified UUID/GUID definition + * + * Copyright (C) 2009, Intel Corp. + * Huang Ying <ying.huang@intel.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation; + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/uuid.h> +#include <linux/random.h> + +static void __uuid_gen_common(__u8 b[16]) +{ + int i; + u32 r; + + for (i = 0; i < 4; i++) { + r = random32(); + memcpy(b + i * 4, &r, 4); + } + /* reversion 0b10 */ + b[8] = (b[8] & 0x3F) | 0x80; +} + +void uuid_le_gen(uuid_le *lu) +{ + __uuid_gen_common(lu->b); + /* version 4 : random generation */ + lu->b[7] = (lu->b[7] & 0x0F) | 0x40; +} +EXPORT_SYMBOL_GPL(uuid_le_gen); + +void uuid_be_gen(uuid_be *bu) +{ + __uuid_gen_common(bu->b); + /* version 4 : random generation */ + bu->b[6] = (bu->b[6] & 0x0F) | 0x40; +} +EXPORT_SYMBOL_GPL(uuid_be_gen); diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 3b8aeec4e327..b8a2f549ab0e 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -118,6 +118,7 @@ long long simple_strtoll(const char *cp, char **endp, unsigned int base) return simple_strtoull(cp, endp, base); } +EXPORT_SYMBOL(simple_strtoll); /** * strict_strtoul - convert a string to an unsigned long strictly @@ -266,7 +267,8 @@ int strict_strtoll(const char *cp, unsigned int base, long long *res) } EXPORT_SYMBOL(strict_strtoll); -static int skip_atoi(const char **s) +static noinline_for_stack +int skip_atoi(const char **s) { int i = 0; @@ -286,7 +288,8 @@ static int skip_atoi(const char **s) /* Formats correctly any integer in [0,99999]. * Outputs from one to five digits depending on input. * On i386 gcc 4.1.2 -O2: ~250 bytes of code. */ -static char *put_dec_trunc(char *buf, unsigned q) +static noinline_for_stack +char *put_dec_trunc(char *buf, unsigned q) { unsigned d3, d2, d1, d0; d1 = (q>>4) & 0xf; @@ -323,7 +326,8 @@ static char *put_dec_trunc(char *buf, unsigned q) return buf; } /* Same with if's removed. 
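The new uuid generators above fill 16 random bytes, then pin the RFC 4122 variant bits (binary 10 in the top of byte 8) and the version nibble (4 = random); for the little-endian GUID layout the version lands in byte 7 instead of byte 6, which is the only difference between uuid_le_gen() and uuid_be_gen(). A userspace sketch of the big-endian case, with rand() standing in for the kernel's random32():

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

static void uuid_v4_gen(unsigned char b[16])
{
	for (int i = 0; i < 16; i++)
		b[i] = rand() & 0xff;    /* kernel pulls 4 bytes at a time */
	b[8] = (b[8] & 0x3F) | 0x80;     /* variant: binary 10xxxxxx */
	b[6] = (b[6] & 0x0F) | 0x40;     /* version 4: random generation */
}

int main(void)
{
	unsigned char b[16];

	srand((unsigned)time(NULL));
	uuid_v4_gen(b);
	printf("%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-"
	       "%02x%02x%02x%02x%02x%02x\n",
	       b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7],
	       b[8], b[9], b[10], b[11], b[12], b[13], b[14], b[15]);
	return 0;
}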
Always emits five digits */ -static char *put_dec_full(char *buf, unsigned q) +static noinline_for_stack +char *put_dec_full(char *buf, unsigned q) { /* BTW, if q is in [0,9999], 8-bit ints will be enough, */ /* but anyway, gcc produces better code with full-sized ints */ @@ -365,7 +369,8 @@ static char *put_dec_full(char *buf, unsigned q) return buf; } /* No inlining helps gcc to use registers better */ -static noinline char *put_dec(char *buf, unsigned long long num) +static noinline_for_stack +char *put_dec(char *buf, unsigned long long num) { while (1) { unsigned rem; @@ -381,8 +386,8 @@ static noinline char *put_dec(char *buf, unsigned long long num) #define PLUS 4 /* show plus */ #define SPACE 8 /* space if plus */ #define LEFT 16 /* left justified */ -#define SMALL 32 /* Must be 32 == 0x20 */ -#define SPECIAL 64 /* 0x */ +#define SMALL 32 /* use lowercase in hex (must be 32 == 0x20) */ +#define SPECIAL 64 /* prefix hex with "0x", octal with "0" */ enum format_type { FORMAT_TYPE_NONE, /* Just a string part */ @@ -408,16 +413,17 @@ enum format_type { }; struct printf_spec { - enum format_type type; - int flags; /* flags to number() */ - int field_width; /* width of output field */ - int base; - int precision; /* # of digits/chars */ - int qualifier; + u8 type; /* format_type enum */ + u8 flags; /* flags to number() */ + u8 base; /* number base, 8, 10 or 16 only */ + u8 qualifier; /* number qualifier, one of 'hHlLtzZ' */ + s16 field_width; /* width of output field */ + s16 precision; /* # of digits/chars */ }; -static char *number(char *buf, char *end, unsigned long long num, - struct printf_spec spec) +static noinline_for_stack +char *number(char *buf, char *end, unsigned long long num, + struct printf_spec spec) { /* we are called with base 8, 10 or 16, only, thus don't need "G..." 
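The printf_spec shrink above (ints replaced by u8/s16) matters because the struct is passed around by value on every conversion; smaller fields mean less stack traffic, which is also why the helper functions gain noinline_for_stack in the same patch. A quick sizeof comparison under typical ABI assumptions (exact sizes vary with padding rules):

#include <stdio.h>
#include <stdint.h>

struct spec_old {            /* every field a full int */
	int type;
	int flags;
	int field_width;
	int base;
	int precision;
	int qualifier;
};

struct spec_new {            /* packs into 8 bytes on most ABIs */
	uint8_t type;
	uint8_t flags;
	uint8_t base;
	uint8_t qualifier;
	int16_t field_width;
	int16_t precision;
};

int main(void)
{
	printf("old: %zu bytes, new: %zu bytes\n",
	       sizeof(struct spec_old), sizeof(struct spec_new));  /* e.g. 24 vs 8 */
	return 0;
}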
*/ static const char digits[16] = "0123456789ABCDEF"; /* "GHIJKLMNOPQRSTUVWXYZ"; */ @@ -536,7 +542,8 @@ static char *number(char *buf, char *end, unsigned long long num, return buf; } -static char *string(char *buf, char *end, const char *s, struct printf_spec spec) +static noinline_for_stack +char *string(char *buf, char *end, const char *s, struct printf_spec spec) { int len, i; @@ -566,8 +573,9 @@ static char *string(char *buf, char *end, const char *s, struct printf_spec spec return buf; } -static char *symbol_string(char *buf, char *end, void *ptr, - struct printf_spec spec, char ext) +static noinline_for_stack +char *symbol_string(char *buf, char *end, void *ptr, + struct printf_spec spec, char ext) { unsigned long value = (unsigned long) ptr; #ifdef CONFIG_KALLSYMS @@ -587,8 +595,9 @@ static char *symbol_string(char *buf, char *end, void *ptr, #endif } -static char *resource_string(char *buf, char *end, struct resource *res, - struct printf_spec spec, const char *fmt) +static noinline_for_stack +char *resource_string(char *buf, char *end, struct resource *res, + struct printf_spec spec, const char *fmt) { #ifndef IO_RSRC_PRINTK_SIZE #define IO_RSRC_PRINTK_SIZE 6 @@ -597,22 +606,35 @@ static char *resource_string(char *buf, char *end, struct resource *res, #ifndef MEM_RSRC_PRINTK_SIZE #define MEM_RSRC_PRINTK_SIZE 10 #endif - struct printf_spec hex_spec = { + static const struct printf_spec io_spec = { .base = 16, + .field_width = IO_RSRC_PRINTK_SIZE, .precision = -1, .flags = SPECIAL | SMALL | ZEROPAD, }; - struct printf_spec dec_spec = { + static const struct printf_spec mem_spec = { + .base = 16, + .field_width = MEM_RSRC_PRINTK_SIZE, + .precision = -1, + .flags = SPECIAL | SMALL | ZEROPAD, + }; + static const struct printf_spec bus_spec = { + .base = 16, + .field_width = 2, + .precision = -1, + .flags = SMALL | ZEROPAD, + }; + static const struct printf_spec dec_spec = { .base = 10, .precision = -1, .flags = 0, }; - struct printf_spec str_spec = { + static const struct printf_spec str_spec = { .field_width = -1, .precision = 10, .flags = LEFT, }; - struct printf_spec flag_spec = { + static const struct printf_spec flag_spec = { .base = 16, .precision = -1, .flags = SPECIAL | SMALL, @@ -622,47 +644,48 @@ static char *resource_string(char *buf, char *end, struct resource *res, * 64-bit res (sizeof==8): 20 chars in dec, 18 in hex ("0x" + 16) */ #define RSRC_BUF_SIZE ((2 * sizeof(resource_size_t)) + 4) #define FLAG_BUF_SIZE (2 * sizeof(res->flags)) -#define DECODED_BUF_SIZE sizeof("[mem - 64bit pref disabled]") +#define DECODED_BUF_SIZE sizeof("[mem - 64bit pref window disabled]") #define RAW_BUF_SIZE sizeof("[mem - flags 0x]") char sym[max(2*RSRC_BUF_SIZE + DECODED_BUF_SIZE, 2*RSRC_BUF_SIZE + FLAG_BUF_SIZE + RAW_BUF_SIZE)]; char *p = sym, *pend = sym + sizeof(sym); - int size = -1, addr = 0; int decode = (fmt[0] == 'R') ? 
1 : 0; - - if (res->flags & IORESOURCE_IO) { - size = IO_RSRC_PRINTK_SIZE; - addr = 1; - } else if (res->flags & IORESOURCE_MEM) { - size = MEM_RSRC_PRINTK_SIZE; - addr = 1; - } + const struct printf_spec *specp; *p++ = '['; - if (res->flags & IORESOURCE_IO) + if (res->flags & IORESOURCE_IO) { p = string(p, pend, "io ", str_spec); - else if (res->flags & IORESOURCE_MEM) + specp = &io_spec; + } else if (res->flags & IORESOURCE_MEM) { p = string(p, pend, "mem ", str_spec); - else if (res->flags & IORESOURCE_IRQ) + specp = &mem_spec; + } else if (res->flags & IORESOURCE_IRQ) { p = string(p, pend, "irq ", str_spec); - else if (res->flags & IORESOURCE_DMA) + specp = &dec_spec; + } else if (res->flags & IORESOURCE_DMA) { p = string(p, pend, "dma ", str_spec); - else { + specp = &dec_spec; + } else if (res->flags & IORESOURCE_BUS) { + p = string(p, pend, "bus ", str_spec); + specp = &bus_spec; + } else { p = string(p, pend, "??? ", str_spec); + specp = &mem_spec; decode = 0; } - hex_spec.field_width = size; - p = number(p, pend, res->start, addr ? hex_spec : dec_spec); + p = number(p, pend, res->start, *specp); if (res->start != res->end) { *p++ = '-'; - p = number(p, pend, res->end, addr ? hex_spec : dec_spec); + p = number(p, pend, res->end, *specp); } if (decode) { if (res->flags & IORESOURCE_MEM_64) p = string(p, pend, " 64bit", str_spec); if (res->flags & IORESOURCE_PREFETCH) p = string(p, pend, " pref", str_spec); + if (res->flags & IORESOURCE_WINDOW) + p = string(p, pend, " window", str_spec); if (res->flags & IORESOURCE_DISABLED) p = string(p, pend, " disabled", str_spec); } else { @@ -675,30 +698,63 @@ static char *resource_string(char *buf, char *end, struct resource *res, return string(buf, end, sym, spec); } -static char *mac_address_string(char *buf, char *end, u8 *addr, - struct printf_spec spec, const char *fmt) +static noinline_for_stack +char *mac_address_string(char *buf, char *end, u8 *addr, + struct printf_spec spec, const char *fmt) { char mac_addr[sizeof("xx:xx:xx:xx:xx:xx")]; char *p = mac_addr; int i; + char separator; + + if (fmt[1] == 'F') { /* FDDI canonical format */ + separator = '-'; + } else { + separator = ':'; + } for (i = 0; i < 6; i++) { p = pack_hex_byte(p, addr[i]); if (fmt[0] == 'M' && i != 5) - *p++ = ':'; + *p++ = separator; } *p = '\0'; return string(buf, end, mac_addr, spec); } -static char *ip4_string(char *p, const u8 *addr, bool leading_zeros) +static noinline_for_stack +char *ip4_string(char *p, const u8 *addr, const char *fmt) { int i; - + bool leading_zeros = (fmt[0] == 'i'); + int index; + int step; + + switch (fmt[2]) { + case 'h': +#ifdef __BIG_ENDIAN + index = 0; + step = 1; +#else + index = 3; + step = -1; +#endif + break; + case 'l': + index = 3; + step = -1; + break; + case 'n': + case 'b': + default: + index = 0; + step = 1; + break; + } for (i = 0; i < 4; i++) { char temp[3]; /* hold each IP quad in reverse order */ - int digits = put_dec_trunc(temp, addr[i]) - temp; + int digits = put_dec_trunc(temp, addr[index]) - temp; if (leading_zeros) { if (digits < 3) *p++ = '0'; @@ -710,13 +766,15 @@ static char *ip4_string(char *p, const u8 *addr, bool leading_zeros) *p++ = temp[digits]; if (i < 3) *p++ = '.'; + index += step; } *p = '\0'; return p; } -static char *ip6_compressed_string(char *p, const char *addr) +static noinline_for_stack +char *ip6_compressed_string(char *p, const char *addr) { int i, j, range; unsigned char zerolength[8]; @@ -789,14 +847,15 @@ static char *ip6_compressed_string(char *p, const char *addr) if (useIPv4) { if 
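ip4_string() above handles the new %pI4[hnbl] suffixes purely by choosing a starting index and step through the four address bytes: network/big-endian order walks 0..3, little-endian walks 3..0, and host order picks one of the two at compile time via __BIG_ENDIAN. The same traversal trick in isolation:

#include <stdio.h>

static void print_quad(const unsigned char *addr, char order)
{
	int index, step;

	if (order == 'l') {          /* little endian: highest byte last in memory */
		index = 3;
		step = -1;
	} else {                     /* 'n'/'b': bytes already in printing order */
		index = 0;
		step = 1;
	}
	for (int i = 0; i < 4; i++) {
		printf("%d%s", addr[index], i < 3 ? "." : "\n");
		index += step;
	}
}

int main(void)
{
	/* 192.168.0.1 as it sits in memory in network byte order */
	unsigned char addr[4] = { 192, 168, 0, 1 };

	print_quad(addr, 'n');       /* 192.168.0.1 */
	print_quad(addr, 'l');       /* 1.0.168.192: same bytes read as a LE u32 */
	return 0;
}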

-static char *ip6_compressed_string(char *p, const char *addr)
+static noinline_for_stack
+char *ip6_compressed_string(char *p, const char *addr)
 {
 	int i, j, range;
 	unsigned char zerolength[8];
@@ -789,14 +847,15 @@ static char *ip6_compressed_string(char *p, const char *addr)
 	if (useIPv4) {
 		if (needcolon)
 			*p++ = ':';
-		p = ip4_string(p, &in6.s6_addr[12], false);
+		p = ip4_string(p, &in6.s6_addr[12], "I4");
 	}

 	*p = '\0';

 	return p;
 }

-static char *ip6_string(char *p, const char *addr, const char *fmt)
+static noinline_for_stack
+char *ip6_string(char *p, const char *addr, const char *fmt)
 {
 	int i;

@@ -811,8 +870,9 @@ static char *ip6_string(char *p, const char *addr, const char *fmt)
 	return p;
 }

-static char *ip6_addr_string(char *buf, char *end, const u8 *addr,
-			     struct printf_spec spec, const char *fmt)
+static noinline_for_stack
+char *ip6_addr_string(char *buf, char *end, const u8 *addr,
+		      struct printf_spec spec, const char *fmt)
 {
 	char ip6_addr[sizeof("xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:255.255.255.255")];

@@ -824,18 +884,20 @@ static char *ip6_addr_string(char *buf, char *end, const u8 *addr,
 	return string(buf, end, ip6_addr, spec);
 }

-static char *ip4_addr_string(char *buf, char *end, const u8 *addr,
-			     struct printf_spec spec, const char *fmt)
+static noinline_for_stack
+char *ip4_addr_string(char *buf, char *end, const u8 *addr,
+		      struct printf_spec spec, const char *fmt)
 {
 	char ip4_addr[sizeof("255.255.255.255")];

-	ip4_string(ip4_addr, addr, fmt[0] == 'i');
+	ip4_string(ip4_addr, addr, fmt);

 	return string(buf, end, ip4_addr, spec);
 }

-static char *uuid_string(char *buf, char *end, const u8 *addr,
-			 struct printf_spec spec, const char *fmt)
+static noinline_for_stack
+char *uuid_string(char *buf, char *end, const u8 *addr,
+		  struct printf_spec spec, const char *fmt)
 {
 	char uuid[sizeof("xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx")];
 	char *p = uuid;
@@ -896,12 +958,15 @@ static char *uuid_string(char *buf, char *end, const u8 *addr,
  * - 'M' For a 6-byte MAC address, it prints the address in the
  *       usual colon-separated hex notation
  * - 'm' For a 6-byte MAC address, it prints the hex address without colons
+ * - 'MF' For a 6-byte MAC FDDI address, it prints the address
+ *       with a dash-separated hex notation
  * - 'I' [46] for IPv4/IPv6 addresses printed in the usual way
  *       IPv4 uses dot-separated decimal without leading 0's (1.2.3.4)
  *       IPv6 uses colon separated network-order 16 bit hex with leading 0's
  * - 'i' [46] for 'raw' IPv4/IPv6 addresses
  *       IPv6 omits the colons (01020304...0f)
  *       IPv4 uses dot-separated decimal with leading 0's (010.123.045.006)
+ * - '[Ii]4[hnbl]' IPv4 addresses in host, network, big or little endian order
  * - 'I6c' for IPv6 addresses printed as specified by
  *       http://tools.ietf.org/html/draft-ietf-6man-text-addr-representation-00
  * - 'U' For a 16 byte UUID/GUID, it prints the UUID/GUID in the form
@@ -920,8 +985,9 @@ static char *uuid_string(char *buf, char *end, const u8 *addr,
  * function pointers are really function descriptors, which contain a
  * pointer to the real address.
  */
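The comment block above now documents the extended %p forms added by this merge. A usage sketch in kernel context; show_ptr_extensions is a hypothetical helper, and the expected renderings in the comments follow directly from the documentation above:

#include <linux/kernel.h>

static void show_ptr_extensions(void)
{
	u8 mac[6] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05 };
	u8 ip[4]  = { 192, 168, 0, 1 };	/* network byte order */

	printk(KERN_DEBUG "%pM\n",  mac);	/* 00:01:02:03:04:05 */
	printk(KERN_DEBUG "%pMF\n", mac);	/* 00-01-02-03-04-05 (FDDI) */
	printk(KERN_DEBUG "%pI4\n", ip);	/* 192.168.0.1 */
	printk(KERN_DEBUG "%pi4\n", ip);	/* 192.168.000.001 */
}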
-static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
-			struct printf_spec spec)
+static noinline_for_stack
+char *pointer(const char *fmt, char *buf, char *end, void *ptr,
+	      struct printf_spec spec)
 {
 	if (!ptr)
 		return string(buf, end, "(null)", spec);
@@ -939,6 +1005,7 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
 		return resource_string(buf, end, ptr, spec, fmt);
 	case 'M':			/* Colon separated: 00:01:02:03:04:05 */
 	case 'm':			/* Contiguous: 000102030405 */
+					/* [mM]F (FDDI, bit reversed) */
 		return mac_address_string(buf, end, ptr, spec, fmt);
 	case 'I':			/* Formatted IP supported
 					 * 4:	1.2.3.4
@@ -989,7 +1056,8 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
 * @precision: precision of a number
 * @qualifier: qualifier of a number (long, size_t, ...)
 */
-static int format_decode(const char *fmt, struct printf_spec *spec)
+static noinline_for_stack
+int format_decode(const char *fmt, struct printf_spec *spec)
 {
 	const char *start = fmt;

@@ -1297,7 +1365,7 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
 			break;

 		case FORMAT_TYPE_NRCHARS: {
-			int qualifier = spec.qualifier;
+			u8 qualifier = spec.qualifier;

 			if (qualifier == 'l') {
 				long *ip = va_arg(args, long *);
@@ -1583,7 +1651,7 @@ do {								\

 		case FORMAT_TYPE_NRCHARS: {
 			/* skip %n 's argument */
-			int qualifier = spec.qualifier;
+			u8 qualifier = spec.qualifier;
 			void *skip_arg;
 			if (qualifier == 'l')
 				skip_arg = va_arg(args, long *);
@@ -1849,7 +1917,9 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
 	char *next;
 	char digit;
 	int num = 0;
-	int qualifier, base, field_width;
+	u8 qualifier;
+	u8 base;
+	s16 field_width;
 	bool is_sign;

 	while (*fmt && *str) {
@@ -1927,7 +1997,7 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
 		{
 			char *s = (char *)va_arg(args, char *);
 			if (field_width == -1)
-				field_width = INT_MAX;
+				field_width = SHRT_MAX;
 			/* first, skip leading white space in buffer */
 			str = skip_spaces(str);
diff --git a/lib/zlib_inflate/inffast.c b/lib/zlib_inflate/inffast.c
index 215447c55261..2c13ecc5bb2c 100644
--- a/lib/zlib_inflate/inffast.c
+++ b/lib/zlib_inflate/inffast.c
@@ -8,21 +8,6 @@
 #include "inflate.h"
 #include "inffast.h"

-/* Only do the unaligned "Faster" variant when
- * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is set
- *
- * On powerpc, it won't be as we don't include autoconf.h
- * automatically for the boot wrapper, which is intended as
- * we run in an environment where we may not be able to deal
- * with (even rare) alignment faults. In addition, we do not
- * define __KERNEL__ for arch/powerpc/boot unlike x86
- */
-
-#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-#include <asm/unaligned.h>
-#include <asm/byteorder.h>
-#endif
-
 #ifndef ASMINF

 /* Allow machine dependent optimization for post-increment or pre-increment.
@@ -36,14 +21,31 @@
    - Pentium III (Anderson)
    - M68060 (Nikl)
 */
+union uu {
+	unsigned short us;
+	unsigned char b[2];
+};
+
+/* Endian-independent version */
+static inline unsigned short
+get_unaligned16(const unsigned short *p)
+{
+	union uu  mm;
+	unsigned char *b = (unsigned char *)p;
+
+	mm.b[0] = b[0];
+	mm.b[1] = b[1];
+	return mm.us;
+}
+
 #ifdef POSTINC
 #  define OFF 0
 #  define PUP(a) *(a)++
-#  define UP_UNALIGNED(a) get_unaligned((a)++)
+#  define UP_UNALIGNED(a) get_unaligned16((a)++)
 #else
 #  define OFF 1
 #  define PUP(a) *++(a)
-#  define UP_UNALIGNED(a) get_unaligned(++(a))
+#  define UP_UNALIGNED(a) get_unaligned16(++(a))
 #endif

 /*
@@ -256,7 +258,6 @@ void inflate_fast(z_streamp strm, unsigned start)
 		    }
 		}
 		else {
-#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 		    unsigned short *sout;
 		    unsigned long loops;

@@ -274,22 +275,25 @@ void inflate_fast(z_streamp strm, unsigned start)
 			sfrom = (unsigned short *)(from - OFF);
 			loops = len >> 1;
 			do
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+			    PUP(sout) = PUP(sfrom);
+#else
 			    PUP(sout) = UP_UNALIGNED(sfrom);
+#endif
 			while (--loops);
 			out = (unsigned char *)sout + OFF;
 			from = (unsigned char *)sfrom + OFF;
 		    } else { /* dist == 1 or dist == 2 */
 			unsigned short pat16;

-			pat16 = *(sout-2+2*OFF);
-			if (dist == 1)
-#if defined(__BIG_ENDIAN)
-			    pat16 = (pat16 & 0xff) | ((pat16 & 0xff) << 8);
-#elif defined(__LITTLE_ENDIAN)
-			    pat16 = (pat16 & 0xff00) | ((pat16 & 0xff00) >> 8);
-#else
-#error __BIG_ENDIAN nor __LITTLE_ENDIAN is defined
-#endif
+			pat16 = *(sout-1+OFF);
+			if (dist == 1) {
+			    union uu mm;
+			    /* copy one char pattern to both bytes */
+			    mm.us = pat16;
+			    mm.b[0] = mm.b[1];
+			    pat16 = mm.us;
+			}
 			loops = len >> 1;
 			do
 			    PUP(sout) = pat16;
@@ -298,20 +302,6 @@ void inflate_fast(z_streamp strm, unsigned start)
 		}
 		if (len & 1)
 		    PUP(out) = PUP(from);
-#else /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
-		    from = out - dist;          /* copy direct from output */
-		    do {                        /* minimum length is three */
-			PUP(out) = PUP(from);
-			PUP(out) = PUP(from);
-			PUP(out) = PUP(from);
-			len -= 3;
-		    } while (len > 2);
-		    if (len) {
-			PUP(out) = PUP(from);
-			if (len > 1)
-			    PUP(out) = PUP(from);
-		    }
-#endif /* !CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
 		}
 	    }
 	    else if ((op & 64) == 0) {	/* 2nd level distance code */
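The inffast.c change drops the build-time split between an unaligned fast path and a byte-at-a-time fallback: get_unaligned16() assembles a short from two byte loads, which is safe at any alignment and on any endianness, and the dist == 1 case replicates a one-byte pattern through the same union instead of endian-specific shifts. A stand-alone user-space model of both tricks; the names mirror the kernel's but the signature of get_unaligned16 is simplified here:

/* Sketch of the two tricks above, runnable in user space. */
#include <stdio.h>

union uu {
	unsigned short us;
	unsigned char b[2];
};

/* Endian-independent unaligned load: two byte loads in place of one
 * possibly-faulting unaligned short load. */
static unsigned short get_unaligned16(const unsigned char *p)
{
	union uu mm;

	mm.b[0] = p[0];
	mm.b[1] = p[1];
	return mm.us;
}

int main(void)
{
	unsigned char buf[3] = { 0x11, 0x22, 0x33 };
	union uu mm;

	/* Read a short from an odd (unaligned) offset; the value is
	 * 0x3322 on little-endian hosts, 0x2233 on big-endian. */
	printf("0x%04x\n", get_unaligned16(buf + 1));

	/* dist == 1: copy one byte into the other half, so the 16-bit
	 * pattern is that byte doubled regardless of endianness. */
	mm.b[0] = 0x11;
	mm.b[1] = 0x22;
	mm.b[0] = mm.b[1];
	printf("%02x %02x\n", mm.b[0], mm.b[1]);	/* "22 22" on any host */
	return 0;
}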