Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug    |  22
-rw-r--r--  lib/Makefile         |   2
-rw-r--r--  lib/audit.c          |   2
-rw-r--r--  lib/hweight.c        |  10
-rw-r--r--  lib/ioremap.c        |  92
-rw-r--r--  lib/klist.c          |  26
-rw-r--r--  lib/kobject.c        |   9
-rw-r--r--  lib/list_debug.c     |  76
-rw-r--r--  lib/rbtree.c         |   6
-rw-r--r--  lib/rwsem.c          |   2
-rw-r--r--  lib/spinlock_debug.c |  15
-rw-r--r--  lib/ts_fsm.c         |  10
12 files changed, 243 insertions(+), 29 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 554ee688a9f8..f9ae75cc0145 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -8,6 +8,13 @@ config PRINTK_TIME operations. This is useful for identifying long delays in kernel startup. +config ENABLE_MUST_CHECK + bool "Enable __must_check logic" + default y + help + Enable the __must_check logic in the kernel build. Disable this to + suppress the "warning: ignoring return value of 'foo', declared with + attribute warn_unused_result" messages. config MAGIC_SYSRQ bool "Magic SysRq key" @@ -218,7 +225,7 @@ config LOCKDEP bool depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT select STACKTRACE - select FRAME_POINTER + select FRAME_POINTER if !X86 select KALLSYMS select KALLSYMS_ALL @@ -277,7 +284,7 @@ config DEBUG_HIGHMEM config DEBUG_BUGVERBOSE bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EMBEDDED depends on BUG - depends on ARM || ARM26 || M32R || M68K || SPARC32 || SPARC64 || X86_32 || FRV + depends on ARM || ARM26 || AVR32 || M32R || M68K || SPARC32 || SPARC64 || X86_32 || FRV || SUPERH default !EMBEDDED help Say Y here to make BUG() panics output the file name and line number @@ -313,9 +320,18 @@ config DEBUG_VM If unsure, say N. +config DEBUG_LIST + bool "Debug linked list manipulation" + depends on DEBUG_KERNEL + help + Enable this to turn on extended checks in the linked-list + walking routines. + + If unsure, say N. + config FRAME_POINTER bool "Compile the kernel with frame pointers" - depends on DEBUG_KERNEL && (X86 || CRIS || M68K || M68KNOMMU || FRV || UML || S390) + depends on DEBUG_KERNEL && (X86 || CRIS || M68K || M68KNOMMU || FRV || UML || S390 || AVR32 || SUPERH) default y if DEBUG_INFO && UML help If you say Y here the resulting kernel image will be slightly larger diff --git a/lib/Makefile b/lib/Makefile index ef1d37afbbb6..ddf3e676e1f4 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -7,6 +7,7 @@ lib-y := errno.o ctype.o string.o vsprintf.o cmdline.o \ idr.o div64.o int_sqrt.o bitmap.o extable.o prio_tree.o \ sha1.o +lib-$(CONFIG_MMU) += ioremap.o lib-$(CONFIG_SMP) += cpumask.o lib-y += kobject.o kref.o kobject_uevent.o klist.o @@ -28,6 +29,7 @@ lib-$(CONFIG_GENERIC_HWEIGHT) += hweight.o obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o obj-$(CONFIG_PLIST) += plist.o obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o +obj-$(CONFIG_DEBUG_LIST) += list_debug.o ifneq ($(CONFIG_HAVE_DEC_LOCK),y) lib-y += dec_and_lock.o diff --git a/lib/audit.c b/lib/audit.c index 8c21625ef938..3b1289fadf06 100644 --- a/lib/audit.c +++ b/lib/audit.c @@ -28,8 +28,10 @@ int audit_classify_syscall(int abi, unsigned syscall) switch(syscall) { case __NR_open: return 2; +#ifdef __NR_openat case __NR_openat: return 3; +#endif #ifdef __NR_socketcall case __NR_socketcall: return 4; diff --git a/lib/hweight.c b/lib/hweight.c index 438257671708..360556a7803d 100644 --- a/lib/hweight.c +++ b/lib/hweight.c @@ -1,5 +1,6 @@ #include <linux/module.h> #include <asm/types.h> +#include <asm/bitops.h> /** * hweightN - returns the hamming weight of a N-bit word @@ -40,14 +41,19 @@ unsigned long hweight64(__u64 w) #if BITS_PER_LONG == 32 return hweight32((unsigned int)(w >> 32)) + hweight32((unsigned int)w); #elif BITS_PER_LONG == 64 +#ifdef ARCH_HAS_FAST_MULTIPLIER + w -= (w >> 1) & 0x5555555555555555ul; + w = (w & 0x3333333333333333ul) + ((w >> 2) & 0x3333333333333333ul); + w = (w + (w >> 4)) & 0x0f0f0f0f0f0f0f0ful; + return (w * 0x0101010101010101ul) >> 56; +#else __u64 res = w - ((w >> 1) & 
0x5555555555555555ul); res = (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul); res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful; res = res + (res >> 8); res = res + (res >> 16); return (res + (res >> 32)) & 0x00000000000000FFul; -#else -#error BITS_PER_LONG not defined +#endif #endif } EXPORT_SYMBOL(hweight64); diff --git a/lib/ioremap.c b/lib/ioremap.c new file mode 100644 index 000000000000..99fa277f9f7b --- /dev/null +++ b/lib/ioremap.c @@ -0,0 +1,92 @@ +/* + * Re-map IO memory to kernel address space so that we can access it. + * This is needed for high PCI addresses that aren't mapped in the + * 640k-1MB IO memory area on PC's + * + * (C) Copyright 1995 1996 Linus Torvalds + */ +#include <linux/io.h> +#include <linux/vmalloc.h> +#include <linux/mm.h> + +#include <asm/cacheflush.h> +#include <asm/pgtable.h> + +static int ioremap_pte_range(pmd_t *pmd, unsigned long addr, + unsigned long end, unsigned long phys_addr, pgprot_t prot) +{ + pte_t *pte; + unsigned long pfn; + + pfn = phys_addr >> PAGE_SHIFT; + pte = pte_alloc_kernel(pmd, addr); + if (!pte) + return -ENOMEM; + do { + BUG_ON(!pte_none(*pte)); + set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot)); + pfn++; + } while (pte++, addr += PAGE_SIZE, addr != end); + return 0; +} + +static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr, + unsigned long end, unsigned long phys_addr, pgprot_t prot) +{ + pmd_t *pmd; + unsigned long next; + + phys_addr -= addr; + pmd = pmd_alloc(&init_mm, pud, addr); + if (!pmd) + return -ENOMEM; + do { + next = pmd_addr_end(addr, end); + if (ioremap_pte_range(pmd, addr, next, phys_addr + addr, prot)) + return -ENOMEM; + } while (pmd++, addr = next, addr != end); + return 0; +} + +static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr, + unsigned long end, unsigned long phys_addr, pgprot_t prot) +{ + pud_t *pud; + unsigned long next; + + phys_addr -= addr; + pud = pud_alloc(&init_mm, pgd, addr); + if (!pud) + return -ENOMEM; + do { + next = pud_addr_end(addr, end); + if (ioremap_pmd_range(pud, addr, next, phys_addr + addr, prot)) + return -ENOMEM; + } while (pud++, addr = next, addr != end); + return 0; +} + +int ioremap_page_range(unsigned long addr, + unsigned long end, unsigned long phys_addr, pgprot_t prot) +{ + pgd_t *pgd; + unsigned long start; + unsigned long next; + int err; + + BUG_ON(addr >= end); + + start = addr; + phys_addr -= addr; + pgd = pgd_offset_k(addr); + do { + next = pgd_addr_end(addr, end); + err = ioremap_pud_range(pgd, addr, next, phys_addr+addr, prot); + if (err) + break; + } while (pgd++, addr = next, addr != end); + + flush_cache_vmap(start, end); + + return err; +} diff --git a/lib/klist.c b/lib/klist.c index 9c94f0b163a1..120bd175aa78 100644 --- a/lib/klist.c +++ b/lib/klist.c @@ -123,12 +123,10 @@ EXPORT_SYMBOL_GPL(klist_add_tail); static void klist_release(struct kref * kref) { struct klist_node * n = container_of(kref, struct klist_node, n_ref); - void (*put)(struct klist_node *) = n->n_klist->put; + list_del(&n->n_node); complete(&n->n_removed); n->n_klist = NULL; - if (put) - put(n); } static int klist_dec_and_del(struct klist_node * n) @@ -145,10 +143,14 @@ static int klist_dec_and_del(struct klist_node * n) void klist_del(struct klist_node * n) { struct klist * k = n->n_klist; + void (*put)(struct klist_node *) = k->put; spin_lock(&k->k_lock); - klist_dec_and_del(n); + if (!klist_dec_and_del(n)) + put = NULL; spin_unlock(&k->k_lock); + if (put) + put(n); } EXPORT_SYMBOL_GPL(klist_del); @@ -161,10 +163,7 @@ 
EXPORT_SYMBOL_GPL(klist_del); void klist_remove(struct klist_node * n) { - struct klist * k = n->n_klist; - spin_lock(&k->k_lock); - klist_dec_and_del(n); - spin_unlock(&k->k_lock); + klist_del(n); wait_for_completion(&n->n_removed); } @@ -260,12 +259,15 @@ static struct klist_node * to_klist_node(struct list_head * n) struct klist_node * klist_next(struct klist_iter * i) { struct list_head * next; + struct klist_node * lnode = i->i_cur; struct klist_node * knode = NULL; + void (*put)(struct klist_node *) = i->i_klist->put; spin_lock(&i->i_klist->k_lock); - if (i->i_cur) { - next = i->i_cur->n_node.next; - klist_dec_and_del(i->i_cur); + if (lnode) { + next = lnode->n_node.next; + if (!klist_dec_and_del(lnode)) + put = NULL; } else next = i->i_head->next; @@ -275,6 +277,8 @@ struct klist_node * klist_next(struct klist_iter * i) } i->i_cur = knode; spin_unlock(&i->i_klist->k_lock); + if (put && lnode) + put(lnode); return knode; } diff --git a/lib/kobject.c b/lib/kobject.c index 8e7c71993487..1699eb9161f3 100644 --- a/lib/kobject.c +++ b/lib/kobject.c @@ -407,6 +407,7 @@ static struct kobj_type dir_ktype = { struct kobject *kobject_add_dir(struct kobject *parent, const char *name) { struct kobject *k; + int ret; if (!parent) return NULL; @@ -418,7 +419,13 @@ struct kobject *kobject_add_dir(struct kobject *parent, const char *name) k->parent = parent; k->ktype = &dir_ktype; kobject_set_name(k, name); - kobject_register(k); + ret = kobject_register(k); + if (ret < 0) { + printk(KERN_WARNING "kobject_add_dir: " + "kobject_register error: %d\n", ret); + kobject_del(k); + return NULL; + } return k; } diff --git a/lib/list_debug.c b/lib/list_debug.c new file mode 100644 index 000000000000..7ba9d823d388 --- /dev/null +++ b/lib/list_debug.c @@ -0,0 +1,76 @@ +/* + * Copyright 2006, Red Hat, Inc., Dave Jones + * Released under the General Public License (GPL). + * + * This file contains the linked list implementations for + * DEBUG_LIST. + */ + +#include <linux/module.h> +#include <linux/list.h> + +/* + * Insert a new entry between two known consecutive entries. + * + * This is only for internal list manipulation where we know + * the prev/next entries already! + */ + +void __list_add(struct list_head *new, + struct list_head *prev, + struct list_head *next) +{ + if (unlikely(next->prev != prev)) { + printk(KERN_ERR "list_add corruption. next->prev should be %p, but was %p\n", + prev, next->prev); + BUG(); + } + if (unlikely(prev->next != next)) { + printk(KERN_ERR "list_add corruption. prev->next should be %p, but was %p\n", + next, prev->next); + BUG(); + } + next->prev = new; + new->next = next; + new->prev = prev; + prev->next = new; +} +EXPORT_SYMBOL(__list_add); + +/** + * list_add - add a new entry + * @new: new entry to be added + * @head: list head to add it after + * + * Insert a new entry after the specified head. + * This is good for implementing stacks. + */ +void list_add(struct list_head *new, struct list_head *head) +{ + __list_add(new, head, head->next); +} +EXPORT_SYMBOL(list_add); + +/** + * list_del - deletes entry from list. + * @entry: the element to delete from the list. + * Note: list_empty on entry does not return true after this, the entry is + * in an undefined state. + */ +void list_del(struct list_head *entry) +{ + if (unlikely(entry->prev->next != entry)) { + printk(KERN_ERR "list_del corruption. prev->next should be %p, " + "but was %p\n", entry, entry->prev->next); + BUG(); + } + if (unlikely(entry->next->prev != entry)) { + printk(KERN_ERR "list_del corruption. 
next->prev should be %p, " + "but was %p\n", entry, entry->next->prev); + BUG(); + } + __list_del(entry->prev, entry->next); + entry->next = LIST_POISON1; + entry->prev = LIST_POISON2; +} +EXPORT_SYMBOL(list_del); diff --git a/lib/rbtree.c b/lib/rbtree.c index 1e55ba1c2edf..48499c2d88cc 100644 --- a/lib/rbtree.c +++ b/lib/rbtree.c @@ -322,6 +322,9 @@ struct rb_node *rb_next(struct rb_node *node) { struct rb_node *parent; + if (rb_parent(node) == node) + return NULL; + /* If we have a right-hand child, go down and then left as far as we can. */ if (node->rb_right) { @@ -348,6 +351,9 @@ struct rb_node *rb_prev(struct rb_node *node) { struct rb_node *parent; + if (rb_parent(node) == node) + return NULL; + /* If we have a left-hand child, go down and then right as far as we can. */ if (node->rb_left) { diff --git a/lib/rwsem.c b/lib/rwsem.c index b322421c2969..901d0e7da892 100644 --- a/lib/rwsem.c +++ b/lib/rwsem.c @@ -146,7 +146,7 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading) /* * wait for a lock to be granted */ -static inline struct rw_semaphore * +static struct rw_semaphore * rwsem_down_failed_common(struct rw_semaphore *sem, struct rwsem_waiter *waiter, signed long adjustment) { diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c index 58c577dd82e5..dafaf1de2491 100644 --- a/lib/spinlock_debug.c +++ b/lib/spinlock_debug.c @@ -99,11 +99,12 @@ static inline void debug_spin_unlock(spinlock_t *lock) static void __spin_lock_debug(spinlock_t *lock) { - int print_once = 1; u64 i; + u64 loops = loops_per_jiffy * HZ; + int print_once = 1; for (;;) { - for (i = 0; i < loops_per_jiffy * HZ; i++) { + for (i = 0; i < loops; i++) { if (__raw_spin_trylock(&lock->raw_lock)) return; __delay(1); @@ -165,11 +166,12 @@ static void rwlock_bug(rwlock_t *lock, const char *msg) #if 0 /* __write_lock_debug() can lock up - maybe this can too? */ static void __read_lock_debug(rwlock_t *lock) { - int print_once = 1; u64 i; + u64 loops = loops_per_jiffy * HZ; + int print_once = 1; for (;;) { - for (i = 0; i < loops_per_jiffy * HZ; i++) { + for (i = 0; i < loops; i++) { if (__raw_read_trylock(&lock->raw_lock)) return; __delay(1); @@ -239,11 +241,12 @@ static inline void debug_write_unlock(rwlock_t *lock) #if 0 /* This can cause lockups */ static void __write_lock_debug(rwlock_t *lock) { - int print_once = 1; u64 i; + u64 loops = loops_per_jiffy * HZ; + int print_once = 1; for (;;) { - for (i = 0; i < loops_per_jiffy * HZ; i++) { + for (i = 0; i < loops; i++) { if (__raw_write_trylock(&lock->raw_lock)) return; __delay(1); diff --git a/lib/ts_fsm.c b/lib/ts_fsm.c index 87847c2ae9e2..af575b61526b 100644 --- a/lib/ts_fsm.c +++ b/lib/ts_fsm.c @@ -12,13 +12,13 @@ * * A finite state machine consists of n states (struct ts_fsm_token) * representing the pattern as a finite automation. The data is read - * sequentially on a octet basis. Every state token specifies the number + * sequentially on an octet basis. Every state token specifies the number * of recurrences and the type of value accepted which can be either a * specific character or ctype based set of characters. The available * type of recurrences include 1, (0|1), [0 n], and [1 n]. * - * The algorithm differs between strict/non-strict mode specyfing - * whether the pattern has to start at the first octect. Strict mode + * The algorithm differs between strict/non-strict mode specifying + * whether the pattern has to start at the first octet. 
Strict mode * is enabled by default and can be disabled by inserting * TS_FSM_HEAD_IGNORE as the first token in the chain. * @@ -44,7 +44,7 @@ struct ts_fsm #define _W 0x200 /* wildcard */ /* Map to _ctype flags and some magic numbers */ -static u16 token_map[TS_FSM_TYPE_MAX+1] = { +static const u16 token_map[TS_FSM_TYPE_MAX+1] = { [TS_FSM_SPECIFIC] = 0, [TS_FSM_WILDCARD] = _W, [TS_FSM_CNTRL] = _C, @@ -61,7 +61,7 @@ static u16 token_map[TS_FSM_TYPE_MAX+1] = { [TS_FSM_ASCII] = _A, }; -static u16 token_lookup_tbl[256] = { +static const u16 token_lookup_tbl[256] = { _W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 0- 3 */ _W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 4- 7 */ _W|_A|_C, _W|_A|_C|_S, _W|_A|_C|_S, _W|_A|_C|_S, /* 8- 11 */ |
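
A note on the new ENABLE_MUST_CHECK option in the lib/Kconfig.debug hunk above: what it toggles is whether __must_check expands to the compiler's warn_unused_result attribute or to nothing (the conventional mapping lives in include/linux/compiler.h, which is not part of this diff). The sketch below is illustrative only; the function name is invented.

#include <linux/compiler.h>

/* hypothetical example function; __must_check marks its return value
 * as one that callers must not silently drop */
int __must_check hypothetical_reserve_resource(unsigned int id);

static void careless_caller(void)
{
	/* with CONFIG_ENABLE_MUST_CHECK=y gcc emits:
	 * "warning: ignoring return value of 'hypothetical_reserve_resource',
	 *  declared with attribute warn_unused_result" */
	hypothetical_reserve_resource(3);
}

static int careful_caller(void)
{
	int err = hypothetical_reserve_resource(3);

	if (err)
		return err;
	return 0;
}

Turning the option off simply makes __must_check a no-op, which is what silences the warning quoted in the help text.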
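
The lib/hweight.c hunk adds a fast-multiplier variant of hweight64(): once the parallel bit-sums have reduced the word to per-byte counts, a single multiply by 0x0101010101010101 accumulates all eight bytes into the top byte, replacing the shift/add cascade that remains in the #else branch. Below is a stand-alone user-space sketch of both variants (the same arithmetic, not the kernel code itself) for experimentation.

/* compile with: gcc -O2 -o popcnt_demo popcnt_demo.c */
#include <stdint.h>
#include <stdio.h>
#include <assert.h>

static unsigned int hweight64_mul(uint64_t w)
{
	w -= (w >> 1) & 0x5555555555555555ull;			/* 2-bit sums  */
	w  = (w & 0x3333333333333333ull) + ((w >> 2) & 0x3333333333333333ull);
	w  = (w + (w >> 4)) & 0x0f0f0f0f0f0f0f0full;		/* byte sums   */
	return (w * 0x0101010101010101ull) >> 56;		/* fold bytes  */
}

static unsigned int hweight64_shift(uint64_t w)
{
	uint64_t res = w - ((w >> 1) & 0x5555555555555555ull);

	res = (res & 0x3333333333333333ull) + ((res >> 2) & 0x3333333333333333ull);
	res = (res + (res >> 4)) & 0x0f0f0f0f0f0f0f0full;
	res = res + (res >> 8);
	res = res + (res >> 16);
	return (res + (res >> 32)) & 0xffull;
}

int main(void)
{
	uint64_t samples[] = { 0, 1, 0xffffffffffffffffull, 0xdeadbeefcafef00dull };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		assert(hweight64_mul(samples[i]) == hweight64_shift(samples[i]));
		printf("%#018llx -> %u\n",
		       (unsigned long long)samples[i], hweight64_mul(samples[i]));
	}
	return 0;
}

On CPUs with a fast 64-bit multiplier the single multiply is cheaper than the three extra shift/add steps, which is why the kernel variant is gated behind ARCH_HAS_FAST_MULTIPLIER.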
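
The new lib/ioremap.c only populates page tables for a virtual range that has already been reserved; the architecture's ioremap() implementation is still responsible for carving that range out of vmalloc space. The fragment below is a rough, assumption-laden sketch of that division of labour, loosely modelled on how x86 drove it at the time: the function name is invented, and page alignment, sub-page offsets and caching-attribute selection are omitted.

#include <linux/vmalloc.h>
#include <linux/io.h>
#include <asm/pgtable.h>

static void __iomem *example_ioremap(unsigned long phys_addr,
				     unsigned long size, pgprot_t prot)
{
	struct vm_struct *area;
	unsigned long vaddr;

	area = get_vm_area(size, VM_IOREMAP);	/* reserve virtual range */
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long)area->addr;

	/* let the generic helper build the pte/pmd/pud entries */
	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
		vunmap(area->addr);		/* tear the area down on failure */
		return NULL;
	}
	return (void __iomem *)vaddr;
}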
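
With CONFIG_DEBUG_LIST enabled, the out-of-line list_add()/list_del() above verify that the neighbouring nodes still point back at the entry being manipulated before touching them, and BUG() with the "list_add corruption"/"list_del corruption" messages otherwise. A contrived kernel-style sketch of the kind of damage the list_del() check traps; the structure and function are made up for illustration.

#include <linux/list.h>
#include <linux/kernel.h>

static LIST_HEAD(demo_list);

struct demo_item {
	struct list_head node;
	int value;
};

static void demo_corrupted_del(struct demo_item *item)
{
	list_add(&item->node, &demo_list);

	/* simulate stray memory corruption clobbering a neighbour's
	 * back-pointer ... */
	item->node.next->prev = NULL;

	/* ... with CONFIG_DEBUG_LIST this list_del() now reports
	 * "list_del corruption. next->prev should be ..., but was ..."
	 * and BUG()s instead of silently re-linking around bad pointers */
	list_del(&item->node);
}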