Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig            |   2
-rw-r--r--  lib/Kconfig.debug      | 124
-rw-r--r--  lib/Kconfig.kgdb       |   3
-rw-r--r--  lib/bug.c              |   4
-rw-r--r--  lib/cpu_rmap.c         |  54
-rw-r--r--  lib/devres.c           |  58
-rw-r--r--  lib/digsig.c           |  43
-rw-r--r--  lib/dynamic_debug.c    | 165
-rw-r--r--  lib/hexdump.c          |   4
-rw-r--r--  lib/locking-selftest.c |  34
-rw-r--r--  lib/mpi/mpi-internal.h |   4
-rw-r--r--  lib/mpi/mpicoder.c     |   8
-rw-r--r--  lib/parser.c           |   6
-rw-r--r--  lib/rbtree.c           |  20
-rw-r--r--  lib/rwsem-spinlock.c   |  69
-rw-r--r--  lib/rwsem.c            |  75
-rw-r--r--  lib/swiotlb.c          |  47
-rw-r--r--  lib/vsprintf.c         |   7
-rw-r--r--  lib/xz/Kconfig         |  34
19 files changed, 443 insertions, 318 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 75cdb77fa49d..3958dc4389f9 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -322,7 +322,7 @@ config CPUMASK_OFFSTACK
 
 config DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
 	bool "Disable obsolete cpumask functions" if DEBUG_PER_CPU_MAPS
-	depends on EXPERIMENTAL && BROKEN
+	depends on BROKEN
 
 config CPU_RMAP
 	bool
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 3a353091a903..e4a7f808fa06 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -134,7 +134,7 @@ config DEBUG_SECTION_MISMATCH
 	  any use of code/data previously in these sections would
 	  most likely result in an oops.
 	  In the code, functions and variables are annotated with
-	  __init, __devinit, etc. (see the full list in include/linux/init.h),
+	  __init, __cpuinit, etc. (see the full list in include/linux/init.h),
 	  which results in the code/data being placed in specific sections.
 	  The section mismatch analysis is always performed after a full
 	  kernel build, and enabling this option causes the following
@@ -243,8 +243,7 @@ config BOOTPARAM_SOFTLOCKUP_PANIC_VALUE
 	default 1 if BOOTPARAM_SOFTLOCKUP_PANIC
 
 config PANIC_ON_OOPS
-	bool "Panic on Oops" if EXPERT
-	default n
+	bool "Panic on Oops"
 	help
 	  Say Y here to enable the kernel to panic when it oopses. This
 	  has the same effect as setting oops=panic on the kernel command
@@ -455,7 +454,7 @@ config HAVE_DEBUG_KMEMLEAK
 
 config DEBUG_KMEMLEAK
 	bool "Kernel memory leak detector"
-	depends on DEBUG_KERNEL && EXPERIMENTAL && HAVE_DEBUG_KMEMLEAK
+	depends on DEBUG_KERNEL && HAVE_DEBUG_KMEMLEAK
 	select DEBUG_FS
 	select STACKTRACE if STACKTRACE_SUPPORT
 	select KALLSYMS
@@ -605,61 +604,6 @@ config PROVE_LOCKING
 
 	  For more details, see Documentation/lockdep-design.txt.
 
-config PROVE_RCU
-	bool "RCU debugging: prove RCU correctness"
-	depends on PROVE_LOCKING
-	default n
-	help
-	  This feature enables lockdep extensions that check for correct
-	  use of RCU APIs.  This is currently under development.  Say Y
-	  if you want to debug RCU usage or help work on the PROVE_RCU
-	  feature.
-
-	  Say N if you are unsure.
-
-config PROVE_RCU_REPEATEDLY
-	bool "RCU debugging: don't disable PROVE_RCU on first splat"
-	depends on PROVE_RCU
-	default n
-	help
-	  By itself, PROVE_RCU will disable checking upon issuing the
-	  first warning (or "splat").  This feature prevents such
-	  disabling, allowing multiple RCU-lockdep warnings to be printed
-	  on a single reboot.
-
-	  Say Y to allow multiple RCU-lockdep warnings per boot.
-
-	  Say N if you are unsure.
-
-config PROVE_RCU_DELAY
-	bool "RCU debugging: preemptible RCU race provocation"
-	depends on DEBUG_KERNEL && PREEMPT_RCU
-	default n
-	help
-	  There is a class of races that involve an unlikely preemption
-	  of __rcu_read_unlock() just after ->rcu_read_lock_nesting has
-	  been set to INT_MIN.  This feature inserts a delay at that
-	  point to increase the probability of these races.
-
-	  Say Y to increase probability of preemption of __rcu_read_unlock().
-
-	  Say N if you are unsure.
-
-config SPARSE_RCU_POINTER
-	bool "RCU debugging: sparse-based checks for pointer usage"
-	default n
-	help
-	  This feature enables the __rcu sparse annotation for
-	  RCU-protected pointers.  This annotation will cause sparse
-	  to flag any non-RCU used of annotated pointers.  This can be
-	  helpful when debugging RCU usage.  Please note that this feature
-	  is not intended to enforce code cleanliness; it is instead merely
-	  a debugging aid.
-
-	  Say Y to make sparse flag questionable use of RCU-protected pointers
-
-	  Say N if you are unsure.
-
 config LOCKDEP
 	bool
 	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
@@ -937,6 +881,63 @@ config BOOT_PRINTK_DELAY
 	  BOOT_PRINTK_DELAY also may cause LOCKUP_DETECTOR to detect
 	  what it believes to be lockup conditions.
 
+menu "RCU Debugging"
+
+config PROVE_RCU
+	bool "RCU debugging: prove RCU correctness"
+	depends on PROVE_LOCKING
+	default n
+	help
+	  This feature enables lockdep extensions that check for correct
+	  use of RCU APIs.  This is currently under development.  Say Y
+	  if you want to debug RCU usage or help work on the PROVE_RCU
+	  feature.
+
+	  Say N if you are unsure.
+
+config PROVE_RCU_REPEATEDLY
+	bool "RCU debugging: don't disable PROVE_RCU on first splat"
+	depends on PROVE_RCU
+	default n
+	help
+	  By itself, PROVE_RCU will disable checking upon issuing the
+	  first warning (or "splat").  This feature prevents such
+	  disabling, allowing multiple RCU-lockdep warnings to be printed
+	  on a single reboot.
+
+	  Say Y to allow multiple RCU-lockdep warnings per boot.
+
+	  Say N if you are unsure.
+
+config PROVE_RCU_DELAY
+	bool "RCU debugging: preemptible RCU race provocation"
+	depends on DEBUG_KERNEL && PREEMPT_RCU
+	default n
+	help
+	  There is a class of races that involve an unlikely preemption
+	  of __rcu_read_unlock() just after ->rcu_read_lock_nesting has
+	  been set to INT_MIN.  This feature inserts a delay at that
+	  point to increase the probability of these races.
+
+	  Say Y to increase probability of preemption of __rcu_read_unlock().
+
+	  Say N if you are unsure.
+
+config SPARSE_RCU_POINTER
+	bool "RCU debugging: sparse-based checks for pointer usage"
+	default n
+	help
+	  This feature enables the __rcu sparse annotation for
+	  RCU-protected pointers.  This annotation will cause sparse
+	  to flag any non-RCU used of annotated pointers.  This can be
+	  helpful when debugging RCU usage.  Please note that this feature
+	  is not intended to enforce code cleanliness; it is instead merely
+	  a debugging aid.
+
+	  Say Y to make sparse flag questionable use of RCU-protected pointers
+
+	  Say N if you are unsure.
+
 config RCU_TORTURE_TEST
 	tristate "torture tests for RCU"
 	depends on DEBUG_KERNEL
@@ -970,7 +971,7 @@ config RCU_TORTURE_TEST_RUNNABLE
 
 config RCU_CPU_STALL_TIMEOUT
 	int "RCU CPU stall timeout in seconds"
-	depends on TREE_RCU || TREE_PREEMPT_RCU
+	depends on RCU_STALL_COMMON
 	range 3 300
 	default 21
 	help
@@ -1008,6 +1009,7 @@ config RCU_CPU_STALL_INFO
 config RCU_TRACE
 	bool "Enable tracing for RCU"
 	depends on DEBUG_KERNEL
+	select TRACE_CLOCK
 	help
 	  This option provides tracing in RCU which presents stats
 	  in debugfs for debugging RCU implementation.
@@ -1015,6 +1017,8 @@ config RCU_TRACE
 	  Say Y here if you want to enable RCU tracing
 	  Say N if you are unsure.
 
+endmenu # "RCU Debugging"
+
 config KPROBES_SANITY_TEST
 	bool "Kprobes sanity tests"
 	depends on DEBUG_KERNEL
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
index 43cb93fa2651..dbb58ae1b8e0 100644
--- a/lib/Kconfig.kgdb
+++ b/lib/Kconfig.kgdb
@@ -5,7 +5,7 @@ config HAVE_ARCH_KGDB
 menuconfig KGDB
 	bool "KGDB: kernel debugger"
 	depends on HAVE_ARCH_KGDB
-	depends on DEBUG_KERNEL && EXPERIMENTAL
+	depends on DEBUG_KERNEL
 	help
 	  If you say Y here, it will be possible to remotely debug the
 	  kernel using gdb.  It is recommended but not required, that
@@ -22,6 +22,7 @@ config KGDB_SERIAL_CONSOLE
 	tristate "KGDB: use kgdb over the serial console"
 	select CONSOLE_POLL
 	select MAGIC_SYSRQ
+	depends on TTY
 	default y
 	help
 	  Share a serial console with kgdb. Sysrq-g must be used
diff --git a/lib/bug.c b/lib/bug.c
index a28c1415357c..168603477f02 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -55,6 +55,7 @@ static inline unsigned long bug_addr(const struct bug_entry *bug)
 }
 
 #ifdef CONFIG_MODULES
+/* Updates are protected by module mutex */
 static LIST_HEAD(module_bug_list);
 
 static const struct bug_entry *module_find_bug(unsigned long bugaddr)
@@ -165,7 +166,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
 		print_modules();
 		show_regs(regs);
 		print_oops_end_marker();
-		add_taint(BUG_GET_TAINT(bug));
+		/* Just a warning, don't kill lockdep. */
+		add_taint(BUG_GET_TAINT(bug), LOCKDEP_STILL_OK);
 		return BUG_TRAP_TYPE_WARN;
 	}
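[Editor's note: the add_taint() call above uses the two-argument form introduced in this cycle, where the caller states whether lockdep remains trustworthy after the taint. A minimal sketch of a caller, assuming a hypothetical warn path; TAINT_WARN and LOCKDEP_STILL_OK are the real kernel constants:]

	#include <linux/kernel.h>

	/* Hypothetical example: taint the kernel for a non-fatal warning,
	 * but tell lockdep its state is still reliable so lock debugging
	 * keeps running after the splat. */
	static void example_warn_path(void)
	{
		add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
	}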
diff --git a/lib/cpu_rmap.c b/lib/cpu_rmap.c
index 145dec5267c9..5fbed5caba6e 100644
--- a/lib/cpu_rmap.c
+++ b/lib/cpu_rmap.c
@@ -45,6 +45,7 @@ struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags)
 	if (!rmap)
 		return NULL;
 
+	kref_init(&rmap->refcount);
 	rmap->obj = (void **)((char *)rmap + obj_offset);
 
 	/* Initially assign CPUs to objects on a rota, since we have
@@ -63,6 +64,35 @@ struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags)
 }
 EXPORT_SYMBOL(alloc_cpu_rmap);
 
+/**
+ * cpu_rmap_release - internal reclaiming helper called from kref_put
+ * @ref: kref to struct cpu_rmap
+ */
+static void cpu_rmap_release(struct kref *ref)
+{
+	struct cpu_rmap *rmap = container_of(ref, struct cpu_rmap, refcount);
+	kfree(rmap);
+}
+
+/**
+ * cpu_rmap_get - internal helper to get new ref on a cpu_rmap
+ * @rmap: reverse-map allocated with alloc_cpu_rmap()
+ */
+static inline void cpu_rmap_get(struct cpu_rmap *rmap)
+{
+	kref_get(&rmap->refcount);
+}
+
+/**
+ * cpu_rmap_put - release ref on a cpu_rmap
+ * @rmap: reverse-map allocated with alloc_cpu_rmap()
+ */
+int cpu_rmap_put(struct cpu_rmap *rmap)
+{
+	return kref_put(&rmap->refcount, cpu_rmap_release);
+}
+EXPORT_SYMBOL(cpu_rmap_put);
+
 /* Reevaluate nearest object for given CPU, comparing with the given
  * neighbours at the given distance.
  */
@@ -197,8 +227,7 @@ struct irq_glue {
  * free_irq_cpu_rmap - free a CPU affinity reverse-map used for IRQs
  * @rmap: Reverse-map allocated with alloc_irq_cpu_map(), or %NULL
  *
- * Must be called in process context, before freeing the IRQs, and
- * without holding any locks required by global workqueue items.
+ * Must be called in process context, before freeing the IRQs.
  */
 void free_irq_cpu_rmap(struct cpu_rmap *rmap)
 {
@@ -212,12 +241,18 @@ void free_irq_cpu_rmap(struct cpu_rmap *rmap)
 		glue = rmap->obj[index];
 		irq_set_affinity_notifier(glue->notify.irq, NULL);
 	}
-	irq_run_affinity_notifiers();
 
-	kfree(rmap);
+	cpu_rmap_put(rmap);
 }
 EXPORT_SYMBOL(free_irq_cpu_rmap);
 
+/**
+ * irq_cpu_rmap_notify - callback for IRQ subsystem when IRQ affinity updated
+ * @notify: struct irq_affinity_notify passed by irq/manage.c
+ * @mask: cpu mask for new SMP affinity
+ *
+ * This is executed in workqueue context.
+ */
 static void
 irq_cpu_rmap_notify(struct irq_affinity_notify *notify, const cpumask_t *mask)
 {
@@ -230,10 +265,16 @@ irq_cpu_rmap_notify(struct irq_affinity_notify *notify, const cpumask_t *mask)
 		pr_warning("irq_cpu_rmap_notify: update failed: %d\n", rc);
 }
 
+/**
+ * irq_cpu_rmap_release - reclaiming callback for IRQ subsystem
+ * @ref: kref to struct irq_affinity_notify passed by irq/manage.c
+ */
 static void irq_cpu_rmap_release(struct kref *ref)
 {
 	struct irq_glue *glue =
 		container_of(ref, struct irq_glue, notify.kref);
+
+	cpu_rmap_put(glue->rmap);
 	kfree(glue);
 }
 
@@ -258,10 +299,13 @@ int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq)
 	glue->notify.notify = irq_cpu_rmap_notify;
 	glue->notify.release = irq_cpu_rmap_release;
 	glue->rmap = rmap;
+	cpu_rmap_get(rmap);
 	glue->index = cpu_rmap_add(rmap, glue);
 	rc = irq_set_affinity_notifier(irq, &glue->notify);
-	if (rc)
+	if (rc) {
+		cpu_rmap_put(glue->rmap);
 		kfree(glue);
+	}
 	return rc;
 }
 EXPORT_SYMBOL(irq_cpu_rmap_add);
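[Editor's note: the cpu_rmap conversion above replaces a bare kfree() lifetime with kref reference counting, so the rmap survives until the last IRQ affinity notifier drops its reference. A minimal, self-contained sketch of the same pattern, using a hypothetical struct foo rather than struct cpu_rmap:]

	#include <linux/kref.h>
	#include <linux/slab.h>

	/* Hypothetical object managed with the same kref pattern. */
	struct foo {
		struct kref refcount;
		/* ... payload ... */
	};

	static void foo_release(struct kref *ref)
	{
		struct foo *f = container_of(ref, struct foo, refcount);
		kfree(f);
	}

	static struct foo *foo_alloc(void)
	{
		struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
		if (f)
			kref_init(&f->refcount);	/* count starts at 1 */
		return f;
	}

	/* Each additional user takes a reference... */
	static void foo_get(struct foo *f)
	{
		kref_get(&f->refcount);
	}

	/* ...and drops it when done; the final put frees the object. */
	static int foo_put(struct foo *f)
	{
		return kref_put(&f->refcount, foo_release);
	}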
diff --git a/lib/devres.c b/lib/devres.c
index 80b9c76d436a..88ad75952a76 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -1,3 +1,4 @@
+#include <linux/err.h>
 #include <linux/pci.h>
 #include <linux/io.h>
 #include <linux/gfp.h>
@@ -86,22 +87,24 @@ void devm_iounmap(struct device *dev, void __iomem *addr)
 EXPORT_SYMBOL(devm_iounmap);
 
 /**
- * devm_request_and_ioremap() - Check, request region, and ioremap resource
- * @dev: Generic device to handle the resource for
+ * devm_ioremap_resource() - check, request region, and ioremap resource
+ * @dev: generic device to handle the resource for
  * @res: resource to be handled
  *
- * Takes all necessary steps to ioremap a mem resource. Uses managed device, so
- * everything is undone on driver detach. Checks arguments, so you can feed
- * it the result from e.g. platform_get_resource() directly. Returns the
- * remapped pointer or NULL on error. Usage example:
+ * Checks that a resource is a valid memory region, requests the memory region
+ * and ioremaps it either as cacheable or as non-cacheable memory depending on
+ * the resource's flags. All operations are managed and will be undone on
+ * driver detach.
+ *
+ * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
+ * on failure. Usage example:
  *
  *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- *	base = devm_request_and_ioremap(&pdev->dev, res);
- *	if (!base)
- *		return -EADDRNOTAVAIL;
+ *	base = devm_ioremap_resource(&pdev->dev, res);
+ *	if (IS_ERR(base))
+ *		return PTR_ERR(base);
  */
-void __iomem *devm_request_and_ioremap(struct device *dev,
-			struct resource *res)
+void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res)
 {
 	resource_size_t size;
 	const char *name;
@@ -111,7 +114,7 @@ void __iomem *devm_request_and_ioremap(struct device *dev,
 
 	if (!res || resource_type(res) != IORESOURCE_MEM) {
 		dev_err(dev, "invalid resource\n");
-		return NULL;
+		return ERR_PTR(-EINVAL);
 	}
 
 	size = resource_size(res);
@@ -119,7 +122,7 @@ void __iomem *devm_request_and_ioremap(struct device *dev,
 
 	if (!devm_request_mem_region(dev, res->start, size, name)) {
 		dev_err(dev, "can't request region for resource %pR\n", res);
-		return NULL;
+		return ERR_PTR(-EBUSY);
 	}
 
 	if (res->flags & IORESOURCE_CACHEABLE)
@@ -130,10 +133,39 @@ void __iomem *devm_request_and_ioremap(struct device *dev,
 	if (!dest_ptr) {
 		dev_err(dev, "ioremap failed for resource %pR\n", res);
 		devm_release_mem_region(dev, res->start, size);
+		dest_ptr = ERR_PTR(-ENOMEM);
 	}
 
 	return dest_ptr;
 }
+EXPORT_SYMBOL(devm_ioremap_resource);
+
+/**
+ * devm_request_and_ioremap() - Check, request region, and ioremap resource
+ * @dev: Generic device to handle the resource for
+ * @res: resource to be handled
+ *
+ * Takes all necessary steps to ioremap a mem resource. Uses managed device, so
+ * everything is undone on driver detach. Checks arguments, so you can feed
+ * it the result from e.g. platform_get_resource() directly. Returns the
+ * remapped pointer or NULL on error. Usage example:
+ *
+ *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ *	base = devm_request_and_ioremap(&pdev->dev, res);
+ *	if (!base)
+ *		return -EADDRNOTAVAIL;
+ */
+void __iomem *devm_request_and_ioremap(struct device *device,
+				       struct resource *res)
+{
+	void __iomem *dest_ptr;
+
+	dest_ptr = devm_ioremap_resource(device, res);
+	if (IS_ERR(dest_ptr))
+		return NULL;
+
+	return dest_ptr;
+}
 EXPORT_SYMBOL(devm_request_and_ioremap);
 
 #ifdef CONFIG_HAS_IOPORT
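[Editor's note: the new devm_ioremap_resource() reports failures with ERR_PTR() codes instead of NULL. A sketch of the intended call pattern in a driver probe routine, following the usage example in the kernel-doc above; the foo_probe name is illustrative only:]

	#include <linux/err.h>
	#include <linux/io.h>
	#include <linux/platform_device.h>

	static int foo_probe(struct platform_device *pdev)
	{
		struct resource *res;
		void __iomem *base;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(base))
			return PTR_ERR(base);	/* -EINVAL, -EBUSY or -ENOMEM */

		/* base is valid here; it is unmapped and the region released
		 * automatically on driver detach */
		return 0;
	}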
diff --git a/lib/digsig.c b/lib/digsig.c
index 8c0e62975c88..2f31e6a45f0a 100644
--- a/lib/digsig.c
+++ b/lib/digsig.c
@@ -30,11 +30,10 @@
 
 static struct crypto_shash *shash;
 
-static int pkcs_1_v1_5_decode_emsa(const unsigned char *msg,
-				   unsigned long msglen,
-				   unsigned long modulus_bitlen,
-				   unsigned char *out,
-				   unsigned long *outlen)
+static const char *pkcs_1_v1_5_decode_emsa(const unsigned char *msg,
+					   unsigned long msglen,
+					   unsigned long modulus_bitlen,
+					   unsigned long *outlen)
 {
 	unsigned long modulus_len, ps_len, i;
 
@@ -42,11 +41,11 @@ static int pkcs_1_v1_5_decode_emsa(const unsigned char *msg,
 
 	/* test message size */
 	if ((msglen > modulus_len) || (modulus_len < 11))
-		return -EINVAL;
+		return NULL;
 
 	/* separate encoded message */
-	if ((msg[0] != 0x00) || (msg[1] != (unsigned char)1))
-		return -EINVAL;
+	if (msg[0] != 0x00 || msg[1] != 0x01)
+		return NULL;
 
 	for (i = 2; i < modulus_len - 1; i++)
 		if (msg[i] != 0xFF)
@@ -56,19 +55,13 @@ static int pkcs_1_v1_5_decode_emsa(const unsigned char *msg,
 	if (msg[i] != 0)
 		/* There was no octet with hexadecimal value 0x00
 		   to separate ps from m. */
-		return -EINVAL;
+		return NULL;
 
 	ps_len = i - 2;
 
-	if (*outlen < (msglen - (2 + ps_len + 1))) {
-		*outlen = msglen - (2 + ps_len + 1);
-		return -EOVERFLOW;
-	}
-
 	*outlen = (msglen - (2 + ps_len + 1));
-	memcpy(out, &msg[2 + ps_len + 1], *outlen);
 
-	return 0;
+	return msg + 2 + ps_len + 1;
 }
 
 /*
@@ -83,7 +76,8 @@ static int digsig_verify_rsa(struct key *key,
 	unsigned long mlen, mblen;
 	unsigned nret, l;
 	int head, i;
-	unsigned char *out1 = NULL, *out2 = NULL;
+	unsigned char *out1 = NULL;
+	const char *m;
 	MPI in = NULL, res = NULL, pkey[2];
 	uint8_t *p, *datap, *endp;
 	struct user_key_payload *ukp;
@@ -120,7 +114,7 @@ static int digsig_verify_rsa(struct key *key,
 	}
 
 	mblen = mpi_get_nbits(pkey[0]);
-	mlen = (mblen + 7)/8;
+	mlen = DIV_ROUND_UP(mblen, 8);
 
 	if (mlen == 0)
 		goto err;
@@ -129,10 +123,6 @@ static int digsig_verify_rsa(struct key *key,
 	if (!out1)
 		goto err;
 
-	out2 = kzalloc(mlen, GFP_KERNEL);
-	if (!out2)
-		goto err;
-
 	nret = siglen;
 	in = mpi_read_from_buffer(sig, &nret);
 	if (!in)
@@ -162,18 +152,17 @@ static int digsig_verify_rsa(struct key *key,
 	memset(out1, 0, head);
 	memcpy(out1 + head, p, l);
+	kfree(p);
+
+	m = pkcs_1_v1_5_decode_emsa(out1, len, mblen, &len);
 
-	err = pkcs_1_v1_5_decode_emsa(out1, len, mblen, out2, &len);
-	if (err)
-		goto err;
-
-	if (len != hlen || memcmp(out2, h, hlen))
+	if (!m || len != hlen || memcmp(m, h, hlen))
 		err = -EINVAL;
 
 err:
 	mpi_free(in);
 	mpi_free(res);
 	kfree(out1);
-	kfree(out2);
 	while (--i >= 0)
 		mpi_free(pkey[i]);
 err1:
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 1db1fc660538..5276b99ca650 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -59,7 +59,7 @@ struct ddebug_iter {
 
 static DEFINE_MUTEX(ddebug_lock);
 static LIST_HEAD(ddebug_tables);
-static int verbose = 0;
+static int verbose;
 module_param(verbose, int, 0644);
 
 /* Return the path relative to source root */
@@ -100,24 +100,32 @@ static char *ddebug_describe_flags(struct _ddebug *dp, char *buf,
 	return buf;
 }
 
-#define vpr_info(fmt, ...) \
-	if (verbose) do { pr_info(fmt, ##__VA_ARGS__); } while (0)
-
-#define vpr_info_dq(q, msg) \
+#define vpr_info(fmt, ...)					\
 do {								\
-	/* trim last char off format print */			\
-	vpr_info("%s: func=\"%s\" file=\"%s\" "			\
-		"module=\"%s\" format=\"%.*s\" "		\
-		"lineno=%u-%u",					\
-		msg,						\
-		q->function ? q->function : "",			\
-		q->filename ? q->filename : "",			\
-		q->module ? q->module : "",			\
-		(int)(q->format ? strlen(q->format) - 1 : 0),	\
-		q->format ? q->format : "",			\
-		q->first_lineno, q->last_lineno);		\
+	if (verbose)						\
+		pr_info(fmt, ##__VA_ARGS__);			\
 } while (0)
 
+static void vpr_info_dq(const struct ddebug_query *query, const char *msg)
+{
+	/* trim any trailing newlines */
+	int fmtlen = 0;
+
+	if (query->format) {
+		fmtlen = strlen(query->format);
+		while (fmtlen && query->format[fmtlen - 1] == '\n')
+			fmtlen--;
+	}
+
+	vpr_info("%s: func=\"%s\" file=\"%s\" module=\"%s\" format=\"%.*s\" lineno=%u-%u\n",
+		 msg,
+		 query->function ? query->function : "",
+		 query->filename ? query->filename : "",
+		 query->module ? query->module : "",
+		 fmtlen, query->format ? query->format : "",
+		 query->first_lineno, query->last_lineno);
+}
+
 /*
  * Search the tables for _ddebug's which match the given `query' and
  * apply the `flags' and `mask' to them.  Returns number of matching
@@ -141,7 +149,7 @@ static int ddebug_change(const struct ddebug_query *query,
 		if (query->module && strcmp(query->module, dt->mod_name))
 			continue;
 
-		for (i = 0 ; i < dt->num_ddebugs ; i++) {
+		for (i = 0; i < dt->num_ddebugs; i++) {
 			struct _ddebug *dp = &dt->ddebugs[i];
 
 			/* match against the source filename */
@@ -176,10 +184,10 @@ static int ddebug_change(const struct ddebug_query *query,
 				continue;
 			dp->flags = newflags;
 			vpr_info("changed %s:%d [%s]%s =%s\n",
-				trim_prefix(dp->filename), dp->lineno,
-				dt->mod_name, dp->function,
-				ddebug_describe_flags(dp, flagbuf,
-						sizeof(flagbuf)));
+				 trim_prefix(dp->filename), dp->lineno,
+				 dt->mod_name, dp->function,
+				 ddebug_describe_flags(dp, flagbuf,
+						       sizeof(flagbuf)));
 		}
 	}
 	mutex_unlock(&ddebug_lock);
@@ -213,19 +221,23 @@ static int ddebug_tokenize(char *buf, char *words[], int maxwords)
 		/* find `end' of word, whitespace separated or quoted */
 		if (*buf == '"' || *buf == '\'') {
 			int quote = *buf++;
-			for (end = buf ; *end && *end != quote ; end++)
+			for (end = buf; *end && *end != quote; end++)
 				;
-			if (!*end)
+			if (!*end) {
+				pr_err("unclosed quote: %s\n", buf);
 				return -EINVAL;	/* unclosed quote */
+			}
 		} else {
-			for (end = buf ; *end && !isspace(*end) ; end++)
+			for (end = buf; *end && !isspace(*end); end++)
 				;
 			BUG_ON(end == buf);
 		}
 
 		/* `buf' is start of word, `end' is one past its end */
-		if (nwords == maxwords)
+		if (nwords == maxwords) {
+			pr_err("too many words, legal max <=%d\n", maxwords);
 			return -EINVAL;	/* ran out of words[] before bytes */
+		}
 		if (*end)
 			*end++ = '\0';	/* terminate the word */
 		words[nwords++] = buf;
@@ -235,7 +247,7 @@ static int ddebug_tokenize(char *buf, char *words[], int maxwords)
 	if (verbose) {
 		int i;
 		pr_info("split into words:");
-		for (i = 0 ; i < nwords ; i++)
+		for (i = 0; i < nwords; i++)
 			pr_cont(" \"%s\"", words[i]);
 		pr_cont("\n");
 	}
@@ -257,7 +269,11 @@ static inline int parse_lineno(const char *str, unsigned int *val)
 		return 0;
 	}
 	*val = simple_strtoul(str, &end, 10);
-	return end == NULL || end == str || *end != '\0' ? -EINVAL : 0;
+	if (end == NULL || end == str || *end != '\0') {
+		pr_err("bad line-number: %s\n", str);
+		return -EINVAL;
+	}
+	return 0;
 }
 
 /*
@@ -286,11 +302,11 @@ static char *unescape(char *str)
 			in += 2;
 			continue;
 		} else if (isodigit(in[1]) &&
-			 isodigit(in[2]) &&
-			 isodigit(in[3])) {
-			*out++ = ((in[1] - '0')<<6) |
-				((in[2] - '0')<<3) |
-				(in[3] - '0');
+			   isodigit(in[2]) &&
+			   isodigit(in[3])) {
+			*out++ = (((in[1] - '0') << 6) |
+				  ((in[2] - '0') << 3) |
+				  (in[3] - '0'));
 			in += 4;
 			continue;
 		}
@@ -308,8 +324,8 @@ static int check_set(const char **dest, char *src, char *name)
 
 	if (*dest) {
 		rc = -EINVAL;
-		pr_err("match-spec:%s val:%s overridden by %s",
-			name, *dest, src);
+		pr_err("match-spec:%s val:%s overridden by %s\n",
+		       name, *dest, src);
 	}
 	*dest = src;
 	return rc;
@@ -337,40 +353,46 @@ static int ddebug_parse_query(char *words[], int nwords,
 	int rc;
 
 	/* check we have an even number of words */
-	if (nwords % 2 != 0)
+	if (nwords % 2 != 0) {
+		pr_err("expecting pairs of match-spec <value>\n");
 		return -EINVAL;
+	}
 	memset(query, 0, sizeof(*query));
 
 	if (modname)
 		/* support $modname.dyndbg=<multiple queries> */
 		query->module = modname;
 
-	for (i = 0 ; i < nwords ; i += 2) {
-		if (!strcmp(words[i], "func"))
+	for (i = 0; i < nwords; i += 2) {
+		if (!strcmp(words[i], "func")) {
 			rc = check_set(&query->function, words[i+1], "func");
-		else if (!strcmp(words[i], "file"))
+		} else if (!strcmp(words[i], "file")) {
 			rc = check_set(&query->filename, words[i+1], "file");
-		else if (!strcmp(words[i], "module"))
+		} else if (!strcmp(words[i], "module")) {
 			rc = check_set(&query->module, words[i+1], "module");
-		else if (!strcmp(words[i], "format"))
+		} else if (!strcmp(words[i], "format")) {
 			rc = check_set(&query->format, unescape(words[i+1]),
-				"format");
-		else if (!strcmp(words[i], "line")) {
+				       "format");
+		} else if (!strcmp(words[i], "line")) {
 			char *first = words[i+1];
 			char *last = strchr(first, '-');
 			if (query->first_lineno || query->last_lineno) {
-				pr_err("match-spec:line given 2 times\n");
+				pr_err("match-spec: line used 2x\n");
 				return -EINVAL;
 			}
 			if (last)
 				*last++ = '\0';
-			if (parse_lineno(first, &query->first_lineno) < 0)
+			if (parse_lineno(first, &query->first_lineno) < 0) {
+				pr_err("line-number is <0\n");
 				return -EINVAL;
+			}
 			if (last) {
 				/* range <first>-<last> */
 				if (parse_lineno(last, &query->last_lineno)
 				    < query->first_lineno) {
-					pr_err("last-line < 1st-line\n");
+					pr_err("last-line:%d < 1st-line:%d\n",
+					       query->last_lineno,
+					       query->first_lineno);
 					return -EINVAL;
 				}
 			} else {
@@ -406,19 +428,22 @@ static int ddebug_parse_flags(const char *str, unsigned int *flagsp,
 		op = *str++;
 		break;
 	default:
+		pr_err("bad flag-op %c, at start of %s\n", *str, str);
 		return -EINVAL;
 	}
 	vpr_info("op='%c'\n", op);
 
-	for ( ; *str ; ++str) {
+	for (; *str ; ++str) {
 		for (i = ARRAY_SIZE(opt_array) - 1; i >= 0; i--) {
 			if (*str == opt_array[i].opt_char) {
 				flags |= opt_array[i].flag;
 				break;
 			}
 		}
-		if (i < 0)
+		if (i < 0) {
+			pr_err("unknown flag '%c' in \"%s\"\n", *str, str);
 			return -EINVAL;
+		}
 	}
 	vpr_info("flags=0x%x\n", flags);
@@ -450,16 +475,22 @@ static int ddebug_exec_query(char *query_string, const char *modname)
 	char *words[MAXWORDS];
 
 	nwords = ddebug_tokenize(query_string, words, MAXWORDS);
-	if (nwords <= 0)
+	if (nwords <= 0) {
+		pr_err("tokenize failed\n");
 		return -EINVAL;
-	if (ddebug_parse_query(words, nwords-1, &query, modname))
+	}
+	/* check flags 1st (last arg) so query is pairs of spec,val */
+	if (ddebug_parse_flags(words[nwords-1], &flags, &mask)) {
+		pr_err("flags parse failed\n");
 		return -EINVAL;
-	if (ddebug_parse_flags(words[nwords-1], &flags, &mask))
+	}
+	if (ddebug_parse_query(words, nwords-1, &query, modname)) {
+		pr_err("query parse failed\n");
 		return -EINVAL;
-
+	}
 	/* actually go and implement the change */
 	nfound = ddebug_change(&query, flags, mask);
-	vpr_info_dq((&query), (nfound) ? "applied" : "no-match");
+	vpr_info_dq(&query, nfound ? "applied" : "no-match");
 
 	return nfound;
 }
@@ -488,8 +519,9 @@ static int ddebug_exec_queries(char *query, const char *modname)
 		if (rc < 0) {
 			errs++;
 			exitcode = rc;
-		} else
+		} else {
 			nfound += rc;
+		}
 		i++;
 	}
 	vpr_info("processed %d queries, with %d matches, %d errs\n",
@@ -765,7 +797,7 @@ static void *ddebug_proc_next(struct seq_file *m, void *p, loff_t *pos)
 	struct _ddebug *dp;
 
 	vpr_info("called m=%p p=%p *pos=%lld\n",
-		m, p, (unsigned long long)*pos);
+		 m, p, (unsigned long long)*pos);
 
 	if (p == SEQ_START_TOKEN)
 		dp = ddebug_iter_first(iter);
@@ -791,14 +823,14 @@ static int ddebug_proc_show(struct seq_file *m, void *p)
 
 	if (p == SEQ_START_TOKEN) {
 		seq_puts(m,
-			"# filename:lineno [module]function flags format\n");
+			 "# filename:lineno [module]function flags format\n");
 		return 0;
 	}
 
 	seq_printf(m, "%s:%u [%s]%s =%s \"",
-		trim_prefix(dp->filename), dp->lineno,
-		iter->table->mod_name, dp->function,
-		ddebug_describe_flags(dp, flagsbuf, sizeof(flagsbuf)));
+		   trim_prefix(dp->filename), dp->lineno,
+		   iter->table->mod_name, dp->function,
+		   ddebug_describe_flags(dp, flagsbuf, sizeof(flagsbuf)));
 	seq_escape(m, dp->format, "\t\r\n\"");
 	seq_puts(m, "\"\n");
@@ -845,7 +877,7 @@ static int ddebug_proc_open(struct inode *inode, struct file *file)
 		kfree(iter);
 		return err;
 	}
-	((struct seq_file *) file->private_data)->private = iter;
+	((struct seq_file *)file->private_data)->private = iter;
 	return 0;
 }
@@ -1002,8 +1034,7 @@ static int __init dynamic_debug_init(void)
 	int verbose_bytes = 0;
 
 	if (__start___verbose == __stop___verbose) {
-		pr_warn("_ddebug table is empty in a "
-			"CONFIG_DYNAMIC_DEBUG build");
+		pr_warn("_ddebug table is empty in a CONFIG_DYNAMIC_DEBUG build\n");
 		return 1;
 	}
 	iter = __start___verbose;
@@ -1030,18 +1061,16 @@ static int __init dynamic_debug_init(void)
 		goto out_err;
 
 	ddebug_init_success = 1;
-	vpr_info("%d modules, %d entries and %d bytes in ddebug tables,"
-		" %d bytes in (readonly) verbose section\n",
-		modct, entries, (int)( modct * sizeof(struct ddebug_table)),
-		verbose_bytes + (int)(__stop___verbose - __start___verbose));
+	vpr_info("%d modules, %d entries and %d bytes in ddebug tables, %d bytes in (readonly) verbose section\n",
+		 modct, entries, (int)(modct * sizeof(struct ddebug_table)),
+		 verbose_bytes + (int)(__stop___verbose - __start___verbose));
 
 	/* apply ddebug_query boot param, dont unload tables on err */
 	if (ddebug_setup_string[0] != '\0') {
-		pr_warn("ddebug_query param name is deprecated,"
-			" change it to dyndbg\n");
+		pr_warn("ddebug_query param name is deprecated, change it to dyndbg\n");
 		ret = ddebug_exec_queries(ddebug_setup_string, NULL);
 		if (ret < 0)
-			pr_warn("Invalid ddebug boot param %s",
+			pr_warn("Invalid ddebug boot param %s\n",
 				ddebug_setup_string);
 		else
 			pr_info("%d changes by ddebug_query\n", ret);
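[Editor's note: for context, the queries that ddebug_tokenize()/ddebug_parse_query()/ddebug_parse_flags() accept above are written to the dynamic_debug control file. A sketch of a pr_debug() call site and, in the comment, the kind of query that would enable it at run time; the foo names are hypothetical:]

	#include <linux/kernel.h>
	#include <linux/module.h>

	/* drivers/misc/foo.c (hypothetical) */
	static int foo_start(void)
	{
		/* With CONFIG_DYNAMIC_DEBUG, this call site gets its own
		 * _ddebug record and stays silent until enabled, e.g.:
		 *
		 *   echo 'module foo func foo_start +p' \
		 *        > /sys/kernel/debug/dynamic_debug/control
		 *
		 * which the code above tokenizes into the match-spec pairs
		 * "module foo" and "func foo_start" plus the flags word "+p".
		 */
		pr_debug("foo_start called\n");
		return 0;
	}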
diff --git a/lib/hexdump.c b/lib/hexdump.c
index 6540d657dca4..3f0494c9d57a 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -227,6 +227,7 @@ void print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
 }
 EXPORT_SYMBOL(print_hex_dump);
 
+#if !defined(CONFIG_DYNAMIC_DEBUG)
 /**
  * print_hex_dump_bytes - shorthand form of print_hex_dump() with default params
  * @prefix_str: string to prefix each line with;
@@ -246,4 +247,5 @@ void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
 		       buf, len, true);
 }
 EXPORT_SYMBOL(print_hex_dump_bytes);
-#endif
+#endif /* !defined(CONFIG_DYNAMIC_DEBUG) */
+#endif /* defined(CONFIG_PRINTK) */
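[Editor's note: after this change, the print_hex_dump_bytes() definition here is compiled only when dynamic debug is off; with CONFIG_DYNAMIC_DEBUG the call is expected to be handled by the dynamic-debug machinery instead. A sketch of both hexdump calls; dump_frame is a hypothetical helper:]

	#include <linux/printk.h>

	static void dump_frame(const void *buf, size_t len)
	{
		/* 16 bytes per line, 1-byte groups, offset and ASCII columns */
		print_hex_dump(KERN_DEBUG, "frame: ", DUMP_PREFIX_OFFSET,
			       16, 1, buf, len, true);

		/* shorthand form with the default parameters */
		print_hex_dump_bytes("frame: ", DUMP_PREFIX_OFFSET, buf, len);
	}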
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index 7aae0f2a5e0a..c3eb261a7df3 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -47,10 +47,10 @@ __setup("debug_locks_verbose=", setup_debug_locks_verbose);
  * Normal standalone locks, for the circular and irq-context
  * dependency tests:
  */
-static DEFINE_SPINLOCK(lock_A);
-static DEFINE_SPINLOCK(lock_B);
-static DEFINE_SPINLOCK(lock_C);
-static DEFINE_SPINLOCK(lock_D);
+static DEFINE_RAW_SPINLOCK(lock_A);
+static DEFINE_RAW_SPINLOCK(lock_B);
+static DEFINE_RAW_SPINLOCK(lock_C);
+static DEFINE_RAW_SPINLOCK(lock_D);
 
 static DEFINE_RWLOCK(rwlock_A);
 static DEFINE_RWLOCK(rwlock_B);
@@ -73,12 +73,12 @@ static DECLARE_RWSEM(rwsem_D);
  * but X* and Y* are different classes. We do this so that
  * we do not trigger a real lockup:
  */
-static DEFINE_SPINLOCK(lock_X1);
-static DEFINE_SPINLOCK(lock_X2);
-static DEFINE_SPINLOCK(lock_Y1);
-static DEFINE_SPINLOCK(lock_Y2);
-static DEFINE_SPINLOCK(lock_Z1);
-static DEFINE_SPINLOCK(lock_Z2);
+static DEFINE_RAW_SPINLOCK(lock_X1);
+static DEFINE_RAW_SPINLOCK(lock_X2);
+static DEFINE_RAW_SPINLOCK(lock_Y1);
+static DEFINE_RAW_SPINLOCK(lock_Y2);
+static DEFINE_RAW_SPINLOCK(lock_Z1);
+static DEFINE_RAW_SPINLOCK(lock_Z2);
 
 static DEFINE_RWLOCK(rwlock_X1);
 static DEFINE_RWLOCK(rwlock_X2);
@@ -107,10 +107,10 @@ static DECLARE_RWSEM(rwsem_Z2);
  */
 #define INIT_CLASS_FUNC(class) 				\
 static noinline void					\
-init_class_##class(spinlock_t *lock, rwlock_t *rwlock, struct mutex *mutex, \
-		 struct rw_semaphore *rwsem)		\
+init_class_##class(raw_spinlock_t *lock, rwlock_t *rwlock, \
+		 struct mutex *mutex, struct rw_semaphore *rwsem)\
 {							\
-	spin_lock_init(lock);				\
+	raw_spin_lock_init(lock);			\
 	rwlock_init(rwlock);				\
 	mutex_init(mutex);				\
 	init_rwsem(rwsem);				\
@@ -168,10 +168,10 @@ static void init_shared_classes(void)
  * Shortcuts for lock/unlock API variants, to keep
  * the testcases compact:
  */
-#define L(x)			spin_lock(&lock_##x)
-#define U(x)			spin_unlock(&lock_##x)
+#define L(x)			raw_spin_lock(&lock_##x)
+#define U(x)			raw_spin_unlock(&lock_##x)
 #define LU(x)			L(x); U(x)
-#define SI(x)			spin_lock_init(&lock_##x)
+#define SI(x)			raw_spin_lock_init(&lock_##x)
 
 #define WL(x)			write_lock(&rwlock_##x)
 #define WU(x)			write_unlock(&rwlock_##x)
@@ -911,7 +911,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft)
 
 #define I2(x)					\
 	do {					\
-		spin_lock_init(&lock_##x);	\
+		raw_spin_lock_init(&lock_##x);	\
 		rwlock_init(&rwlock_##x);	\
 		mutex_init(&mutex_##x);		\
 		init_rwsem(&rwsem_##x);		\
diff --git a/lib/mpi/mpi-internal.h b/lib/mpi/mpi-internal.h
index 77adcf6bc257..60cf765628e9 100644
--- a/lib/mpi/mpi-internal.h
+++ b/lib/mpi/mpi-internal.h
@@ -65,10 +65,6 @@
 typedef mpi_limb_t *mpi_ptr_t;	/* pointer to a limb */
 typedef int mpi_size_t;		/* (must be a signed type) */
 
-#define ABS(x) (x >= 0 ? x : -x)
-#define MIN(l, o) ((l) < (o) ? (l) : (o))
-#define MAX(h, i) ((h) > (i) ? (h) : (i))
-
 static inline int RESIZE_IF_NEEDED(MPI a, unsigned b)
 {
 	if (a->alloced < b)
diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c
index 3962b7f7fe3f..5f9c44cdf1f5 100644
--- a/lib/mpi/mpicoder.c
+++ b/lib/mpi/mpicoder.c
@@ -52,7 +52,7 @@ MPI mpi_read_raw_data(const void *xbuffer, size_t nbytes)
 	else
 		nbits = 0;
 
-	nlimbs = (nbytes + BYTES_PER_MPI_LIMB - 1) / BYTES_PER_MPI_LIMB;
+	nlimbs = DIV_ROUND_UP(nbytes, BYTES_PER_MPI_LIMB);
 	val = mpi_alloc(nlimbs);
 	if (!val)
 		return NULL;
@@ -96,8 +96,8 @@ MPI mpi_read_from_buffer(const void *xbuffer, unsigned *ret_nread)
 	buffer += 2;
 	nread = 2;
 
-	nbytes = (nbits + 7) / 8;
-	nlimbs = (nbytes + BYTES_PER_MPI_LIMB - 1) / BYTES_PER_MPI_LIMB;
+	nbytes = DIV_ROUND_UP(nbits, 8);
+	nlimbs = DIV_ROUND_UP(nbytes, BYTES_PER_MPI_LIMB);
 	val = mpi_alloc(nlimbs);
 	if (!val)
 		return NULL;
@@ -193,7 +193,7 @@ int mpi_set_buffer(MPI a, const void *xbuffer, unsigned nbytes, int sign)
 	int nlimbs;
 	int i;
 
-	nlimbs = (nbytes + BYTES_PER_MPI_LIMB - 1) / BYTES_PER_MPI_LIMB;
+	nlimbs = DIV_ROUND_UP(nbytes, BYTES_PER_MPI_LIMB);
 	if (RESIZE_IF_NEEDED(a, nlimbs) < 0)
 		return -ENOMEM;
 	a->sign = sign;
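[Editor's note: the mpicoder.c hunks, like the digsig.c one earlier, replace open-coded round-up divisions with the standard helper from include/linux/kernel.h, which expands to the same arithmetic:]

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	/* e.g. whole bytes needed for a bit count:
	 *   DIV_ROUND_UP(13, 8) == 2, exactly like (13 + 7) / 8 */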
diff --git a/lib/parser.c b/lib/parser.c
index 52cfa69f73df..807b2aaa33fa 100644
--- a/lib/parser.c
+++ b/lib/parser.c
@@ -157,7 +157,7 @@ static int match_number(substring_t *s, int *result, int base)
  *
  * Description: Attempts to parse the &substring_t @s as a decimal integer. On
  * success, sets @result to the integer represented by the string and returns 0.
- * Returns either -ENOMEM or -EINVAL on failure.
+ * Returns -ENOMEM, -EINVAL, or -ERANGE on failure.
  */
 int match_int(substring_t *s, int *result)
 {
@@ -171,7 +171,7 @@ int match_int(substring_t *s, int *result)
  *
  * Description: Attempts to parse the &substring_t @s as an octal integer. On
  * success, sets @result to the integer represented by the string and returns
- * 0. Returns either -ENOMEM or -EINVAL on failure.
+ * 0. Returns -ENOMEM, -EINVAL, or -ERANGE on failure.
  */
 int match_octal(substring_t *s, int *result)
 {
@@ -185,7 +185,7 @@ int match_octal(substring_t *s, int *result)
  *
  * Description: Attempts to parse the &substring_t @s as a hexadecimal integer.
  * On success, sets @result to the integer represented by the string and
- * returns 0. Returns either -ENOMEM or -EINVAL on failure.
+ * returns 0. Returns -ENOMEM, -EINVAL, or -ERANGE on failure.
  */
 int match_hex(substring_t *s, int *result)
 {
diff --git a/lib/rbtree.c b/lib/rbtree.c
index 4f56a11d67fa..c0e31fe2fabf 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -194,8 +194,12 @@ __rb_insert(struct rb_node *node, struct rb_root *root,
 	}
 }
 
-__always_inline void
-__rb_erase_color(struct rb_node *parent, struct rb_root *root,
+/*
+ * Inline version for rb_erase() use - we want to be able to inline
+ * and eliminate the dummy_rotate callback there
+ */
+static __always_inline void
+____rb_erase_color(struct rb_node *parent, struct rb_root *root,
 	void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
 {
 	struct rb_node *node = NULL, *sibling, *tmp1, *tmp2;
@@ -355,6 +359,13 @@ __rb_erase_color(struct rb_node *parent, struct rb_root *root,
 		}
 	}
 }
+
+/* Non-inline version for rb_erase_augmented() use */
+void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
+	void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
+{
+	____rb_erase_color(parent, root, augment_rotate);
+}
 EXPORT_SYMBOL(__rb_erase_color);
 
 /*
@@ -380,7 +391,10 @@ EXPORT_SYMBOL(rb_insert_color);
 
 void rb_erase(struct rb_node *node, struct rb_root *root)
 {
-	rb_erase_augmented(node, root, &dummy_callbacks);
+	struct rb_node *rebalance;
+	rebalance = __rb_erase_augmented(node, root, &dummy_callbacks);
+	if (rebalance)
+		____rb_erase_color(rebalance, root, dummy_rotate);
 }
 EXPORT_SYMBOL(rb_erase);
diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c
index 7e0d6a58fc83..7542afbb22b3 100644
--- a/lib/rwsem-spinlock.c
+++ b/lib/rwsem-spinlock.c
@@ -73,20 +73,13 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
 		goto dont_wake_writers;
 	}
 
-	/* if we are allowed to wake writers try to grant a single write lock
-	 * if there's a writer at the front of the queue
-	 * - we leave the 'waiting count' incremented to signify potential
-	 *   contention
+	/*
+	 * as we support write lock stealing, we can't set sem->activity
+	 * to -1 here to indicate we get the lock. Instead, we wake it up
+	 * to let it go get it again.
 	 */
 	if (waiter->flags & RWSEM_WAITING_FOR_WRITE) {
-		sem->activity = -1;
-		list_del(&waiter->list);
-		tsk = waiter->task;
-		/* Don't touch waiter after ->task has been NULLed */
-		smp_mb();
-		waiter->task = NULL;
-		wake_up_process(tsk);
-		put_task_struct(tsk);
+		wake_up_process(waiter->task);
 		goto out;
 	}
 
@@ -121,18 +114,10 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
 static inline struct rw_semaphore *
 __rwsem_wake_one_writer(struct rw_semaphore *sem)
 {
 	struct rwsem_waiter *waiter;
-	struct task_struct *tsk;
-
-	sem->activity = -1;
 
 	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
-	list_del(&waiter->list);
+	wake_up_process(waiter->task);
 
-	tsk = waiter->task;
-	smp_mb();
-	waiter->task = NULL;
-	wake_up_process(tsk);
-	put_task_struct(tsk);
 	return sem;
 }
@@ -204,7 +189,6 @@ int __down_read_trylock(struct rw_semaphore *sem)
 
 /*
  * get a write lock on the semaphore
- * - we increment the waiting count anyway to indicate an exclusive lock
  */
 void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
 {
@@ -214,37 +198,32 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
 
 	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
-	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
-		/* granted */
-		sem->activity = -1;
-		raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
-		goto out;
-	}
-
-	tsk = current;
-	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-
 	/* set up my own style of waitqueue */
+	tsk = current;
 	waiter.task = tsk;
 	waiter.flags = RWSEM_WAITING_FOR_WRITE;
-	get_task_struct(tsk);
-
 	list_add_tail(&waiter.list, &sem->wait_list);
 
-	/* we don't need to touch the semaphore struct anymore */
-	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
-
-	/* wait to be given the lock */
+	/* wait for someone to release the lock */
 	for (;;) {
-		if (!waiter.task)
+		/*
+		 * That is the key to support write lock stealing: allows the
+		 * task already on CPU to get the lock soon rather than put
+		 * itself into sleep and waiting for system woke it or someone
+		 * else in the head of the wait list up.
+		 */
+		if (sem->activity == 0)
 			break;
-		schedule();
 		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+		raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
+		schedule();
+		raw_spin_lock_irqsave(&sem->wait_lock, flags);
 	}
+	/* got the lock */
+	sem->activity = -1;
+	list_del(&waiter.list);
 
-	tsk->state = TASK_RUNNING;
- out:
-	;
+	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 }
 
 void __sched __down_write(struct rw_semaphore *sem)
@@ -262,8 +241,8 @@ int __down_write_trylock(struct rw_semaphore *sem)
 
 	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
-	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
-		/* granted */
+	if (sem->activity == 0) {
+		/* got the lock */
 		sem->activity = -1;
 		ret = 1;
 	}
diff --git a/lib/rwsem.c b/lib/rwsem.c
index 8337e1b9bb8d..ad5e0df16ab4 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -2,6 +2,8 @@
  *
  * Written by David Howells (dhowells@redhat.com).
  * Derived from arch/i386/kernel/semaphore.c
+ *
+ * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
  */
 #include <linux/rwsem.h>
 #include <linux/sched.h>
@@ -60,7 +62,7 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wake_type)
 	struct rwsem_waiter *waiter;
 	struct task_struct *tsk;
 	struct list_head *next;
-	signed long oldcount, woken, loop, adjustment;
+	signed long woken, loop, adjustment;
 
 	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
 	if (!(waiter->flags & RWSEM_WAITING_FOR_WRITE))
@@ -72,30 +74,8 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wake_type)
 		 */
 		goto out;
 
-	/* There's a writer at the front of the queue - try to grant it the
-	 * write lock.  However, we only wake this writer if we can transition
-	 * the active part of the count from 0 -> 1
-	 */
-	adjustment = RWSEM_ACTIVE_WRITE_BIAS;
-	if (waiter->list.next == &sem->wait_list)
-		adjustment -= RWSEM_WAITING_BIAS;
-
- try_again_write:
-	oldcount = rwsem_atomic_update(adjustment, sem) - adjustment;
-	if (oldcount & RWSEM_ACTIVE_MASK)
-		/* Someone grabbed the sem already */
-		goto undo_write;
-
-	/* We must be careful not to touch 'waiter' after we set ->task = NULL.
-	 * It is an allocated on the waiter's stack and may become invalid at
-	 * any time after that point (due to a wakeup from another source).
-	 */
-	list_del(&waiter->list);
-	tsk = waiter->task;
-	smp_mb();
-	waiter->task = NULL;
-	wake_up_process(tsk);
-	put_task_struct(tsk);
+	/* Wake up the writing waiter and let the task grab the sem: */
+	wake_up_process(waiter->task);
 	goto out;
 
 readers_only:
@@ -157,12 +137,40 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wake_type)
 
 out:
 	return sem;
+}
+
+/* Try to get write sem, caller holds sem->wait_lock: */
+static int try_get_writer_sem(struct rw_semaphore *sem,
+					struct rwsem_waiter *waiter)
+{
+	struct rwsem_waiter *fwaiter;
+	long oldcount, adjustment;
 
-	/* undo the change to the active count, but check for a transition
-	 * 1->0 */
- undo_write:
+	/* only steal when first waiter is writing */
+	fwaiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
+	if (!(fwaiter->flags & RWSEM_WAITING_FOR_WRITE))
+		return 0;
+
+	adjustment = RWSEM_ACTIVE_WRITE_BIAS;
+	/* Only one waiter in the queue: */
+	if (fwaiter == waiter && waiter->list.next == &sem->wait_list)
+		adjustment -= RWSEM_WAITING_BIAS;
+
+try_again_write:
+	oldcount = rwsem_atomic_update(adjustment, sem) - adjustment;
+	if (!(oldcount & RWSEM_ACTIVE_MASK)) {
+		/* No active lock: */
+		struct task_struct *tsk = waiter->task;
+
+		list_del(&waiter->list);
+		smp_mb();
+		put_task_struct(tsk);
+		tsk->state = TASK_RUNNING;
+		return 1;
+	}
+	/* some one grabbed the sem already */
 	if (rwsem_atomic_update(-adjustment, sem) & RWSEM_ACTIVE_MASK)
-		goto out;
+		return 0;
 	goto try_again_write;
 }
 
@@ -210,6 +218,15 @@ rwsem_down_failed_common(struct rw_semaphore *sem,
 	for (;;) {
 		if (!waiter.task)
 			break;
+
+		raw_spin_lock_irq(&sem->wait_lock);
+		/* Try to get the writer sem, may steal from the head writer: */
+		if (flags == RWSEM_WAITING_FOR_WRITE)
+			if (try_get_writer_sem(sem, &waiter)) {
+				raw_spin_unlock_irq(&sem->wait_lock);
+				return sem;
+			}
+		raw_spin_unlock_irq(&sem->wait_lock);
 		schedule();
 		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 	}
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 196b06984dec..bfe02b8fc55b 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -122,11 +122,18 @@ static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
 	return phys_to_dma(hwdev, virt_to_phys(address));
 }
 
+static bool no_iotlb_memory;
+
 void swiotlb_print_info(void)
 {
 	unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT;
 	unsigned char *vstart, *vend;
 
+	if (no_iotlb_memory) {
+		pr_warn("software IO TLB: No low mem\n");
+		return;
+	}
+
 	vstart = phys_to_virt(io_tlb_start);
 	vend = phys_to_virt(io_tlb_end);
 
@@ -136,7 +143,7 @@ void swiotlb_print_info(void)
 	       bytes >> 20, vstart, vend - 1);
 }
 
-void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
+int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 {
 	void *v_overflow_buffer;
 	unsigned long i, bytes;
@@ -150,9 +157,10 @@ void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 	/*
 	 * Get the overflow emergency buffer
 	 */
-	v_overflow_buffer = alloc_bootmem_low_pages(PAGE_ALIGN(io_tlb_overflow));
+	v_overflow_buffer = alloc_bootmem_low_pages_nopanic(
+						PAGE_ALIGN(io_tlb_overflow));
 	if (!v_overflow_buffer)
-		panic("Cannot allocate SWIOTLB overflow buffer!\n");
+		return -ENOMEM;
 
 	io_tlb_overflow_buffer = __pa(v_overflow_buffer);
 
@@ -169,15 +177,19 @@ void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 
 	if (verbose)
 		swiotlb_print_info();
+
+	return 0;
 }
 
 /*
  * Statically reserve bounce buffer space and initialize bounce buffer data
  * structures for the software IO TLB used to implement the DMA API.
  */
-static void __init
-swiotlb_init_with_default_size(size_t default_size, int verbose)
+void __init
+swiotlb_init(int verbose)
 {
+	/* default to 64MB */
+	size_t default_size = 64UL<<20;
 	unsigned char *vstart;
 	unsigned long bytes;
 
@@ -188,20 +200,16 @@ swiotlb_init_with_default_size(size_t default_size, int verbose)
 
 	bytes = io_tlb_nslabs << IO_TLB_SHIFT;
 
-	/*
-	 * Get IO TLB memory from the low pages
-	 */
-	vstart = alloc_bootmem_low_pages(PAGE_ALIGN(bytes));
-	if (!vstart)
-		panic("Cannot allocate SWIOTLB buffer");
-
-	swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose);
-}
+	/* Get IO TLB memory from the low pages */
+	vstart = alloc_bootmem_low_pages_nopanic(PAGE_ALIGN(bytes));
+	if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose))
+		return;
 
-void __init
-swiotlb_init(int verbose)
-{
-	swiotlb_init_with_default_size(64 * (1<<20), verbose);	/* default to 64MB */
+	if (io_tlb_start)
+		free_bootmem(io_tlb_start,
+				 PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
+	pr_warn("Cannot allocate SWIOTLB buffer");
+	no_iotlb_memory = true;
 }
 
 /*
@@ -405,6 +413,9 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
 	unsigned long offset_slots;
 	unsigned long max_slots;
 
+	if (no_iotlb_memory)
+		panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
+
 	mask = dma_get_seg_boundary(hwdev);
 
 	tbl_dma_addr &= mask;
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index fab33a9c5318..0d62fd700f68 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -1030,6 +1030,7 @@ int kptr_restrict __read_mostly;
  *	      N no separator
  *	      The maximum supported length is 64 bytes of the input. Consider
  *	      to use print_hex_dump() for the larger input.
+ * - 'a' For a phys_addr_t type and its derivative types (passed by reference)
  *
  * Note: The difference between 'S' and 'F' is that on ia64 and ppc64
  * function pointers are really function descriptors, which contain a
@@ -1120,6 +1121,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
 			return netdev_feature_string(buf, end, ptr, spec);
 		}
 		break;
+	case 'a':
+		spec.flags |= SPECIAL | SMALL | ZEROPAD;
+		spec.field_width = sizeof(phys_addr_t) * 2 + 2;
+		spec.base = 16;
+		return number(buf, end,
+			      (unsigned long long) *((phys_addr_t *)ptr), spec);
 	}
 	spec.flags |= SMALL;
 	if (spec.field_width == -1) {
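[Editor's note: the new %pa specifier prints a phys_addr_t passed by reference, so the same format string works whether phys_addr_t is 32 or 64 bits on the target. A sketch of a call site; report_region is a hypothetical helper:]

	#include <linux/printk.h>
	#include <linux/types.h>

	static void report_region(phys_addr_t base, phys_addr_t size)
	{
		/* note: %pa takes the address OF the phys_addr_t,
		 * not its value */
		pr_info("mapped region: base %pa, size %pa\n", &base, &size);
	}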
diff --git a/lib/xz/Kconfig b/lib/xz/Kconfig
index 60a6088d0e5e..82a04d7ba99e 100644
--- a/lib/xz/Kconfig
+++ b/lib/xz/Kconfig
@@ -6,42 +6,40 @@ config XZ_DEC
 	  the .xz file format as the container. For integrity checking,
 	  CRC32 is supported. See Documentation/xz.txt for more information.
 
+if XZ_DEC
+
 config XZ_DEC_X86
-	bool "x86 BCJ filter decoder" if EXPERT
-	default y
-	depends on XZ_DEC
+	bool "x86 BCJ filter decoder"
+	default y if X86
 	select XZ_DEC_BCJ
 
 config XZ_DEC_POWERPC
-	bool "PowerPC BCJ filter decoder" if EXPERT
-	default y
-	depends on XZ_DEC
+	bool "PowerPC BCJ filter decoder"
+	default y if POWERPC
 	select XZ_DEC_BCJ
 
 config XZ_DEC_IA64
-	bool "IA-64 BCJ filter decoder" if EXPERT
-	default y
-	depends on XZ_DEC
+	bool "IA-64 BCJ filter decoder"
+	default y if IA64
 	select XZ_DEC_BCJ
 
 config XZ_DEC_ARM
-	bool "ARM BCJ filter decoder" if EXPERT
-	default y
-	depends on XZ_DEC
+	bool "ARM BCJ filter decoder"
+	default y if ARM
 	select XZ_DEC_BCJ
 
 config XZ_DEC_ARMTHUMB
-	bool "ARM-Thumb BCJ filter decoder" if EXPERT
-	default y
-	depends on XZ_DEC
+	bool "ARM-Thumb BCJ filter decoder"
+	default y if (ARM && ARM_THUMB)
 	select XZ_DEC_BCJ
 
 config XZ_DEC_SPARC
-	bool "SPARC BCJ filter decoder" if EXPERT
-	default y
-	depends on XZ_DEC
+	bool "SPARC BCJ filter decoder"
+	default y if SPARC
 	select XZ_DEC_BCJ
 
+endif
+
 config XZ_DEC_BCJ
 	bool
 	default n