author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2015-06-08 10:19:40 -0700
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2015-06-08 10:19:40 -0700
commit     987aec39a74373c55c3bedd5c3c83896d78fd0a2 (patch)
tree       66aba6d4f166f269be95fe4d267d57efe533aefd /lib
parent     303cda0ea7c1c33701812ccb80d37083a4093c7c (diff)
parent     d4a4f75cd8f29cd9464a5a32e9224a91571d6649 (diff)
download   linux-987aec39a74373c55c3bedd5c3c83896d78fd0a2.tar.gz
           linux-987aec39a74373c55c3bedd5c3c83896d78fd0a2.tar.bz2
           linux-987aec39a74373c55c3bedd5c3c83896d78fd0a2.zip
Merge 4.1-rc7 into driver-core-next

We want the fixes in this branch as well for testing and merge resolution.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'lib')
-rw-r--r--  lib/cpumask.c         74
-rw-r--r--  lib/percpu_counter.c   6
-rw-r--r--  lib/rhashtable.c      11
-rw-r--r--  lib/strnlen_user.c    12
-rw-r--r--  lib/swiotlb.c          5
5 files changed, 53 insertions(+), 55 deletions(-)
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 830dd5dec40f..5f627084f2e9 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -139,64 +139,42 @@ void __init free_bootmem_cpumask_var(cpumask_var_t mask)
#endif
/**
- * cpumask_set_cpu_local_first - set i'th cpu with local numa cpu's first
- *
+ * cpumask_local_spread - select the i'th cpu with local numa cpu's first
* @i: index number
- * @numa_node: local numa_node
- * @dstp: cpumask with the relevant cpu bit set according to the policy
+ * @node: local numa_node
*
- * This function sets the cpumask according to a numa aware policy.
- * cpumask could be used as an affinity hint for the IRQ related to a
- * queue. When the policy is to spread queues across cores - local cores
- * first.
+ * This function selects an online CPU according to a numa aware policy;
+ * local cpus are returned first, followed by non-local ones, then it
+ * wraps around.
*
- * Returns 0 on success, -ENOMEM for no memory, and -EAGAIN when failed to set
- * the cpu bit and need to re-call the function.
+ * It's not very efficient, but useful for setup.
*/
-int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp)
+unsigned int cpumask_local_spread(unsigned int i, int node)
{
- cpumask_var_t mask;
int cpu;
- int ret = 0;
-
- if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
- return -ENOMEM;
+ /* Wrap: we always want a cpu. */
i %= num_online_cpus();
- if (numa_node == -1 || !cpumask_of_node(numa_node)) {
- /* Use all online cpu's for non numa aware system */
- cpumask_copy(mask, cpu_online_mask);
+ if (node == -1) {
+ for_each_cpu(cpu, cpu_online_mask)
+ if (i-- == 0)
+ return cpu;
} else {
- int n;
-
- cpumask_and(mask,
- cpumask_of_node(numa_node), cpu_online_mask);
-
- n = cpumask_weight(mask);
- if (i >= n) {
- i -= n;
-
- /* If index > number of local cpu's, mask out local
- * cpu's
- */
- cpumask_andnot(mask, cpu_online_mask, mask);
+ /* NUMA first. */
+ for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask)
+ if (i-- == 0)
+ return cpu;
+
+ for_each_cpu(cpu, cpu_online_mask) {
+ /* Skip NUMA nodes, done above. */
+ if (cpumask_test_cpu(cpu, cpumask_of_node(node)))
+ continue;
+
+ if (i-- == 0)
+ return cpu;
}
}
-
- for_each_cpu(cpu, mask) {
- if (--i < 0)
- goto out;
- }
-
- ret = -EAGAIN;
-
-out:
- free_cpumask_var(mask);
-
- if (!ret)
- cpumask_set_cpu(cpu, dstp);
-
- return ret;
+ BUG();
}
-EXPORT_SYMBOL(cpumask_set_cpu_local_first);
+EXPORT_SYMBOL(cpumask_local_spread);
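
For context, a minimal sketch of how a multiqueue driver might use the renamed
helper to spread its per-queue IRQ affinity hints, local NUMA CPUs first. The
example_spread_irqs() function and its irqs/nvec/node parameters are invented
for illustration, not part of this commit:

/*
 * Hypothetical driver sketch (not from this commit): hint that queue
 * i's IRQ should land on the i'th online CPU, preferring CPUs local
 * to 'node' before falling back to remote ones.
 */
#include <linux/cpumask.h>
#include <linux/interrupt.h>

static void example_spread_irqs(const int *irqs, unsigned int nvec, int node)
{
	unsigned int i;

	for (i = 0; i < nvec; i++) {
		/* Pick the i'th online CPU, local NUMA CPUs first. */
		unsigned int cpu = cpumask_local_spread(i, node);

		irq_set_affinity_hint(irqs[i], cpumask_of(cpu));
	}
}

Note the simpler contract: unlike the old cpumask_set_cpu_local_first(), the
new function never fails and allocates nothing, so callers need neither a
destination cpumask nor an -EAGAIN/-ENOMEM retry path.
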
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 48144cdae819..f051d69f0910 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -197,13 +197,13 @@ static int percpu_counter_hotcpu_callback(struct notifier_block *nb,
* Compare counter against given value.
* Return 1 if greater, 0 if equal and -1 if less
*/
-int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
+int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
{
s64 count;
count = percpu_counter_read(fbc);
/* Check to see if rough count will be sufficient for comparison */
- if (abs(count - rhs) > (percpu_counter_batch*num_online_cpus())) {
+ if (abs(count - rhs) > (batch * num_online_cpus())) {
if (count > rhs)
return 1;
else
@@ -218,7 +218,7 @@ int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
else
return 0;
}
-EXPORT_SYMBOL(percpu_counter_compare);
+EXPORT_SYMBOL(__percpu_counter_compare);
static int __init percpu_counter_startup(void)
{
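
The new batch parameter lets individual call sites decide how much per-cpu
drift they will tolerate before the comparison falls back to the exact (and
more expensive) sum. A hedged sketch of a caller using the underscored
variant; example_over_limit(), ctr, and EXAMPLE_BATCH are invented names:

/*
 * Hypothetical caller (not from this commit): test whether a per-cpu
 * counter exceeds a limit, with a caller-chosen batch so a hot path
 * can accept more approximation before taking the counter lock.
 */
#include <linux/percpu_counter.h>

#define EXAMPLE_BATCH	64

static bool example_over_limit(struct percpu_counter *ctr, s64 limit)
{
	/* __percpu_counter_compare(): 1 if greater, 0 if equal, -1 if less. */
	return __percpu_counter_compare(ctr, limit, EXAMPLE_BATCH) > 0;
}
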
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index b28df4019ade..4396434e4715 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -14,6 +14,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
@@ -446,6 +447,10 @@ int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
if (key && rhashtable_lookup_fast(ht, key, ht->p))
goto exit;
+ err = -E2BIG;
+ if (unlikely(rht_grow_above_max(ht, tbl)))
+ goto exit;
+
err = -EAGAIN;
if (rhashtable_check_elasticity(ht, tbl, hash) ||
rht_grow_above_100(ht, tbl))
@@ -738,6 +743,12 @@ int rhashtable_init(struct rhashtable *ht,
if (params->max_size)
ht->p.max_size = rounddown_pow_of_two(params->max_size);
+ if (params->insecure_max_entries)
+ ht->p.insecure_max_entries =
+ rounddown_pow_of_two(params->insecure_max_entries);
+ else
+ ht->p.insecure_max_entries = ht->p.max_size * 2;
+
ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);
/* The maximum (not average) chain length grows with the
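
The new insecure_max_entries parameter gives tables fed by untrusted input a
hard ceiling: rhashtable_insert_slow() above now fails with -E2BIG once the
cap is crossed, and the cap defaults to max_size * 2 when left unset. A
hedged sketch of how a user might wire it up; the example_obj structure and
the chosen sizes are invented for illustration:

#include <linux/rhashtable.h>

/* Hypothetical hashed object (not from this commit). */
struct example_obj {
	u32 key;
	struct rhash_head node;
};

static const struct rhashtable_params example_params = {
	.head_offset          = offsetof(struct example_obj, node),
	.key_offset           = offsetof(struct example_obj, key),
	.key_len              = sizeof(u32),
	.max_size             = 1024,
	/*
	 * Hard cap: insertions fail with -E2BIG beyond this many
	 * entries. Left at zero it defaults to max_size * 2.
	 */
	.insecure_max_entries = 4096,
};

/* ...then, in setup code: err = rhashtable_init(&ht, &example_params); */
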
diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
index a28df5206d95..fe9a32591c24 100644
--- a/lib/strnlen_user.c
+++ b/lib/strnlen_user.c
@@ -57,7 +57,8 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
return res + find_zero(data) + 1 - align;
}
res += sizeof(unsigned long);
- if (unlikely(max < sizeof(unsigned long)))
+ /* We already handled 'unsigned long' bytes. Did we do it all ? */
+ if (unlikely(max <= sizeof(unsigned long)))
break;
max -= sizeof(unsigned long);
if (unlikely(__get_user(c,(unsigned long __user *)(src+res))))
@@ -89,8 +90,15 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
* Get the size of a NUL-terminated string in user space.
*
* Returns the size of the string INCLUDING the terminating NUL.
- * If the string is too long, returns 'count+1'.
+ * If the string is too long, returns a number larger than @count. User
+ * has to check the return value against "> count".
* On exception (or invalid count), returns 0.
+ *
+ * NOTE! You should basically never use this function. There is
+ * almost never any valid case for using the length of a user space
+ * string, since the string can be changed at any time by other
+ * threads. Use "strncpy_from_user()" instead to get a stable copy
+ * of the string.
*/
long strnlen_user(const char __user *str, long count)
{
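
A hedged sketch of the calling convention the updated comment describes: a
return value greater than count means no NUL was found within the limit, 0
means a fault, and real users should take a stable copy with
strncpy_from_user() rather than trusting the length. example_copy_name(),
buf, and EXAMPLE_MAX are invented names:

/*
 * Hypothetical caller (not from this commit), illustrating the
 * documented convention and the advice to prefer strncpy_from_user().
 */
#include <linux/errno.h>
#include <linux/uaccess.h>

#define EXAMPLE_MAX	128

static long example_copy_name(char *buf, const char __user *ustr)
{
	long len = strnlen_user(ustr, EXAMPLE_MAX);

	if (len == 0)
		return -EFAULT;		/* faulted in user space */
	if (len > EXAMPLE_MAX)
		return -ENAMETOOLONG;	/* no NUL within EXAMPLE_MAX bytes */

	/*
	 * 'len' is already stale if another thread modified the string;
	 * the copy below is what should actually be used.
	 */
	return strncpy_from_user(buf, ustr, EXAMPLE_MAX);
}
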
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 4abda074ea45..3c365ab6cf5f 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -537,8 +537,9 @@ EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single);
* Allocates bounce buffer and returns its kernel virtual address.
*/
-phys_addr_t map_single(struct device *hwdev, phys_addr_t phys, size_t size,
- enum dma_data_direction dir)
+static phys_addr_t
+map_single(struct device *hwdev, phys_addr_t phys, size_t size,
+ enum dma_data_direction dir)
{
dma_addr_t start_dma_addr = phys_to_dma(hwdev, io_tlb_start);