author     Stephen Bates <sbates@raithlin.com>    2017-11-17 15:28:16 -0800
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2017-12-14 09:28:22 +0100
commit     a77c116075936faa6e8b8622e22acc9cbf47fe9e (patch)
tree       2176676067f00613a42e6fab27241f1b6ea6506a /lib
parent     e9672477a5f059e88e3c3b886c0a61253447254a (diff)
lib/genalloc.c: make the avail variable an atomic_long_t
[ Upstream commit 36a3d1dd4e16bcd0d2ddfb4a2ec7092f0ae0d931 ]

If the amount of resources allocated to a gen_pool exceeds 2^32, then the avail atomic overflows, and this causes problems when clients try to borrow resources from the pool. This is only expected to be an issue on 64 bit systems.

Add the <linux/atomic.h> header to pull in the atomic_long* operations, so that 32 bit systems continue to use atomic_t while 64 bit systems can use atomic64_t.

Link: http://lkml.kernel.org/r/1509033843-25667-1-git-send-email-sbates@raithlin.com
Signed-off-by: Stephen Bates <sbates@raithlin.com>
Reviewed-by: Logan Gunthorpe <logang@deltatee.com>
Reviewed-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Reviewed-by: Daniel Mentz <danielmentz@google.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
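Why the width matters: in the kernel, atomic_t is always 32 bits wide, whereas atomic_long_t follows the native word size (64 bits on LP64 architectures). A minimal userspace sketch (illustrative only, not kernel code; the 5 GiB figure is an assumption) of the truncation a 32-bit counter suffers once a pool exceeds 2^32 bytes:

#include <stdio.h>

int main(void)
{
	/* Hypothetical pool larger than 2^32 bytes, e.g. 5 GiB. */
	unsigned long long pool_size = 5ULL * 1024 * 1024 * 1024;

	unsigned int avail32 = (unsigned int)pool_size;	/* what a 32-bit counter keeps: wraps */
	unsigned long avail_long = (unsigned long)pool_size;	/* 64 bits on LP64, stays intact */

	printf("pool size     : %llu bytes\n", pool_size);
	printf("32-bit counter: %u bytes (wrapped)\n", avail32);
	printf("long counter  : %lu bytes\n", avail_long);
	return 0;
}

Note that on a 32-bit build unsigned long is also 32 bits wide and would wrap the same way, which matches the commit's point that the overflow is only expected to bite on 64 bit systems.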
Diffstat (limited to 'lib')
-rw-r--r--    lib/genalloc.c | 10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 144fe6b1a03e..ca06adc4f445 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -194,7 +194,7 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy
chunk->phys_addr = phys;
chunk->start_addr = virt;
chunk->end_addr = virt + size - 1;
- atomic_set(&chunk->avail, size);
+ atomic_long_set(&chunk->avail, size);
spin_lock(&pool->lock);
list_add_rcu(&chunk->next_chunk, &pool->chunks);
@@ -304,7 +304,7 @@ unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
nbits = (size + (1UL << order) - 1) >> order;
rcu_read_lock();
list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
- if (size > atomic_read(&chunk->avail))
+ if (size > atomic_long_read(&chunk->avail))
continue;
start_bit = 0;
@@ -324,7 +324,7 @@ retry:
addr = chunk->start_addr + ((unsigned long)start_bit << order);
size = nbits << order;
- atomic_sub(size, &chunk->avail);
+ atomic_long_sub(size, &chunk->avail);
break;
}
rcu_read_unlock();
@@ -390,7 +390,7 @@ void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
BUG_ON(remain);
size = nbits << order;
- atomic_add(size, &chunk->avail);
+ atomic_long_add(size, &chunk->avail);
rcu_read_unlock();
return;
}
@@ -464,7 +464,7 @@ size_t gen_pool_avail(struct gen_pool *pool)
rcu_read_lock();
list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
- avail += atomic_read(&chunk->avail);
+ avail += atomic_long_read(&chunk->avail);
rcu_read_unlock();
return avail;
}
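For context, a minimal kernel-module sketch (not part of this patch; the module name, pool order, sizes, and error handling are illustrative assumptions) showing how the entry points touched above cooperate on chunk->avail: gen_pool_add_virt() seeds it, gen_pool_alloc() subtracts from it, gen_pool_free() adds it back, and gen_pool_avail() sums it across chunks.

/*
 * Illustrative sketch only -- not from the patch. Sizes, names, and the
 * minimal error handling are assumptions made for the example.
 */
#include <linux/module.h>
#include <linux/genalloc.h>
#include <linux/vmalloc.h>

static struct gen_pool *demo_pool;
static void *demo_region;

static int __init genpool_demo_init(void)
{
	unsigned long block;
	size_t region_size = 1UL << 20;		/* 1 MiB backing region */

	/* Allocation granularity of 2^8 = 256 bytes, no NUMA preference. */
	demo_pool = gen_pool_create(8, -1);
	if (!demo_pool)
		return -ENOMEM;

	demo_region = vmalloc(region_size);
	if (!demo_region)
		goto err_pool;

	/* Seeds chunk->avail with region_size (atomic_long_set() above). */
	if (gen_pool_add_virt(demo_pool, (unsigned long)demo_region, -1,
			      region_size, -1))
		goto err_region;

	/* gen_pool_alloc() subtracts from avail ... */
	block = gen_pool_alloc(demo_pool, 4096);
	pr_info("genpool demo: avail after alloc = %zu bytes\n",
		gen_pool_avail(demo_pool));

	/* ... and gen_pool_free() adds it back. */
	if (block)
		gen_pool_free(demo_pool, block, 4096);

	return 0;

err_region:
	vfree(demo_region);
err_pool:
	gen_pool_destroy(demo_pool);
	return -ENOMEM;
}

static void __exit genpool_demo_exit(void)
{
	gen_pool_destroy(demo_pool);
	vfree(demo_region);
}

module_init(genpool_demo_init);
module_exit(genpool_demo_exit);
MODULE_LICENSE("GPL");

With chunk->avail widened to atomic_long_t, the same sequence keeps reporting correct gen_pool_avail() values even when the chunks added to a pool on a 64-bit kernel total more than 2^32 bytes.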