commit 0a5ce0831d04382aa9e2420e33dff958ddade542
tree   4ee2c49cb260e1b514578ce1bc90a917d3595792
parent e4dace3615526fd66c86dd535ee4bc9e8c706e37
Author:    Yury Norov <ynorov@caviumnetworks.com>          2017-09-08 16:15:34 -0700
Committer: Linus Torvalds <torvalds@linux-foundation.org>  2017-09-08 18:26:49 -0700
lib/bitmap.c: make bitmap_parselist() thread-safe and much faster
The current implementation of bitmap_parselist() uses a static variable to save local state while setting bits in the bitmap. This is obviously wrong if we assume execution in a multiprocessor environment. Fortunately, it is possible to rewrite this portion of code to avoid using the static variable.

It is also possible to set bits in the mask per-range with bitmap_set(), rather than per-bit with set_bit() as is done now, which is much faster.

The important side effect of this change is that setting bits in this function is no longer per-bit atomic and is less memory-ordered, because set_bit() guarantees the order of memory accesses while bitmap_set() does not. I consider this an advantage of the new approach: bitmap_parselist() is intended to initialise bit arrays, and the user should protect the whole bitmap during initialisation if needed, so protecting individual bits looks expensive and useless. Other range-oriented functions in lib/bitmap.c don't worry much about atomicity either.

With all that, setting 2k bits in a map with a pattern like 0-2047:128/256 becomes ~50 times faster after applying the patch in my testing environment (arm64 hosted on qemu).

The second patch of the series adds a test for bitmap_parselist(). It is not intended to cover all tricky cases, just to make sure that I didn't screw up during the rework.

Link: http://lkml.kernel.org/r/20170807225438.16161-1-ynorov@caviumnetworks.com
Signed-off-by: Yury Norov <ynorov@caviumnetworks.com>
Cc: Noam Camus <noamca@mellanox.com>
Cc: Rasmus Villemoes <linux@rasmusvillemoes.dk>
Cc: Matthew Wilcox <mawilcox@microsoft.com>
Cc: Mauro Carvalho Chehab <mchehab@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
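[Editor's illustration, not part of the commit] A minimal userspace sketch of the range loop this patch introduces. It models the "a-b:used/group" list syntax (e.g. "0-2047:128/256" sets the first 128 bits of every 256-bit group within 0..2047) using a plain byte-per-bit array; the names set_partial_range, MIN and NBITS are inventions of this sketch, not kernel APIs:

#include <stdio.h>

#define NBITS 2048
#define MIN(x, y) ((x) < (y) ? (x) : (y))

/*
 * Model of the patched kernel loop: for "a-b:used/group", set the
 * first 'used_size' bits of every 'group_size'-sized chunk in [a, b].
 * One contiguous run per group instead of a set_bit() call per bit.
 * Assumes group_size > 0 (the kernel validates this elsewhere).
 */
static void set_partial_range(unsigned char *map, unsigned int a,
                              unsigned int b, unsigned int used_size,
                              unsigned int group_size)
{
        while (a <= b) {
                unsigned int off = MIN(b - a + 1, used_size);
                unsigned int i;

                for (i = 0; i < off; i++)
                        map[a + i] = 1;
                a += group_size;
        }
}

int main(void)
{
        unsigned char map[NBITS] = { 0 };
        unsigned int i, count = 0;

        /* model "0-2047:128/256": 128 bits set per 256-bit group */
        set_partial_range(map, 0, 2047, 128, 256);

        for (i = 0; i < NBITS; i++)
                count += map[i];
        printf("bits set: %u\n", count);        /* 8 groups x 128 = 1024 */
        return 0;
}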
Diffstat (limited to 'lib/bitmap.c')
-rw-r--r--  lib/bitmap.c | 18 ++++++------------
1 file changed, 6 insertions(+), 12 deletions(-)
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 9a532805364b..c82c61b66e16 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -513,7 +513,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
                 int nmaskbits)
 {
         unsigned int a, b, old_a, old_b;
-        unsigned int group_size, used_size;
+        unsigned int group_size, used_size, off;
         int c, old_c, totaldigits, ndigits;
         const char __user __force *ubuf = (const char __user __force *)buf;
         int at_start, in_range, in_partial_range;
@@ -599,6 +599,8 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
                         a = old_a;
                         b = old_b;
                         old_a = old_b = 0;
+                } else {
+                        used_size = group_size = b - a + 1;
                 }
                 /* if no digit is after '-', it's wrong*/
                 if (at_start && in_range)
@@ -608,17 +610,9 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
                 if (b >= nmaskbits)
                         return -ERANGE;
                 while (a <= b) {
-                        if (in_partial_range) {
-                                static int pos_in_group = 1;
-
-                                if (pos_in_group <= used_size)
-                                        set_bit(a, maskp);
-
-                                if (a == b || ++pos_in_group > group_size)
-                                        pos_in_group = 1;
-                        } else
-                                set_bit(a, maskp);
-                        a++;
+                        off = min(b - a + 1, used_size);
+                        bitmap_set(maskp, a, off);
+                        a += group_size;
                 }
         } while (buflen && c == ',');
         return 0;
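[Editor's illustration, not part of the commit] On the thread-safety claim in the log: a function-local static is a single object shared by every caller, which is what made the old pos_in_group counter unsafe under concurrent calls. A hypothetical standalone C program (all names invented here) showing the same hazard in miniature:

#include <pthread.h>
#include <stdio.h>

/* A single counter shared by ALL callers, like the old pos_in_group. */
static int step(void)
{
        static int pos_in_group = 1;

        return pos_in_group++;  /* unsynchronized read-modify-write */
}

static void *worker(void *arg)
{
        (void)arg;
        for (int i = 0; i < 100000; i++)
                step();
        return NULL;
}

int main(void)
{
        pthread_t t1, t2;

        pthread_create(&t1, NULL, worker, NULL);
        pthread_create(&t2, NULL, worker, NULL);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);

        /*
         * With a per-call local this would deterministically print
         * 200001; with the shared static, lost updates make the result
         * vary from run to run. The patched kernel loop avoids the
         * problem entirely by keeping all state in ordinary locals.
         */
        printf("final counter: %d\n", step());
        return 0;
}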