author     Ming Lei <ming.lei@redhat.com>    2021-01-11 11:05:52 +0800
committer  Jens Axboe <axboe@kernel.dk>      2021-01-24 21:22:45 -0700
commit     49d1ec8573f74ff1e23df1d5092211de46baa236 (patch)
tree       f6f25b2b40cb3845afdd529482cc974a7cdd7978
parent     1a23e06cdab2be07cbda460c6417d7de564c48e6 (diff)
block: manage bio slab cache by xarray
Manage the bio slab cache via an xarray, using the slab cache size as the xarray index and storing the 'struct bio_slab' instance in the xarray.

This simplifies the code considerably and makes it more readable than before.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Pavel Begunkov <asml.silence@gmail.com>
Tested-by: Pavel Begunkov <asml.silence@gmail.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
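For orientation, here is a minimal sketch of the lookup-or-create pattern the patch adopts: an xarray indexed by object size, protected by a mutex, with refcounted entries. The names (cache_registry, registry_lock, cached_obj, get_cache) are hypothetical and not from the patch; kernel context is assumed and the kmem_cache creation step is omitted for brevity.

#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/xarray.h>

/* Hypothetical names; this mirrors the patch's pattern, not its code. */
static DEFINE_XARRAY(cache_registry);   /* index: object size in bytes */
static DEFINE_MUTEX(registry_lock);

struct cached_obj {
        struct kmem_cache *slab;        /* backing cache, creation omitted */
        unsigned int refcnt;
        unsigned int size;
};

/* Look up the entry for @size; create and publish one on a miss. */
static struct cached_obj *get_cache(unsigned int size)
{
        struct cached_obj *obj;

        mutex_lock(&registry_lock);
        obj = xa_load(&cache_registry, size);   /* NULL on a miss */
        if (obj) {
                obj->refcnt++;
                goto out;
        }

        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        if (!obj)
                goto out;
        obj->refcnt = 1;
        obj->size = size;

        /*
         * xa_store() returns the displaced entry on success, or an errno
         * encoded as a pointer on failure; xa_err() extracts the errno
         * (0 on success). This is the idiom the patch uses.
         */
        if (xa_err(xa_store(&cache_registry, size, obj, GFP_KERNEL))) {
                kfree(obj);
                obj = NULL;
        }
out:
        mutex_unlock(&registry_lock);
        return obj;
}

Compared with the open-coded array the patch removes, the xarray handles growth internally, so the krealloc()-based resizing and the free-slot scan both disappear.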
-rw-r--r--  block/bio.c  116
1 file changed, 49 insertions(+), 67 deletions(-)
diff --git a/block/bio.c b/block/bio.c
index 0b70ade17da6..87bf16460e0e 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -19,6 +19,7 @@
#include <linux/highmem.h>
#include <linux/sched/sysctl.h>
#include <linux/blk-crypto.h>
+#include <linux/xarray.h>
#include <trace/events/block.h>
#include "blk.h"
@@ -58,89 +59,80 @@ struct bio_slab {
char name[8];
};
static DEFINE_MUTEX(bio_slab_lock);
-static struct bio_slab *bio_slabs;
-static unsigned int bio_slab_nr, bio_slab_max;
+static DEFINE_XARRAY(bio_slabs);
-static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
+static struct bio_slab *create_bio_slab(unsigned int size)
{
- unsigned int sz = sizeof(struct bio) + extra_size;
- struct kmem_cache *slab = NULL;
- struct bio_slab *bslab, *new_bio_slabs;
- unsigned int new_bio_slab_max;
- unsigned int i, entry = -1;
+ struct bio_slab *bslab = kzalloc(sizeof(*bslab), GFP_KERNEL);
- mutex_lock(&bio_slab_lock);
+ if (!bslab)
+ return NULL;
- i = 0;
- while (i < bio_slab_nr) {
- bslab = &bio_slabs[i];
+ snprintf(bslab->name, sizeof(bslab->name), "bio-%d", size);
+ bslab->slab = kmem_cache_create(bslab->name, size,
+ ARCH_KMALLOC_MINALIGN, SLAB_HWCACHE_ALIGN, NULL);
+ if (!bslab->slab)
+ goto fail_alloc_slab;
- if (!bslab->slab && entry == -1)
- entry = i;
- else if (bslab->slab_size == sz) {
- slab = bslab->slab;
- bslab->slab_ref++;
- break;
- }
- i++;
- }
+ bslab->slab_ref = 1;
+ bslab->slab_size = size;
- if (slab)
- goto out_unlock;
-
- if (bio_slab_nr == bio_slab_max && entry == -1) {
- new_bio_slab_max = bio_slab_max << 1;
- new_bio_slabs = krealloc(bio_slabs,
- new_bio_slab_max * sizeof(struct bio_slab),
- GFP_KERNEL);
- if (!new_bio_slabs)
- goto out_unlock;
- bio_slab_max = new_bio_slab_max;
- bio_slabs = new_bio_slabs;
- }
- if (entry == -1)
- entry = bio_slab_nr++;
+ if (!xa_err(xa_store(&bio_slabs, size, bslab, GFP_KERNEL)))
+ return bslab;
- bslab = &bio_slabs[entry];
+ kmem_cache_destroy(bslab->slab);
- snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
- slab = kmem_cache_create(bslab->name, sz, ARCH_KMALLOC_MINALIGN,
- SLAB_HWCACHE_ALIGN, NULL);
- if (!slab)
- goto out_unlock;
+fail_alloc_slab:
+ kfree(bslab);
+ return NULL;
+}
- bslab->slab = slab;
- bslab->slab_ref = 1;
- bslab->slab_size = sz;
-out_unlock:
+static inline unsigned int bs_bio_slab_size(struct bio_set *bs)
+{
+ return bs->front_pad + sizeof(struct bio) +
+ BIO_INLINE_VECS * sizeof(struct bio_vec);
+}
+
+static struct kmem_cache *bio_find_or_create_slab(struct bio_set *bs)
+{
+ unsigned int size = bs_bio_slab_size(bs);
+ struct bio_slab *bslab;
+
+ mutex_lock(&bio_slab_lock);
+ bslab = xa_load(&bio_slabs, size);
+ if (bslab)
+ bslab->slab_ref++;
+ else
+ bslab = create_bio_slab(size);
mutex_unlock(&bio_slab_lock);
- return slab;
+
+ if (bslab)
+ return bslab->slab;
+ return NULL;
}
static void bio_put_slab(struct bio_set *bs)
{
struct bio_slab *bslab = NULL;
- unsigned int i;
+ unsigned int slab_size = bs_bio_slab_size(bs);
mutex_lock(&bio_slab_lock);
- for (i = 0; i < bio_slab_nr; i++) {
- if (bs->bio_slab == bio_slabs[i].slab) {
- bslab = &bio_slabs[i];
- break;
- }
- }
-
+ bslab = xa_load(&bio_slabs, slab_size);
if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
goto out;
+ WARN_ON_ONCE(bslab->slab != bs->bio_slab);
+
WARN_ON(!bslab->slab_ref);
if (--bslab->slab_ref)
goto out;
+ xa_erase(&bio_slabs, slab_size);
+
kmem_cache_destroy(bslab->slab);
- bslab->slab = NULL;
+ kfree(bslab);
out:
mutex_unlock(&bio_slab_lock);
@@ -1570,15 +1562,13 @@ int bioset_init(struct bio_set *bs,
unsigned int front_pad,
int flags)
{
- unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
-
bs->front_pad = front_pad;
spin_lock_init(&bs->rescue_lock);
bio_list_init(&bs->rescue_list);
INIT_WORK(&bs->rescue_work, bio_alloc_rescue);
- bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
+ bs->bio_slab = bio_find_or_create_slab(bs);
if (!bs->bio_slab)
return -ENOMEM;
@@ -1642,16 +1632,8 @@ static void __init biovec_init_slabs(void)
static int __init init_bio(void)
{
- bio_slab_max = 2;
- bio_slab_nr = 0;
- bio_slabs = kcalloc(bio_slab_max, sizeof(struct bio_slab),
- GFP_KERNEL);
-
BUILD_BUG_ON(BIO_FLAG_LAST > BVEC_POOL_OFFSET);
- if (!bio_slabs)
- panic("bio: can't allocate bios\n");
-
bio_integrity_init();
biovec_init_slabs();
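The release path in the patch (bio_put_slab()) is the mirror image: drop the reference under the same mutex and, on the final put, xa_erase() the entry before destroying it. Continuing the hypothetical sketch above:

/* Drop a reference; on the final put, unpublish and free the entry. */
static void put_cache(struct cached_obj *obj)
{
        mutex_lock(&registry_lock);
        WARN_ON(!obj->refcnt);
        if (!--obj->refcnt) {
                xa_erase(&cache_registry, obj->size);
                /* kmem_cache_destroy(obj->slab) would go here */
                kfree(obj);
        }
        mutex_unlock(&registry_lock);
}

Because xa_erase() removes the entry outright, the old sentinel state (a slot with bslab->slab = NULL left behind for reuse) is no longer needed, which is why init_bio() also loses its up-front kcalloc() of the slab table.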