path: root/fs/io_uring.c
author		Jens Axboe <axboe@kernel.dk>	2021-02-19 12:33:30 -0700
committer	Jens Axboe <axboe@kernel.dk>	2021-02-25 09:23:47 -0700
commit		e941894eae31b52f0fd9bdb3ce20620afa152f45 (patch)
tree		d8705880ec0fa73973f5c243b91986ca23e6ad09 /fs/io_uring.c
parent		eb2de9418d56b5e6ebf27bad51dbce3e22ee109b (diff)
io-wq: make buffered file write hashed work map per-ctx
Before the io-wq thread change, we maintained a hash work map and lock per-node, per-ring. That wasn't ideal, as we really wanted it to be per-ring. But now that we have per-task workers, the hash map ends up being just per-task. That'll work just fine for the normal case of having one task use a ring, but if you share the ring between tasks, then it's considerably worse than it was before.

Make the hash map per-ctx instead, which provides full per-ctx buffered write serialization on hashed writes.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
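For context, the shared structure that ctx->hash_map points at lives on the io-wq side (fs/io-wq.h). A minimal sketch of its shape, inferred from the refcount_set()/init_waitqueue_head() calls in the diff below; the refs and wait fields are evidenced directly, the bucket bitmap is an assumption:

struct io_wq_hash {
	refcount_t refs;		/* shared by the ctx and each io-wq attached to it */
	unsigned long map;		/* assumed: bitmap of hash buckets with work in flight */
	struct wait_queue_head wait;	/* workers sleep here while their bucket is busy */
};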
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--	fs/io_uring.c	19 +++++++++++++++++++
1 file changed, 19 insertions(+), 0 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 0a435a6f265a..fbc85afa9a87 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -360,6 +360,9 @@ struct io_ring_ctx {
 	unsigned		cached_cq_overflow;
 	unsigned long		sq_check_overflow;
 
+	/* hashed buffered write serialization */
+	struct io_wq_hash	*hash_map;
+
 	struct list_head	defer_list;
 	struct list_head	timeout_list;
 	struct list_head	cq_overflow_list;
@@ -454,6 +457,8 @@ struct io_ring_ctx {
 	/* exit task_work */
 	struct callback_head		*exit_task_work;
 
+	struct wait_queue_head		hash_wait;
+
 	/* Keep this last, we don't need it for the fast path */
 	struct work_struct		exit_work;
 };
@@ -7763,9 +7768,21 @@ static struct io_wq_work *io_free_work(struct io_wq_work *work)
 
 static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx)
 {
+	struct io_wq_hash *hash;
 	struct io_wq_data data;
 	unsigned int concurrency;
 
+	hash = ctx->hash_map;
+	if (!hash) {
+		hash = kzalloc(sizeof(*hash), GFP_KERNEL);
+		if (!hash)
+			return ERR_PTR(-ENOMEM);
+		refcount_set(&hash->refs, 1);
+		init_waitqueue_head(&hash->wait);
+		ctx->hash_map = hash;
+	}
+
+	data.hash = hash;
 	data.free_work = io_free_work;
 	data.do_work = io_wq_submit_work;
 
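For reference, what makes a buffered write "hashed" in the first place: at async-prep time io_uring tags the request with a hash derived from the file's inode, so writes to the same file serialize while writes to different files can run in parallel. A sketch of that helper from fs/io-wq.h, cited from memory; the exact constant names should be treated as assumptions:

static inline void io_wq_hash_work(struct io_wq_work *work, void *val)
{
	unsigned int bit;

	/* hash the inode pointer into one of the fixed hash buckets */
	bit = hash_ptr(val, IO_WQ_HASH_ORDER);
	work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
}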
@@ -8405,6 +8422,8 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx)
 	percpu_ref_exit(&ctx->refs);
 	free_uid(ctx->user);
 	io_req_caches_free(ctx, NULL);
+	if (ctx->hash_map)
+		io_wq_put_hash(ctx->hash_map);
 	kfree(ctx->cancel_hash);
 	kfree(ctx);
 }
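The io_wq_put_hash() call above drops the ctx's reference on the map; each io-wq instance created against the ctx is expected to hold its own reference, taken on the io-wq side, which this diff doesn't show since it is limited to fs/io_uring.c. A sketch of the put helper, assuming the refcount/kfree pairing implied by the kzalloc() and refcount_set(&hash->refs, 1) in io_init_wq_offload():

static inline void io_wq_put_hash(struct io_wq_hash *hash)
{
	/* free the map once the last ctx or io-wq reference is gone */
	if (refcount_dec_and_test(&hash->refs))
		kfree(hash);
}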