Diffstat (limited to 'include/linux/io_uring_types.h')
 include/linux/io_uring_types.h | 24 +++++++++++++++---------
 1 file changed, 15 insertions(+), 9 deletions(-)
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index 00689c12f6ab..1b2a20a42413 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -188,8 +188,10 @@ struct io_ev_fd {
 };
 
 struct io_alloc_cache {
-        struct hlist_head       list;
+        struct io_wq_work_node  list;
         unsigned int            nr_cached;
+        unsigned int            max_cached;
+        size_t                  elem_size;
 };
 
 struct io_ring_ctx {
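
The io_alloc_cache change swaps the doubly linked hlist head for an intrusive singly linked stack (io_wq_work_node is just a next pointer) and adds a capacity bound plus the element size. A minimal userspace sketch of that caching scheme follows; the names (cache_put, cache_get, work_node) are illustrative, not the kernel's helpers:

#include <stddef.h>

/* Illustrative stand-ins; not the kernel's types. */
struct work_node {
        struct work_node *next;         /* like io_wq_work_node */
};

struct alloc_cache {
        struct work_node *head;         /* singly linked free stack */
        unsigned int nr_cached;
        unsigned int max_cached;        /* cap on hoarded entries */
        size_t elem_size;               /* so recycled objects can be cleared */
};

/* Push an object's embedded node onto the cache, up to max_cached. */
static int cache_put(struct alloc_cache *cache, struct work_node *node)
{
        if (cache->nr_cached >= cache->max_cached)
                return 0;               /* cache full: caller frees the object */
        node->next = cache->head;
        cache->head = node;
        cache->nr_cached++;
        return 1;
}

/* Pop a cached object, or NULL so the caller falls back to allocating. */
static struct work_node *cache_get(struct alloc_cache *cache)
{
        struct work_node *node = cache->head;

        if (node) {
                cache->head = node->next;
                cache->nr_cached--;
        }
        return node;
}

A singly linked stack is enough here because the cache is only ever pushed and popped under the ring's lock, and the single pointer per entry keeps the overhead minimal.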
@@ -239,7 +241,6 @@ struct io_ring_ctx {
          * uring_lock, and updated through io_uring_register(2)
          */
         struct io_rsrc_node     *rsrc_node;
-        int                     rsrc_cached_refs;
         atomic_t                cancel_seq;
         struct io_file_table    file_table;
         unsigned                nr_user_files;
@@ -295,7 +296,7 @@ struct io_ring_ctx {
         spinlock_t              completion_lock;
 
         bool                    poll_multi_queue;
-        bool                    cq_waiting;
+        atomic_t                cq_wait_nr;
 
         /*
          * ->iopoll_list is protected by the ctx->uring_lock for
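
Replacing the cq_waiting flag with an atomic counter lets a waiter publish how many events it needs, so the posting side can skip wakeups that would find the waiter still short. A hedged sketch of the idea, simplified relative to the kernel's actual wakeup path:

#include <stdatomic.h>
#include <stdbool.h>

/* Waiter publishes how many events it sleeps for; 0 means nobody waits. */
static atomic_int cq_wait_nr_sketch;

static void waiter_prepare(int need)
{
        atomic_store(&cq_wait_nr_sketch, need);
        /* ...then sleep until woken... */
}

/* Poster side: skip the wakeup while the waiter would still come up short. */
static bool should_wake(int events_ready)
{
        int need = atomic_load(&cq_wait_nr_sketch);

        return need > 0 && events_ready >= need;
}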
@@ -325,16 +326,15 @@ struct io_ring_ctx {
         struct io_restriction           restrictions;
 
         /* slow path rsrc auxilary data, used by update/register */
-        struct io_rsrc_node             *rsrc_backup_node;
         struct io_mapped_ubuf           *dummy_ubuf;
         struct io_rsrc_data             *file_data;
         struct io_rsrc_data             *buf_data;
-        struct delayed_work             rsrc_put_work;
-        struct callback_head            rsrc_put_tw;
-        struct llist_head               rsrc_put_llist;
+        /* protected by ->uring_lock */
         struct list_head                rsrc_ref_list;
-        spinlock_t                      rsrc_ref_lock;
+        struct io_alloc_cache           rsrc_node_cache;
+        struct wait_queue_head          rsrc_quiesce_wq;
+        unsigned                        rsrc_quiesce;
 
         struct list_head                io_buffers_pages;
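
Dropping rsrc_put_work, rsrc_put_tw, rsrc_put_llist and rsrc_ref_lock reflects moving resource-node lifetime under ->uring_lock: nodes are recycled through rsrc_node_cache, and quiesce simply waits on rsrc_quiesce_wq until the reference list drains. A rough userspace analogue of that wait, using pthreads in place of the kernel's waitqueues (all names here are illustrative):

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;        /* ->uring_lock */
static pthread_cond_t quiesce_wq = PTHREAD_COND_INITIALIZER;
static unsigned nr_nodes;       /* nodes still on the reference list */
static unsigned quiescing;      /* nonzero while someone is quiescing */

/* Called when the last reference to a node is dropped. */
static void node_released(void)
{
        pthread_mutex_lock(&lock);
        if (--nr_nodes == 0 && quiescing)
                pthread_cond_broadcast(&quiesce_wq);
        pthread_mutex_unlock(&lock);
}

/* Wait until every outstanding node has been released. */
static void rsrc_quiesce_wait(void)
{
        pthread_mutex_lock(&lock);
        quiescing++;
        while (nr_nodes)
                pthread_cond_wait(&quiesce_wq, &lock);
        quiescing--;
        pthread_mutex_unlock(&lock);
}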
@@ -366,6 +366,11 @@ struct io_ring_ctx {
         unsigned                        evfd_last_cq_tail;
 };
 
+struct io_tw_state {
+        /* ->uring_lock is taken, callbacks can use io_tw_lock to lock it */
+        bool locked;
+};
+
 enum {
         REQ_F_FIXED_FILE_BIT    = IOSQE_FIXED_FILE_BIT,
         REQ_F_IO_DRAIN_BIT      = IOSQE_IO_DRAIN_BIT,
@@ -472,7 +477,7 @@ enum {
         REQ_F_HASH_LOCKED       = BIT(REQ_F_HASH_LOCKED_BIT),
 };
 
-typedef void (*io_req_tw_func_t)(struct io_kiocb *req, bool *locked);
+typedef void (*io_req_tw_func_t)(struct io_kiocb *req, struct io_tw_state *ts);
 
 struct io_task_work {
         struct llist_node               node;
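
Passing a struct instead of a raw bool * carries the same information (is ->uring_lock held?) but gives the task-work path a type that can grow more state later. A sketch of how a callback might use it; io_tw_lock's exact kernel signature may differ, so treat this helper as an assumption:

#include <stdbool.h>

struct io_tw_state {
        /* ->uring_lock is taken, callbacks can use io_tw_lock to lock it */
        bool locked;
};

struct io_kiocb;        /* opaque request, enough for the sketch */

/* Hypothetical helper mirroring the kernel's: lock once, remember in ts. */
static void io_tw_lock(struct io_tw_state *ts)
{
        if (!ts->locked) {
                /* mutex_lock(&ctx->uring_lock) in the real code */
                ts->locked = true;
        }
}

/* A task-work callback matching the new io_req_tw_func_t shape. */
static void example_tw_func(struct io_kiocb *req, struct io_tw_state *ts)
{
        io_tw_lock(ts);
        /* ...complete the request under ->uring_lock... */
}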
@@ -562,6 +567,7 @@ struct io_kiocb {
         atomic_t                        refs;
         atomic_t                        poll_refs;
         struct io_task_work             io_task_work;
+        unsigned                        nr_tw;
         /* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
         union {
                 struct hlist_node       hash_node;
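
The new nr_tw field records how many task-work items were pending when this request was queued; paired with cq_wait_nr above, it allows deferred wakeups that only kick the waiter once enough work has accumulated. A simplified sketch of that batching decision, not the kernel's exact logic:

#include <stdatomic.h>
#include <stdbool.h>

struct req_sketch {
        unsigned nr_tw;         /* position in the pending task-work list */
};

static atomic_int cq_wait_target;       /* events the sleeping task asked for */

/* After linking req into the task-work list, decide whether to wake. */
static bool tw_should_wake(struct req_sketch *req)
{
        int need = atomic_load(&cq_wait_target);

        /* Wake only once this request pushes the queue past the target. */
        return need > 0 && req->nr_tw >= (unsigned)need;
}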