author		Jens Axboe <axboe@kernel.dk>	2023-10-02 19:51:38 -0600
committer	Jens Axboe <axboe@kernel.dk>	2023-10-03 08:12:54 -0600
commit		1658633c04653578429ff5dfc62fdc159203a8f2 (patch)
tree		caab58969a6d936ba39f46c0432752c25ed59c33 /io_uring/io_uring.h
parent		f8024f1f36a30a082b0457d5779c8847cea57f57 (diff)
io_uring: ensure io_lockdep_assert_cq_locked() handles disabled rings
io_lockdep_assert_cq_locked() checks that locking is correctly done when
a CQE is posted. If the ring is set up in a disabled state with
IORING_SETUP_R_DISABLED, then ctx->submitter_task isn't assigned until
the ring is later enabled. We generally don't post CQEs in this state,
as no SQEs can be submitted. However, it is possible to generate a CQE
if tagged resources are being updated. If this happens and PROVE_LOCKING
is enabled, then the locking check helper will dereference
ctx->submitter_task, which hasn't been set yet.

Fix up io_lockdep_assert_cq_locked() to handle this case correctly.
While at it, convert it to a static inline as well, so that generated
line offsets will actually reflect which condition failed, rather than
just the line offset for io_lockdep_assert_cq_locked() itself.

Reported-and-tested-by: syzbot+efc45d4e7ba6ab4ef1eb@syzkaller.appspotmail.com
Fixes: f26cc9593581 ("io_uring: lockdep annotate CQ locking")
Cc: stable@vger.kernel.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
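For context, the reported scenario can be sketched from userspace with
liburing. This is a minimal illustrative sketch, not the syzbot
reproducer: the tag value and file descriptors are arbitrary, liburing
2.1+ is assumed for the tagged-file helpers, and the SINGLE_ISSUER and
DEFER_TASKRUN flags are an assumption chosen so that the completion
path depends on ->submitter_task (the branch the fixed check guards).

/*
 * Illustrative sketch of the trigger described above. Updating a
 * tagged file slot makes the kernel post a CQE carrying the old
 * slot's tag once the old file is released -- even though the ring
 * was created with IORING_SETUP_R_DISABLED and never enabled, so
 * ctx->submitter_task is still NULL when the lockdep check runs.
 */
#include <liburing.h>
#include <unistd.h>

int main(void)
{
	struct io_uring ring;
	int fds[1] = { STDIN_FILENO };
	__u64 tags[1] = { 1 };	/* non-zero tag => CQE when slot is released */
	unsigned flags = IORING_SETUP_R_DISABLED |
			 IORING_SETUP_SINGLE_ISSUER |
			 IORING_SETUP_DEFER_TASKRUN;

	/* ring starts disabled; ctx->submitter_task is not yet assigned */
	if (io_uring_queue_init(4, &ring, flags) < 0)
		return 1;

	/* registration is still allowed on a disabled ring */
	if (io_uring_register_files_tags(&ring, fds, tags, 1) < 0)
		return 1;

	/*
	 * Replace the tagged slot. Releasing the old file posts a CQE
	 * carrying the old tag; on a CONFIG_PROVE_LOCKING kernel without
	 * this fix, the locking check dereferences the NULL
	 * ->submitter_task. With the fix, the task checks are skipped.
	 */
	fds[0] = STDOUT_FILENO;
	tags[0] = 0;
	if (io_uring_register_files_update_tag(&ring, 0, fds, tags, 1) < 0)
		return 1;

	io_uring_queue_exit(&ring);
	return 0;
}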
Diffstat (limited to 'io_uring/io_uring.h')
-rw-r--r--	io_uring/io_uring.h	41
1 file changed, 27 insertions(+), 14 deletions(-)
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 547c30582fb8..0bc145614a6e 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -86,20 +86,33 @@ bool __io_alloc_req_refill(struct io_ring_ctx *ctx);
bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
bool cancel_all);
-#define io_lockdep_assert_cq_locked(ctx) \
- do { \
- lockdep_assert(in_task()); \
- \
- if (ctx->flags & IORING_SETUP_IOPOLL) { \
- lockdep_assert_held(&ctx->uring_lock); \
- } else if (!ctx->task_complete) { \
- lockdep_assert_held(&ctx->completion_lock); \
- } else if (ctx->submitter_task->flags & PF_EXITING) { \
- lockdep_assert(current_work()); \
- } else { \
- lockdep_assert(current == ctx->submitter_task); \
- } \
- } while (0)
+#if defined(CONFIG_PROVE_LOCKING)
+static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
+{
+ lockdep_assert(in_task());
+
+ if (ctx->flags & IORING_SETUP_IOPOLL) {
+ lockdep_assert_held(&ctx->uring_lock);
+ } else if (!ctx->task_complete) {
+ lockdep_assert_held(&ctx->completion_lock);
+ } else if (ctx->submitter_task) {
+ /*
+ * ->submitter_task may be NULL and we can still post a CQE,
+ * if the ring has been setup with IORING_SETUP_R_DISABLED.
+ * Not from an SQE, as those cannot be submitted, but via
+ * updating tagged resources.
+ */
+ if (ctx->submitter_task->flags & PF_EXITING)
+ lockdep_assert(current_work());
+ else
+ lockdep_assert(current == ctx->submitter_task);
+ }
+}
+#else
+static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
+{
+}
+#endif
static inline void io_req_task_work_add(struct io_kiocb *req)
{