author		Christoph Hellwig <hch@lst.de>	2021-12-09 07:31:25 +0100
committer	Jens Axboe <axboe@kernel.dk>	2021-12-16 10:59:01 -0700
commit		edf70ff5a1ed9769da35178454d743828061a6a3 (patch)
tree		b018260edea1d3f960e30ca4129de6a7ccb94bce /block
parent		8a20c0c7e0cea7eb0c32fd6b63ff514c9ac32b8f (diff)
block: refactor put_io_context
Move the code to delay freeing the icqs into a separate helper.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jan Kara <jack@suse.cz>
Link: https://lore.kernel.org/r/20211209063131.18537-6-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
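For illustration, the shape of the refactor is easy to state: the reference drop stays in the put path, while the decision to defer freeing moves into a helper that returns true when it has queued asynchronous work and therefore owns the final free. Below is a minimal userspace C sketch of that idiom using C11 atomics and a pthread mutex; every name in it (struct obj, obj_delay_free, obj_put, queue_release_work, needs_async_release) is hypothetical, and it stands in for the kernel code rather than reproducing it.

	/* Hypothetical userspace sketch of the pattern in this patch. */
	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct obj {
		atomic_long refcount;
		pthread_mutex_t lock;
		bool needs_async_release;	/* stands in for !hlist_empty(&ioc->icq_list) */
	};

	/* Stand-in for queue_work(): a real worker would free o later. */
	static void queue_release_work(struct obj *o)
	{
		printf("free of %p deferred to a worker\n", (void *)o);
	}

	/*
	 * Mirrors ioc_delay_free(): decide under the object's lock whether
	 * the free must happen asynchronously.  Returns true if work was
	 * queued (the worker now owns the free), false if the caller may
	 * free inline.
	 */
	static bool obj_delay_free(struct obj *o)
	{
		pthread_mutex_lock(&o->lock);
		if (o->needs_async_release) {
			queue_release_work(o);
			pthread_mutex_unlock(&o->lock);
			return true;
		}
		pthread_mutex_unlock(&o->lock);
		return false;
	}

	/* Mirrors put_io_context() after the refactor: one conditional.
	 * fetch_sub(...) == 1 plays the role of atomic_long_dec_and_test(). */
	static void obj_put(struct obj *o)
	{
		if (atomic_fetch_sub(&o->refcount, 1) == 1 && !obj_delay_free(o))
			free(o);
	}

The && short-circuit is the point of the helper's boolean contract: obj_delay_free() runs only after the count has already hit zero, so exactly one of the two cleanup paths (inline free or queued work) runs, never both.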
Diffstat (limited to 'block')
-rw-r--r--	block/blk-ioc.c	38
1 file changed, 19 insertions(+), 19 deletions(-)
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 04f3d2b0ca7d..ca996214c10a 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -143,6 +143,24 @@ static void ioc_release_fn(struct work_struct *work)
         kmem_cache_free(iocontext_cachep, ioc);
 }
 
+/*
+ * Releasing icqs requires reverse order double locking and we may already be
+ * holding a queue_lock. Do it asynchronously from a workqueue.
+ */
+static bool ioc_delay_free(struct io_context *ioc)
+{
+        unsigned long flags;
+
+        spin_lock_irqsave(&ioc->lock, flags);
+        if (!hlist_empty(&ioc->icq_list)) {
+                queue_work(system_power_efficient_wq, &ioc->release_work);
+                spin_unlock_irqrestore(&ioc->lock, flags);
+                return true;
+        }
+        spin_unlock_irqrestore(&ioc->lock, flags);
+        return false;
+}
+
 /**
  * put_io_context - put a reference of io_context
  * @ioc: io_context to put
@@ -152,26 +170,8 @@ static void ioc_release_fn(struct work_struct *work)
  */
 void put_io_context(struct io_context *ioc)
 {
-        unsigned long flags;
-        bool free_ioc = false;
-
         BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
-
-        /*
-         * Releasing ioc requires reverse order double locking and we may
-         * already be holding a queue_lock. Do it asynchronously from wq.
-         */
-        if (atomic_long_dec_and_test(&ioc->refcount)) {
-                spin_lock_irqsave(&ioc->lock, flags);
-                if (!hlist_empty(&ioc->icq_list))
-                        queue_work(system_power_efficient_wq,
-                                        &ioc->release_work);
-                else
-                        free_ioc = true;
-                spin_unlock_irqrestore(&ioc->lock, flags);
-        }
-
-        if (free_ioc)
+        if (atomic_long_dec_and_test(&ioc->refcount) && !ioc_delay_free(ioc))
                 kmem_cache_free(iocontext_cachep, ioc);
 }
 EXPORT_SYMBOL_GPL(put_io_context);
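Continuing the hypothetical sketch above, a small two-thread driver shows the dec-and-test property the new one-liner in put_io_context() relies on: with two references and two concurrent puts, exactly one thread observes the count reach zero and performs (or defers) the free.

	/* Hypothetical driver for the obj_put() sketch above: two owners,
	 * two puts, exactly one thread sees the count hit zero. */
	static void *dropper(void *arg)
	{
		obj_put(arg);	/* each thread drops one reference */
		return NULL;
	}

	int main(void)
	{
		struct obj *o = calloc(1, sizeof(*o));
		pthread_t a, b;

		pthread_mutex_init(&o->lock, NULL);
		atomic_store(&o->refcount, 2);		/* two owners */
		pthread_create(&a, NULL, dropper, o);
		pthread_create(&b, NULL, dropper, o);
		pthread_join(a, NULL);
		pthread_join(b, NULL);	/* o has been freed exactly once */
		return 0;
	}

Built with cc -pthread, the object is released exactly once regardless of which thread loses the race, which is the invariant the BUG_ON() in the real put path is there to assert.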