author		Omar Sandoval <osandov@fb.com>	2017-02-10 10:32:34 -0800
committer	Jens Axboe <axboe@fb.com>	2017-02-10 11:34:47 -0700
commit		3d492c2e0146ccaad08c7cbe16e2e229328b5e79 (patch)
tree		c78b95cdedb006cdc11a99e6a0e253f8875cee11 /block/blk-ioc.c
parent		f6f94300cda0f85180a0dd8838d1cc855661e239 (diff)
blk-mq-sched: don't hold queue_lock when calling exit_icq
None of the other blk-mq elevator hooks are called with this lock held.
Additionally, it can lead to circular locking dependencies between queue_lock
and the private scheduler lock.

Reported-by: Paolo Valente <paolo.valente@linaro.org>
Signed-off-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
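To make the lock-ordering problem concrete: if exit_icq runs with queue_lock held and the hook takes the scheduler's private lock, any other path that takes the private lock before queue_lock closes a cycle. The standalone C sketch below illustrates that pattern with pthread mutexes; the queue_lock and sched_lock mutexes are stand-ins for q->queue_lock and a scheduler's private lock, and both functions are hypothetical, not code from this patch.

/*
 * Illustrative userspace analogy, not kernel code: two locks acquired in
 * opposite orders on two code paths form the kind of cycle lockdep reports.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t sched_lock = PTHREAD_MUTEX_INITIALIZER;

/* Path A: exit_icq invoked with queue_lock already held (the old behavior). */
static void exit_icq_under_queue_lock(void)
{
	pthread_mutex_lock(&queue_lock);
	pthread_mutex_lock(&sched_lock);	/* hook grabs the private lock */
	puts("A: queue_lock -> sched_lock");
	pthread_mutex_unlock(&sched_lock);
	pthread_mutex_unlock(&queue_lock);
}

/* Path B: scheduler code holds its private lock, then needs queue_lock. */
static void sched_path_taking_queue_lock(void)
{
	pthread_mutex_lock(&sched_lock);
	pthread_mutex_lock(&queue_lock);
	puts("B: sched_lock -> queue_lock");
	pthread_mutex_unlock(&queue_lock);
	pthread_mutex_unlock(&sched_lock);
}

int main(void)
{
	/*
	 * Run sequentially so this program never actually deadlocks; if the
	 * two paths ran concurrently, the opposite acquisition orders could
	 * deadlock.  Dropping queue_lock around the mq exit_icq call removes
	 * path A and with it the cycle.
	 */
	exit_icq_under_queue_lock();
	sched_path_taking_queue_lock();
	return 0;
}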
Diffstat (limited to 'block/blk-ioc.c')
-rw-r--r--	block/blk-ioc.c	22
1 file changed, 16 insertions(+), 6 deletions(-)
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index fe186a9eade9..b12f9c87b4c3 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -35,7 +35,10 @@ static void icq_free_icq_rcu(struct rcu_head *head)
 	kmem_cache_free(icq->__rcu_icq_cache, icq);
 }
 
-/* Exit an icq. Called with both ioc and q locked. */
+/*
+ * Exit an icq. Called with both ioc and q locked for sq, only ioc locked for
+ * mq.
+ */
 static void ioc_exit_icq(struct io_cq *icq)
 {
 	struct elevator_type *et = icq->q->elevator->type;
@@ -166,6 +169,7 @@ EXPORT_SYMBOL(put_io_context);
  */
 void put_io_context_active(struct io_context *ioc)
 {
+	struct elevator_type *et;
 	unsigned long flags;
 	struct io_cq *icq;
 
@@ -184,13 +188,19 @@ retry:
 	hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
 		if (icq->flags & ICQ_EXITED)
 			continue;
-		if (spin_trylock(icq->q->queue_lock)) {
+
+		et = icq->q->elevator->type;
+		if (et->uses_mq) {
 			ioc_exit_icq(icq);
-			spin_unlock(icq->q->queue_lock);
 		} else {
-			spin_unlock_irqrestore(&ioc->lock, flags);
-			cpu_relax();
-			goto retry;
+			if (spin_trylock(icq->q->queue_lock)) {
+				ioc_exit_icq(icq);
+				spin_unlock(icq->q->queue_lock);
+			} else {
+				spin_unlock_irqrestore(&ioc->lock, flags);
+				cpu_relax();
+				goto retry;
+			}
 		}
 	}
 	spin_unlock_irqrestore(&ioc->lock, flags);
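For context, a blk-mq scheduler's exit_icq hook typically serializes against the scheduler's own lock rather than queue_lock, which is why only the ioc lock needs to be held around the call. A minimal hypothetical sketch of such a hook follows; the my_sched_* names and fields are invented for illustration and are not part of this patch or any in-tree scheduler.

/* Hypothetical blk-mq scheduler hook; my_sched_* names are invented. */
static void my_sched_exit_icq(struct io_cq *icq)
{
	struct my_sched_data *msd = icq->q->elevator->elevator_data;
	unsigned long flags;

	/*
	 * Only the scheduler's private lock is taken here.  Because
	 * put_io_context_active() no longer holds q->queue_lock around this
	 * call for mq schedulers, taking msd->lock cannot form a cycle with
	 * queue_lock.
	 */
	spin_lock_irqsave(&msd->lock, flags);
	/* ... tear down per-icq scheduler state ... */
	spin_unlock_irqrestore(&msd->lock, flags);
}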