author    Sarthak Garg <sartgarg@codeaurora.org>    2020-05-07 21:45:33 +0530
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2020-05-20 08:20:21 +0200
commit    e8eb122b9f438f30e0d867f332ab5ff72c40d294 (patch)
tree      201ec3bc1fbecabc182b9a827fde6128c3873145 /drivers/mmc/core/queue.c
parent    fdf547a591f51964ab166ad8b05cda75a050b659 (diff)
mmc: core: Fix recursive locking issue in CQE recovery path
[ Upstream commit 39a22f73744d5baee30b5f134ae2e30b668b66ed ]

Consider the following stack trace:

-001|raw_spin_lock_irqsave
-002|mmc_blk_cqe_complete_rq
-003|__blk_mq_complete_request(inline)
-003|blk_mq_complete_request(rq)
-004|mmc_cqe_timed_out(inline)
-004|mmc_mq_timed_out

mmc_mq_timed_out acquires the queue_lock for the first time. The
mmc_blk_cqe_complete_rq function then tries to acquire the same queue
lock, resulting in recursive locking: the task spins on a lock it has
already acquired, which eventually triggers a watchdog bark. Fix this
by holding the lock only for the required critical section.

Cc: <stable@vger.kernel.org>
Fixes: 1e8e55b67030 ("mmc: block: Add CQE support")
Suggested-by: Sahitya Tummala <stummala@codeaurora.org>
Signed-off-by: Sarthak Garg <sartgarg@codeaurora.org>
Acked-by: Adrian Hunter <adrian.hunter@intel.com>
Link: https://lore.kernel.org/r/1588868135-31783-1-git-send-email-vbadigan@codeaurora.org
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
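For readers unfamiliar with the pattern, the deadlock and the shape of the fix can be illustrated outside the kernel. The following is a minimal userspace sketch, not kernel code: demo_queue, demo_complete_rq() and the demo_timed_out_*() functions are hypothetical stand-ins for the mmc queue, mmc_blk_cqe_complete_rq() and mmc_mq_timed_out(), with a pthread spinlock in place of mq->lock.

/*
 * Hypothetical userspace sketch of the recursive-locking bug and the fix.
 * None of these names are kernel APIs; a pthread spinlock stands in for
 * mq->lock. Build with: cc demo.c -o demo -lpthread
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_queue {
        pthread_spinlock_t lock;        /* stands in for mq->lock */
        bool recovery_needed;           /* stands in for mq->recovery_needed */
        bool use_cqe;                   /* stands in for mq->use_cqe */
};

/* Stand-in for the completion path: it takes the queue lock itself. */
static void demo_complete_rq(struct demo_queue *q)
{
        pthread_spin_lock(&q->lock);
        /* ... completion bookkeeping ... */
        pthread_spin_unlock(&q->lock);
}

/*
 * Broken shape (before the patch): the completion path is reached while
 * the caller still holds the lock, so demo_complete_rq() spins forever
 * on a lock owned by its own task.
 */
static void demo_timed_out_broken(struct demo_queue *q)
{
        pthread_spin_lock(&q->lock);
        if (!q->recovery_needed && q->use_cqe)
                demo_complete_rq(q);    /* recursive acquisition: hangs */
        pthread_spin_unlock(&q->lock);
}

/*
 * Fixed shape (mirroring the patch): read the flags under the lock,
 * drop it, and only then enter the path that may re-acquire it.
 */
static void demo_timed_out_fixed(struct demo_queue *q)
{
        bool ignore_tout;

        pthread_spin_lock(&q->lock);
        ignore_tout = q->recovery_needed || !q->use_cqe;
        pthread_spin_unlock(&q->lock);

        if (!ignore_tout)
                demo_complete_rq(q);    /* lock is free here, no deadlock */
}

int main(void)
{
        struct demo_queue q = { .recovery_needed = false, .use_cqe = true };

        pthread_spin_init(&q.lock, PTHREAD_PROCESS_PRIVATE);
        demo_timed_out_fixed(&q);       /* returns normally */
        /* demo_timed_out_broken(&q); would spin forever here. */
        puts("fixed timeout path completed");
        pthread_spin_destroy(&q.lock);
        return 0;
}

The fixed variant matches the patch below: the lock protects only the reads of the two flags, and mmc_cqe_timed_out(), which can end up in mmc_blk_cqe_complete_rq(), now runs without mq->lock held.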
Diffstat (limited to 'drivers/mmc/core/queue.c')
-rw-r--r--    drivers/mmc/core/queue.c    13
1 file changed, 4 insertions(+), 9 deletions(-)
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 9edc08685e86..4d1e468d3982 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -107,7 +107,7 @@ static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req)
         case MMC_ISSUE_DCMD:
                 if (host->cqe_ops->cqe_timeout(host, mrq, &recovery_needed)) {
                         if (recovery_needed)
-                                __mmc_cqe_recovery_notifier(mq);
+                                mmc_cqe_recovery_notifier(mrq);
                         return BLK_EH_RESET_TIMER;
                 }
                 /* No timeout (XXX: huh? comment doesn't make much sense) */
@@ -125,18 +125,13 @@ static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req,
         struct request_queue *q = req->q;
         struct mmc_queue *mq = q->queuedata;
         unsigned long flags;
-        int ret;
+        bool ignore_tout;
 
         spin_lock_irqsave(&mq->lock, flags);
-
-        if (mq->recovery_needed || !mq->use_cqe)
-                ret = BLK_EH_RESET_TIMER;
-        else
-                ret = mmc_cqe_timed_out(req);
-
+        ignore_tout = mq->recovery_needed || !mq->use_cqe;
         spin_unlock_irqrestore(&mq->lock, flags);
 
-        return ret;
+        return ignore_tout ? BLK_EH_RESET_TIMER : mmc_cqe_timed_out(req);
 }
 
 static void mmc_mq_recovery_handler(struct work_struct *work)