author    Eric Pilmore <epilmore@gigaio.com>  2023-01-18 19:39:08 -0800
committer Vinod Koul <vkoul@kernel.org>       2023-01-19 18:59:34 +0530
commit    95e5fda3b5f9ed8239b145da3fa01e641cf5d53c (patch)
tree      afce5d3b1023c71cdefca5c03800da9d9adfac93 /drivers/dma/ptdma/ptdma-dev.c
parent    a7a7ee6f5a019ad72852c001abbce50d35e992f2 (diff)
ptdma: pt_core_execute_cmd() should use spinlock
The interrupt handler (pt_core_irq_handler()) of the ptdma driver can be
called from interrupt context. The code flow in this function can lead
down to pt_core_execute_cmd(), which attempts to grab a mutex; that is
not appropriate in interrupt context and ultimately leads to a kernel
panic. The fix here changes this mutex to a spinlock, which has been
verified to resolve the issue.

Fixes: fa5d823b16a9 ("dmaengine: ptdma: Initial driver for the AMD PTDMA")
Signed-off-by: Eric Pilmore <epilmore@gigaio.com>
Link: https://lore.kernel.org/r/20230119033907.35071-1-epilmore@gigaio.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
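For context, the pattern being fixed looks roughly like the sketch below: a queue-submission helper that can be reached both from process context and from the IRQ handler must never sleep, so the sleeping mutex is replaced with an IRQ-safe spinlock. The struct and function names below (pt_cmd_queue_sketch, pt_submit_desc_sketch) are hypothetical and only mirror the driver; this is a simplified illustration of the locking change, not the actual ptdma code.

```c
/* Simplified sketch of the locking pattern; not the actual driver code. */
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>

struct pt_cmd_queue_sketch {
	spinlock_t q_lock;	/* was: struct mutex q_mutex */
	u8 *qbase;		/* base of the hardware descriptor ring */
	unsigned int qidx;	/* next descriptor slot */
};

/* May be called from the IRQ handler, so it must not sleep. */
static int pt_submit_desc_sketch(struct pt_cmd_queue_sketch *cmd_q,
				 const void *desc, size_t len)
{
	unsigned long flags;

	/*
	 * spin_lock_irqsave() is safe in both process and interrupt
	 * context; mutex_lock() would sleep and is forbidden here.
	 */
	spin_lock_irqsave(&cmd_q->q_lock, flags);
	memcpy(cmd_q->qbase + cmd_q->qidx * 32, desc, len);
	spin_unlock_irqrestore(&cmd_q->q_lock, flags);

	return 0;
}
```

Because spin_lock_irqsave() keeps local interrupts disabled while the lock is held, the critical section stays short (the 32-byte descriptor copy and the tail update), which is the usual trade-off when converting a mutex for use in IRQ context.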
Diffstat (limited to 'drivers/dma/ptdma/ptdma-dev.c')
-rw-r--r--  drivers/dma/ptdma/ptdma-dev.c | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/drivers/dma/ptdma/ptdma-dev.c b/drivers/dma/ptdma/ptdma-dev.c
index 377da23012ac..a2bf13ff18b6 100644
--- a/drivers/dma/ptdma/ptdma-dev.c
+++ b/drivers/dma/ptdma/ptdma-dev.c
@@ -71,12 +71,13 @@ static int pt_core_execute_cmd(struct ptdma_desc *desc, struct pt_cmd_queue *cmd
bool soc = FIELD_GET(DWORD0_SOC, desc->dw0);
u8 *q_desc = (u8 *)&cmd_q->qbase[cmd_q->qidx];
u32 tail;
+ unsigned long flags;
if (soc) {
desc->dw0 |= FIELD_PREP(DWORD0_IOC, desc->dw0);
desc->dw0 &= ~DWORD0_SOC;
}
- mutex_lock(&cmd_q->q_mutex);
+ spin_lock_irqsave(&cmd_q->q_lock, flags);
/* Copy 32-byte command descriptor to hw queue. */
memcpy(q_desc, desc, 32);
@@ -91,7 +92,7 @@ static int pt_core_execute_cmd(struct ptdma_desc *desc, struct pt_cmd_queue *cmd
/* Turn the queue back on using our cached control register */
pt_start_queue(cmd_q);
- mutex_unlock(&cmd_q->q_mutex);
+ spin_unlock_irqrestore(&cmd_q->q_lock, flags);
return 0;
}
@@ -199,7 +200,7 @@ int pt_core_init(struct pt_device *pt)
cmd_q->pt = pt;
cmd_q->dma_pool = dma_pool;
- mutex_init(&cmd_q->q_mutex);
+ spin_lock_init(&cmd_q->q_lock);
/* Page alignment satisfies our needs for N <= 128 */
cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
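
This diffstat is limited to ptdma-dev.c, so the matching structure change is not shown here. Assuming the queue definition lives in the driver's header alongside the rest of the ptdma structures, the field swap would look roughly like the sketch below; the struct name and surrounding fields are placeholders, not the verbatim header diff.

```c
/* Hypothetical sketch of the queue struct after the change. */
#include <linux/spinlock.h>

struct pt_cmd_queue_after {
	/*
	 * Replaces the old "struct mutex q_mutex"; initialized with
	 * spin_lock_init() in pt_core_init() as shown in the hunk above.
	 */
	spinlock_t q_lock;

	/* ...other queue bookkeeping fields elided... */
};
```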