| author | Eric Pilmore <epilmore@gigaio.com> | 2023-01-18 19:39:08 -0800 |
|---|---|---|
| committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2023-02-01 08:34:25 +0100 |
| commit | 13ba563c2c8055ba8a637c9f70bb833b43cb4207 (patch) | |
| tree | 5faa351bca7e316efe4239e7960cb4571a74b004 /drivers/dma | |
| parent | 0c5213ce6e7234a030ac67a1c0582de5b41a8de6 (diff) | |
ptdma: pt_core_execute_cmd() should use spinlock
[ Upstream commit 95e5fda3b5f9ed8239b145da3fa01e641cf5d53c ]
The interrupt handler (pt_core_irq_handler()) of the ptdma
driver runs in interrupt context. Its code flow can reach
pt_core_execute_cmd(), which attempts to grab a mutex; taking
a sleeping lock is not allowed in interrupt context and
ultimately leads to a kernel panic. Fix this by converting
the mutex to a spinlock, which has been verified to resolve
the issue.
Fixes: fa5d823b16a9 ("dmaengine: ptdma: Initial driver for the AMD PTDMA")
Signed-off-by: Eric Pilmore <epilmore@gigaio.com>
Link: https://lore.kernel.org/r/20230119033907.35071-1-epilmore@gigaio.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
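The change follows the usual rule for state shared with an interrupt handler: a mutex can sleep and therefore must not be taken in hard-IRQ context, so the queue lock becomes a spinlock and the submission path uses the irqsave variant, which is safe regardless of the context it is entered from. Below is a minimal sketch of that pattern; all names (my_queue, my_submit(), my_irq_handler(), my_setup()) are hypothetical and are not part of the ptdma driver.

```c
/*
 * Hedged sketch of the locking pattern applied by this fix.  The names
 * my_queue, my_submit, my_irq_handler and my_setup are illustrative only
 * and do not exist in the ptdma driver.
 */
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct my_queue {
	spinlock_t lock;	/* was a struct mutex: unsafe in hard-IRQ context */
	unsigned int qidx;	/* shared hardware queue state */
};

/* Submission path: may run in process context or under the IRQ handler. */
static void my_submit(struct my_queue *mq)
{
	unsigned long flags;

	/*
	 * spin_lock_irqsave() never sleeps and disables local interrupts,
	 * so this critical section is safe from any context; mutex_lock()
	 * here could sleep, which is forbidden in interrupt context.
	 */
	spin_lock_irqsave(&mq->lock, flags);
	mq->qidx++;		/* stand-in for writing the descriptor ring */
	spin_unlock_irqrestore(&mq->lock, flags);
}

/* Interrupt handler: runs in hard-IRQ context, so no sleeping locks. */
static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	struct my_queue *mq = dev_id;

	my_submit(mq);		/* same kind of path the ptdma IRQ handler reaches */
	return IRQ_HANDLED;
}

/* One-time setup, mirroring the spin_lock_init() change in pt_core_init(). */
static void my_setup(struct my_queue *mq)
{
	spin_lock_init(&mq->lock);
	mq->qidx = 0;
}
```

The irqsave/irqrestore variant is used rather than plain spin_lock() because the same submission path can be entered both from process context and from the interrupt handler; disabling local interrupts while the lock is held prevents the handler from deadlocking on a lock already taken on the same CPU.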
Diffstat (limited to 'drivers/dma')
| -rw-r--r-- | drivers/dma/ptdma/ptdma-dev.c | 7 |
| -rw-r--r-- | drivers/dma/ptdma/ptdma.h | 2 |
2 files changed, 5 insertions, 4 deletions
```diff
diff --git a/drivers/dma/ptdma/ptdma-dev.c b/drivers/dma/ptdma/ptdma-dev.c
index 377da23012ac..a2bf13ff18b6 100644
--- a/drivers/dma/ptdma/ptdma-dev.c
+++ b/drivers/dma/ptdma/ptdma-dev.c
@@ -71,12 +71,13 @@ static int pt_core_execute_cmd(struct ptdma_desc *desc, struct pt_cmd_queue *cmd
 	bool soc = FIELD_GET(DWORD0_SOC, desc->dw0);
 	u8 *q_desc = (u8 *)&cmd_q->qbase[cmd_q->qidx];
 	u32 tail;
+	unsigned long flags;
 
 	if (soc) {
 		desc->dw0 |= FIELD_PREP(DWORD0_IOC, desc->dw0);
 		desc->dw0 &= ~DWORD0_SOC;
 	}
-	mutex_lock(&cmd_q->q_mutex);
+	spin_lock_irqsave(&cmd_q->q_lock, flags);
 
 	/* Copy 32-byte command descriptor to hw queue. */
 	memcpy(q_desc, desc, 32);
@@ -91,7 +92,7 @@
 	/* Turn the queue back on using our cached control register */
 	pt_start_queue(cmd_q);
 
-	mutex_unlock(&cmd_q->q_mutex);
+	spin_unlock_irqrestore(&cmd_q->q_lock, flags);
 
 	return 0;
 }
@@ -199,7 +200,7 @@ int pt_core_init(struct pt_device *pt)
 	cmd_q->pt = pt;
 	cmd_q->dma_pool = dma_pool;
 
-	mutex_init(&cmd_q->q_mutex);
+	spin_lock_init(&cmd_q->q_lock);
 
 	/* Page alignment satisfies our needs for N <= 128 */
 	cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
diff --git a/drivers/dma/ptdma/ptdma.h b/drivers/dma/ptdma/ptdma.h
index d093c43b7d13..21b4bf895200 100644
--- a/drivers/dma/ptdma/ptdma.h
+++ b/drivers/dma/ptdma/ptdma.h
@@ -196,7 +196,7 @@ struct pt_cmd_queue {
 	struct ptdma_desc *qbase;
 
 	/* Aligned queue start address (per requirement) */
-	struct mutex q_mutex ____cacheline_aligned;
+	spinlock_t q_lock ____cacheline_aligned;
 	unsigned int qidx;
 
 	unsigned int qsize;
```