author     Dave Jiang <dave.jiang@intel.com>                  2022-04-26 15:32:06 -0700
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2022-06-14 18:36:28 +0200
commit     cfe3dd8bd5262749b1b1b6e07169acaa82fba64a (patch)
tree       e14e559fe634fa37ee3d4f8ceb40276f99ec9b7c
parent     fb5e51c0aa973df7d9995992f4ff48e5bb59a8ac (diff)
dmaengine: idxd: add missing callback function to support DMA_INTERRUPT
commit 2112b8f4fb5cc35d1c384324763765953186b81f upstream.
When setting the DMA_INTERRUPT capability, the
dma->device_prep_dma_interrupt() callback must also be provided to
support it. Without the callback, dma_async_device_register() will fail
the DMA capability check.
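
For reference, the check that trips here has roughly the following shape.
This is an illustrative sketch only, not the verbatim code in
drivers/dma/dmaengine.c; the helper name demo_check_interrupt_cap and the
error message are made up for this example:

#include <linux/dmaengine.h>
#include <linux/device.h>
#include <linux/errno.h>

/*
 * Illustrative only: a dma_device that advertises DMA_INTERRUPT in its
 * capability mask but leaves ->device_prep_dma_interrupt NULL is
 * rejected at registration time.  See dma_async_device_register() for
 * the real check; this helper and its message are hypothetical.
 */
static int demo_check_interrupt_cap(struct dma_device *dma)
{
        if (dma_has_cap(DMA_INTERRUPT, dma->cap_mask) &&
            !dma->device_prep_dma_interrupt) {
                dev_err(dma->dev,
                        "DMA_INTERRUPT capability set, but no prep callback\n");
                return -EIO;
        }
        return 0;
}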
Fixes: 4e5a4eb20393 ("dmaengine: idxd: set DMA_INTERRUPT cap bit")
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Link: https://lore.kernel.org/r/165101232637.3951447.15765792791591763119.stgit@djiang5-desk3.ch.intel.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 drivers/dma/idxd/dma.c | 22 ++++++++++++++++++++++++
 1 file changed, 22 insertions(+), 0 deletions(-)
diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c
index acb5681e25ed..29af898f3c24 100644
--- a/drivers/dma/idxd/dma.c
+++ b/drivers/dma/idxd/dma.c
@@ -78,6 +78,27 @@ static inline void idxd_prep_desc_common(struct idxd_wq *wq,
 }
 
 static struct dma_async_tx_descriptor *
+idxd_dma_prep_interrupt(struct dma_chan *c, unsigned long flags)
+{
+        struct idxd_wq *wq = to_idxd_wq(c);
+        u32 desc_flags;
+        struct idxd_desc *desc;
+
+        if (wq->state != IDXD_WQ_ENABLED)
+                return NULL;
+
+        op_flag_setup(flags, &desc_flags);
+        desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
+        if (IS_ERR(desc))
+                return NULL;
+
+        idxd_prep_desc_common(wq, desc->hw, DSA_OPCODE_NOOP,
+                              0, 0, 0, desc->compl_dma, desc_flags);
+        desc->txd.flags = flags;
+        return &desc->txd;
+}
+
+static struct dma_async_tx_descriptor *
 idxd_dma_submit_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
                        dma_addr_t dma_src, size_t len, unsigned long flags)
 {
@@ -186,6 +207,7 @@ int idxd_register_dma_device(struct idxd_device *idxd)
         dma_cap_set(DMA_COMPLETION_NO_ORDER, dma->cap_mask);
         dma->device_release = idxd_dma_release;
 
+        dma->device_prep_dma_interrupt = idxd_dma_prep_interrupt;
         if (idxd->hw.opcap.bits[0] & IDXD_OPCAP_MEMMOVE) {
                 dma_cap_set(DMA_MEMCPY, dma->cap_mask);
                 dma->device_prep_dma_memcpy = idxd_dma_submit_memcpy;
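
As a usage illustration, the sketch below shows how a dmaengine client
could exercise the new callback once an idxd channel advertises
DMA_INTERRUPT. It is not part of the patch; demo_done and
demo_trigger_interrupt are hypothetical names, and error paths are
trimmed:

#include <linux/completion.h>
#include <linux/dmaengine.h>
#include <linux/errno.h>

/* Completion callback invoked when the interrupt descriptor finishes. */
static void demo_done(void *arg)
{
        complete(arg);
}

/* Hypothetical helper: queue a DMA_INTERRUPT descriptor and wait for it. */
static int demo_trigger_interrupt(struct dma_chan *chan)
{
        struct dma_async_tx_descriptor *tx;
        struct completion done;
        dma_cookie_t cookie;

        init_completion(&done);

        /* This is the callback the patch wires up for idxd channels. */
        tx = chan->device->device_prep_dma_interrupt(chan, DMA_PREP_INTERRUPT);
        if (!tx)
                return -ENOMEM;

        tx->callback = demo_done;
        tx->callback_param = &done;

        cookie = dmaengine_submit(tx);
        if (dma_submit_error(cookie))
                return -EIO;

        dma_async_issue_pending(chan);
        wait_for_completion(&done);

        return 0;
}

In the idxd implementation above, the interrupt-only descriptor is
realized as a DSA NOOP operation with a completion record
(desc->compl_dma), so the completion interrupt fires without
transferring any data.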