From 4e4106f5e942bff65548e82fc330d40385c89220 Mon Sep 17 00:00:00 2001 From: Paul Cercueil Date: Sat, 4 May 2019 23:37:57 +0200 Subject: dmaengine: jz4780: Fix transfers being ACKed too soon When a multi-descriptor DMA transfer is in progress, the "IRQ pending" flag will apparently be set for that channel as soon as the last descriptor loads, way before the IRQ actually happens. This behaviour has been observed on the JZ4725B, but maybe other SoCs are affected. In the case where another DMA transfer is running into completion on a separate channel, the IRQ handler would then run the completion handler for our previous channel even if the transfer didn't actually finish. Fix this by checking in the completion handler that we're indeed done; if not the interrupted DMA transfer will simply be resumed. Signed-off-by: Paul Cercueil Signed-off-by: Vinod Koul --- drivers/dma/dma-jz4780.c | 32 +++++++++++++++++++++----------- 1 file changed, 21 insertions(+), 11 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c index 9ce0a386225b..f49534019d37 100644 --- a/drivers/dma/dma-jz4780.c +++ b/drivers/dma/dma-jz4780.c @@ -666,10 +666,11 @@ static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan, return status; } -static void jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma, - struct jz4780_dma_chan *jzchan) +static bool jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma, + struct jz4780_dma_chan *jzchan) { uint32_t dcs; + bool ack = true; spin_lock(&jzchan->vchan.lock); @@ -692,12 +693,20 @@ static void jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma, if ((dcs & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT)) == 0) { if (jzchan->desc->type == DMA_CYCLIC) { vchan_cyclic_callback(&jzchan->desc->vdesc); - } else { + + jz4780_dma_begin(jzchan); + } else if (dcs & JZ_DMA_DCS_TT) { vchan_cookie_complete(&jzchan->desc->vdesc); jzchan->desc = NULL; - } - jz4780_dma_begin(jzchan); + jz4780_dma_begin(jzchan); + } else { + /* False positive - continue the transfer */ + ack = false; + jz4780_dma_chn_writel(jzdma, jzchan->id, + JZ_DMA_REG_DCS, + JZ_DMA_DCS_CTE); + } } } else { dev_err(&jzchan->vchan.chan.dev->device, @@ -705,21 +714,22 @@ static void jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma, } spin_unlock(&jzchan->vchan.lock); + + return ack; } static irqreturn_t jz4780_dma_irq_handler(int irq, void *data) { struct jz4780_dma_dev *jzdma = data; + unsigned int nb_channels = jzdma->soc_data->nb_channels; uint32_t pending, dmac; int i; pending = jz4780_dma_ctrl_readl(jzdma, JZ_DMA_REG_DIRQP); - for (i = 0; i < jzdma->soc_data->nb_channels; i++) { - if (!(pending & (1<chan[i]); + for_each_set_bit(i, (unsigned long *)&pending, nb_channels) { + if (jz4780_dma_chan_irq(jzdma, &jzdma->chan[i])) + pending &= ~BIT(i); } /* Clear halt and address error status of all channels. */ @@ -728,7 +738,7 @@ static irqreturn_t jz4780_dma_irq_handler(int irq, void *data) jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DMAC, dmac); /* Clear interrupt pending status. 
*/ - jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DIRQP, 0); + jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DIRQP, pending); return IRQ_HANDLED; } -- cgit v1.2.3 From 827484912e824b7c8c2e1866272084e0e1d3a2c4 Mon Sep 17 00:00:00 2001 From: Peng Ma Date: Mon, 6 May 2019 10:21:11 +0800 Subject: dmaengine: fsl-qdma: Add improvement When an error occurs we should clean the error register then to return Signed-off-by: Peng Ma Signed-off-by: Vinod Koul --- drivers/dma/fsl-qdma.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c index aa1d0ae3d207..60b062c3647b 100644 --- a/drivers/dma/fsl-qdma.c +++ b/drivers/dma/fsl-qdma.c @@ -701,10 +701,8 @@ static irqreturn_t fsl_qdma_error_handler(int irq, void *dev_id) intr = qdma_readl(fsl_qdma, status + FSL_QDMA_DEDR); - if (intr) { + if (intr) dev_err(fsl_qdma->dma_dev.dev, "DMA transaction error!\n"); - return IRQ_NONE; - } qdma_writel(fsl_qdma, FSL_QDMA_DEDR_CLEAR, status + FSL_QDMA_DEDR); return IRQ_HANDLED; -- cgit v1.2.3 From 0788611c9a0925c607de536b2449de5ed98ef8df Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 8 May 2019 23:33:29 +0100 Subject: dmaengine: dw-axi-dmac: fix null dereference when pointer first is null In the unlikely event that axi_desc_get returns a null desc in the very first iteration of the while-loop the error exit path ends up calling axi_desc_put on a null pointer 'first' and this causes a null pointer dereference. Fix this by adding a null check on pointer 'first' before calling axi_desc_put. Addresses-Coverity: ("Explicit null dereference") Fixes: 1fe20f1b8454 ("dmaengine: Introduce DW AXI DMAC driver") Signed-off-by: Colin Ian King Signed-off-by: Vinod Koul --- drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/dma') diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c index b2ac1d2c5b86..a1ce307c502f 100644 --- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c +++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c @@ -512,7 +512,8 @@ dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr, return vchan_tx_prep(&chan->vc, &first->vd, flags); err_desc_get: - axi_desc_put(first); + if (first) + axi_desc_put(first); return NULL; } -- cgit v1.2.3 From 069b3c4214f27b130d0642f32438560db30f452e Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 9 May 2019 13:09:23 +0300 Subject: dmaengine: mediatek-cqdma: sleeping in atomic context The mtk_cqdma_poll_engine_done() function takes a true/false parameter where true means it's called from atomic context. There are a couple places where it was set to false but it's actually in atomic context so it should be true. All the callers for mtk_cqdma_hard_reset() are holding a spin_lock and in mtk_cqdma_free_chan_resources() we take a spin_lock before calling the mtk_cqdma_poll_engine_done() function. 
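As a rough illustration of why that flag matters, a poll helper of this shape must busy-wait rather than sleep whenever its caller holds a spinlock. The sketch below is not the driver's actual helper; the register and bit are placeholders, and only the readl_poll_timeout*() macros from <linux/iopoll.h> are real kernel API.

#include <linux/bits.h>
#include <linux/iopoll.h>
#include <linux/types.h>

/* Illustrative only: same shape as a poll-done helper that takes an "atomic" flag. */
static int example_poll_done(void __iomem *status, bool atomic)
{
	u32 val;

	if (atomic)
		/* Busy-waits between reads; safe under a spinlock or in IRQ context. */
		return readl_poll_timeout_atomic(status, val, !(val & BIT(0)),
						 10, 1000);

	/* May sleep between reads; only legal in process context. */
	return readl_poll_timeout(status, val, !(val & BIT(0)), 100, 10000);
}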
Fixes: b1f01e48df5a ("dmaengine: mediatek: Add MediaTek Command-Queue DMA controller for MT6765 SoC") Signed-off-by: Dan Carpenter Signed-off-by: Vinod Koul --- drivers/dma/mediatek/mtk-cqdma.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/mediatek/mtk-cqdma.c b/drivers/dma/mediatek/mtk-cqdma.c index 814853842e29..723b11c190b3 100644 --- a/drivers/dma/mediatek/mtk-cqdma.c +++ b/drivers/dma/mediatek/mtk-cqdma.c @@ -225,7 +225,7 @@ static int mtk_cqdma_hard_reset(struct mtk_cqdma_pchan *pc) mtk_dma_set(pc, MTK_CQDMA_RESET, MTK_CQDMA_HARD_RST_BIT); mtk_dma_clr(pc, MTK_CQDMA_RESET, MTK_CQDMA_HARD_RST_BIT); - return mtk_cqdma_poll_engine_done(pc, false); + return mtk_cqdma_poll_engine_done(pc, true); } static void mtk_cqdma_start(struct mtk_cqdma_pchan *pc, @@ -671,7 +671,7 @@ static void mtk_cqdma_free_chan_resources(struct dma_chan *c) mtk_dma_set(cvc->pc, MTK_CQDMA_FLUSH, MTK_CQDMA_FLUSH_BIT); /* wait for the completion of flush operation */ - if (mtk_cqdma_poll_engine_done(cvc->pc, false) < 0) + if (mtk_cqdma_poll_engine_done(cvc->pc, true) < 0) dev_err(cqdma2dev(to_cqdma_dev(c)), "cqdma flush timeout\n"); /* clear the flush bit and interrupt flag */ -- cgit v1.2.3 From b53611fb1ce9b1786bd18205473e0c1d6bfa8934 Mon Sep 17 00:00:00 2001 From: Jon Hunter Date: Thu, 16 May 2019 16:53:52 +0100 Subject: dmaengine: tegra210-adma: Fix crash during probe Commit f33e7bb3eb92 ("dmaengine: tegra210-adma: restore channel status") added support to save and restore the DMA channel registers when runtime suspending the ADMA. This change is causing the kernel to crash when probing the ADMA, if the device is probed deferred when looking up the channel interrupts. The crash occurs because not all of the channel base addresses have been setup at this point and in the clean-up path of the probe, pm_runtime_suspend() is called invoking its callback which expects all the channel base addresses to be initialised. Although this could be fixed by simply checking for a NULL address, on further review of the driver it seems more appropriate that we only call pm_runtime_get_sync() after all the channel interrupts and base addresses have been configured. Therefore, fix this crash by moving the calls to pm_runtime_enable(), pm_runtime_get_sync() and tegra_adma_init() after the DMA channels have been initialised. 
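In outline, the corrected probe ordering looks like the following sketch. The example_* helpers and EXAMPLE_NR_CHANNELS are invented for illustration; only the runtime-PM calls are the real kernel API, and the unwind labels mirror the order used by the patch.

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#define EXAMPLE_NR_CHANNELS	8	/* hypothetical */

/* Hypothetical per-driver helpers, declared only to keep the sketch compact. */
static int example_setup_channel(struct platform_device *pdev, int i);
static void example_teardown_channel(struct platform_device *pdev, int i);
static int example_hw_init(struct platform_device *pdev);

static int example_probe(struct platform_device *pdev)
{
	int i, ret;

	/* 1) Set up every channel (IRQ lookup, register base) first. */
	for (i = 0; i < EXAMPLE_NR_CHANNELS; i++) {
		ret = example_setup_channel(pdev, i);
		if (ret)
			goto irq_dispose;
	}

	/* 2) Only now enable runtime PM, so a suspend callback triggered from
	 *    the error path never sees half-initialised channel state. */
	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0)
		goto rpm_disable;

	ret = example_hw_init(pdev);	/* tegra_adma_init() in the real driver */
	if (ret)
		goto rpm_put;

	return 0;

rpm_put:
	pm_runtime_put_sync(&pdev->dev);
rpm_disable:
	pm_runtime_disable(&pdev->dev);
irq_dispose:
	while (--i >= 0)
		example_teardown_channel(pdev, i);
	return ret;
}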
Fixes: f33e7bb3eb92 ("dmaengine: tegra210-adma: restore channel status") Signed-off-by: Jon Hunter Signed-off-by: Vinod Koul --- drivers/dma/tegra210-adma.c | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c index 21f6be16d013..3ec3d71acd25 100644 --- a/drivers/dma/tegra210-adma.c +++ b/drivers/dma/tegra210-adma.c @@ -845,16 +845,6 @@ static int tegra_adma_probe(struct platform_device *pdev) return PTR_ERR(tdma->ahub_clk); } - pm_runtime_enable(&pdev->dev); - - ret = pm_runtime_get_sync(&pdev->dev); - if (ret < 0) - goto rpm_disable; - - ret = tegra_adma_init(tdma); - if (ret) - goto rpm_put; - INIT_LIST_HEAD(&tdma->dma_dev.channels); for (i = 0; i < tdma->nr_channels; i++) { struct tegra_adma_chan *tdc = &tdma->channels[i]; @@ -873,6 +863,16 @@ static int tegra_adma_probe(struct platform_device *pdev) tdc->tdma = tdma; } + pm_runtime_enable(&pdev->dev); + + ret = pm_runtime_get_sync(&pdev->dev); + if (ret < 0) + goto rpm_disable; + + ret = tegra_adma_init(tdma); + if (ret) + goto rpm_put; + dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask); dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask); dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask); @@ -916,13 +916,13 @@ static int tegra_adma_probe(struct platform_device *pdev) dma_remove: dma_async_device_unregister(&tdma->dma_dev); -irq_dispose: - while (--i >= 0) - irq_dispose_mapping(tdma->channels[i].irq); rpm_put: pm_runtime_put_sync(&pdev->dev); rpm_disable: pm_runtime_disable(&pdev->dev); +irq_dispose: + while (--i >= 0) + irq_dispose_mapping(tdma->channels[i].irq); return ret; } -- cgit v1.2.3 From 9ab59bf5dd6380a56e2897c92c5cd920ae4b0f8b Mon Sep 17 00:00:00 2001 From: Jon Hunter Date: Thu, 16 May 2019 16:53:53 +0100 Subject: dmaengine: tegra210-adma: Fix channel FIFO configuration Commit ded1f3db4cd6 ("dmaengine: tegra210-adma: prepare for supporting newer Tegra chips") removed the default settings DMA channel RX and TX FIFO sizes and this is breaking DMA transfers. The intention was to move the default settings to the chip specific data structure because this commit was preparing for adding support for Tegra186 where the fields for the FIFO CTRL register are slightly different. Fix the configuration of the FIFO sizes by adding default values for the FIFO CTRL register for both Tegra210 and Tegra186 and store the values in the chip specific structure. 
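As a worked example of what the new Tegra210 default evaluates to, the small hosted-C program below reuses the field helpers added by this patch; it is purely illustrative.

#include <stdio.h>

/* Tegra210 field helpers, as added by this patch. */
#define OFLWTHRES(val)	(((val) & 0xf) << 24)
#define STRVTHRES(val)	(((val) & 0xf) << 16)
#define TXSIZE(val)	(((val) & 0xf) << 8)
#define RXSIZE(val)	((val) & 0xf)

int main(void)
{
	/* Overflow/starvation thresholds of 1, TX/RX FIFO sizes of 3. */
	unsigned int fifo_ctrl = OFLWTHRES(1) | STRVTHRES(1) | TXSIZE(3) | RXSIZE(3);

	printf("TEGRA210_FIFO_CTRL_DEFAULT = 0x%08x\n", fifo_ctrl);	/* 0x01010303 */
	return 0;
}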
Fixes: ded1f3db4cd6 ("dmaengine: tegra210-adma: prepare for supporting newer Tegra chips") Signed-off-by: Jon Hunter Signed-off-by: Vinod Koul --- drivers/dma/tegra210-adma.c | 29 ++++++++++++++++++++++------- 1 file changed, 22 insertions(+), 7 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c index 3ec3d71acd25..3f50fd11c380 100644 --- a/drivers/dma/tegra210-adma.c +++ b/drivers/dma/tegra210-adma.c @@ -53,10 +53,14 @@ #define ADMA_CH_CONFIG_MAX_BUFS 8 #define ADMA_CH_FIFO_CTRL 0x2c -#define ADMA_CH_FIFO_CTRL_OVRFW_THRES(val) (((val) & 0xf) << 24) -#define ADMA_CH_FIFO_CTRL_STARV_THRES(val) (((val) & 0xf) << 16) -#define ADMA_CH_FIFO_CTRL_TX_FIFO_SIZE_SHIFT 8 -#define ADMA_CH_FIFO_CTRL_RX_FIFO_SIZE_SHIFT 0 +#define TEGRA210_ADMA_CH_FIFO_CTRL_OFLWTHRES(val) (((val) & 0xf) << 24) +#define TEGRA210_ADMA_CH_FIFO_CTRL_STRVTHRES(val) (((val) & 0xf) << 16) +#define TEGRA210_ADMA_CH_FIFO_CTRL_TXSIZE(val) (((val) & 0xf) << 8) +#define TEGRA210_ADMA_CH_FIFO_CTRL_RXSIZE(val) ((val) & 0xf) +#define TEGRA186_ADMA_CH_FIFO_CTRL_OFLWTHRES(val) (((val) & 0x1f) << 24) +#define TEGRA186_ADMA_CH_FIFO_CTRL_STRVTHRES(val) (((val) & 0x1f) << 16) +#define TEGRA186_ADMA_CH_FIFO_CTRL_TXSIZE(val) (((val) & 0x1f) << 8) +#define TEGRA186_ADMA_CH_FIFO_CTRL_RXSIZE(val) ((val) & 0x1f) #define ADMA_CH_LOWER_SRC_ADDR 0x34 #define ADMA_CH_LOWER_TRG_ADDR 0x3c @@ -71,8 +75,15 @@ #define TEGRA_ADMA_BURST_COMPLETE_TIME 20 -#define ADMA_CH_FIFO_CTRL_DEFAULT (ADMA_CH_FIFO_CTRL_OVRFW_THRES(1) | \ - ADMA_CH_FIFO_CTRL_STARV_THRES(1)) +#define TEGRA210_FIFO_CTRL_DEFAULT (TEGRA210_ADMA_CH_FIFO_CTRL_OFLWTHRES(1) | \ + TEGRA210_ADMA_CH_FIFO_CTRL_STRVTHRES(1) | \ + TEGRA210_ADMA_CH_FIFO_CTRL_TXSIZE(3) | \ + TEGRA210_ADMA_CH_FIFO_CTRL_RXSIZE(3)) + +#define TEGRA186_FIFO_CTRL_DEFAULT (TEGRA186_ADMA_CH_FIFO_CTRL_OFLWTHRES(1) | \ + TEGRA186_ADMA_CH_FIFO_CTRL_STRVTHRES(1) | \ + TEGRA186_ADMA_CH_FIFO_CTRL_TXSIZE(3) | \ + TEGRA186_ADMA_CH_FIFO_CTRL_RXSIZE(3)) #define ADMA_CH_REG_FIELD_VAL(val, mask, shift) (((val) & mask) << shift) @@ -85,6 +96,7 @@ struct tegra_adma; * @ch_req_tx_shift: Register offset for AHUB transmit channel select. * @ch_req_rx_shift: Register offset for AHUB receive channel select. * @ch_base_offset: Reister offset of DMA channel registers. + * @ch_fifo_ctrl: Default value for channel FIFO CTRL register. * @ch_req_mask: Mask for Tx or Rx channel select. * @ch_req_max: Maximum number of Tx or Rx channels available. * @ch_reg_size: Size of DMA channel register space. 
@@ -97,6 +109,7 @@ struct tegra_adma_chip_data { unsigned int ch_req_tx_shift; unsigned int ch_req_rx_shift; unsigned int ch_base_offset; + unsigned int ch_fifo_ctrl; unsigned int ch_req_mask; unsigned int ch_req_max; unsigned int ch_reg_size; @@ -600,7 +613,7 @@ static int tegra_adma_set_xfer_params(struct tegra_adma_chan *tdc, ADMA_CH_CTRL_FLOWCTRL_EN; ch_regs->config |= cdata->adma_get_burst_config(burst_size); ch_regs->config |= ADMA_CH_CONFIG_WEIGHT_FOR_WRR(1); - ch_regs->fifo_ctrl = ADMA_CH_FIFO_CTRL_DEFAULT; + ch_regs->fifo_ctrl = cdata->ch_fifo_ctrl; ch_regs->tc = desc->period_len & ADMA_CH_TC_COUNT_MASK; return tegra_adma_request_alloc(tdc, direction); @@ -784,6 +797,7 @@ static const struct tegra_adma_chip_data tegra210_chip_data = { .ch_req_tx_shift = 28, .ch_req_rx_shift = 24, .ch_base_offset = 0, + .ch_fifo_ctrl = TEGRA210_FIFO_CTRL_DEFAULT, .ch_req_mask = 0xf, .ch_req_max = 10, .ch_reg_size = 0x80, @@ -797,6 +811,7 @@ static const struct tegra_adma_chip_data tegra186_chip_data = { .ch_req_tx_shift = 27, .ch_req_rx_shift = 22, .ch_base_offset = 0x10000, + .ch_fifo_ctrl = TEGRA186_FIFO_CTRL_DEFAULT, .ch_req_mask = 0x1f, .ch_req_max = 20, .ch_reg_size = 0x100, -- cgit v1.2.3 From 492252493ea382d12cb61c52295fc2d088bba28f Mon Sep 17 00:00:00 2001 From: Jon Hunter Date: Thu, 16 May 2019 16:53:54 +0100 Subject: dmaengine: tegra210-adma: Fix spelling Correct spelling of 'register' in Tegra210 ADMA driver. Fixes: ded1f3db4cd6 ("dmaengine: tegra210-adma: prepare for supporting newer Tegra chips") Signed-off-by: Jon Hunter Signed-off-by: Vinod Koul --- drivers/dma/tegra210-adma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/dma') diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c index 3f50fd11c380..17ea4dd99c62 100644 --- a/drivers/dma/tegra210-adma.c +++ b/drivers/dma/tegra210-adma.c @@ -95,7 +95,7 @@ struct tegra_adma; * @global_int_clear: Register offset of DMA global interrupt clear. * @ch_req_tx_shift: Register offset for AHUB transmit channel select. * @ch_req_rx_shift: Register offset for AHUB receive channel select. - * @ch_base_offset: Reister offset of DMA channel registers. + * @ch_base_offset: Register offset of DMA channel registers. * @ch_fifo_ctrl: Default value for channel FIFO CTRL register. * @ch_req_mask: Mask for Tx or Rx channel select. * @ch_req_max: Maximum number of Tx or Rx channels available. -- cgit v1.2.3 From 16d0f85e45b99411ac10cb12cdd9279204a72381 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Mon, 6 May 2019 15:28:28 +0800 Subject: dmaengine: sprd: Fix the possible crash when getting descriptor status We will get a NULL virtual descriptor by vchan_find_desc() when the descriptor has been submitted, that will crash the kernel when getting the descriptor status. In this case, since the descriptor has been submitted to process, but it is not completed now, which means the descriptor is listed into the 'vc->desc_submitted' list now. So we can not get current processing descriptor by vchan_find_desc(), but the pointer 'schan->cur_desc' will point to the current processing descriptor, then we can use 'schan->cur_desc' to get current processing descriptor's status to avoid this issue. 
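The general shape of such a tx_status callback in a virt-dma based driver is sketched below. All example_* names and the two structs are invented; dma_cookie_status(), vchan_find_desc() and dma_set_residue() are the real driver-internal helpers.

#include <linux/dmaengine.h>
#include "virt-dma.h"	/* driver-internal: vchan_find_desc(), to_virt_chan() */
#include "dmaengine.h"	/* driver-internal: dma_cookie_status(), dma_set_residue() */

/* Minimal invented driver types, just enough to show the pattern. */
struct example_desc {
	struct virt_dma_desc	vd;
	u32			total_len;
};

struct example_chan {
	struct virt_dma_chan	vc;
	struct example_desc	*cur_desc;	/* descriptor currently on the hardware */
};

static enum dma_status example_tx_status(struct dma_chan *chan,
					 dma_cookie_t cookie,
					 struct dma_tx_state *txstate)
{
	struct example_chan *echan =
		container_of(to_virt_chan(chan), struct example_chan, vc);
	struct virt_dma_desc *vd;
	enum dma_status status;
	unsigned long flags;
	u32 residue = 0;

	status = dma_cookie_status(chan, cookie, txstate);
	if (status == DMA_COMPLETE || !txstate)
		return status;

	spin_lock_irqsave(&echan->vc.lock, flags);
	vd = vchan_find_desc(&echan->vc, cookie);
	if (vd) {
		/* Still on the issued list: nothing has been transferred yet. */
		residue = container_of(vd, struct example_desc, vd)->total_len;
	} else if (echan->cur_desc && echan->cur_desc->vd.tx.cookie == cookie) {
		/*
		 * In flight: vchan_find_desc() can no longer see it, so report
		 * progress through the driver's cur_desc pointer instead.
		 */
		residue = 0;	/* read the hardware's remaining count here */
	}
	spin_unlock_irqrestore(&echan->vc.lock, flags);

	dma_set_residue(txstate, residue);
	return status;
}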
Signed-off-by: Baolin Wang Signed-off-by: Vinod Koul --- drivers/dma/sprd-dma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/dma') diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c index 48431e2da987..e29342ab85f6 100644 --- a/drivers/dma/sprd-dma.c +++ b/drivers/dma/sprd-dma.c @@ -625,7 +625,7 @@ static enum dma_status sprd_dma_tx_status(struct dma_chan *chan, else pos = 0; } else if (schan->cur_desc && schan->cur_desc->vd.tx.cookie == cookie) { - struct sprd_dma_desc *sdesc = to_sprd_dma_desc(vd); + struct sprd_dma_desc *sdesc = schan->cur_desc; if (sdesc->dir == DMA_DEV_TO_MEM) pos = sprd_dma_get_dst_addr(schan); -- cgit v1.2.3 From 58152b0e573e5581c4b9ef7cf06d2e9fafae27d4 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Mon, 6 May 2019 15:28:29 +0800 Subject: dmaengine: sprd: Add validation of current descriptor in irq handler When user terminates one DMA channel to free all its descriptors, but at the same time one transaction interrupt was triggered possibly, now we should not handle this interrupt by validating if the 'schan->cur_desc' was set as NULL to avoid crashing the kernel. Signed-off-by: Baolin Wang Signed-off-by: Vinod Koul --- drivers/dma/sprd-dma.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c index e29342ab85f6..431e289d59a5 100644 --- a/drivers/dma/sprd-dma.c +++ b/drivers/dma/sprd-dma.c @@ -552,12 +552,17 @@ static irqreturn_t dma_irq_handle(int irq, void *dev_id) schan = &sdev->channels[i]; spin_lock(&schan->vc.lock); + + sdesc = schan->cur_desc; + if (!sdesc) { + spin_unlock(&schan->vc.lock); + return IRQ_HANDLED; + } + int_type = sprd_dma_get_int_type(schan); req_type = sprd_dma_get_req_type(schan); sprd_dma_clear_int(schan); - sdesc = schan->cur_desc; - /* cyclic mode schedule callback */ cyclic = schan->linklist.phy_addr ? true : false; if (cyclic == true) { -- cgit v1.2.3 From 3d626a97f0303e9c30d063434b749de3f0f91fb5 Mon Sep 17 00:00:00 2001 From: Eric Long Date: Mon, 6 May 2019 15:28:30 +0800 Subject: dmaengine: sprd: Fix the incorrect start for 2-stage destination channels The 2-stage destination channel will be triggered by source channel automatically, which means we should not trigger it by software request. Signed-off-by: Eric Long Signed-off-by: Baolin Wang Signed-off-by: Vinod Koul --- drivers/dma/sprd-dma.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/dma') diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c index 431e289d59a5..0f92e60529d1 100644 --- a/drivers/dma/sprd-dma.c +++ b/drivers/dma/sprd-dma.c @@ -510,7 +510,9 @@ static void sprd_dma_start(struct sprd_dma_chn *schan) sprd_dma_set_uid(schan); sprd_dma_enable_chn(schan); - if (schan->dev_id == SPRD_DMA_SOFTWARE_UID) + if (schan->dev_id == SPRD_DMA_SOFTWARE_UID && + schan->chn_mode != SPRD_DMA_DST_CHN0 && + schan->chn_mode != SPRD_DMA_DST_CHN1) sprd_dma_soft_request(schan); } -- cgit v1.2.3 From 89d03b3c126d683f7b2cd5b07178493993d12448 Mon Sep 17 00:00:00 2001 From: Eric Long Date: Mon, 6 May 2019 15:28:31 +0800 Subject: dmaengine: sprd: Fix block length overflow The maximum value of block length is 0xffff, so if the configured transfer length is more than 0xffff, that will cause block length overflow to lead a configuration error. Thus we can set block length as the maximum burst length to avoid this issue, since the maximum burst length will not be a big value which is more than 0xffff. 
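A quick worked example of the overflow, using only the 0xffff field limit stated above; the snippet is illustration only.

#include <stdio.h>

#define BLK_LEN_MASK	0xffffu		/* 16-bit block-length field */

int main(void)
{
	unsigned int len = 0x20000;	/* 128 KiB transfer */
	unsigned int src_maxburst = 4;	/* a typical, small burst length */

	/* Before the fix: a long transfer silently wraps to 0 in the 16-bit field. */
	printf("len & mask      = 0x%x\n", len & BLK_LEN_MASK);		/* 0x0 */

	/* After the fix: the burst length always fits. */
	printf("maxburst & mask = 0x%x\n", src_maxburst & BLK_LEN_MASK);	/* 0x4 */
	return 0;
}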
Signed-off-by: Eric Long Signed-off-by: Baolin Wang Signed-off-by: Vinod Koul --- drivers/dma/sprd-dma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/dma') diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c index 0f92e60529d1..a01c23246632 100644 --- a/drivers/dma/sprd-dma.c +++ b/drivers/dma/sprd-dma.c @@ -778,7 +778,7 @@ static int sprd_dma_fill_desc(struct dma_chan *chan, temp |= slave_cfg->src_maxburst & SPRD_DMA_FRG_LEN_MASK; hw->frg_len = temp; - hw->blk_len = len & SPRD_DMA_BLK_LEN_MASK; + hw->blk_len = slave_cfg->src_maxburst & SPRD_DMA_BLK_LEN_MASK; hw->trsc_len = len & SPRD_DMA_TRSC_LEN_MASK; temp = (dst_step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_DEST_TRSF_STEP_OFFSET; -- cgit v1.2.3 From c434e377dad1dec05cad1870ce21bc539e1e024f Mon Sep 17 00:00:00 2001 From: Eric Long Date: Mon, 6 May 2019 15:28:32 +0800 Subject: dmaengine: sprd: Fix the right place to configure 2-stage transfer Move the 2-stage configuration before configuring the link-list mode, since we will use some 2-stage configuration to fill the link-list configuration. Signed-off-by: Eric Long Signed-off-by: Baolin Wang Signed-off-by: Vinod Koul --- drivers/dma/sprd-dma.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c index a01c23246632..01abed5cde49 100644 --- a/drivers/dma/sprd-dma.c +++ b/drivers/dma/sprd-dma.c @@ -911,6 +911,12 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, schan->linklist.virt_addr = 0; } + /* Set channel mode and trigger mode for 2-stage transfer */ + schan->chn_mode = + (flags >> SPRD_DMA_CHN_MODE_SHIFT) & SPRD_DMA_CHN_MODE_MASK; + schan->trg_mode = + (flags >> SPRD_DMA_TRG_MODE_SHIFT) & SPRD_DMA_TRG_MODE_MASK; + sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT); if (!sdesc) return NULL; @@ -944,12 +950,6 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, } } - /* Set channel mode and trigger mode for 2-stage transfer */ - schan->chn_mode = - (flags >> SPRD_DMA_CHN_MODE_SHIFT) & SPRD_DMA_CHN_MODE_MASK; - schan->trg_mode = - (flags >> SPRD_DMA_TRG_MODE_SHIFT) & SPRD_DMA_TRG_MODE_MASK; - ret = sprd_dma_fill_desc(chan, &sdesc->chn_hw, 0, 0, src, dst, len, dir, flags, slave_cfg); if (ret) { -- cgit v1.2.3 From 9bb9fe0cfbe0aa72fed906ade0590e1702815e5d Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Mon, 6 May 2019 15:28:33 +0800 Subject: dmaengine: sprd: Add interrupt support for 2-stage transfer For 2-stage transfer, some users like Audio still need transaction interrupt to notify when the 2-stage transfer is completed. Thus we should enable 2-stage transfer interrupt to support this feature. 
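From the client side, the interrupt type rides in the low byte of the prep flags, alongside the channel and trigger modes the driver decodes in the hunk below. The following is a hedged sketch only: the SPRD_DMA_* enum names are assumed to come from include/linux/dma/sprd-dma.h and may not match exactly; only dmaengine_prep_slave_sg() is generic API.

#include <linux/dmaengine.h>
#include <linux/dma/sprd-dma.h>

/* Sketch: request a transaction-done interrupt on a 2-stage source channel. */
static struct dma_async_tx_descriptor *
example_prep_2stage(struct dma_chan *chan, struct scatterlist *sgl,
		    unsigned int sg_len)
{
	unsigned long flags;

	flags = SPRD_DMA_TRANS_INT |					/* int_type, bits 7:0 */
		(SPRD_DMA_SRC_CHN0 << SPRD_DMA_CHN_MODE_SHIFT) |	/* channel mode */
		(SPRD_DMA_TRANS_DONE_TRG << SPRD_DMA_TRG_MODE_SHIFT);	/* trigger mode */

	return dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV, flags);
}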
Signed-off-by: Baolin Wang Signed-off-by: Vinod Koul --- drivers/dma/sprd-dma.c | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) (limited to 'drivers/dma') diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c index 01abed5cde49..baac476c8622 100644 --- a/drivers/dma/sprd-dma.c +++ b/drivers/dma/sprd-dma.c @@ -62,6 +62,8 @@ /* SPRD_DMA_GLB_2STAGE_GRP register definition */ #define SPRD_DMA_GLB_2STAGE_EN BIT(24) #define SPRD_DMA_GLB_CHN_INT_MASK GENMASK(23, 20) +#define SPRD_DMA_GLB_DEST_INT BIT(22) +#define SPRD_DMA_GLB_SRC_INT BIT(20) #define SPRD_DMA_GLB_LIST_DONE_TRG BIT(19) #define SPRD_DMA_GLB_TRANS_DONE_TRG BIT(18) #define SPRD_DMA_GLB_BLOCK_DONE_TRG BIT(17) @@ -135,6 +137,7 @@ /* define DMA channel mode & trigger mode mask */ #define SPRD_DMA_CHN_MODE_MASK GENMASK(7, 0) #define SPRD_DMA_TRG_MODE_MASK GENMASK(7, 0) +#define SPRD_DMA_INT_TYPE_MASK GENMASK(7, 0) /* define the DMA transfer step type */ #define SPRD_DMA_NONE_STEP 0 @@ -190,6 +193,7 @@ struct sprd_dma_chn { u32 dev_id; enum sprd_dma_chn_mode chn_mode; enum sprd_dma_trg_mode trg_mode; + enum sprd_dma_int_type int_type; struct sprd_dma_desc *cur_desc; }; @@ -429,6 +433,9 @@ static int sprd_dma_set_2stage_config(struct sprd_dma_chn *schan) val = chn & SPRD_DMA_GLB_SRC_CHN_MASK; val |= BIT(schan->trg_mode - 1) << SPRD_DMA_GLB_TRG_OFFSET; val |= SPRD_DMA_GLB_2STAGE_EN; + if (schan->int_type != SPRD_DMA_NO_INT) + val |= SPRD_DMA_GLB_SRC_INT; + sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP1, val, val); break; @@ -436,6 +443,9 @@ static int sprd_dma_set_2stage_config(struct sprd_dma_chn *schan) val = chn & SPRD_DMA_GLB_SRC_CHN_MASK; val |= BIT(schan->trg_mode - 1) << SPRD_DMA_GLB_TRG_OFFSET; val |= SPRD_DMA_GLB_2STAGE_EN; + if (schan->int_type != SPRD_DMA_NO_INT) + val |= SPRD_DMA_GLB_SRC_INT; + sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP2, val, val); break; @@ -443,6 +453,9 @@ static int sprd_dma_set_2stage_config(struct sprd_dma_chn *schan) val = (chn << SPRD_DMA_GLB_DEST_CHN_OFFSET) & SPRD_DMA_GLB_DEST_CHN_MASK; val |= SPRD_DMA_GLB_2STAGE_EN; + if (schan->int_type != SPRD_DMA_NO_INT) + val |= SPRD_DMA_GLB_DEST_INT; + sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP1, val, val); break; @@ -450,6 +463,9 @@ static int sprd_dma_set_2stage_config(struct sprd_dma_chn *schan) val = (chn << SPRD_DMA_GLB_DEST_CHN_OFFSET) & SPRD_DMA_GLB_DEST_CHN_MASK; val |= SPRD_DMA_GLB_2STAGE_EN; + if (schan->int_type != SPRD_DMA_NO_INT) + val |= SPRD_DMA_GLB_DEST_INT; + sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP2, val, val); break; @@ -911,11 +927,15 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, schan->linklist.virt_addr = 0; } - /* Set channel mode and trigger mode for 2-stage transfer */ + /* + * Set channel mode, interrupt mode and trigger mode for 2-stage + * transfer. + */ schan->chn_mode = (flags >> SPRD_DMA_CHN_MODE_SHIFT) & SPRD_DMA_CHN_MODE_MASK; schan->trg_mode = (flags >> SPRD_DMA_TRG_MODE_SHIFT) & SPRD_DMA_TRG_MODE_MASK; + schan->int_type = flags & SPRD_DMA_INT_TYPE_MASK; sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT); if (!sdesc) -- cgit v1.2.3
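To round the series off, a client relying on the newly enabled 2-stage transaction interrupt would typically submit the descriptor prepared in the earlier sketch and wait for its completion callback. The sketch below uses only the generic dmaengine API; the callback name, timeout and error codes are illustrative choices, not taken from any driver.

#include <linux/completion.h>
#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

/* Invented callback: runs when the 2-stage transaction interrupt completes the descriptor. */
static void example_dma_done(void *param)
{
	complete(param);
}

static int example_submit_and_wait(struct dma_chan *chan,
				   struct dma_async_tx_descriptor *desc)
{
	DECLARE_COMPLETION_ONSTACK(done);
	dma_cookie_t cookie;

	desc->callback = example_dma_done;
	desc->callback_param = &done;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);
	if (!wait_for_completion_timeout(&done, msecs_to_jiffies(1000)))
		return -ETIMEDOUT;

	return 0;
}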