From 4c329f5da7cfa366bacfda1328a025dd38951317 Mon Sep 17 00:00:00 2001
From: Vijaya Krishna Nivarthi
Date: Tue, 9 May 2023 15:31:36 +0530
Subject: spi: spi-geni-qcom: Select FIFO mode for chip select

The spi geni driver switches between FIFO and DMA modes based on xfer
length. FIFO mode relies on the M_CMD_DONE_EN interrupt for completion
while DMA mode relies on XX_DMA_DONE. During dynamic switching, if FIFO
mode is chosen, the FIFO related interrupts are enabled and the DMA
related interrupts are disabled, and vice versa.

Chip select shares the M_CMD_DONE_EN interrupt with FIFO to check
completion. Now, if a chip select operation is preceded by a DMA xfer,
M_CMD_DONE_EN will have been disabled, so the interrupt is never
received and the operation times out.

For chip select, in addition to setting the xfer mode to FIFO, call
select_mode() with FIFO so that the required interrupts are enabled.

Fixes: e5f0dfa78ac7 ("spi: spi-geni-qcom: Add support for SE DMA mode")
Suggested-by: Praveen Talari

 	mas->cs_flag = set_flag;
 	/* set xfer_mode to FIFO to complete cs_done in isr */
 	mas->cur_xfer_mode = GENI_SE_FIFO;
+	geni_se_select_mode(se, mas->cur_xfer_mode);
+
 	reinit_completion(&mas->cs_done);
 	if (set_flag)
 		geni_se_setup_m_cmd(se, SPI_CS_ASSERT, 0);
--
cgit v1.2.3

From a84c11e16dc2cc1faad2e688f8c12beeb369d80c Mon Sep 17 00:00:00 2001
From: Charles Keepax
Date: Tue, 9 May 2023 17:41:52 +0100
Subject: spi: spi-cadence: Avoid read of RX FIFO before its ready

Recent changes to cdns_spi_irq introduced some issues.

Firstly, when writing the end of a longer transaction, the code in
cdns_spi_irq will write data into the TX FIFO, then immediately fall
into the if (!xspi->tx_bytes) path and attempt to read data from the
RX FIFO. However, this requires waiting for the TX FIFO to empty
before the RX data is ready.

Secondly, the variable trans_cnt is now rather inaccurately named,
since in cases where the watermark is set to 1, trans_cnt will be 1
but the count of bytes transferred would be much larger.

Finally, when setting up the transaction we set the watermark to 50%
of the FIFO if the transaction is greater than 50% of the FIFO.
However, there is no need to split a transaction that is smaller than
the whole FIFO, so anything up to the FIFO size can be done in a
single transaction.

Tidy up the code a little, to avoid repeatedly calling
cdns_spi_read_rx_fifo with a count of 1, and correct the three issues
noted above.
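As a rough illustration of the RX accounting this change introduces,
here is a standalone userspace sketch (not driver code; the names only
mirror the variables in the diff below): on a TX-watermark interrupt,
the bytes already queued but not yet drained are rx_bytes - tx_bytes,
and when the watermark is above 1, up to 'threshold' of them may still
be sitting in the TX FIFO and are therefore not yet readable.

#include <stdio.h>

/* Model of the new trans_cnt calculation in the TXOW interrupt path. */
static int readable_rx(int rx_bytes, int tx_bytes, int threshold)
{
	int trans_cnt = rx_bytes - tx_bytes;	/* queued but not yet read */

	if (threshold > 1)
		trans_cnt -= threshold;		/* may still be in the TX FIFO */

	return trans_cnt;
}

int main(void)
{
	/* e.g. 200-byte transfer, 128-byte FIFO filled once, watermark at 64 */
	printf("safe to drain: %d bytes\n", readable_rx(200, 200 - 128, 64));
	return 0;
}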
Fixes: b1b90514eaa3 ("spi: spi-cadence: Add support for Slave mode")
Signed-off-by: Charles Keepax

-	while ((trans_cnt < xspi->tx_fifo_depth) &&
-	       (xspi->tx_bytes > 0)) {
-
+	while ((trans_cnt < avail) && (xspi->tx_bytes > 0)) {
 		/* When xspi in busy condition, bytes may send failed,
 		 * then spi control did't work thoroughly, add one byte delay
 		 */
@@ -381,33 +379,23 @@ static irqreturn_t cdns_spi_irq(int irq, void *dev_id)
 		spi_finalize_current_transfer(ctlr);
 		status = IRQ_HANDLED;
 	} else if (intr_status & CDNS_SPI_IXR_TXOW) {
-		int trans_cnt = cdns_spi_read(xspi, CDNS_SPI_THLD);
+		int threshold = cdns_spi_read(xspi, CDNS_SPI_THLD);
+		int trans_cnt = xspi->rx_bytes - xspi->tx_bytes;
+
+		if (threshold > 1)
+			trans_cnt -= threshold;
+
 		/* Set threshold to one if number of pending are
 		 * less than half fifo
 		 */
 		if (xspi->tx_bytes < xspi->tx_fifo_depth >> 1)
 			cdns_spi_write(xspi, CDNS_SPI_THLD, 1);
 
-		while (trans_cnt) {
-			cdns_spi_read_rx_fifo(xspi, 1);
-
-			if (xspi->tx_bytes) {
-				if (xspi->txbuf)
-					cdns_spi_write(xspi, CDNS_SPI_TXD,
-						       *xspi->txbuf++);
-				else
-					cdns_spi_write(xspi, CDNS_SPI_TXD, 0);
-				xspi->tx_bytes--;
-			}
-			trans_cnt--;
-		}
-		if (!xspi->tx_bytes) {
-			/* Fixed delay due to controller limitation with
-			 * RX_NEMPTY incorrect status
-			 * Xilinx AR:65885 contains more details
-			 */
-			udelay(10);
-			cdns_spi_read_rx_fifo(xspi, xspi->rx_bytes);
+		cdns_spi_read_rx_fifo(xspi, trans_cnt);
+
+		if (xspi->tx_bytes) {
+			cdns_spi_fill_tx_fifo(xspi, trans_cnt);
+		} else {
 			cdns_spi_write(xspi, CDNS_SPI_IDR, CDNS_SPI_IXR_DEFAULT);
 			spi_finalize_current_transfer(ctlr);
@@ -456,10 +444,10 @@ static int cdns_transfer_one(struct spi_controller *ctlr,
 	/* Set TX empty threshold to half of FIFO depth
 	 * only if TX bytes are more than half FIFO depth.
 	 */
-	if (xspi->tx_bytes > (xspi->tx_fifo_depth >> 1))
+	if (xspi->tx_bytes > xspi->tx_fifo_depth)
 		cdns_spi_write(xspi, CDNS_SPI_THLD, xspi->tx_fifo_depth >> 1);
 
-	cdns_spi_fill_tx_fifo(xspi);
+	cdns_spi_fill_tx_fifo(xspi, xspi->tx_fifo_depth);
 
 	spi_transfer_delay_exec(transfer);
 	cdns_spi_write(xspi, CDNS_SPI_IER, CDNS_SPI_IXR_DEFAULT);
--
cgit v1.2.3

From a0eb7be22c0f934d1fe7e1131f174ef5bc59d3f9 Mon Sep 17 00:00:00 2001
From: Charles Keepax
Date: Tue, 9 May 2023 17:41:53 +0100
Subject: spi: spi-cadence: Only overlap FIFO transactions in slave mode

Commit b1b90514eaa3 ("spi: spi-cadence: Add support for Slave mode")
updated the code to trigger the IRQ when the FIFO was half empty,
overlapping the filling of more data into the FIFO with the sending of
what is left. This appears to cause regressions on the Zynq 7000 for
transactions longer than the FIFO size; below that no overlapping
occurs.

It would appear from my testing that any attempt to put new data into
the FIFO whilst data is still transmitting causes data corruption on
both send and receive. If I am reading the commit message right on
commit 49530e641178 ("spi: cadence: Add usleep_range() for
cdns_spi_fill_tx_fifo()"), that would also seem to imply this is the
case.

On the assumption that this isn't an issue on the platform the original
slave mode support was added for, update cdns_transfer_one to only set
the watermark to 50% of the FIFO size when in slave mode. Thereby the
new behaviour is retained for slave mode, while reverting to the older
behaviour when the SPI is used as a master.
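To make the resulting policy concrete, here is a small standalone
sketch (plain userspace C, not driver code; the helper name and the -1
"leave unchanged" return value are purely illustrative) of how the TX
watermark is chosen per mode after this change:

#include <stdbool.h>
#include <stdio.h>

/*
 * Illustrative only: slave mode keeps the half-FIFO watermark so the FIFO
 * can be topped up before the external master clocks it empty; master mode
 * leaves the watermark alone and simply sends up to a full FIFO per chunk.
 */
static int pick_tx_threshold(bool is_slave, int tx_bytes, int fifo_depth)
{
	if (is_slave && tx_bytes > fifo_depth)
		return fifo_depth / 2;
	return -1;	/* -1 here means "do not touch the threshold" */
}

int main(void)
{
	printf("master: %d, slave: %d\n",
	       pick_tx_threshold(false, 300, 128),
	       pick_tx_threshold(true, 300, 128));
	return 0;
}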
Fixes: b1b90514eaa3 ("spi: spi-cadence: Add support for Slave mode")
Signed-off-by: Charles Keepax

 	xspi->tx_bytes = transfer->len;
 	xspi->rx_bytes = transfer->len;
 
-	if (!spi_controller_is_slave(ctlr))
+	if (!spi_controller_is_slave(ctlr)) {
 		cdns_spi_setup_transfer(spi, transfer);
-
-	/* Set TX empty threshold to half of FIFO depth
-	 * only if TX bytes are more than half FIFO depth.
-	 */
-	if (xspi->tx_bytes > xspi->tx_fifo_depth)
-		cdns_spi_write(xspi, CDNS_SPI_THLD, xspi->tx_fifo_depth >> 1);
+	} else {
+		/* Set TX empty threshold to half of FIFO depth
+		 * only if TX bytes are more than half FIFO depth.
+		 */
+		if (xspi->tx_bytes > xspi->tx_fifo_depth)
+			cdns_spi_write(xspi, CDNS_SPI_THLD, xspi->tx_fifo_depth >> 1);
+	}
 
 	cdns_spi_fill_tx_fifo(xspi, xspi->tx_fifo_depth);
 
 	spi_transfer_delay_exec(transfer);
--
cgit v1.2.3

From ec9452594e04804cabbc561e88e96b48ab4655e4 Mon Sep 17 00:00:00 2001
From: Krzysztof Kozlowski
Date: Sat, 13 May 2023 19:36:46 +0200
Subject: spi: MAINTAINERS: drop Krzysztof Kozlowski from Samsung SPI

Remove Krzysztof Kozlowski from the maintainers of the Samsung SoC SPI
drivers.

Signed-off-by: Krzysztof Kozlowski

 M:	Andi Shyti
 L:	linux-spi@vger.kernel.org
 L:	linux-samsung-soc@vger.kernel.org
--
cgit v1.2.3

From 445164e8c136f1445caf735d6d268c948e71caf1 Mon Sep 17 00:00:00 2001
From: Amit Kumar Mahapatra
Date: Mon, 15 May 2023 18:33:43 +0530
Subject: spi: dw: Replace spi->chip_select references with function calls

New set/get APIs for accessing spi->chip_select were introduced by
commit 9e264f3f85a5 ("spi: Replace all spi->chip_select and
spi->cs_gpiod references with function call"), but commit 2c8606040a80
("spi: dw: Add support for AMD Pensando Elba SoC") uses the old
interface by directly accessing spi->chip_select. So, replace all
spi->chip_select references in the driver with the new get/set APIs.

Signed-off-by: Amit Kumar Mahapatra

 	priv;
 	u8 cs;
 
-	cs = spi->chip_select;
+	cs = spi_get_chipselect(spi, 0);
 	if (cs < 2)
-		dw_spi_elba_override_cs(syscon, spi->chip_select, enable);
+		dw_spi_elba_override_cs(syscon, spi_get_chipselect(spi, 0), enable);
 
 	/*
 	 * The DW SPI controller needs a native CS bit selected to start
 	 * the serial engine.
 	 */
-	spi->chip_select = 0;
+	spi_set_chipselect(spi, 0, 0);
 	dw_spi_set_cs(spi, enable);
-	spi->chip_select = cs;
+	spi_get_chipselect(spi, cs);
 }
 
 static int dw_spi_elba_init(struct platform_device *pdev,
--
cgit v1.2.3

From 6afe2ae8dc48e643cb9f52e86494b96942440bc6 Mon Sep 17 00:00:00 2001
From: Charles Keepax
Date: Thu, 18 May 2023 10:39:26 +0100
Subject: spi: spi-cadence: Interleave write of TX and read of RX FIFO

When working in slave mode the timing is exceedingly tight. The TX
FIFO must never be allowed to empty, because the master is driving the
clock, so zeros would be sent for those bytes where the FIFO is empty.
Return to interleaving the writing of the TX FIFO and the reading of
the RX FIFO to try to ensure the data is available when required.
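The shape of the interleaved loop can be sketched in a few lines of
standalone C (illustrative only; the real cdns_spi_process_fifo() in
the diff below also clamps the counts and handles the FIFO-full delay):

#include <stdio.h>

/*
 * Push one TX byte and pop one RX byte per iteration, so the TX FIFO is
 * being topped up while the RX FIFO drains, instead of only draining RX
 * once every TX byte has been queued.
 */
static void process_fifo(const unsigned char *tx, unsigned char *rx,
			 int ntx, int nrx)
{
	while (ntx || nrx) {
		if (ntx) {
			printf("push 0x%02x\n", *tx++);	/* stands in for writing TXD */
			ntx--;
		}
		if (nrx) {
			*rx++ = 0xa5;			/* stands in for reading RXD */
			nrx--;
		}
	}
}

int main(void)
{
	const unsigned char tx[4] = { 1, 2, 3, 4 };
	unsigned char rx[4];

	process_fifo(tx, rx, 4, 4);
	return 0;
}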
Fixes: a84c11e16dc2 ("spi: spi-cadence: Avoid read of RX FIFO before its ready")
Signed-off-by: Charles Keepax
Link: https://lore.kernel.org/r/20230518093927.711358-1-ckeepax@opensource.cirrus.com
Signed-off-by: Mark Brown
---
 drivers/spi/spi-cadence.c | 64 ++++++++++++++++++++++-------------------------
 1 file changed, 30 insertions(+), 34 deletions(-)

diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c
index ff02d8104131..26e663369319 100644
--- a/drivers/spi/spi-cadence.c
+++ b/drivers/spi/spi-cadence.c
@@ -12,6 +12,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -301,47 +302,43 @@ static int cdns_spi_setup_transfer(struct spi_device *spi,
 }
 
 /**
- * cdns_spi_fill_tx_fifo - Fills the TX FIFO with as many bytes as possible
+ * cdns_spi_process_fifo - Fills the TX FIFO, and drain the RX FIFO
  * @xspi:	Pointer to the cdns_spi structure
+ * @ntx:	Number of bytes to pack into the TX FIFO
+ * @nrx:	Number of bytes to drain from the RX FIFO
  */
-static void cdns_spi_fill_tx_fifo(struct cdns_spi *xspi, unsigned int avail)
+static void cdns_spi_process_fifo(struct cdns_spi *xspi, int ntx, int nrx)
 {
-	unsigned long trans_cnt = 0;
+	ntx = clamp(ntx, 0, xspi->tx_bytes);
+	nrx = clamp(nrx, 0, xspi->rx_bytes);
 
-	while ((trans_cnt < avail) && (xspi->tx_bytes > 0)) {
+	xspi->tx_bytes -= ntx;
+	xspi->rx_bytes -= nrx;
+
+	while (ntx || nrx) {
 		/* When xspi in busy condition, bytes may send failed,
 		 * then spi control did't work thoroughly, add one byte delay
 		 */
-		if (cdns_spi_read(xspi, CDNS_SPI_ISR) &
-		    CDNS_SPI_IXR_TXFULL)
+		if (cdns_spi_read(xspi, CDNS_SPI_ISR) & CDNS_SPI_IXR_TXFULL)
 			udelay(10);
 
-		if (xspi->txbuf)
-			cdns_spi_write(xspi, CDNS_SPI_TXD, *xspi->txbuf++);
-		else
-			cdns_spi_write(xspi, CDNS_SPI_TXD, 0);
+		if (ntx) {
+			if (xspi->txbuf)
+				cdns_spi_write(xspi, CDNS_SPI_TXD, *xspi->txbuf++);
+			else
+				cdns_spi_write(xspi, CDNS_SPI_TXD, 0);
 
-		xspi->tx_bytes--;
-		trans_cnt++;
-	}
-}
+			ntx--;
+		}
 
-/**
- * cdns_spi_read_rx_fifo - Reads the RX FIFO with as many bytes as possible
- * @xspi:	Pointer to the cdns_spi structure
- * @count:	Read byte count
- */
-static void cdns_spi_read_rx_fifo(struct cdns_spi *xspi, unsigned long count)
-{
-	u8 data;
-
-	/* Read out the data from the RX FIFO */
-	while (count > 0) {
-		data = cdns_spi_read(xspi, CDNS_SPI_RXD);
-		if (xspi->rxbuf)
-			*xspi->rxbuf++ = data;
-		xspi->rx_bytes--;
-		count--;
+		if (nrx) {
+			u8 data = cdns_spi_read(xspi, CDNS_SPI_RXD);
+
+			if (xspi->rxbuf)
+				*xspi->rxbuf++ = data;
+
+			nrx--;
+		}
 	}
 }
 
@@ -391,11 +388,10 @@ static irqreturn_t cdns_spi_irq(int irq, void *dev_id)
 		if (xspi->tx_bytes < xspi->tx_fifo_depth >> 1)
 			cdns_spi_write(xspi, CDNS_SPI_THLD, 1);
 
-		cdns_spi_read_rx_fifo(xspi, trans_cnt);
-
 		if (xspi->tx_bytes) {
-			cdns_spi_fill_tx_fifo(xspi, trans_cnt);
+			cdns_spi_process_fifo(xspi, trans_cnt, trans_cnt);
 		} else {
+			cdns_spi_process_fifo(xspi, 0, trans_cnt);
 			cdns_spi_write(xspi, CDNS_SPI_IDR, CDNS_SPI_IXR_DEFAULT);
 			spi_finalize_current_transfer(ctlr);
@@ -448,7 +444,7 @@ static int cdns_transfer_one(struct spi_controller *ctlr,
 			cdns_spi_write(xspi, CDNS_SPI_THLD, xspi->tx_fifo_depth >> 1);
 	}
 
-	cdns_spi_fill_tx_fifo(xspi, xspi->tx_fifo_depth);
+	cdns_spi_process_fifo(xspi, xspi->tx_fifo_depth, 0);
 
 	spi_transfer_delay_exec(transfer);
 	cdns_spi_write(xspi, CDNS_SPI_IER, CDNS_SPI_IXR_DEFAULT);
--
cgit v1.2.3

From 9728fb3ce11729aa8c276825ddf504edeb00611d Mon Sep 17 00:00:00 2001
From: Clark Wang
Date: Fri, 5 May 2023 14:35:57 +0800
Subject: spi: lpspi: disable lpspi module irq in DMA mode

When all bits of
IER are set to 0, LPSPI interrupt events can still be observed when
using DMA mode to transfer data. So disable the module interrupt to
avoid these excess interrupt events.

Signed-off-by: Clark Wang
Link: https://lore.kernel.org/r/20230505063557.3962220-1-xiaoning.wang@nxp.com
Signed-off-by: Mark Brown
---
 drivers/spi/spi-fsl-lpspi.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
index f2341ab99556..4b70038ceb6b 100644
--- a/drivers/spi/spi-fsl-lpspi.c
+++ b/drivers/spi/spi-fsl-lpspi.c
@@ -910,9 +910,14 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
 	ret = fsl_lpspi_dma_init(&pdev->dev, fsl_lpspi, controller);
 	if (ret == -EPROBE_DEFER)
 		goto out_pm_get;
-
 	if (ret < 0)
 		dev_err(&pdev->dev, "dma setup error %d, use pio\n", ret);
+	else
+		/*
+		 * disable LPSPI module IRQ when enable DMA mode successfully,
+		 * to prevent the unexpected LPSPI module IRQ events.
+		 */
+		disable_irq(irq);
 
 	ret = devm_spi_register_controller(&pdev->dev, controller);
 	if (ret < 0) {
--
cgit v1.2.3

From 4be47a5d59cbc9396a6ffd327913eb4c8d67a32f Mon Sep 17 00:00:00 2001
From: Daniel Golle
Date: Mon, 1 May 2023 19:33:14 +0100
Subject: spi: mt65xx: make sure operations completed before unloading

When unloading the spi-mt65xx kernel module during an ongoing spi-mem
operation the kernel will Oops shortly after unloading the module.
This is because wait_for_completion_timeout was still running and
returning into the no longer loaded module:

Internal error: Oops: 0000000096000005 [#1] SMP
Modules linked in: [many, but spi-mt65xx is no longer there]
CPU: 0 PID: 2578 Comm: block Tainted: G        W  O 6.3.0-next-20230428+ #0
Hardware name: Bananapi BPI-R3 (DT)
pstate: 804000c5 (Nzcv daIF +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
pc : __lock_acquire+0x18c/0x20e8
lr : __lock_acquire+0x9b8/0x20e8
sp : ffffffc009ec3400
x29: ffffffc009ec3400 x28: 0000000000000001 x27: 0000000000000004
x26: ffffff80082888c8 x25: 0000000000000000 x24: 0000000000000000
x23: ffffffc009609da8 x22: ffffff8008288000 x21: ffffff8008288968
x20: 00000000000003c2 x19: ffffff8008be7990 x18: 00000000000002af
x17: 0000000000000000 x16: 0000000000000000 x15: ffffffc008d78970
x14: 000000000000080d x13: 00000000000002af x12: 00000000ffffffea
x11: 00000000ffffefff x10: ffffffc008dd0970 x9 : ffffffc008d78918
x8 : 0000000000017fe8 x7 : 0000000000000001 x6 : 0000000000000000
x5 : ffffff807fb53910 x4 : 0000000000000000 x3 : 0000000000000027
x2 : 0000000000000027 x1 : 0000000000000000 x0 : 00000000000c03c2
Call trace:
 __lock_acquire+0x18c/0x20e8
 lock_acquire+0x100/0x2a4
 _raw_spin_lock_irq+0x58/0x74
 __wait_for_common+0xe0/0x1b4
 wait_for_completion_timeout+0x1c/0x24
 0xffffffc000acc8a4 <--- used to be mtk_spi_transfer_wait
 spi_mem_exec_op+0x390/0x3ec
 spi_mem_no_dirmap_read+0x6c/0x88
 spi_mem_dirmap_read+0xcc/0x12c
 spinand_read_page+0xf8/0x1dc
 spinand_mtd_read+0x1b4/0x2fc
 mtd_read_oob_std+0x58/0x7c
 mtd_read_oob+0x8c/0x148
 mtd_read+0x50/0x6c
 ...

Prevent this by completing the pending spi-mem completion in
mtk_spi_remove() if needed.
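The pattern applied here can be sketched as a standalone userspace
program (pthreads stand in for the kernel completion API; this is only
a model of the idea, not the driver change itself): teardown signals
any still-pending waiter so the wait returns before the code it would
return into is gone.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool op_done;

/* Stand-in for the transfer path blocked in wait_for_completion_timeout(). */
static void *transfer_wait(void *arg)
{
	pthread_mutex_lock(&lock);
	while (!op_done)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
	printf("transfer wait finished cleanly\n");
	return NULL;
}

int main(void)
{
	pthread_t waiter;

	pthread_create(&waiter, NULL, transfer_wait, NULL);

	/* Stand-in for remove(): if the operation never completed, complete
	 * it now so the waiter returns before its code is torn down. */
	pthread_mutex_lock(&lock);
	if (!op_done) {
		op_done = true;
		pthread_cond_signal(&cond);
	}
	pthread_mutex_unlock(&lock);

	pthread_join(waiter, NULL);
	return 0;
}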
Fixes: 9f763fd20da7 ("spi: mediatek: add spi memory support for ipm design")
Signed-off-by: Daniel Golle
Link: https://lore.kernel.org/r/ZFAF6pJxMu1z6k4w@makrotopia.org
Signed-off-by: Mark Brown
---
 drivers/spi/spi-mt65xx.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index 21c321f43766..d7432e2219d8 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -1275,6 +1275,9 @@ static int mtk_spi_remove(struct platform_device *pdev)
 	struct mtk_spi *mdata = spi_master_get_devdata(master);
 	int ret;
 
+	if (mdata->use_spimem && !completion_done(&mdata->spimem_done))
+		complete(&mdata->spimem_done);
+
 	ret = pm_runtime_resume_and_get(&pdev->dev);
 	if (ret < 0)
 		return ret;
--
cgit v1.2.3

From 0c331fd1dccfba657129380ee084b95c1cedfbef Mon Sep 17 00:00:00 2001
From: Stephan Gerhold
Date: Thu, 18 May 2023 15:04:25 +0200
Subject: spi: qup: Request DMA before enabling clocks

It is usually better to request all necessary resources (clocks,
regulators, ...) before starting to make use of them. That way they do
not change state in case one of the resources is not available yet and
probe deferral (-EPROBE_DEFER) is necessary. This is particularly
important for DMA channels and IOMMUs, which are not enforced by
fw_devlink yet (unless you use fw_devlink.strict=1).

spi-qup does this in the wrong order: the clocks are enabled and
disabled again when the DMA channels are not available yet.

This causes issues in some cases: on most SoCs one of the SPI QUP
clocks is shared with the UART controller. When using earlycon, UART
is actively used during boot but might not have probed yet, usually
for the same reason (waiting for the DMA controller). In this case,
the brief enable/disable cycle ends up gating the clock and further
UART console output will halt the system completely.

Avoid this by requesting the DMA channels before changing the clock
state.
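The ordering rule this patch follows can be shown with a tiny
standalone model (illustrative C only; the helper names are made up
for the sketch, and only the value 517 for EPROBE_DEFER mirrors the
kernel): acquire everything that may defer first, and only then touch
hardware state such as clocks.

#include <stdio.h>

#define EPROBE_DEFER 517	/* matches the kernel's -EPROBE_DEFER magnitude */

static int clk_enable_count;

static int request_dma_channels(int available)
{
	return available ? 0 : -EPROBE_DEFER;	/* may ask probe to be retried */
}

static int probe(int dma_ready)
{
	/* Request deferrable resources first: no side effects on failure. */
	int ret = request_dma_channels(dma_ready);

	if (ret)
		return ret;

	/* Only now touch hardware state (clocks, registers, ...). */
	clk_enable_count++;
	return 0;
}

int main(void)
{
	probe(0);
	printf("clock toggles after deferral: %d\n", clk_enable_count);
	probe(1);
	printf("clock toggles after success:  %d\n", clk_enable_count);
	return 0;
}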
Fixes: 612762e82ae6 ("spi: qup: Add DMA capabilities")
Signed-off-by: Stephan Gerhold
Link: https://lore.kernel.org/r/20230518-spi-qup-clk-defer-v1-1-f49fc9ca4e02@gerhold.net
Signed-off-by: Mark Brown
---
 drivers/spi/spi-qup.c | 37 ++++++++++++++++++-------------------
 1 file changed, 18 insertions(+), 19 deletions(-)

diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
index 944ef6b42bce..00e5e88e72c4 100644
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -1028,23 +1028,8 @@ static int spi_qup_probe(struct platform_device *pdev)
 		return -ENXIO;
 	}
 
-	ret = clk_prepare_enable(cclk);
-	if (ret) {
-		dev_err(dev, "cannot enable core clock\n");
-		return ret;
-	}
-
-	ret = clk_prepare_enable(iclk);
-	if (ret) {
-		clk_disable_unprepare(cclk);
-		dev_err(dev, "cannot enable iface clock\n");
-		return ret;
-	}
-
 	master = spi_alloc_master(dev, sizeof(struct spi_qup));
 	if (!master) {
-		clk_disable_unprepare(cclk);
-		clk_disable_unprepare(iclk);
 		dev_err(dev, "cannot allocate master\n");
 		return -ENOMEM;
 	}
@@ -1092,6 +1077,19 @@ static int spi_qup_probe(struct platform_device *pdev)
 	spin_lock_init(&controller->lock);
 	init_completion(&controller->done);
 
+	ret = clk_prepare_enable(cclk);
+	if (ret) {
+		dev_err(dev, "cannot enable core clock\n");
+		goto error_dma;
+	}
+
+	ret = clk_prepare_enable(iclk);
+	if (ret) {
+		clk_disable_unprepare(cclk);
+		dev_err(dev, "cannot enable iface clock\n");
+		goto error_dma;
+	}
+
 	iomode = readl_relaxed(base + QUP_IO_M_MODES);
 
 	size = QUP_IO_M_OUTPUT_BLOCK_SIZE(iomode);
@@ -1121,7 +1119,7 @@ static int spi_qup_probe(struct platform_device *pdev)
 	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
 	if (ret) {
 		dev_err(dev, "cannot set RESET state\n");
-		goto error_dma;
+		goto error_clk;
 	}
 
 	writel_relaxed(0, base + QUP_OPERATIONAL);
@@ -1145,7 +1143,7 @@ static int spi_qup_probe(struct platform_device *pdev)
 	ret = devm_request_irq(dev, irq, spi_qup_qup_irq, IRQF_TRIGGER_HIGH,
 			       pdev->name, controller);
 	if (ret)
-		goto error_dma;
+		goto error_clk;
 
 	pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
 	pm_runtime_use_autosuspend(dev);
@@ -1160,11 +1158,12 @@ static int spi_qup_probe(struct platform_device *pdev)
 
 disable_pm:
 	pm_runtime_disable(&pdev->dev);
+error_clk:
+	clk_disable_unprepare(cclk);
+	clk_disable_unprepare(iclk);
 error_dma:
 	spi_qup_release_dma(master);
 error:
-	clk_disable_unprepare(cclk);
-	clk_disable_unprepare(iclk);
 	spi_master_put(master);
 	return ret;
 }
--
cgit v1.2.3