author     Linus Torvalds <torvalds@linux-foundation.org>  2022-12-19 08:54:17 -0600
committer  Linus Torvalds <torvalds@linux-foundation.org>  2022-12-19 08:54:17 -0600
commit     9322af3e6aeae04c7eda3e6a0c977e97a13cf6ed (patch)
tree       35dc3e22f52ace8749b12846065f84b2fac63d13 /drivers
parent     1b6a349a40b9dc5fc510c856080e468e3782e5a9 (diff)
parent     25483dedd2f5d9bc6928cd790ee59772fb880a79 (diff)
Merge tag 'dmaengine-6.2-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine
Pull dmaengine updates from Vinod Koul:
"New support:
- Qualcomm SDM670, SM6115 and SM6375 GPI controller support
- Ingenic JZ4755 dmaengine support
- Removal of iop-adma driver
Updates:
- Tegra support for dma-channel-mask
- at_hdmac cleanup and virt-chan support for this driver"
* tag 'dmaengine-6.2-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine: (46 commits)
dmaengine: Revert "dmaengine: remove s3c24xx driver"
dmaengine: tegra: Add support for dma-channel-mask
dt-bindings: dmaengine: Add dma-channel-mask to Tegra GPCDMA
dmaengine: idxd: Remove linux/msi.h include
dt-bindings: dmaengine: qcom: gpi: add compatible for SM6375
dmaengine: idxd: Fix crc_val field for completion record
dmaengine: at_hdmac: Convert driver to use virt-dma
dmaengine: at_hdmac: Remove unused member of at_dma_chan
dmaengine: at_hdmac: Rename "chan_common" to "dma_chan"
dmaengine: at_hdmac: Rename "dma_common" to "dma_device"
dmaengine: at_hdmac: Use bitfield access macros
dmaengine: at_hdmac: Keep register definitions and structures private to at_hdmac.c
dmaengine: at_hdmac: Set include entries in alphabetic order
dmaengine: at_hdmac: Use pm_ptr()
dmaengine: at_hdmac: Use devm_clk_get()
dmaengine: at_hdmac: Use devm_platform_ioremap_resource
dmaengine: at_hdmac: Use devm_kzalloc() and struct_size()
dmaengine: at_hdmac: Introduce atc_get_llis_residue()
dmaengine: at_hdmac: s/atc_get_bytes_left/atc_get_residue
dmaengine: at_hdmac: Pass residue by address to avoid unnecessary implicit casts
...
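The headline item in this pull is the at_hdmac conversion to virt-dma. As a rough orientation for reading that part of the diff below — a hedged sketch, not the driver's actual code; the foo_* names are placeholders — a driver adopting virt-dma embeds struct virt_dma_desc in its descriptor and struct virt_dma_chan in its channel, and lets the dmaengine virt-dma core handle cookie assignment and descriptor list management:

```c
#include <linux/dmaengine.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "virt-dma.h"			/* drivers/dma/virt-dma.h */

/* Placeholder driver types embedding the virt-dma objects. */
struct foo_desc {
	struct virt_dma_desc vd;	/* core-managed part */
	size_t total_len;		/* driver-specific part */
};

struct foo_chan {
	struct virt_dma_chan vc;
	struct foo_desc *active;	/* descriptor currently on the hardware */
};

static void foo_desc_free(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct foo_desc, vd));
}

/* prep_*() path: allocate a descriptor and hand it to the core. */
static struct dma_async_tx_descriptor *foo_prep(struct foo_chan *fc,
						unsigned long flags)
{
	struct foo_desc *d = kzalloc(sizeof(*d), GFP_NOWAIT);

	if (!d)
		return NULL;
	return vchan_tx_prep(&fc->vc, &d->vd, flags);
}

/* Start the next issued descriptor; called with fc->vc.lock held. */
static void foo_start_next(struct foo_chan *fc)
{
	struct virt_dma_desc *vd = vchan_next_desc(&fc->vc);

	if (!vd) {
		fc->active = NULL;
		return;
	}
	list_del(&vd->node);
	fc->active = container_of(vd, struct foo_desc, vd);
	/* ... program the hardware from fc->active ... */
}

/* Interrupt path: report completion to the core, then move on. */
static void foo_chan_done(struct foo_chan *fc)
{
	spin_lock(&fc->vc.lock);
	if (fc->active) {
		vchan_cookie_complete(&fc->active->vd);
		foo_start_next(fc);
	}
	spin_unlock(&fc->vc.lock);
}

/* probe() path: register the channel with the virt-dma core. */
static void foo_init_chan(struct foo_chan *fc, struct dma_device *dd)
{
	fc->vc.desc_free = foo_desc_free;
	vchan_init(&fc->vc, dd);
}
```

The at_hdmac hunks below follow this shape: struct at_desc embeds virt_dma_desc, struct at_dma_chan embeds virt_dma_chan, and atc_dostart()/atdma_handle_chan_done() play the roles of foo_start_next()/foo_chan_done().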
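The Tegra GPCDMA item adds parsing of the generic dma-channel-mask device-tree property, which tells the kernel which channels are actually available to it. A minimal, hedged sketch of how a driver commonly consumes that property (generic pattern, not necessarily the exact Tegra implementation; nchannels and the registration step are placeholders):

```c
#include <linux/bits.h>
#include <linux/of.h>

static void foo_parse_channel_mask(struct device_node *np,
				   unsigned int nchannels)
{
	u32 chan_mask;
	unsigned int i;

	/* Default: all channels are usable if the property is absent. */
	if (of_property_read_u32(np, "dma-channel-mask", &chan_mask))
		chan_mask = GENMASK(nchannels - 1, 0);

	for (i = 0; i < nchannels; i++) {
		if (!(chan_mask & BIT(i)))
			continue;	/* reserved for firmware or another agent */
		/* ... register channel i with the dmaengine core ... */
	}
}
```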
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/dma/Kconfig | 9
-rw-r--r-- | drivers/dma/Makefile | 1
-rw-r--r-- | drivers/dma/apple-admac.c | 102
-rw-r--r-- | drivers/dma/at_hdmac.c | 1854
-rw-r--r-- | drivers/dma/at_hdmac_regs.h | 478
-rw-r--r-- | drivers/dma/dma-jz4780.c | 8
-rw-r--r-- | drivers/dma/idma64.c | 8
-rw-r--r-- | drivers/dma/idxd/device.c | 1
-rw-r--r-- | drivers/dma/idxd/sysfs.c | 68
-rw-r--r-- | drivers/dma/ioat/dma.c | 2
-rw-r--r-- | drivers/dma/iop-adma.c | 1554
-rw-r--r-- | drivers/dma/iop-adma.h | 914
-rw-r--r-- | drivers/dma/qcom/gpi.c | 7
-rw-r--r-- | drivers/dma/sh/shdma-arm.h | 48
-rw-r--r-- | drivers/dma/tegra186-gpc-dma.c | 37
-rw-r--r-- | drivers/dma/ti/Kconfig | 7
-rw-r--r-- | drivers/dma/ti/Makefile | 15
-rw-r--r-- | drivers/dma/ti/k3-psil.c | 2
-rw-r--r-- | drivers/dma/ti/k3-udma-glue.c | 5
-rw-r--r-- | drivers/dma/ti/k3-udma.c | 40
-rw-r--r-- | drivers/dma/xilinx/xilinx_dma.c | 4
-rw-r--r-- | drivers/of/irq.c | 1
22 files changed, 1229 insertions, 3936 deletions
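A recurring pattern in the at_hdmac and apple-admac hunks that follow is replacing open-coded shift/mask helpers with GENMASK()/FIELD_PREP()/FIELD_GET() from <linux/bitfield.h> (see e.g. ATC_SRC_WIDTH or CHAN_SRAM_CARVEOUT_BASE below). A minimal sketch of the pattern with an illustrative register layout (EXAMPLE_* is not a real at_hdmac register):

```c
#include <linux/bitfield.h>
#include <linux/bits.h>

/* Illustrative 32-bit register with two packed fields. */
#define EXAMPLE_BASE	GENMASK(15, 0)	/* bits 15..0  */
#define EXAMPLE_SIZE	GENMASK(31, 16)	/* bits 31..16 */

static u32 example_pack(u32 base, u32 size)
{
	/* FIELD_PREP() shifts each value into its field position. */
	return FIELD_PREP(EXAMPLE_BASE, base) |
	       FIELD_PREP(EXAMPLE_SIZE, size);
}

static u32 example_unpack_base(u32 reg)
{
	/* FIELD_GET() masks the field and shifts it back down to bit 0. */
	return FIELD_GET(EXAMPLE_BASE, reg);
}
```

The advantage over `(x) << 24`-style macros is that the field mask is written once and the shift is derived from it, which is why the new register definitions in this diff are plain GENMASK() constants.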
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 25e111ab21f8..b6d48d54f42f 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -97,6 +97,7 @@ config AT_HDMAC tristate "Atmel AHB DMA support" depends on ARCH_AT91 select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS help Support the Atmel AHB DMA controller. @@ -357,14 +358,6 @@ config INTEL_IOATDMA If unsure, say N. -config INTEL_IOP_ADMA - tristate "Intel IOP32x ADMA support" - depends on ARCH_IOP32X || COMPILE_TEST - select DMA_ENGINE - select ASYNC_TX_ENABLE_CHANNEL_SWITCH - help - Enable support for the Intel(R) IOP Series RAID engines. - config K3_DMA tristate "Hisilicon K3 DMA support" depends on ARCH_HI3xxx || ARCH_HISI || COMPILE_TEST diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index 10f7d4241001..5b55ada052a7 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -44,7 +44,6 @@ obj-$(CONFIG_IMX_SDMA) += imx-sdma.o obj-$(CONFIG_INTEL_IDMA64) += idma64.o obj-$(CONFIG_INTEL_IOATDMA) += ioat/ obj-y += idxd/ -obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o obj-$(CONFIG_K3_DMA) += k3dma.o obj-$(CONFIG_LPC18XX_DMAMUX) += lpc18xx-dmamux.o obj-$(CONFIG_MILBEAUT_HDMAC) += milbeaut-hdmac.o diff --git a/drivers/dma/apple-admac.c b/drivers/dma/apple-admac.c index a2cc520225d3..90f28bda29c8 100644 --- a/drivers/dma/apple-admac.c +++ b/drivers/dma/apple-admac.c @@ -21,6 +21,12 @@ #define NCHANNELS_MAX 64 #define IRQ_NOUTPUTS 4 +/* + * For allocation purposes we split the cache + * memory into blocks of fixed size (given in bytes). + */ +#define SRAM_BLOCK 2048 + #define RING_WRITE_SLOT GENMASK(1, 0) #define RING_READ_SLOT GENMASK(5, 4) #define RING_FULL BIT(9) @@ -36,6 +42,9 @@ #define REG_TX_STOP 0x0004 #define REG_RX_START 0x0008 #define REG_RX_STOP 0x000c +#define REG_IMPRINT 0x0090 +#define REG_TX_SRAM_SIZE 0x0094 +#define REG_RX_SRAM_SIZE 0x0098 #define REG_CHAN_CTL(ch) (0x8000 + (ch) * 0x200) #define REG_CHAN_CTL_RST_RINGS BIT(0) @@ -53,7 +62,9 @@ #define BUS_WIDTH_FRAME_2_WORDS 0x10 #define BUS_WIDTH_FRAME_4_WORDS 0x20 -#define CHAN_BUFSIZE 0x8000 +#define REG_CHAN_SRAM_CARVEOUT(ch) (0x8050 + (ch) * 0x200) +#define CHAN_SRAM_CARVEOUT_SIZE GENMASK(31, 16) +#define CHAN_SRAM_CARVEOUT_BASE GENMASK(15, 0) #define REG_CHAN_FIFOCTL(ch) (0x8054 + (ch) * 0x200) #define CHAN_FIFOCTL_LIMIT GENMASK(31, 16) @@ -76,6 +87,8 @@ struct admac_chan { struct dma_chan chan; struct tasklet_struct tasklet; + u32 carveout; + spinlock_t lock; struct admac_tx *current_tx; int nperiod_acks; @@ -92,12 +105,24 @@ struct admac_chan { struct list_head to_free; }; +struct admac_sram { + u32 size; + /* + * SRAM_CARVEOUT has 16-bit fields, so the SRAM cannot be larger than + * 64K and a 32-bit bitfield over 2K blocks covers it. 
+ */ + u32 allocated; +}; + struct admac_data { struct dma_device dma; struct device *dev; __iomem void *base; struct reset_control *rstc; + struct mutex cache_alloc_lock; + struct admac_sram txcache, rxcache; + int irq; int irq_index; int nchannels; @@ -118,6 +143,60 @@ struct admac_tx { struct list_head node; }; +static int admac_alloc_sram_carveout(struct admac_data *ad, + enum dma_transfer_direction dir, + u32 *out) +{ + struct admac_sram *sram; + int i, ret = 0, nblocks; + + if (dir == DMA_MEM_TO_DEV) + sram = &ad->txcache; + else + sram = &ad->rxcache; + + mutex_lock(&ad->cache_alloc_lock); + + nblocks = sram->size / SRAM_BLOCK; + for (i = 0; i < nblocks; i++) + if (!(sram->allocated & BIT(i))) + break; + + if (i < nblocks) { + *out = FIELD_PREP(CHAN_SRAM_CARVEOUT_BASE, i * SRAM_BLOCK) | + FIELD_PREP(CHAN_SRAM_CARVEOUT_SIZE, SRAM_BLOCK); + sram->allocated |= BIT(i); + } else { + ret = -EBUSY; + } + + mutex_unlock(&ad->cache_alloc_lock); + + return ret; +} + +static void admac_free_sram_carveout(struct admac_data *ad, + enum dma_transfer_direction dir, + u32 carveout) +{ + struct admac_sram *sram; + u32 base = FIELD_GET(CHAN_SRAM_CARVEOUT_BASE, carveout); + int i; + + if (dir == DMA_MEM_TO_DEV) + sram = &ad->txcache; + else + sram = &ad->rxcache; + + if (WARN_ON(base >= sram->size)) + return; + + mutex_lock(&ad->cache_alloc_lock); + i = base / SRAM_BLOCK; + sram->allocated &= ~BIT(i); + mutex_unlock(&ad->cache_alloc_lock); +} + static void admac_modify(struct admac_data *ad, int reg, u32 mask, u32 val) { void __iomem *addr = ad->base + reg; @@ -466,15 +545,28 @@ static void admac_synchronize(struct dma_chan *chan) static int admac_alloc_chan_resources(struct dma_chan *chan) { struct admac_chan *adchan = to_admac_chan(chan); + struct admac_data *ad = adchan->host; + int ret; dma_cookie_init(&adchan->chan); + ret = admac_alloc_sram_carveout(ad, admac_chan_direction(adchan->no), + &adchan->carveout); + if (ret < 0) + return ret; + + writel_relaxed(adchan->carveout, + ad->base + REG_CHAN_SRAM_CARVEOUT(adchan->no)); return 0; } static void admac_free_chan_resources(struct dma_chan *chan) { + struct admac_chan *adchan = to_admac_chan(chan); + admac_terminate_all(chan); admac_synchronize(chan); + admac_free_sram_carveout(adchan->host, admac_chan_direction(adchan->no), + adchan->carveout); } static struct dma_chan *admac_dma_of_xlate(struct of_phandle_args *dma_spec, @@ -712,6 +804,7 @@ static int admac_probe(struct platform_device *pdev) platform_set_drvdata(pdev, ad); ad->dev = &pdev->dev; ad->nchannels = nchannels; + mutex_init(&ad->cache_alloc_lock); /* * The controller has 4 IRQ outputs. Try them all until @@ -801,6 +894,13 @@ static int admac_probe(struct platform_device *pdev) goto free_irq; } + ad->txcache.size = readl_relaxed(ad->base + REG_TX_SRAM_SIZE); + ad->rxcache.size = readl_relaxed(ad->base + REG_RX_SRAM_SIZE); + + dev_info(&pdev->dev, "Audio DMA Controller\n"); + dev_info(&pdev->dev, "imprint %x TX cache %u RX cache %u\n", + readl_relaxed(ad->base + REG_IMPRINT), ad->txcache.size, ad->rxcache.size); + return 0; free_irq: diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 858bd64f1313..8858470246e1 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -3,6 +3,7 @@ * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems) * * Copyright (C) 2008 Atmel Corporation + * Copyright (C) 2022 Microchip Technology, Inc. and its subsidiaries * * This supports the Atmel AHB DMA Controller found in several Atmel SoCs. 
* The only Atmel DMA Controller that is not covered by this driver is the one @@ -10,20 +11,22 @@ */ #include <dt-bindings/dma/at91.h> +#include <linux/bitfield.h> #include <linux/clk.h> #include <linux/dmaengine.h> -#include <linux/dma-mapping.h> #include <linux/dmapool.h> +#include <linux/dma-mapping.h> #include <linux/interrupt.h> #include <linux/module.h> -#include <linux/platform_device.h> -#include <linux/slab.h> #include <linux/of.h> +#include <linux/overflow.h> #include <linux/of_device.h> #include <linux/of_dma.h> +#include <linux/platform_device.h> +#include <linux/slab.h> -#include "at_hdmac_regs.h" #include "dmaengine.h" +#include "virt-dma.h" /* * Glossary @@ -34,9 +37,449 @@ * atc_ / atchan : ATmel DMA Channel entity related */ -#define ATC_DEFAULT_CFG (ATC_FIFOCFG_HALFFIFO) -#define ATC_DEFAULT_CTRLB (ATC_SIF(AT_DMA_MEM_IF) \ - |ATC_DIF(AT_DMA_MEM_IF)) +#define AT_DMA_MAX_NR_CHANNELS 8 + +/* Global Configuration Register */ +#define AT_DMA_GCFG 0x00 +#define AT_DMA_IF_BIGEND(i) BIT((i)) /* AHB-Lite Interface i in Big-endian mode */ +#define AT_DMA_ARB_CFG BIT(4) /* Arbiter mode. */ + +/* Controller Enable Register */ +#define AT_DMA_EN 0x04 +#define AT_DMA_ENABLE BIT(0) + +/* Software Single Request Register */ +#define AT_DMA_SREQ 0x08 +#define AT_DMA_SSREQ(x) BIT((x) << 1) /* Request a source single transfer on channel x */ +#define AT_DMA_DSREQ(x) BIT(1 + ((x) << 1)) /* Request a destination single transfer on channel x */ + +/* Software Chunk Transfer Request Register */ +#define AT_DMA_CREQ 0x0c +#define AT_DMA_SCREQ(x) BIT((x) << 1) /* Request a source chunk transfer on channel x */ +#define AT_DMA_DCREQ(x) BIT(1 + ((x) << 1)) /* Request a destination chunk transfer on channel x */ + +/* Software Last Transfer Flag Register */ +#define AT_DMA_LAST 0x10 +#define AT_DMA_SLAST(x) BIT((x) << 1) /* This src rq is last tx of buffer on channel x */ +#define AT_DMA_DLAST(x) BIT(1 + ((x) << 1)) /* This dst rq is last tx of buffer on channel x */ + +/* Request Synchronization Register */ +#define AT_DMA_SYNC 0x14 +#define AT_DMA_SYR(h) BIT((h)) /* Synchronize handshake line h */ + +/* Error, Chained Buffer transfer completed and Buffer transfer completed Interrupt registers */ +#define AT_DMA_EBCIER 0x18 /* Enable register */ +#define AT_DMA_EBCIDR 0x1c /* Disable register */ +#define AT_DMA_EBCIMR 0x20 /* Mask Register */ +#define AT_DMA_EBCISR 0x24 /* Status Register */ +#define AT_DMA_CBTC_OFFSET 8 +#define AT_DMA_ERR_OFFSET 16 +#define AT_DMA_BTC(x) BIT((x)) +#define AT_DMA_CBTC(x) BIT(AT_DMA_CBTC_OFFSET + (x)) +#define AT_DMA_ERR(x) BIT(AT_DMA_ERR_OFFSET + (x)) + +/* Channel Handler Enable Register */ +#define AT_DMA_CHER 0x28 +#define AT_DMA_ENA(x) BIT((x)) +#define AT_DMA_SUSP(x) BIT(8 + (x)) +#define AT_DMA_KEEP(x) BIT(24 + (x)) + +/* Channel Handler Disable Register */ +#define AT_DMA_CHDR 0x2c +#define AT_DMA_DIS(x) BIT(x) +#define AT_DMA_RES(x) BIT(8 + (x)) + +/* Channel Handler Status Register */ +#define AT_DMA_CHSR 0x30 +#define AT_DMA_EMPT(x) BIT(16 + (x)) +#define AT_DMA_STAL(x) BIT(24 + (x)) + +/* Channel registers base address */ +#define AT_DMA_CH_REGS_BASE 0x3c +#define ch_regs(x) (AT_DMA_CH_REGS_BASE + (x) * 0x28) /* Channel x base addr */ + +/* Hardware register offset for each channel */ +#define ATC_SADDR_OFFSET 0x00 /* Source Address Register */ +#define ATC_DADDR_OFFSET 0x04 /* Destination Address Register */ +#define ATC_DSCR_OFFSET 0x08 /* Descriptor Address Register */ +#define ATC_CTRLA_OFFSET 0x0c /* Control A Register */ +#define ATC_CTRLB_OFFSET 
0x10 /* Control B Register */ +#define ATC_CFG_OFFSET 0x14 /* Configuration Register */ +#define ATC_SPIP_OFFSET 0x18 /* Src PIP Configuration Register */ +#define ATC_DPIP_OFFSET 0x1c /* Dst PIP Configuration Register */ + + +/* Bitfield definitions */ + +/* Bitfields in DSCR */ +#define ATC_DSCR_IF GENMASK(1, 0) /* Dsc feched via AHB-Lite Interface */ + +/* Bitfields in CTRLA */ +#define ATC_BTSIZE_MAX GENMASK(15, 0) /* Maximum Buffer Transfer Size */ +#define ATC_BTSIZE GENMASK(15, 0) /* Buffer Transfer Size */ +#define ATC_SCSIZE GENMASK(18, 16) /* Source Chunk Transfer Size */ +#define ATC_DCSIZE GENMASK(22, 20) /* Destination Chunk Transfer Size */ +#define ATC_SRC_WIDTH GENMASK(25, 24) /* Source Single Transfer Size */ +#define ATC_DST_WIDTH GENMASK(29, 28) /* Destination Single Transfer Size */ +#define ATC_DONE BIT(31) /* Tx Done (only written back in descriptor) */ + +/* Bitfields in CTRLB */ +#define ATC_SIF GENMASK(1, 0) /* Src tx done via AHB-Lite Interface i */ +#define ATC_DIF GENMASK(5, 4) /* Dst tx done via AHB-Lite Interface i */ +#define AT_DMA_MEM_IF 0x0 /* interface 0 as memory interface */ +#define AT_DMA_PER_IF 0x1 /* interface 1 as peripheral interface */ +#define ATC_SRC_PIP BIT(8) /* Source Picture-in-Picture enabled */ +#define ATC_DST_PIP BIT(12) /* Destination Picture-in-Picture enabled */ +#define ATC_SRC_DSCR_DIS BIT(16) /* Src Descriptor fetch disable */ +#define ATC_DST_DSCR_DIS BIT(20) /* Dst Descriptor fetch disable */ +#define ATC_FC GENMASK(22, 21) /* Choose Flow Controller */ +#define ATC_FC_MEM2MEM 0x0 /* Mem-to-Mem (DMA) */ +#define ATC_FC_MEM2PER 0x1 /* Mem-to-Periph (DMA) */ +#define ATC_FC_PER2MEM 0x2 /* Periph-to-Mem (DMA) */ +#define ATC_FC_PER2PER 0x3 /* Periph-to-Periph (DMA) */ +#define ATC_FC_PER2MEM_PER 0x4 /* Periph-to-Mem (Peripheral) */ +#define ATC_FC_MEM2PER_PER 0x5 /* Mem-to-Periph (Peripheral) */ +#define ATC_FC_PER2PER_SRCPER 0x6 /* Periph-to-Periph (Src Peripheral) */ +#define ATC_FC_PER2PER_DSTPER 0x7 /* Periph-to-Periph (Dst Peripheral) */ +#define ATC_SRC_ADDR_MODE GENMASK(25, 24) +#define ATC_SRC_ADDR_MODE_INCR 0x0 /* Incrementing Mode */ +#define ATC_SRC_ADDR_MODE_DECR 0x1 /* Decrementing Mode */ +#define ATC_SRC_ADDR_MODE_FIXED 0x2 /* Fixed Mode */ +#define ATC_DST_ADDR_MODE GENMASK(29, 28) +#define ATC_DST_ADDR_MODE_INCR 0x0 /* Incrementing Mode */ +#define ATC_DST_ADDR_MODE_DECR 0x1 /* Decrementing Mode */ +#define ATC_DST_ADDR_MODE_FIXED 0x2 /* Fixed Mode */ +#define ATC_IEN BIT(30) /* BTC interrupt enable (active low) */ +#define ATC_AUTO BIT(31) /* Auto multiple buffer tx enable */ + +/* Bitfields in CFG */ +#define ATC_PER_MSB(h) ((0x30U & (h)) >> 4) /* Extract most significant bits of a handshaking identifier */ + +#define ATC_SRC_PER GENMASK(3, 0) /* Channel src rq associated with periph handshaking ifc h */ +#define ATC_DST_PER GENMASK(7, 4) /* Channel dst rq associated with periph handshaking ifc h */ +#define ATC_SRC_REP BIT(8) /* Source Replay Mod */ +#define ATC_SRC_H2SEL BIT(9) /* Source Handshaking Mod */ +#define ATC_SRC_PER_MSB GENMASK(11, 10) /* Channel src rq (most significant bits) */ +#define ATC_DST_REP BIT(12) /* Destination Replay Mod */ +#define ATC_DST_H2SEL BIT(13) /* Destination Handshaking Mod */ +#define ATC_DST_PER_MSB GENMASK(15, 14) /* Channel dst rq (most significant bits) */ +#define ATC_SOD BIT(16) /* Stop On Done */ +#define ATC_LOCK_IF BIT(20) /* Interface Lock */ +#define ATC_LOCK_B BIT(21) /* AHB Bus Lock */ +#define ATC_LOCK_IF_L BIT(22) /* Master Interface Arbiter Lock */ +#define 
ATC_AHB_PROT GENMASK(26, 24) /* AHB Protection */ +#define ATC_FIFOCFG GENMASK(29, 28) /* FIFO Request Configuration */ +#define ATC_FIFOCFG_LARGESTBURST 0x0 +#define ATC_FIFOCFG_HALFFIFO 0x1 +#define ATC_FIFOCFG_ENOUGHSPACE 0x2 + +/* Bitfields in SPIP */ +#define ATC_SPIP_HOLE GENMASK(15, 0) +#define ATC_SPIP_BOUNDARY GENMASK(25, 16) + +/* Bitfields in DPIP */ +#define ATC_DPIP_HOLE GENMASK(15, 0) +#define ATC_DPIP_BOUNDARY GENMASK(25, 16) + +#define ATC_SRC_PER_ID(id) (FIELD_PREP(ATC_SRC_PER_MSB, (id)) | \ + FIELD_PREP(ATC_SRC_PER, (id))) +#define ATC_DST_PER_ID(id) (FIELD_PREP(ATC_DST_PER_MSB, (id)) | \ + FIELD_PREP(ATC_DST_PER, (id))) + + + +/*-- descriptors -----------------------------------------------------*/ + +/* LLI == Linked List Item; aka DMA buffer descriptor */ +struct at_lli { + /* values that are not changed by hardware */ + u32 saddr; + u32 daddr; + /* value that may get written back: */ + u32 ctrla; + /* more values that are not changed by hardware */ + u32 ctrlb; + u32 dscr; /* chain to next lli */ +}; + +/** + * struct atdma_sg - atdma scatter gather entry + * @len: length of the current Linked List Item. + * @lli: linked list item that is passed to the DMA controller + * @lli_phys: physical address of the LLI. + */ +struct atdma_sg { + unsigned int len; + struct at_lli *lli; + dma_addr_t lli_phys; +}; + +/** + * struct at_desc - software descriptor + * @vd: pointer to the virtual dma descriptor. + * @atchan: pointer to the atmel dma channel. + * @total_len: total transaction byte count + * @sg_len: number of sg entries. + * @sg: array of sgs. + */ +struct at_desc { + struct virt_dma_desc vd; + struct at_dma_chan *atchan; + size_t total_len; + unsigned int sglen; + /* Interleaved data */ + size_t boundary; + size_t dst_hole; + size_t src_hole; + + /* Memset temporary buffer */ + bool memset_buffer; + dma_addr_t memset_paddr; + int *memset_vaddr; + struct atdma_sg sg[]; +}; + +/*-- Channels --------------------------------------------------------*/ + +/** + * atc_status - information bits stored in channel status flag + * + * Manipulated with atomic operations. + */ +enum atc_status { + ATC_IS_PAUSED = 1, + ATC_IS_CYCLIC = 24, +}; + +/** + * struct at_dma_chan - internal representation of an Atmel HDMAC channel + * @vc: virtual dma channel entry. + * @atdma: pointer to the driver data. + * @ch_regs: memory mapped register base + * @mask: channel index in a mask + * @per_if: peripheral interface + * @mem_if: memory interface + * @status: transmit status information from irq/prep* functions + * to tasklet (use atomic operations) + * @save_cfg: configuration register that is saved on suspend/resume cycle + * @save_dscr: for cyclic operations, preserve next descriptor address in + * the cyclic list on suspend/resume cycle + * @dma_sconfig: configuration for slave transfers, passed via + * .device_config + * @desc: pointer to the atmel dma descriptor. + */ +struct at_dma_chan { + struct virt_dma_chan vc; + struct at_dma *atdma; + void __iomem *ch_regs; + u8 mask; + u8 per_if; + u8 mem_if; + unsigned long status; + u32 save_cfg; + u32 save_dscr; + struct dma_slave_config dma_sconfig; + bool cyclic; + struct at_desc *desc; +}; + +#define channel_readl(atchan, name) \ + __raw_readl((atchan)->ch_regs + ATC_##name##_OFFSET) + +#define channel_writel(atchan, name, val) \ + __raw_writel((val), (atchan)->ch_regs + ATC_##name##_OFFSET) + +/* + * Fix sconfig's burst size according to at_hdmac. 
We need to convert them as: + * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3, 32 -> 4, 64 -> 5, 128 -> 6, 256 -> 7. + * + * This can be done by finding most significant bit set. + */ +static inline void convert_burst(u32 *maxburst) +{ + if (*maxburst > 1) + *maxburst = fls(*maxburst) - 2; + else + *maxburst = 0; +} + +/* + * Fix sconfig's bus width according to at_hdmac. + * 1 byte -> 0, 2 bytes -> 1, 4 bytes -> 2. + */ +static inline u8 convert_buswidth(enum dma_slave_buswidth addr_width) +{ + switch (addr_width) { + case DMA_SLAVE_BUSWIDTH_2_BYTES: + return 1; + case DMA_SLAVE_BUSWIDTH_4_BYTES: + return 2; + default: + /* For 1 byte width or fallback */ + return 0; + } +} + +/*-- Controller ------------------------------------------------------*/ + +/** + * struct at_dma - internal representation of an Atmel HDMA Controller + * @dma_device: dmaengine dma_device object members + * @atdma_devtype: identifier of DMA controller compatibility + * @ch_regs: memory mapped register base + * @clk: dma controller clock + * @save_imr: interrupt mask register that is saved on suspend/resume cycle + * @all_chan_mask: all channels availlable in a mask + * @lli_pool: hw lli table + * @chan: channels table to store at_dma_chan structures + */ +struct at_dma { + struct dma_device dma_device; + void __iomem *regs; + struct clk *clk; + u32 save_imr; + + u8 all_chan_mask; + + struct dma_pool *lli_pool; + struct dma_pool *memset_pool; + /* AT THE END channels table */ + struct at_dma_chan chan[]; +}; + +#define dma_readl(atdma, name) \ + __raw_readl((atdma)->regs + AT_DMA_##name) +#define dma_writel(atdma, name, val) \ + __raw_writel((val), (atdma)->regs + AT_DMA_##name) + +static inline struct at_desc *to_atdma_desc(struct dma_async_tx_descriptor *t) +{ + return container_of(t, struct at_desc, vd.tx); +} + +static inline struct at_dma_chan *to_at_dma_chan(struct dma_chan *chan) +{ + return container_of(chan, struct at_dma_chan, vc.chan); +} + +static inline struct at_dma *to_at_dma(struct dma_device *ddev) +{ + return container_of(ddev, struct at_dma, dma_device); +} + + +/*-- Helper functions ------------------------------------------------*/ + +static struct device *chan2dev(struct dma_chan *chan) +{ + return &chan->dev->device; +} + +#if defined(VERBOSE_DEBUG) +static void vdbg_dump_regs(struct at_dma_chan *atchan) +{ + struct at_dma *atdma = to_at_dma(atchan->vc.chan.device); + + dev_err(chan2dev(&atchan->vc.chan), + " channel %d : imr = 0x%x, chsr = 0x%x\n", + atchan->vc.chan.chan_id, + dma_readl(atdma, EBCIMR), + dma_readl(atdma, CHSR)); + + dev_err(chan2dev(&atchan->vc.chan), + " channel: s0x%x d0x%x ctrl0x%x:0x%x cfg0x%x l0x%x\n", + channel_readl(atchan, SADDR), + channel_readl(atchan, DADDR), + channel_readl(atchan, CTRLA), + channel_readl(atchan, CTRLB), + channel_readl(atchan, CFG), + channel_readl(atchan, DSCR)); +} +#else +static void vdbg_dump_regs(struct at_dma_chan *atchan) {} +#endif + +static void atc_dump_lli(struct at_dma_chan *atchan, struct at_lli *lli) +{ + dev_crit(chan2dev(&atchan->vc.chan), + "desc: s%pad d%pad ctrl0x%x:0x%x l%pad\n", + &lli->saddr, &lli->daddr, + lli->ctrla, lli->ctrlb, &lli->dscr); +} + + +static void atc_setup_irq(struct at_dma *atdma, int chan_id, int on) +{ + u32 ebci; + + /* enable interrupts on buffer transfer completion & error */ + ebci = AT_DMA_BTC(chan_id) + | AT_DMA_ERR(chan_id); + if (on) + dma_writel(atdma, EBCIER, ebci); + else + dma_writel(atdma, EBCIDR, ebci); +} + +static void atc_enable_chan_irq(struct at_dma *atdma, int chan_id) +{ + atc_setup_irq(atdma, 
chan_id, 1); +} + +static void atc_disable_chan_irq(struct at_dma *atdma, int chan_id) +{ + atc_setup_irq(atdma, chan_id, 0); +} + + +/** + * atc_chan_is_enabled - test if given channel is enabled + * @atchan: channel we want to test status + */ +static inline int atc_chan_is_enabled(struct at_dma_chan *atchan) +{ + struct at_dma *atdma = to_at_dma(atchan->vc.chan.device); + + return !!(dma_readl(atdma, CHSR) & atchan->mask); +} + +/** + * atc_chan_is_paused - test channel pause/resume status + * @atchan: channel we want to test status + */ +static inline int atc_chan_is_paused(struct at_dma_chan *atchan) +{ + return test_bit(ATC_IS_PAUSED, &atchan->status); +} + +/** + * atc_chan_is_cyclic - test if given channel has cyclic property set + * @atchan: channel we want to test status + */ +static inline int atc_chan_is_cyclic(struct at_dma_chan *atchan) +{ + return test_bit(ATC_IS_CYCLIC, &atchan->status); +} + +/** + * set_lli_eol - set end-of-link to descriptor so it will end transfer + * @desc: descriptor, signle or at the end of a chain, to end chain on + * @i: index of the atmel scatter gather entry that is at the end of the chain. + */ +static void set_lli_eol(struct at_desc *desc, unsigned int i) +{ + u32 ctrlb = desc->sg[i].lli->ctrlb; + + ctrlb &= ~ATC_IEN; + ctrlb |= ATC_SRC_DSCR_DIS | ATC_DST_DSCR_DIS; + + desc->sg[i].lli->ctrlb = ctrlb; + desc->sg[i].lli->dscr = 0; +} + +#define ATC_DEFAULT_CFG FIELD_PREP(ATC_FIFOCFG, ATC_FIFOCFG_HALFFIFO) +#define ATC_DEFAULT_CTRLB (FIELD_PREP(ATC_SIF, AT_DMA_MEM_IF) | \ + FIELD_PREP(ATC_DIF, AT_DMA_MEM_IF)) #define ATC_DMA_BUSWIDTHS\ (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\ BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\ @@ -74,13 +517,6 @@ struct at_dma_slave { u32 cfg; }; -/* prototypes */ -static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx); -static void atc_issue_pending(struct dma_chan *chan); - - -/*----------------------------------------------------------------------*/ - static inline unsigned int atc_get_xfer_width(dma_addr_t src, dma_addr_t dst, size_t len) { @@ -96,194 +532,72 @@ static inline unsigned int atc_get_xfer_width(dma_addr_t src, dma_addr_t dst, return width; } -static struct at_desc *atc_first_active(struct at_dma_chan *atchan) -{ - return list_first_entry(&atchan->active_list, - struct at_desc, desc_node); -} - -static struct at_desc *atc_first_queued(struct at_dma_chan *atchan) -{ - return list_first_entry(&atchan->queue, - struct at_desc, desc_node); -} - -/** - * atc_alloc_descriptor - allocate and return an initialized descriptor - * @chan: the channel to allocate descriptors for - * @gfp_flags: GFP allocation flags - * - * Note: The ack-bit is positioned in the descriptor flag at creation time - * to make initial allocation more convenient. This bit will be cleared - * and control will be given to client at usage time (during - * preparation functions). 
- */ -static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan, - gfp_t gfp_flags) -{ - struct at_desc *desc = NULL; - struct at_dma *atdma = to_at_dma(chan->device); - dma_addr_t phys; - - desc = dma_pool_zalloc(atdma->dma_desc_pool, gfp_flags, &phys); - if (desc) { - INIT_LIST_HEAD(&desc->tx_list); - dma_async_tx_descriptor_init(&desc->txd, chan); - /* txd.flags will be overwritten in prep functions */ - desc->txd.flags = DMA_CTRL_ACK; - desc->txd.tx_submit = atc_tx_submit; - desc->txd.phys = phys; - } - - return desc; -} - -/** - * atc_desc_get - get an unused descriptor from free_list - * @atchan: channel we want a new descriptor for - */ -static struct at_desc *atc_desc_get(struct at_dma_chan *atchan) +static void atdma_lli_chain(struct at_desc *desc, unsigned int i) { - struct at_desc *desc, *_desc; - struct at_desc *ret = NULL; - unsigned long flags; - unsigned int i = 0; - - spin_lock_irqsave(&atchan->lock, flags); - list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) { - i++; - if (async_tx_test_ack(&desc->txd)) { - list_del(&desc->desc_node); - ret = desc; - break; - } - dev_dbg(chan2dev(&atchan->chan_common), - "desc %p not ACKed\n", desc); - } - spin_unlock_irqrestore(&atchan->lock, flags); - dev_vdbg(chan2dev(&atchan->chan_common), - "scanned %u descriptors on freelist\n", i); - - /* no more descriptor available in initial pool: create one more */ - if (!ret) - ret = atc_alloc_descriptor(&atchan->chan_common, GFP_NOWAIT); - - return ret; -} + struct atdma_sg *atdma_sg = &desc->sg[i]; -/** - * atc_desc_put - move a descriptor, including any children, to the free list - * @atchan: channel we work on - * @desc: descriptor, at the head of a chain, to move to free list - */ -static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc) -{ - if (desc) { - struct at_desc *child; - unsigned long flags; - - spin_lock_irqsave(&atchan->lock, flags); - list_for_each_entry(child, &desc->tx_list, desc_node) - dev_vdbg(chan2dev(&atchan->chan_common), - "moving child desc %p to freelist\n", - child); - list_splice_init(&desc->tx_list, &atchan->free_list); - dev_vdbg(chan2dev(&atchan->chan_common), - "moving desc %p to freelist\n", desc); - list_add(&desc->desc_node, &atchan->free_list); - spin_unlock_irqrestore(&atchan->lock, flags); - } -} - -/** - * atc_desc_chain - build chain adding a descriptor - * @first: address of first descriptor of the chain - * @prev: address of previous descriptor of the chain - * @desc: descriptor to queue - * - * Called from prep_* functions - */ -static void atc_desc_chain(struct at_desc **first, struct at_desc **prev, - struct at_desc *desc) -{ - if (!(*first)) { - *first = desc; - } else { - /* inform the HW lli about chaining */ - (*prev)->lli.dscr = desc->txd.phys; - /* insert the link descriptor to the LD ring */ - list_add_tail(&desc->desc_node, - &(*first)->tx_list); - } - *prev = desc; + if (i) + desc->sg[i - 1].lli->dscr = atdma_sg->lli_phys; } /** * atc_dostart - starts the DMA engine for real * @atchan: the channel we want to start - * @first: first descriptor in the list we want to begin with - * - * Called with atchan->lock held and bh disabled */ -static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first) +static void atc_dostart(struct at_dma_chan *atchan) { - struct at_dma *atdma = to_at_dma(atchan->chan_common.device); + struct virt_dma_desc *vd = vchan_next_desc(&atchan->vc); + struct at_desc *desc; - /* ASSERT: channel is idle */ - if (atc_chan_is_enabled(atchan)) { - 
dev_err(chan2dev(&atchan->chan_common), - "BUG: Attempted to start non-idle channel\n"); - dev_err(chan2dev(&atchan->chan_common), - " channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n", - channel_readl(atchan, SADDR), - channel_readl(atchan, DADDR), - channel_readl(atchan, CTRLA), - channel_readl(atchan, CTRLB), - channel_readl(atchan, DSCR)); - - /* The tasklet will hopefully advance the queue... */ + if (!vd) { + atchan->desc = NULL; return; } vdbg_dump_regs(atchan); + list_del(&vd->node); + atchan->desc = desc = to_atdma_desc(&vd->tx); + channel_writel(atchan, SADDR, 0); channel_writel(atchan, DADDR, 0); channel_writel(atchan, CTRLA, 0); channel_writel(atchan, CTRLB, 0); - channel_writel(atchan, DSCR, first->txd.phys); - channel_writel(atchan, SPIP, ATC_SPIP_HOLE(first->src_hole) | - ATC_SPIP_BOUNDARY(first->boundary)); - channel_writel(atchan, DPIP, ATC_DPIP_HOLE(first->dst_hole) | - ATC_DPIP_BOUNDARY(first->boundary)); + channel_writel(atchan, DSCR, desc->sg[0].lli_phys); + channel_writel(atchan, SPIP, + FIELD_PREP(ATC_SPIP_HOLE, desc->src_hole) | + FIELD_PREP(ATC_SPIP_BOUNDARY, desc->boundary)); + channel_writel(atchan, DPIP, + FIELD_PREP(ATC_DPIP_HOLE, desc->dst_hole) | + FIELD_PREP(ATC_DPIP_BOUNDARY, desc->boundary)); + /* Don't allow CPU to reorder channel enable. */ wmb(); - dma_writel(atdma, CHER, atchan->mask); + dma_writel(atchan->atdma, CHER, atchan->mask); vdbg_dump_regs(atchan); } -/* - * atc_get_desc_by_cookie - get the descriptor of a cookie - * @atchan: the DMA channel - * @cookie: the cookie to get the descriptor for - */ -static struct at_desc *atc_get_desc_by_cookie(struct at_dma_chan *atchan, - dma_cookie_t cookie) +static void atdma_desc_free(struct virt_dma_desc *vd) { - struct at_desc *desc, *_desc; + struct at_dma *atdma = to_at_dma(vd->tx.chan->device); + struct at_desc *desc = to_atdma_desc(&vd->tx); + unsigned int i; - list_for_each_entry_safe(desc, _desc, &atchan->queue, desc_node) { - if (desc->txd.cookie == cookie) - return desc; + for (i = 0; i < desc->sglen; i++) { + if (desc->sg[i].lli) + dma_pool_free(atdma->lli_pool, desc->sg[i].lli, + desc->sg[i].lli_phys); } - list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) { - if (desc->txd.cookie == cookie) - return desc; + /* If the transfer was a memset, free our temporary buffer */ + if (desc->memset_buffer) { + dma_pool_free(atdma->memset_pool, desc->memset_vaddr, + desc->memset_paddr); + desc->memset_buffer = false; } - return NULL; + kfree(desc); } /** @@ -293,10 +607,10 @@ static struct at_desc *atc_get_desc_by_cookie(struct at_dma_chan *atchan, * @current_len: the number of bytes left before reading CTRLA * @ctrla: the value of CTRLA */ -static inline int atc_calc_bytes_left(int current_len, u32 ctrla) +static inline u32 atc_calc_bytes_left(u32 current_len, u32 ctrla) { - u32 btsize = (ctrla & ATC_BTSIZE_MAX); - u32 src_width = ATC_REG_TO_SRC_WIDTH(ctrla); + u32 btsize = FIELD_GET(ATC_BTSIZE, ctrla); + u32 src_width = FIELD_GET(ATC_SRC_WIDTH, ctrla); /* * According to the datasheet, when reading the Control A Register @@ -308,246 +622,153 @@ static inline int atc_calc_bytes_left(int current_len, u32 ctrla) } /** - * atc_get_bytes_left - get the number of bytes residue for a cookie - * @chan: DMA channel - * @cookie: transaction identifier to check status of + * atc_get_llis_residue - Get residue for a hardware linked list transfer + * + * Calculate the residue by removing the length of the Linked List Item (LLI) + * already transferred from the total length. 
To get the current LLI we can use + * the value of the channel's DSCR register and compare it against the DSCR + * value of each LLI. + * + * The CTRLA register provides us with the amount of data already read from the + * source for the LLI. So we can compute a more accurate residue by also + * removing the number of bytes corresponding to this amount of data. + * + * However, the DSCR and CTRLA registers cannot be read both atomically. Hence a + * race condition may occur: the first read register may refer to one LLI + * whereas the second read may refer to a later LLI in the list because of the + * DMA transfer progression inbetween the two reads. + * + * One solution could have been to pause the DMA transfer, read the DSCR and + * CTRLA then resume the DMA transfer. Nonetheless, this approach presents some + * drawbacks: + * - If the DMA transfer is paused, RX overruns or TX underruns are more likey + * to occur depending on the system latency. Taking the USART driver as an + * example, it uses a cyclic DMA transfer to read data from the Receive + * Holding Register (RHR) to avoid RX overruns since the RHR is not protected + * by any FIFO on most Atmel SoCs. So pausing the DMA transfer to compute the + * residue would break the USART driver design. + * - The atc_pause() function masks interrupts but we'd rather avoid to do so + * for system latency purpose. + * + * Then we'd rather use another solution: the DSCR is read a first time, the + * CTRLA is read in turn, next the DSCR is read a second time. If the two + * consecutive read values of the DSCR are the same then we assume both refers + * to the very same LLI as well as the CTRLA value read inbetween does. For + * cyclic tranfers, the assumption is that a full loop is "not so fast". If the + * two DSCR values are different, we read again the CTRLA then the DSCR till two + * consecutive read values from DSCR are equal or till the maximum trials is + * reach. This algorithm is very unlikely not to find a stable value for DSCR. + * @atchan: pointer to an atmel hdmac channel. + * @desc: pointer to the descriptor for which the residue is calculated. + * @residue: residue to be set to dma_tx_state. + * Returns 0 on success, -errno otherwise. */ -static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie) +static int atc_get_llis_residue(struct at_dma_chan *atchan, + struct at_desc *desc, u32 *residue) { - struct at_dma_chan *atchan = to_at_dma_chan(chan); - struct at_desc *desc_first = atc_first_active(atchan); - struct at_desc *desc; - int ret; - u32 ctrla, dscr; + u32 len, ctrla, dscr; unsigned int i; - /* - * If the cookie doesn't match to the currently running transfer then - * we can return the total length of the associated DMA transfer, - * because it is still queued. - */ - desc = atc_get_desc_by_cookie(atchan, cookie); - if (desc == NULL) - return -EINVAL; - else if (desc != desc_first) - return desc->total_len; + len = desc->total_len; + dscr = channel_readl(atchan, DSCR); + rmb(); /* ensure DSCR is read before CTRLA */ + ctrla = channel_readl(atchan, CTRLA); + for (i = 0; i < ATC_MAX_DSCR_TRIALS; ++i) { + u32 new_dscr; - /* cookie matches to the currently running transfer */ - ret = desc_first->total_len; - - if (desc_first->lli.dscr) { - /* hardware linked list transfer */ + rmb(); /* ensure DSCR is read after CTRLA */ + new_dscr = channel_readl(atchan, DSCR); /* - * Calculate the residue by removing the length of the child - * descriptors already transferred from the total length. 
- * To get the current child descriptor we can use the value of - * the channel's DSCR register and compare it against the value - * of the hardware linked list structure of each child - * descriptor. - * - * The CTRLA register provides us with the amount of data - * already read from the source for the current child - * descriptor. So we can compute a more accurate residue by also - * removing the number of bytes corresponding to this amount of - * data. - * - * However, the DSCR and CTRLA registers cannot be read both - * atomically. Hence a race condition may occur: the first read - * register may refer to one child descriptor whereas the second - * read may refer to a later child descriptor in the list - * because of the DMA transfer progression inbetween the two - * reads. - * - * One solution could have been to pause the DMA transfer, read - * the DSCR and CTRLA then resume the DMA transfer. Nonetheless, - * this approach presents some drawbacks: - * - If the DMA transfer is paused, RX overruns or TX underruns - * are more likey to occur depending on the system latency. - * Taking the USART driver as an example, it uses a cyclic DMA - * transfer to read data from the Receive Holding Register - * (RHR) to avoid RX overruns since the RHR is not protected - * by any FIFO on most Atmel SoCs. So pausing the DMA transfer - * to compute the residue would break the USART driver design. - * - The atc_pause() function masks interrupts but we'd rather - * avoid to do so for system latency purpose. - * - * Then we'd rather use another solution: the DSCR is read a - * first time, the CTRLA is read in turn, next the DSCR is read - * a second time. If the two consecutive read values of the DSCR - * are the same then we assume both refers to the very same - * child descriptor as well as the CTRLA value read inbetween - * does. For cyclic tranfers, the assumption is that a full loop - * is "not so fast". - * If the two DSCR values are different, we read again the CTRLA - * then the DSCR till two consecutive read values from DSCR are - * equal or till the maxium trials is reach. - * This algorithm is very unlikely not to find a stable value for - * DSCR. + * If the DSCR register value has not changed inside the DMA + * controller since the previous read, we assume that both the + * dscr and ctrla values refers to the very same descriptor. */ - - dscr = channel_readl(atchan, DSCR); - rmb(); /* ensure DSCR is read before CTRLA */ - ctrla = channel_readl(atchan, CTRLA); - for (i = 0; i < ATC_MAX_DSCR_TRIALS; ++i) { - u32 new_dscr; - - rmb(); /* ensure DSCR is read after CTRLA */ - new_dscr = channel_readl(atchan, DSCR); - - /* - * If the DSCR register value has not changed inside the - * DMA controller since the previous read, we assume - * that both the dscr and ctrla values refers to the - * very same descriptor. - */ - if (likely(new_dscr == dscr)) - break; - - /* - * DSCR has changed inside the DMA controller, so the - * previouly read value of CTRLA may refer to an already - * processed descriptor hence could be outdated. - * We need to update ctrla to match the current - * descriptor. 
- */ - dscr = new_dscr; - rmb(); /* ensure DSCR is read before CTRLA */ - ctrla = channel_readl(atchan, CTRLA); - } - if (unlikely(i == ATC_MAX_DSCR_TRIALS)) - return -ETIMEDOUT; - - /* for the first descriptor we can be more accurate */ - if (desc_first->lli.dscr == dscr) - return atc_calc_bytes_left(ret, ctrla); - - ret -= desc_first->len; - list_for_each_entry(desc, &desc_first->tx_list, desc_node) { - if (desc->lli.dscr == dscr) - break; - - ret -= desc->len; - } + if (likely(new_dscr == dscr)) + break; /* - * For the current descriptor in the chain we can calculate - * the remaining bytes using the channel's register. + * DSCR has changed inside the DMA controller, so the previouly + * read value of CTRLA may refer to an already processed + * descriptor hence could be outdated. We need to update ctrla + * to match the current descriptor. */ - ret = atc_calc_bytes_left(ret, ctrla); - } else { - /* single transfer */ + dscr = new_dscr; + rmb(); /* ensure DSCR is read before CTRLA */ ctrla = channel_readl(atchan, CTRLA); - ret = atc_calc_bytes_left(ret, ctrla); } + if (unlikely(i == ATC_MAX_DSCR_TRIALS)) + return -ETIMEDOUT; - return ret; -} - -/** - * atc_chain_complete - finish work for one transaction chain - * @atchan: channel we work on - * @desc: descriptor at the head of the chain we want do complete - */ -static void -atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc) -{ - struct dma_async_tx_descriptor *txd = &desc->txd; - struct at_dma *atdma = to_at_dma(atchan->chan_common.device); - unsigned long flags; - - dev_vdbg(chan2dev(&atchan->chan_common), - "descriptor %u complete\n", txd->cookie); - - spin_lock_irqsave(&atchan->lock, flags); - - /* mark the descriptor as complete for non cyclic cases only */ - if (!atc_chan_is_cyclic(atchan)) - dma_cookie_complete(txd); - - spin_unlock_irqrestore(&atchan->lock, flags); - - dma_descriptor_unmap(txd); - /* for cyclic transfers, - * no need to replay callback function while stopping */ - if (!atc_chan_is_cyclic(atchan)) - dmaengine_desc_get_callback_invoke(txd, NULL); + /* For the first descriptor we can be more accurate. */ + if (desc->sg[0].lli->dscr == dscr) { + *residue = atc_calc_bytes_left(len, ctrla); + return 0; + } + len -= desc->sg[0].len; - dma_run_dependencies(txd); + for (i = 1; i < desc->sglen; i++) { + if (desc->sg[i].lli && desc->sg[i].lli->dscr == dscr) + break; + len -= desc->sg[i].len; + } - spin_lock_irqsave(&atchan->lock, flags); - /* move children to free_list */ - list_splice_init(&desc->tx_list, &atchan->free_list); - /* add myself to free_list */ - list_add(&desc->desc_node, &atchan->free_list); - spin_unlock_irqrestore(&atchan->lock, flags); + /* + * For the current LLI in the chain we can calculate the remaining bytes + * using the channel's CTRLA register. + */ + *residue = atc_calc_bytes_left(len, ctrla); + return 0; - /* If the transfer was a memset, free our temporary buffer */ - if (desc->memset_buffer) { - dma_pool_free(atdma->memset_pool, desc->memset_vaddr, - desc->memset_paddr); - desc->memset_buffer = false; - } } /** - * atc_advance_work - at the end of a transaction, move forward - * @atchan: channel where the transaction ended + * atc_get_residue - get the number of bytes residue for a cookie. + * The residue is passed by address and updated on success. + * @chan: DMA channel + * @cookie: transaction identifier to check status of + * @residue: residue to be updated. + * Return 0 on success, -errono otherwise. 
*/ -static void atc_advance_work(struct at_dma_chan *atchan) +static int atc_get_residue(struct dma_chan *chan, dma_cookie_t cookie, + u32 *residue) { - struct at_desc *desc; - unsigned long flags; + struct at_dma_chan *atchan = to_at_dma_chan(chan); + struct virt_dma_desc *vd; + struct at_desc *desc = NULL; + u32 len, ctrla; - dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n"); + vd = vchan_find_desc(&atchan->vc, cookie); + if (vd) + desc = to_atdma_desc(&vd->tx); + else if (atchan->desc && atchan->desc->vd.tx.cookie == cookie) + desc = atchan->desc; - spin_lock_irqsave(&atchan->lock, flags); - if (atc_chan_is_enabled(atchan) || list_empty(&atchan->active_list)) - return spin_unlock_irqrestore(&atchan->lock, flags); + if (!desc) + return -EINVAL; - desc = atc_first_active(atchan); - /* Remove the transfer node from the active list. */ - list_del_init(&desc->desc_node); - spin_unlock_irqrestore(&atchan->lock, flags); - atc_chain_complete(atchan, desc); + if (desc->sg[0].lli->dscr) + /* hardware linked list transfer */ + return atc_get_llis_residue(atchan, desc, residue); - /* advance work */ - spin_lock_irqsave(&atchan->lock, flags); - if (!list_empty(&atchan->active_list)) { - desc = atc_first_queued(atchan); - list_move_tail(&desc->desc_node, &atchan->active_list); - atc_dostart(atchan, desc); - } - spin_unlock_irqrestore(&atchan->lock, flags); + /* single transfer */ + len = desc->total_len; + ctrla = channel_readl(atchan, CTRLA); + *residue = atc_calc_bytes_left(len, ctrla); + return 0; } - /** * atc_handle_error - handle errors reported by DMA controller - * @atchan: channel where error occurs + * @atchan: channel where error occurs. + * @i: channel index */ -static void atc_handle_error(struct at_dma_chan *atchan) +static void atc_handle_error(struct at_dma_chan *atchan, unsigned int i) { - struct at_desc *bad_desc; - struct at_desc *desc; - struct at_desc *child; - unsigned long flags; + struct at_desc *desc = atchan->desc; - spin_lock_irqsave(&atchan->lock, flags); - /* - * The descriptor currently at the head of the active list is - * broked. Since we don't have any way to report errors, we'll - * just have to scream loudly and try to carry on. - */ - bad_desc = atc_first_active(atchan); - list_del_init(&bad_desc->desc_node); - - /* Try to restart the controller */ - if (!list_empty(&atchan->active_list)) { - desc = atc_first_queued(atchan); - list_move_tail(&desc->desc_node, &atchan->active_list); - atc_dostart(atchan, desc); - } + /* Disable channel on AHB error */ + dma_writel(atchan->atdma, CHDR, AT_DMA_RES(i) | atchan->mask); /* * KERN_CRITICAL may seem harsh, but since this only happens @@ -556,54 +777,42 @@ static void atc_handle_error(struct at_dma_chan *atchan) * controller flagged an error instead of scribbling over * random memory locations. 
*/ - dev_crit(chan2dev(&atchan->chan_common), - "Bad descriptor submitted for DMA!\n"); - dev_crit(chan2dev(&atchan->chan_common), - " cookie: %d\n", bad_desc->txd.cookie); - atc_dump_lli(atchan, &bad_desc->lli); - list_for_each_entry(child, &bad_desc->tx_list, desc_node) - atc_dump_lli(atchan, &child->lli); - - spin_unlock_irqrestore(&atchan->lock, flags); - - /* Pretend the descriptor completed successfully */ - atc_chain_complete(atchan, bad_desc); + dev_crit(chan2dev(&atchan->vc.chan), "Bad descriptor submitted for DMA!\n"); + dev_crit(chan2dev(&atchan->vc.chan), "cookie: %d\n", + desc->vd.tx.cookie); + for (i = 0; i < desc->sglen; i++) + atc_dump_lli(atchan, desc->sg[i].lli); } -/** - * atc_handle_cyclic - at the end of a period, run callback function - * @atchan: channel used for cyclic operations - */ -static void atc_handle_cyclic(struct at_dma_chan *atchan) +static void atdma_handle_chan_done(struct at_dma_chan *atchan, u32 pending, + unsigned int i) { - struct at_desc *first = atc_first_active(atchan); - struct dma_async_tx_descriptor *txd = &first->txd; - - dev_vdbg(chan2dev(&atchan->chan_common), - "new cyclic period llp 0x%08x\n", - channel_readl(atchan, DSCR)); - - dmaengine_desc_get_callback_invoke(txd, NULL); -} - -/*-- IRQ & Tasklet ---------------------------------------------------*/ - -static void atc_tasklet(struct tasklet_struct *t) -{ - struct at_dma_chan *atchan = from_tasklet(atchan, t, tasklet); + struct at_desc *desc; - if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status)) - return atc_handle_error(atchan); + spin_lock(&atchan->vc.lock); + desc = atchan->desc; - if (atc_chan_is_cyclic(atchan)) - return atc_handle_cyclic(atchan); + if (desc) { + if (pending & AT_DMA_ERR(i)) { + atc_handle_error(atchan, i); + /* Pretend the descriptor completed successfully */ + } - atc_advance_work(atchan); + if (atc_chan_is_cyclic(atchan)) { + vchan_cyclic_callback(&desc->vd); + } else { + vchan_cookie_complete(&desc->vd); + atchan->desc = NULL; + if (!(atc_chan_is_enabled(atchan))) + atc_dostart(atchan); + } + } + spin_unlock(&atchan->vc.lock); } static irqreturn_t at_dma_interrupt(int irq, void *dev_id) { - struct at_dma *atdma = (struct at_dma *)dev_id; + struct at_dma *atdma = dev_id; struct at_dma_chan *atchan; int i; u32 status, pending, imr; @@ -617,23 +826,16 @@ static irqreturn_t at_dma_interrupt(int irq, void *dev_id) if (!pending) break; - dev_vdbg(atdma->dma_common.dev, + dev_vdbg(atdma->dma_device.dev, "interrupt: status = 0x%08x, 0x%08x, 0x%08x\n", status, imr, pending); - for (i = 0; i < atdma->dma_common.chancnt; i++) { + for (i = 0; i < atdma->dma_device.chancnt; i++) { atchan = &atdma->chan[i]; - if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) { - if (pending & AT_DMA_ERR(i)) { - /* Disable channel on AHB error */ - dma_writel(atdma, CHDR, - AT_DMA_RES(i) | atchan->mask); - /* Give information to tasklet */ - set_bit(ATC_IS_ERROR, &atchan->status); - } - tasklet_schedule(&atchan->tasklet); - ret = IRQ_HANDLED; - } + if (!(pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i)))) + continue; + atdma_handle_chan_done(atchan, pending, i); + ret = IRQ_HANDLED; } } while (pending); @@ -641,35 +843,7 @@ static irqreturn_t at_dma_interrupt(int irq, void *dev_id) return ret; } - /*-- DMA Engine API --------------------------------------------------*/ - -/** - * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine - * @tx: descriptor at the head of the transaction chain - * - * Queue chain if DMA engine is working already - * - * Cookie increment and adding to 
active_list or queue must be atomic - */ -static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx) -{ - struct at_desc *desc = txd_to_at_desc(tx); - struct at_dma_chan *atchan = to_at_dma_chan(tx->chan); - dma_cookie_t cookie; - unsigned long flags; - - spin_lock_irqsave(&atchan->lock, flags); - cookie = dma_cookie_assign(tx); - - list_add_tail(&desc->desc_node, &atchan->queue); - spin_unlock_irqrestore(&atchan->lock, flags); - - dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n", - desc->txd.cookie); - return cookie; -} - /** * atc_prep_dma_interleaved - prepare memory to memory interleaved operation * @chan: the channel to prepare operation on @@ -681,9 +855,12 @@ atc_prep_dma_interleaved(struct dma_chan *chan, struct dma_interleaved_template *xt, unsigned long flags) { + struct at_dma *atdma = to_at_dma(chan->device); struct at_dma_chan *atchan = to_at_dma_chan(chan); struct data_chunk *first; - struct at_desc *desc = NULL; + struct atdma_sg *atdma_sg; + struct at_desc *desc; + struct at_lli *lli; size_t xfer_count; unsigned int dwidth; u32 ctrla; @@ -722,8 +899,7 @@ atc_prep_dma_interleaved(struct dma_chan *chan, len += chunk->size; } - dwidth = atc_get_xfer_width(xt->src_start, - xt->dst_start, len); + dwidth = atc_get_xfer_width(xt->src_start, xt->dst_start, len); xfer_count = len >> dwidth; if (xfer_count > ATC_BTSIZE_MAX) { @@ -731,42 +907,43 @@ atc_prep_dma_interleaved(struct dma_chan *chan, return NULL; } - ctrla = ATC_SRC_WIDTH(dwidth) | - ATC_DST_WIDTH(dwidth); - - ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN - | ATC_SRC_ADDR_MODE_INCR - | ATC_DST_ADDR_MODE_INCR - | ATC_SRC_PIP - | ATC_DST_PIP - | ATC_FC_MEM2MEM; - - /* create the transfer */ - desc = atc_desc_get(atchan); - if (!desc) { - dev_err(chan2dev(chan), - "%s: couldn't allocate our descriptor\n", __func__); + ctrla = FIELD_PREP(ATC_SRC_WIDTH, dwidth) | + FIELD_PREP(ATC_DST_WIDTH, dwidth); + + ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN | + FIELD_PREP(ATC_SRC_ADDR_MODE, ATC_SRC_ADDR_MODE_INCR) | + FIELD_PREP(ATC_DST_ADDR_MODE, ATC_DST_ADDR_MODE_INCR) | + ATC_SRC_PIP | ATC_DST_PIP | + FIELD_PREP(ATC_FC, ATC_FC_MEM2MEM); + + desc = kzalloc(struct_size(desc, sg, 1), GFP_ATOMIC); + if (!desc) + return NULL; + desc->sglen = 1; + + atdma_sg = desc->sg; + atdma_sg->lli = dma_pool_alloc(atdma->lli_pool, GFP_NOWAIT, + &atdma_sg->lli_phys); + if (!atdma_sg->lli) { + kfree(desc); return NULL; } + lli = atdma_sg->lli; - desc->lli.saddr = xt->src_start; - desc->lli.daddr = xt->dst_start; - desc->lli.ctrla = ctrla | xfer_count; - desc->lli.ctrlb = ctrlb; + lli->saddr = xt->src_start; + lli->daddr = xt->dst_start; + lli->ctrla = ctrla | xfer_count; + lli->ctrlb = ctrlb; desc->boundary = first->size >> dwidth; desc->dst_hole = (dmaengine_get_dst_icg(xt, first) >> dwidth) + 1; desc->src_hole = (dmaengine_get_src_icg(xt, first) >> dwidth) + 1; - desc->txd.cookie = -EBUSY; - desc->total_len = desc->len = len; - - /* set end-of-link to the last link descriptor of list*/ - set_desc_eol(desc); - - desc->txd.flags = flags; /* client is in control of this ack */ + atdma_sg->len = len; + desc->total_len = len; - return &desc->txd; + set_lli_eol(desc, 0); + return vchan_tx_prep(&atchan->vc, &desc->vd, flags); } /** @@ -781,29 +958,36 @@ static struct dma_async_tx_descriptor * atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, size_t len, unsigned long flags) { + struct at_dma *atdma = to_at_dma(chan->device); struct at_dma_chan *atchan = to_at_dma_chan(chan); struct at_desc *desc = NULL; - struct at_desc *first = NULL; - 
struct at_desc *prev = NULL; size_t xfer_count; size_t offset; + size_t sg_len; unsigned int src_width; unsigned int dst_width; + unsigned int i; u32 ctrla; u32 ctrlb; - dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d%pad s%pad l0x%zx f0x%lx\n", - &dest, &src, len, flags); + dev_dbg(chan2dev(chan), "prep_dma_memcpy: d%pad s%pad l0x%zx f0x%lx\n", + &dest, &src, len, flags); if (unlikely(!len)) { - dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n"); + dev_err(chan2dev(chan), "prep_dma_memcpy: length is zero!\n"); return NULL; } - ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN - | ATC_SRC_ADDR_MODE_INCR - | ATC_DST_ADDR_MODE_INCR - | ATC_FC_MEM2MEM; + sg_len = DIV_ROUND_UP(len, ATC_BTSIZE_MAX); + desc = kzalloc(struct_size(desc, sg, sg_len), GFP_ATOMIC); + if (!desc) + return NULL; + desc->sglen = sg_len; + + ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN | + FIELD_PREP(ATC_SRC_ADDR_MODE, ATC_SRC_ADDR_MODE_INCR) | + FIELD_PREP(ATC_DST_ADDR_MODE, ATC_DST_ADDR_MODE_INCR) | + FIELD_PREP(ATC_FC, ATC_FC_MEM2MEM); /* * We can be a lot more clever here, but this should take care @@ -811,82 +995,78 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, */ src_width = dst_width = atc_get_xfer_width(src, dest, len); - ctrla = ATC_SRC_WIDTH(src_width) | - ATC_DST_WIDTH(dst_width); + ctrla = FIELD_PREP(ATC_SRC_WIDTH, src_width) | + FIELD_PREP(ATC_DST_WIDTH, dst_width); - for (offset = 0; offset < len; offset += xfer_count << src_width) { - xfer_count = min_t(size_t, (len - offset) >> src_width, - ATC_BTSIZE_MAX); + for (offset = 0, i = 0; offset < len; + offset += xfer_count << src_width, i++) { + struct atdma_sg *atdma_sg = &desc->sg[i]; + struct at_lli *lli; - desc = atc_desc_get(atchan); - if (!desc) + atdma_sg->lli = dma_pool_alloc(atdma->lli_pool, GFP_NOWAIT, + &atdma_sg->lli_phys); + if (!atdma_sg->lli) goto err_desc_get; + lli = atdma_sg->lli; + + xfer_count = min_t(size_t, (len - offset) >> src_width, + ATC_BTSIZE_MAX); - desc->lli.saddr = src + offset; - desc->lli.daddr = dest + offset; - desc->lli.ctrla = ctrla | xfer_count; - desc->lli.ctrlb = ctrlb; + lli->saddr = src + offset; + lli->daddr = dest + offset; + lli->ctrla = ctrla | xfer_count; + lli->ctrlb = ctrlb; - desc->txd.cookie = 0; - desc->len = xfer_count << src_width; + desc->sg[i].len = xfer_count << src_width; - atc_desc_chain(&first, &prev, desc); + atdma_lli_chain(desc, i); } - /* First descriptor of the chain embedds additional information */ - first->txd.cookie = -EBUSY; - first->total_len = len; + desc->total_len = len; /* set end-of-link to the last link descriptor of list*/ - set_desc_eol(desc); - - first->txd.flags = flags; /* client is in control of this ack */ + set_lli_eol(desc, i - 1); - return &first->txd; + return vchan_tx_prep(&atchan->vc, &desc->vd, flags); err_desc_get: - atc_desc_put(atchan, first); + atdma_desc_free(&desc->vd); return NULL; } -static struct at_desc *atc_create_memset_desc(struct dma_chan *chan, - dma_addr_t psrc, - dma_addr_t pdst, - size_t len) +static int atdma_create_memset_lli(struct dma_chan *chan, + struct atdma_sg *atdma_sg, + dma_addr_t psrc, dma_addr_t pdst, size_t len) { - struct at_dma_chan *atchan = to_at_dma_chan(chan); - struct at_desc *desc; + struct at_dma *atdma = to_at_dma(chan->device); + struct at_lli *lli; size_t xfer_count; - - u32 ctrla = ATC_SRC_WIDTH(2) | ATC_DST_WIDTH(2); + u32 ctrla = FIELD_PREP(ATC_SRC_WIDTH, 2) | FIELD_PREP(ATC_DST_WIDTH, 2); u32 ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN | - ATC_SRC_ADDR_MODE_FIXED | - ATC_DST_ADDR_MODE_INCR | - ATC_FC_MEM2MEM; + 
FIELD_PREP(ATC_SRC_ADDR_MODE, ATC_SRC_ADDR_MODE_FIXED) | + FIELD_PREP(ATC_DST_ADDR_MODE, ATC_DST_ADDR_MODE_INCR) | + FIELD_PREP(ATC_FC, ATC_FC_MEM2MEM); xfer_count = len >> 2; if (xfer_count > ATC_BTSIZE_MAX) { - dev_err(chan2dev(chan), "%s: buffer is too big\n", - __func__); - return NULL; + dev_err(chan2dev(chan), "%s: buffer is too big\n", __func__); + return -EINVAL; } - desc = atc_desc_get(atchan); - if (!desc) { - dev_err(chan2dev(chan), "%s: can't get a descriptor\n", - __func__); - return NULL; - } + atdma_sg->lli = dma_pool_alloc(atdma->lli_pool, GFP_NOWAIT, + &atdma_sg->lli_phys); + if (!atdma_sg->lli) + return -ENOMEM; + lli = atdma_sg->lli; - desc->lli.saddr = psrc; - desc->lli.daddr = pdst; - desc->lli.ctrla = ctrla | xfer_count; - desc->lli.ctrlb = ctrlb; + lli->saddr = psrc; + lli->daddr = pdst; + lli->ctrla = ctrla | xfer_count; + lli->ctrlb = ctrlb; - desc->txd.cookie = 0; - desc->len = len; + atdma_sg->len = len; - return desc; + return 0; } /** @@ -901,11 +1081,13 @@ static struct dma_async_tx_descriptor * atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value, size_t len, unsigned long flags) { + struct at_dma_chan *atchan = to_at_dma_chan(chan); struct at_dma *atdma = to_at_dma(chan->device); struct at_desc *desc; void __iomem *vaddr; dma_addr_t paddr; char fill_pattern; + int ret; dev_vdbg(chan2dev(chan), "%s: d%pad v0x%x l0x%zx f0x%lx\n", __func__, &dest, value, len, flags); @@ -936,27 +1118,28 @@ atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value, (fill_pattern << 8) | fill_pattern; - desc = atc_create_memset_desc(chan, paddr, dest, len); - if (!desc) { - dev_err(chan2dev(chan), "%s: couldn't get a descriptor\n", - __func__); + desc = kzalloc(struct_size(desc, sg, 1), GFP_ATOMIC); + if (!desc) goto err_free_buffer; - } + desc->sglen = 1; + + ret = atdma_create_memset_lli(chan, desc->sg, paddr, dest, len); + if (ret) + goto err_free_desc; desc->memset_paddr = paddr; desc->memset_vaddr = vaddr; desc->memset_buffer = true; - desc->txd.cookie = -EBUSY; desc->total_len = len; /* set end-of-link on the descriptor */ - set_desc_eol(desc); - - desc->txd.flags = flags; + set_lli_eol(desc, 0); - return &desc->txd; + return vchan_tx_prep(&atchan->vc, &desc->vd, flags); +err_free_desc: + kfree(desc); err_free_buffer: dma_pool_free(atdma->memset_pool, vaddr, paddr); return NULL; @@ -970,12 +1153,13 @@ atc_prep_dma_memset_sg(struct dma_chan *chan, { struct at_dma_chan *atchan = to_at_dma_chan(chan); struct at_dma *atdma = to_at_dma(chan->device); - struct at_desc *desc = NULL, *first = NULL, *prev = NULL; + struct at_desc *desc; struct scatterlist *sg; void __iomem *vaddr; dma_addr_t paddr; size_t total_len = 0; int i; + int ret; dev_vdbg(chan2dev(chan), "%s: v0x%x l0x%zx f0x%lx\n", __func__, value, sg_len, flags); @@ -994,6 +1178,11 @@ atc_prep_dma_memset_sg(struct dma_chan *chan, } *(u32*)vaddr = value; + desc = kzalloc(struct_size(desc, sg, sg_len), GFP_ATOMIC); + if (!desc) + goto err_free_dma_buf; + desc->sglen = sg_len; + for_each_sg(sgl, sg, sg_len, i) { dma_addr_t dest = sg_dma_address(sg); size_t len = sg_dma_len(sg); @@ -1004,38 +1193,33 @@ atc_prep_dma_memset_sg(struct dma_chan *chan, if (!is_dma_fill_aligned(chan->device, dest, 0, len)) { dev_err(chan2dev(chan), "%s: buffer is not aligned\n", __func__); - goto err_put_desc; + goto err_free_desc; } - desc = atc_create_memset_desc(chan, paddr, dest, len); - if (!desc) - goto err_put_desc; - - atc_desc_chain(&first, &prev, desc); + ret = atdma_create_memset_lli(chan, &desc->sg[i], paddr, 
dest, + len); + if (ret) + goto err_free_desc; + atdma_lli_chain(desc, i); total_len += len; } - /* - * Only set the buffer pointers on the last descriptor to - * avoid free'ing while we have our transfer still going - */ desc->memset_paddr = paddr; desc->memset_vaddr = vaddr; desc->memset_buffer = true; - first->txd.cookie = -EBUSY; - first->total_len = total_len; + desc->total_len = total_len; /* set end-of-link on the descriptor */ - set_desc_eol(desc); - - first->txd.flags = flags; + set_lli_eol(desc, i - 1); - return &first->txd; + return vchan_tx_prep(&atchan->vc, &desc->vd, flags); -err_put_desc: - atc_desc_put(atchan, first); +err_free_desc: + atdma_desc_free(&desc->vd); +err_free_dma_buf: + dma_pool_free(atdma->memset_pool, vaddr, paddr); return NULL; } @@ -1053,11 +1237,11 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, enum dma_transfer_direction direction, unsigned long flags, void *context) { + struct at_dma *atdma = to_at_dma(chan->device); struct at_dma_chan *atchan = to_at_dma_chan(chan); struct at_dma_slave *atslave = chan->private; struct dma_slave_config *sconfig = &atchan->dma_sconfig; - struct at_desc *first = NULL; - struct at_desc *prev = NULL; + struct at_desc *desc; u32 ctrla; u32 ctrlb; dma_addr_t reg; @@ -1077,27 +1261,38 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, return NULL; } - ctrla = ATC_SCSIZE(sconfig->src_maxburst) - | ATC_DCSIZE(sconfig->dst_maxburst); + desc = kzalloc(struct_size(desc, sg, sg_len), GFP_ATOMIC); + if (!desc) + return NULL; + desc->sglen = sg_len; + + ctrla = FIELD_PREP(ATC_SCSIZE, sconfig->src_maxburst) | + FIELD_PREP(ATC_DCSIZE, sconfig->dst_maxburst); ctrlb = ATC_IEN; switch (direction) { case DMA_MEM_TO_DEV: reg_width = convert_buswidth(sconfig->dst_addr_width); - ctrla |= ATC_DST_WIDTH(reg_width); - ctrlb |= ATC_DST_ADDR_MODE_FIXED - | ATC_SRC_ADDR_MODE_INCR - | ATC_FC_MEM2PER - | ATC_SIF(atchan->mem_if) | ATC_DIF(atchan->per_if); + ctrla |= FIELD_PREP(ATC_DST_WIDTH, reg_width); + ctrlb |= FIELD_PREP(ATC_DST_ADDR_MODE, + ATC_DST_ADDR_MODE_FIXED) | + FIELD_PREP(ATC_SRC_ADDR_MODE, ATC_SRC_ADDR_MODE_INCR) | + FIELD_PREP(ATC_FC, ATC_FC_MEM2PER) | + FIELD_PREP(ATC_SIF, atchan->mem_if) | + FIELD_PREP(ATC_DIF, atchan->per_if); reg = sconfig->dst_addr; for_each_sg(sgl, sg, sg_len, i) { - struct at_desc *desc; + struct atdma_sg *atdma_sg = &desc->sg[i]; + struct at_lli *lli; u32 len; u32 mem; - desc = atc_desc_get(atchan); - if (!desc) + atdma_sg->lli = dma_pool_alloc(atdma->lli_pool, + GFP_NOWAIT, + &atdma_sg->lli_phys); + if (!atdma_sg->lli) goto err_desc_get; + lli = atdma_sg->lli; mem = sg_dma_address(sg); len = sg_dma_len(sg); @@ -1110,35 +1305,43 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, if (unlikely(mem & 3 || len & 3)) mem_width = 0; - desc->lli.saddr = mem; - desc->lli.daddr = reg; - desc->lli.ctrla = ctrla - | ATC_SRC_WIDTH(mem_width) - | len >> mem_width; - desc->lli.ctrlb = ctrlb; - desc->len = len; + lli->saddr = mem; + lli->daddr = reg; + lli->ctrla = ctrla | + FIELD_PREP(ATC_SRC_WIDTH, mem_width) | + len >> mem_width; + lli->ctrlb = ctrlb; - atc_desc_chain(&first, &prev, desc); + atdma_sg->len = len; total_len += len; + + desc->sg[i].len = len; + atdma_lli_chain(desc, i); } break; case DMA_DEV_TO_MEM: reg_width = convert_buswidth(sconfig->src_addr_width); - ctrla |= ATC_SRC_WIDTH(reg_width); - ctrlb |= ATC_DST_ADDR_MODE_INCR - | ATC_SRC_ADDR_MODE_FIXED - | ATC_FC_PER2MEM - | ATC_SIF(atchan->per_if) | ATC_DIF(atchan->mem_if); + ctrla |= 
FIELD_PREP(ATC_SRC_WIDTH, reg_width); + ctrlb |= FIELD_PREP(ATC_DST_ADDR_MODE, ATC_DST_ADDR_MODE_INCR) | + FIELD_PREP(ATC_SRC_ADDR_MODE, + ATC_SRC_ADDR_MODE_FIXED) | + FIELD_PREP(ATC_FC, ATC_FC_PER2MEM) | + FIELD_PREP(ATC_SIF, atchan->per_if) | + FIELD_PREP(ATC_DIF, atchan->mem_if); reg = sconfig->src_addr; for_each_sg(sgl, sg, sg_len, i) { - struct at_desc *desc; + struct atdma_sg *atdma_sg = &desc->sg[i]; + struct at_lli *lli; u32 len; u32 mem; - desc = atc_desc_get(atchan); - if (!desc) + atdma_sg->lli = dma_pool_alloc(atdma->lli_pool, + GFP_NOWAIT, + &atdma_sg->lli_phys); + if (!atdma_sg->lli) goto err_desc_get; + lli = atdma_sg->lli; mem = sg_dma_address(sg); len = sg_dma_len(sg); @@ -1151,16 +1354,17 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, if (unlikely(mem & 3 || len & 3)) mem_width = 0; - desc->lli.saddr = reg; - desc->lli.daddr = mem; - desc->lli.ctrla = ctrla - | ATC_DST_WIDTH(mem_width) - | len >> reg_width; - desc->lli.ctrlb = ctrlb; - desc->len = len; + lli->saddr = reg; + lli->daddr = mem; + lli->ctrla = ctrla | + FIELD_PREP(ATC_DST_WIDTH, mem_width) | + len >> reg_width; + lli->ctrlb = ctrlb; - atc_desc_chain(&first, &prev, desc); + desc->sg[i].len = len; total_len += len; + + atdma_lli_chain(desc, i); } break; default: @@ -1168,21 +1372,16 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, } /* set end-of-link to the last link descriptor of list*/ - set_desc_eol(prev); + set_lli_eol(desc, i - 1); - /* First descriptor of the chain embedds additional information */ - first->txd.cookie = -EBUSY; - first->total_len = total_len; + desc->total_len = total_len; - /* first link descriptor of list is responsible of flags */ - first->txd.flags = flags; /* client is in control of this ack */ - - return &first->txd; + return vchan_tx_prep(&atchan->vc, &desc->vd, flags); err_desc_get: dev_err(chan2dev(chan), "not enough descriptors available\n"); err: - atc_desc_put(atchan, first); + atdma_desc_free(&desc->vd); return NULL; } @@ -1212,50 +1411,59 @@ err_out: */ static int atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc, - unsigned int period_index, dma_addr_t buf_addr, + unsigned int i, dma_addr_t buf_addr, unsigned int reg_width, size_t period_len, enum dma_transfer_direction direction) { + struct at_dma *atdma = to_at_dma(chan->device); struct at_dma_chan *atchan = to_at_dma_chan(chan); struct dma_slave_config *sconfig = &atchan->dma_sconfig; - u32 ctrla; + struct atdma_sg *atdma_sg = &desc->sg[i]; + struct at_lli *lli; - /* prepare common CRTLA value */ - ctrla = ATC_SCSIZE(sconfig->src_maxburst) - | ATC_DCSIZE(sconfig->dst_maxburst) - | ATC_DST_WIDTH(reg_width) - | ATC_SRC_WIDTH(reg_width) - | period_len >> reg_width; + atdma_sg->lli = dma_pool_alloc(atdma->lli_pool, GFP_ATOMIC, + &atdma_sg->lli_phys); + if (!atdma_sg->lli) + return -ENOMEM; + lli = atdma_sg->lli; switch (direction) { case DMA_MEM_TO_DEV: - desc->lli.saddr = buf_addr + (period_len * period_index); - desc->lli.daddr = sconfig->dst_addr; - desc->lli.ctrla = ctrla; - desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED - | ATC_SRC_ADDR_MODE_INCR - | ATC_FC_MEM2PER - | ATC_SIF(atchan->mem_if) - | ATC_DIF(atchan->per_if); - desc->len = period_len; + lli->saddr = buf_addr + (period_len * i); + lli->daddr = sconfig->dst_addr; + lli->ctrlb = FIELD_PREP(ATC_DST_ADDR_MODE, + ATC_DST_ADDR_MODE_FIXED) | + FIELD_PREP(ATC_SRC_ADDR_MODE, + ATC_SRC_ADDR_MODE_INCR) | + FIELD_PREP(ATC_FC, ATC_FC_MEM2PER) | + FIELD_PREP(ATC_SIF, atchan->mem_if) | + FIELD_PREP(ATC_DIF, 
atchan->per_if); + break; case DMA_DEV_TO_MEM: - desc->lli.saddr = sconfig->src_addr; - desc->lli.daddr = buf_addr + (period_len * period_index); - desc->lli.ctrla = ctrla; - desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR - | ATC_SRC_ADDR_MODE_FIXED - | ATC_FC_PER2MEM - | ATC_SIF(atchan->per_if) - | ATC_DIF(atchan->mem_if); - desc->len = period_len; + lli->saddr = sconfig->src_addr; + lli->daddr = buf_addr + (period_len * i); + lli->ctrlb = FIELD_PREP(ATC_DST_ADDR_MODE, + ATC_DST_ADDR_MODE_INCR) | + FIELD_PREP(ATC_SRC_ADDR_MODE, + ATC_SRC_ADDR_MODE_FIXED) | + FIELD_PREP(ATC_FC, ATC_FC_PER2MEM) | + FIELD_PREP(ATC_SIF, atchan->per_if) | + FIELD_PREP(ATC_DIF, atchan->mem_if); break; default: return -EINVAL; } + lli->ctrla = FIELD_PREP(ATC_SCSIZE, sconfig->src_maxburst) | + FIELD_PREP(ATC_DCSIZE, sconfig->dst_maxburst) | + FIELD_PREP(ATC_DST_WIDTH, reg_width) | + FIELD_PREP(ATC_SRC_WIDTH, reg_width) | + period_len >> reg_width; + desc->sg[i].len = period_len; + return 0; } @@ -1276,8 +1484,7 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, struct at_dma_chan *atchan = to_at_dma_chan(chan); struct at_dma_slave *atslave = chan->private; struct dma_slave_config *sconfig = &atchan->dma_sconfig; - struct at_desc *first = NULL; - struct at_desc *prev = NULL; + struct at_desc *desc; unsigned long was_cyclic; unsigned int reg_width; unsigned int periods = buf_len / period_len; @@ -1311,33 +1518,26 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, if (atc_dma_cyclic_check_values(reg_width, buf_addr, period_len)) goto err_out; + desc = kzalloc(struct_size(desc, sg, periods), GFP_ATOMIC); + if (!desc) + goto err_out; + desc->sglen = periods; + /* build cyclic linked list */ for (i = 0; i < periods; i++) { - struct at_desc *desc; - - desc = atc_desc_get(atchan); - if (!desc) - goto err_desc_get; - if (atc_dma_cyclic_fill_desc(chan, desc, i, buf_addr, reg_width, period_len, direction)) - goto err_desc_get; - - atc_desc_chain(&first, &prev, desc); + goto err_fill_desc; + atdma_lli_chain(desc, i); } - + desc->total_len = buf_len; /* lets make a cyclic list */ - prev->lli.dscr = first->txd.phys; + desc->sg[i - 1].lli->dscr = desc->sg[0].lli_phys; - /* First descriptor of the chain embedds additional information */ - first->txd.cookie = -EBUSY; - first->total_len = buf_len; + return vchan_tx_prep(&atchan->vc, &desc->vd, flags); - return &first->txd; - -err_desc_get: - dev_err(chan2dev(chan), "not enough descriptors available\n"); - atc_desc_put(atchan, first); +err_fill_desc: + atdma_desc_free(&desc->vd); err_out: clear_bit(ATC_IS_CYCLIC, &atchan->status); return NULL; @@ -1366,17 +1566,17 @@ static int atc_pause(struct dma_chan *chan) { struct at_dma_chan *atchan = to_at_dma_chan(chan); struct at_dma *atdma = to_at_dma(chan->device); - int chan_id = atchan->chan_common.chan_id; + int chan_id = atchan->vc.chan.chan_id; unsigned long flags; dev_vdbg(chan2dev(chan), "%s\n", __func__); - spin_lock_irqsave(&atchan->lock, flags); + spin_lock_irqsave(&atchan->vc.lock, flags); dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id)); set_bit(ATC_IS_PAUSED, &atchan->status); - spin_unlock_irqrestore(&atchan->lock, flags); + spin_unlock_irqrestore(&atchan->vc.lock, flags); return 0; } @@ -1385,7 +1585,7 @@ static int atc_resume(struct dma_chan *chan) { struct at_dma_chan *atchan = to_at_dma_chan(chan); struct at_dma *atdma = to_at_dma(chan->device); - int chan_id = atchan->chan_common.chan_id; + int chan_id = atchan->vc.chan.chan_id; unsigned long flags; 
dev_vdbg(chan2dev(chan), "%s\n", __func__); @@ -1393,12 +1593,12 @@ static int atc_resume(struct dma_chan *chan) if (!atc_chan_is_paused(atchan)) return 0; - spin_lock_irqsave(&atchan->lock, flags); + spin_lock_irqsave(&atchan->vc.lock, flags); dma_writel(atdma, CHDR, AT_DMA_RES(chan_id)); clear_bit(ATC_IS_PAUSED, &atchan->status); - spin_unlock_irqrestore(&atchan->lock, flags); + spin_unlock_irqrestore(&atchan->vc.lock, flags); return 0; } @@ -1407,9 +1607,11 @@ static int atc_terminate_all(struct dma_chan *chan) { struct at_dma_chan *atchan = to_at_dma_chan(chan); struct at_dma *atdma = to_at_dma(chan->device); - int chan_id = atchan->chan_common.chan_id; + int chan_id = atchan->vc.chan.chan_id; unsigned long flags; + LIST_HEAD(list); + dev_vdbg(chan2dev(chan), "%s\n", __func__); /* @@ -1418,7 +1620,7 @@ static int atc_terminate_all(struct dma_chan *chan) * channel. We still have to poll the channel enable bit due * to AHB/HSB limitations. */ - spin_lock_irqsave(&atchan->lock, flags); + spin_lock_irqsave(&atchan->vc.lock, flags); /* disabling channel: must also remove suspend state */ dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask); @@ -1427,15 +1629,20 @@ static int atc_terminate_all(struct dma_chan *chan) while (dma_readl(atdma, CHSR) & atchan->mask) cpu_relax(); - /* active_list entries will end up before queued entries */ - list_splice_tail_init(&atchan->queue, &atchan->free_list); - list_splice_tail_init(&atchan->active_list, &atchan->free_list); + if (atchan->desc) { + vchan_terminate_vdesc(&atchan->desc->vd); + atchan->desc = NULL; + } + + vchan_get_all_descriptors(&atchan->vc, &list); clear_bit(ATC_IS_PAUSED, &atchan->status); /* if channel dedicated to cyclic operations, free it */ clear_bit(ATC_IS_CYCLIC, &atchan->status); - spin_unlock_irqrestore(&atchan->lock, flags); + spin_unlock_irqrestore(&atchan->vc.lock, flags); + + vchan_dma_desc_free_list(&atchan->vc, &list); return 0; } @@ -1457,60 +1664,43 @@ atc_tx_status(struct dma_chan *chan, { struct at_dma_chan *atchan = to_at_dma_chan(chan); unsigned long flags; - enum dma_status ret; - int bytes = 0; - - ret = dma_cookie_status(chan, cookie, txstate); - if (ret == DMA_COMPLETE) - return ret; - /* - * There's no point calculating the residue if there's - * no txstate to store the value. - */ - if (!txstate) - return DMA_ERROR; + enum dma_status dma_status; + u32 residue; + int ret; - spin_lock_irqsave(&atchan->lock, flags); + dma_status = dma_cookie_status(chan, cookie, txstate); + if (dma_status == DMA_COMPLETE || !txstate) + return dma_status; + spin_lock_irqsave(&atchan->vc.lock, flags); /* Get number of bytes left in the active transactions */ - bytes = atc_get_bytes_left(chan, cookie); - - spin_unlock_irqrestore(&atchan->lock, flags); + ret = atc_get_residue(chan, cookie, &residue); + spin_unlock_irqrestore(&atchan->vc.lock, flags); - if (unlikely(bytes < 0)) { + if (unlikely(ret < 0)) { dev_vdbg(chan2dev(chan), "get residual bytes error\n"); return DMA_ERROR; } else { - dma_set_residue(txstate, bytes); + dma_set_residue(txstate, residue); } - dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d residue = %d\n", - ret, cookie, bytes); + dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d residue = %u\n", + dma_status, cookie, residue); - return ret; + return dma_status; } -/** - * atc_issue_pending - takes the first transaction descriptor in the pending - * queue and starts the transfer. 
- * @chan: target DMA channel - */ static void atc_issue_pending(struct dma_chan *chan) { struct at_dma_chan *atchan = to_at_dma_chan(chan); - struct at_desc *desc; unsigned long flags; - dev_vdbg(chan2dev(chan), "issue_pending\n"); - - spin_lock_irqsave(&atchan->lock, flags); - if (atc_chan_is_enabled(atchan) || list_empty(&atchan->queue)) - return spin_unlock_irqrestore(&atchan->lock, flags); - - desc = atc_first_queued(atchan); - list_move_tail(&desc->desc_node, &atchan->active_list); - atc_dostart(atchan, desc); - spin_unlock_irqrestore(&atchan->lock, flags); + spin_lock_irqsave(&atchan->vc.lock, flags); + if (vchan_issue_pending(&atchan->vc) && !atchan->desc) { + if (!(atc_chan_is_enabled(atchan))) + atc_dostart(atchan); + } + spin_unlock_irqrestore(&atchan->vc.lock, flags); } /** @@ -1523,9 +1713,7 @@ static int atc_alloc_chan_resources(struct dma_chan *chan) { struct at_dma_chan *atchan = to_at_dma_chan(chan); struct at_dma *atdma = to_at_dma(chan->device); - struct at_desc *desc; struct at_dma_slave *atslave; - int i; u32 cfg; dev_vdbg(chan2dev(chan), "alloc_chan_resources\n"); @@ -1536,11 +1724,6 @@ static int atc_alloc_chan_resources(struct dma_chan *chan) return -EIO; } - if (!list_empty(&atchan->free_list)) { - dev_dbg(chan2dev(chan), "can't allocate channel resources (channel not freed from a previous use)\n"); - return -EIO; - } - cfg = ATC_DEFAULT_CFG; atslave = chan->private; @@ -1549,33 +1732,17 @@ static int atc_alloc_chan_resources(struct dma_chan *chan) * We need controller-specific data to set up slave * transfers. */ - BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev); + BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_device.dev); /* if cfg configuration specified take it instead of default */ if (atslave->cfg) cfg = atslave->cfg; } - /* Allocate initial pool of descriptors */ - for (i = 0; i < init_nr_desc_per_channel; i++) { - desc = atc_alloc_descriptor(chan, GFP_KERNEL); - if (!desc) { - dev_err(atdma->dma_common.dev, - "Only %d initial descriptors\n", i); - break; - } - list_add_tail(&desc->desc_node, &atchan->free_list); - } - - dma_cookie_init(chan); - /* channel parameters */ channel_writel(atchan, CFG, cfg); - dev_dbg(chan2dev(chan), - "alloc_chan_resources: allocated %d descriptors\n", i); - - return i; + return 0; } /** @@ -1585,22 +1752,10 @@ static int atc_alloc_chan_resources(struct dma_chan *chan) static void atc_free_chan_resources(struct dma_chan *chan) { struct at_dma_chan *atchan = to_at_dma_chan(chan); - struct at_dma *atdma = to_at_dma(chan->device); - struct at_desc *desc, *_desc; - LIST_HEAD(list); - /* ASSERT: channel is idle */ - BUG_ON(!list_empty(&atchan->active_list)); - BUG_ON(!list_empty(&atchan->queue)); BUG_ON(atc_chan_is_enabled(atchan)); - list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) { - dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc); - list_del(&desc->desc_node); - /* free link descriptor */ - dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys); - } - list_splice_init(&atchan->free_list, &list); + vchan_free_chan_resources(to_virt_chan(chan)); atchan->status = 0; /* @@ -1651,14 +1806,13 @@ static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec, return NULL; } - atslave->cfg = ATC_DST_H2SEL_HW | ATC_SRC_H2SEL_HW; + atslave->cfg = ATC_DST_H2SEL | ATC_SRC_H2SEL; /* * We can fill both SRC_PER and DST_PER, one of these fields will be * ignored depending on DMA transfer direction. 
*/ per_id = dma_spec->args[1] & AT91_DMA_CFG_PER_ID_MASK; - atslave->cfg |= ATC_DST_PER_MSB(per_id) | ATC_DST_PER(per_id) - | ATC_SRC_PER_MSB(per_id) | ATC_SRC_PER(per_id); + atslave->cfg |= ATC_DST_PER_ID(per_id) | ATC_SRC_PER_ID(per_id); /* * We have to translate the value we get from the device tree since * the half FIFO configuration value had to be 0 to keep backward @@ -1666,14 +1820,16 @@ static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec, */ switch (dma_spec->args[1] & AT91_DMA_CFG_FIFOCFG_MASK) { case AT91_DMA_CFG_FIFOCFG_ALAP: - atslave->cfg |= ATC_FIFOCFG_LARGESTBURST; + atslave->cfg |= FIELD_PREP(ATC_FIFOCFG, + ATC_FIFOCFG_LARGESTBURST); break; case AT91_DMA_CFG_FIFOCFG_ASAP: - atslave->cfg |= ATC_FIFOCFG_ENOUGHSPACE; + atslave->cfg |= FIELD_PREP(ATC_FIFOCFG, + ATC_FIFOCFG_ENOUGHSPACE); break; case AT91_DMA_CFG_FIFOCFG_HALF: default: - atslave->cfg |= ATC_FIFOCFG_HALFFIFO; + atslave->cfg |= FIELD_PREP(ATC_FIFOCFG, ATC_FIFOCFG_HALFFIFO); } atslave->dma_dev = &dmac_pdev->dev; @@ -1768,9 +1924,7 @@ static void at_dma_off(struct at_dma *atdma) static int __init at_dma_probe(struct platform_device *pdev) { - struct resource *io; struct at_dma *atdma; - size_t size; int irq; int err; int i; @@ -1790,44 +1944,31 @@ static int __init at_dma_probe(struct platform_device *pdev) if (!plat_dat) return -ENODEV; - io = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!io) - return -EINVAL; + atdma = devm_kzalloc(&pdev->dev, + struct_size(atdma, chan, plat_dat->nr_channels), + GFP_KERNEL); + if (!atdma) + return -ENOMEM; + + atdma->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(atdma->regs)) + return PTR_ERR(atdma->regs); irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; - size = sizeof(struct at_dma); - size += plat_dat->nr_channels * sizeof(struct at_dma_chan); - atdma = kzalloc(size, GFP_KERNEL); - if (!atdma) - return -ENOMEM; - /* discover transaction capabilities */ - atdma->dma_common.cap_mask = plat_dat->cap_mask; + atdma->dma_device.cap_mask = plat_dat->cap_mask; atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1; - size = resource_size(io); - if (!request_mem_region(io->start, size, pdev->dev.driver->name)) { - err = -EBUSY; - goto err_kfree; - } - - atdma->regs = ioremap(io->start, size); - if (!atdma->regs) { - err = -ENOMEM; - goto err_release_r; - } + atdma->clk = devm_clk_get(&pdev->dev, "dma_clk"); + if (IS_ERR(atdma->clk)) + return PTR_ERR(atdma->clk); - atdma->clk = clk_get(&pdev->dev, "dma_clk"); - if (IS_ERR(atdma->clk)) { - err = PTR_ERR(atdma->clk); - goto err_clk; - } err = clk_prepare_enable(atdma->clk); if (err) - goto err_clk_prepare; + return err; /* force dma off, just in case */ at_dma_off(atdma); @@ -1839,11 +1980,11 @@ static int __init at_dma_probe(struct platform_device *pdev) platform_set_drvdata(pdev, atdma); /* create a pool of consistent memory blocks for hardware descriptors */ - atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool", - &pdev->dev, sizeof(struct at_desc), - 4 /* word alignment */, 0); - if (!atdma->dma_desc_pool) { - dev_err(&pdev->dev, "No memory for descriptors dma pool\n"); + atdma->lli_pool = dma_pool_create("at_hdmac_lli_pool", + &pdev->dev, sizeof(struct at_lli), + 4 /* word alignment */, 0); + if (!atdma->lli_pool) { + dev_err(&pdev->dev, "Unable to allocate DMA LLI descriptor pool\n"); err = -ENOMEM; goto err_desc_pool_create; } @@ -1862,73 +2003,66 @@ static int __init at_dma_probe(struct platform_device *pdev) cpu_relax(); /* initialize channels related values */ - 
INIT_LIST_HEAD(&atdma->dma_common.channels); + INIT_LIST_HEAD(&atdma->dma_device.channels); for (i = 0; i < plat_dat->nr_channels; i++) { struct at_dma_chan *atchan = &atdma->chan[i]; atchan->mem_if = AT_DMA_MEM_IF; atchan->per_if = AT_DMA_PER_IF; - atchan->chan_common.device = &atdma->dma_common; - dma_cookie_init(&atchan->chan_common); - list_add_tail(&atchan->chan_common.device_node, - &atdma->dma_common.channels); atchan->ch_regs = atdma->regs + ch_regs(i); - spin_lock_init(&atchan->lock); atchan->mask = 1 << i; - INIT_LIST_HEAD(&atchan->active_list); - INIT_LIST_HEAD(&atchan->queue); - INIT_LIST_HEAD(&atchan->free_list); - - tasklet_setup(&atchan->tasklet, atc_tasklet); + atchan->atdma = atdma; + atchan->vc.desc_free = atdma_desc_free; + vchan_init(&atchan->vc, &atdma->dma_device); atc_enable_chan_irq(atdma, i); } /* set base routines */ - atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources; - atdma->dma_common.device_free_chan_resources = atc_free_chan_resources; - atdma->dma_common.device_tx_status = atc_tx_status; - atdma->dma_common.device_issue_pending = atc_issue_pending; - atdma->dma_common.dev = &pdev->dev; + atdma->dma_device.device_alloc_chan_resources = atc_alloc_chan_resources; + atdma->dma_device.device_free_chan_resources = atc_free_chan_resources; + atdma->dma_device.device_tx_status = atc_tx_status; + atdma->dma_device.device_issue_pending = atc_issue_pending; + atdma->dma_device.dev = &pdev->dev; /* set prep routines based on capability */ - if (dma_has_cap(DMA_INTERLEAVE, atdma->dma_common.cap_mask)) - atdma->dma_common.device_prep_interleaved_dma = atc_prep_dma_interleaved; + if (dma_has_cap(DMA_INTERLEAVE, atdma->dma_device.cap_mask)) + atdma->dma_device.device_prep_interleaved_dma = atc_prep_dma_interleaved; - if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask)) - atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy; + if (dma_has_cap(DMA_MEMCPY, atdma->dma_device.cap_mask)) + atdma->dma_device.device_prep_dma_memcpy = atc_prep_dma_memcpy; - if (dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask)) { - atdma->dma_common.device_prep_dma_memset = atc_prep_dma_memset; - atdma->dma_common.device_prep_dma_memset_sg = atc_prep_dma_memset_sg; - atdma->dma_common.fill_align = DMAENGINE_ALIGN_4_BYTES; + if (dma_has_cap(DMA_MEMSET, atdma->dma_device.cap_mask)) { + atdma->dma_device.device_prep_dma_memset = atc_prep_dma_memset; + atdma->dma_device.device_prep_dma_memset_sg = atc_prep_dma_memset_sg; + atdma->dma_device.fill_align = DMAENGINE_ALIGN_4_BYTES; } - if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) { - atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg; + if (dma_has_cap(DMA_SLAVE, atdma->dma_device.cap_mask)) { + atdma->dma_device.device_prep_slave_sg = atc_prep_slave_sg; /* controller can do slave DMA: can trigger cyclic transfers */ - dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask); - atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic; - atdma->dma_common.device_config = atc_config; - atdma->dma_common.device_pause = atc_pause; - atdma->dma_common.device_resume = atc_resume; - atdma->dma_common.device_terminate_all = atc_terminate_all; - atdma->dma_common.src_addr_widths = ATC_DMA_BUSWIDTHS; - atdma->dma_common.dst_addr_widths = ATC_DMA_BUSWIDTHS; - atdma->dma_common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); - atdma->dma_common.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; + dma_cap_set(DMA_CYCLIC, atdma->dma_device.cap_mask); + atdma->dma_device.device_prep_dma_cyclic = 
atc_prep_dma_cyclic; + atdma->dma_device.device_config = atc_config; + atdma->dma_device.device_pause = atc_pause; + atdma->dma_device.device_resume = atc_resume; + atdma->dma_device.device_terminate_all = atc_terminate_all; + atdma->dma_device.src_addr_widths = ATC_DMA_BUSWIDTHS; + atdma->dma_device.dst_addr_widths = ATC_DMA_BUSWIDTHS; + atdma->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); + atdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; } dma_writel(atdma, EN, AT_DMA_ENABLE); dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s), %d channels\n", - dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "", - dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask) ? "set " : "", - dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "", + dma_has_cap(DMA_MEMCPY, atdma->dma_device.cap_mask) ? "cpy " : "", + dma_has_cap(DMA_MEMSET, atdma->dma_device.cap_mask) ? "set " : "", + dma_has_cap(DMA_SLAVE, atdma->dma_device.cap_mask) ? "slave " : "", plat_dat->nr_channels); - err = dma_async_device_register(&atdma->dma_common); + err = dma_async_device_register(&atdma->dma_device); if (err) { dev_err(&pdev->dev, "Unable to register: %d.\n", err); goto err_dma_async_device_register; @@ -1951,24 +2085,15 @@ static int __init at_dma_probe(struct platform_device *pdev) return 0; err_of_dma_controller_register: - dma_async_device_unregister(&atdma->dma_common); + dma_async_device_unregister(&atdma->dma_device); err_dma_async_device_register: dma_pool_destroy(atdma->memset_pool); err_memset_pool_create: - dma_pool_destroy(atdma->dma_desc_pool); + dma_pool_destroy(atdma->lli_pool); err_desc_pool_create: free_irq(platform_get_irq(pdev, 0), atdma); err_irq: clk_disable_unprepare(atdma->clk); -err_clk_prepare: - clk_put(atdma->clk); -err_clk: - iounmap(atdma->regs); - atdma->regs = NULL; -err_release_r: - release_mem_region(io->start, size); -err_kfree: - kfree(atdma); return err; } @@ -1976,38 +2101,24 @@ static int at_dma_remove(struct platform_device *pdev) { struct at_dma *atdma = platform_get_drvdata(pdev); struct dma_chan *chan, *_chan; - struct resource *io; at_dma_off(atdma); if (pdev->dev.of_node) of_dma_controller_free(pdev->dev.of_node); - dma_async_device_unregister(&atdma->dma_common); + dma_async_device_unregister(&atdma->dma_device); dma_pool_destroy(atdma->memset_pool); - dma_pool_destroy(atdma->dma_desc_pool); + dma_pool_destroy(atdma->lli_pool); free_irq(platform_get_irq(pdev, 0), atdma); - list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels, + list_for_each_entry_safe(chan, _chan, &atdma->dma_device.channels, device_node) { - struct at_dma_chan *atchan = to_at_dma_chan(chan); - /* Disable interrupts */ atc_disable_chan_irq(atdma, chan->chan_id); - - tasklet_kill(&atchan->tasklet); list_del(&chan->device_node); } clk_disable_unprepare(atdma->clk); - clk_put(atdma->clk); - - iounmap(atdma->regs); - atdma->regs = NULL; - - io = platform_get_resource(pdev, IORESOURCE_MEM, 0); - release_mem_region(io->start, resource_size(io)); - - kfree(atdma); return 0; } @@ -2025,7 +2136,7 @@ static int at_dma_prepare(struct device *dev) struct at_dma *atdma = dev_get_drvdata(dev); struct dma_chan *chan, *_chan; - list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels, + list_for_each_entry_safe(chan, _chan, &atdma->dma_device.channels, device_node) { struct at_dma_chan *atchan = to_at_dma_chan(chan); /* wait for transaction completion (except in cyclic case) */ @@ -2037,7 +2148,7 @@ static int at_dma_prepare(struct device 
*dev) static void atc_suspend_cyclic(struct at_dma_chan *atchan) { - struct dma_chan *chan = &atchan->chan_common; + struct dma_chan *chan = &atchan->vc.chan; /* Channel should be paused by user * do it anyway even if it is not done already */ @@ -2060,7 +2171,7 @@ static int at_dma_suspend_noirq(struct device *dev) struct dma_chan *chan, *_chan; /* preserve data */ - list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels, + list_for_each_entry_safe(chan, _chan, &atdma->dma_device.channels, device_node) { struct at_dma_chan *atchan = to_at_dma_chan(chan); @@ -2078,7 +2189,7 @@ static int at_dma_suspend_noirq(struct device *dev) static void atc_resume_cyclic(struct at_dma_chan *atchan) { - struct at_dma *atdma = to_at_dma(atchan->chan_common.device); + struct at_dma *atdma = to_at_dma(atchan->vc.chan.device); /* restore channel status for cyclic descriptors list: * next descriptor in the cyclic list at the time of suspend */ @@ -2110,7 +2221,7 @@ static int at_dma_resume_noirq(struct device *dev) /* restore saved data */ dma_writel(atdma, EBCIER, atdma->save_imr); - list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels, + list_for_each_entry_safe(chan, _chan, &atdma->dma_device.channels, device_node) { struct at_dma_chan *atchan = to_at_dma_chan(chan); @@ -2121,7 +2232,7 @@ static int at_dma_resume_noirq(struct device *dev) return 0; } -static const struct dev_pm_ops at_dma_dev_pm_ops = { +static const struct dev_pm_ops __maybe_unused at_dma_dev_pm_ops = { .prepare = at_dma_prepare, .suspend_noirq = at_dma_suspend_noirq, .resume_noirq = at_dma_resume_noirq, @@ -2133,7 +2244,7 @@ static struct platform_driver at_dma_driver = { .id_table = atdma_devtypes, .driver = { .name = "at_hdmac", - .pm = &at_dma_dev_pm_ops, + .pm = pm_ptr(&at_dma_dev_pm_ops), .of_match_table = of_match_ptr(atmel_dma_dt_ids), }, }; @@ -2152,5 +2263,6 @@ module_exit(at_dma_exit); MODULE_DESCRIPTION("Atmel AHB DMA Controller driver"); MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>"); +MODULE_AUTHOR("Tudor Ambarus <tudor.ambarus@microchip.com>"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:at_hdmac"); diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h deleted file mode 100644 index d4d382d74607..000000000000 --- a/drivers/dma/at_hdmac_regs.h +++ /dev/null @@ -1,478 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Header file for the Atmel AHB DMA Controller driver - * - * Copyright (C) 2008 Atmel Corporation - */ -#ifndef AT_HDMAC_REGS_H -#define AT_HDMAC_REGS_H - -#define AT_DMA_MAX_NR_CHANNELS 8 - - -#define AT_DMA_GCFG 0x00 /* Global Configuration Register */ -#define AT_DMA_IF_BIGEND(i) (0x1 << (i)) /* AHB-Lite Interface i in Big-endian mode */ -#define AT_DMA_ARB_CFG (0x1 << 4) /* Arbiter mode. 
*/ -#define AT_DMA_ARB_CFG_FIXED (0x0 << 4) -#define AT_DMA_ARB_CFG_ROUND_ROBIN (0x1 << 4) - -#define AT_DMA_EN 0x04 /* Controller Enable Register */ -#define AT_DMA_ENABLE (0x1 << 0) - -#define AT_DMA_SREQ 0x08 /* Software Single Request Register */ -#define AT_DMA_SSREQ(x) (0x1 << ((x) << 1)) /* Request a source single transfer on channel x */ -#define AT_DMA_DSREQ(x) (0x1 << (1 + ((x) << 1))) /* Request a destination single transfer on channel x */ - -#define AT_DMA_CREQ 0x0C /* Software Chunk Transfer Request Register */ -#define AT_DMA_SCREQ(x) (0x1 << ((x) << 1)) /* Request a source chunk transfer on channel x */ -#define AT_DMA_DCREQ(x) (0x1 << (1 + ((x) << 1))) /* Request a destination chunk transfer on channel x */ - -#define AT_DMA_LAST 0x10 /* Software Last Transfer Flag Register */ -#define AT_DMA_SLAST(x) (0x1 << ((x) << 1)) /* This src rq is last tx of buffer on channel x */ -#define AT_DMA_DLAST(x) (0x1 << (1 + ((x) << 1))) /* This dst rq is last tx of buffer on channel x */ - -#define AT_DMA_SYNC 0x14 /* Request Synchronization Register */ -#define AT_DMA_SYR(h) (0x1 << (h)) /* Synchronize handshake line h */ - -/* Error, Chained Buffer transfer completed and Buffer transfer completed Interrupt registers */ -#define AT_DMA_EBCIER 0x18 /* Enable register */ -#define AT_DMA_EBCIDR 0x1C /* Disable register */ -#define AT_DMA_EBCIMR 0x20 /* Mask Register */ -#define AT_DMA_EBCISR 0x24 /* Status Register */ -#define AT_DMA_CBTC_OFFSET 8 -#define AT_DMA_ERR_OFFSET 16 -#define AT_DMA_BTC(x) (0x1 << (x)) -#define AT_DMA_CBTC(x) (0x1 << (AT_DMA_CBTC_OFFSET + (x))) -#define AT_DMA_ERR(x) (0x1 << (AT_DMA_ERR_OFFSET + (x))) - -#define AT_DMA_CHER 0x28 /* Channel Handler Enable Register */ -#define AT_DMA_ENA(x) (0x1 << (x)) -#define AT_DMA_SUSP(x) (0x1 << ( 8 + (x))) -#define AT_DMA_KEEP(x) (0x1 << (24 + (x))) - -#define AT_DMA_CHDR 0x2C /* Channel Handler Disable Register */ -#define AT_DMA_DIS(x) (0x1 << (x)) -#define AT_DMA_RES(x) (0x1 << ( 8 + (x))) - -#define AT_DMA_CHSR 0x30 /* Channel Handler Status Register */ -#define AT_DMA_EMPT(x) (0x1 << (16 + (x))) -#define AT_DMA_STAL(x) (0x1 << (24 + (x))) - - -#define AT_DMA_CH_REGS_BASE 0x3C /* Channel registers base address */ -#define ch_regs(x) (AT_DMA_CH_REGS_BASE + (x) * 0x28) /* Channel x base addr */ - -/* Hardware register offset for each channel */ -#define ATC_SADDR_OFFSET 0x00 /* Source Address Register */ -#define ATC_DADDR_OFFSET 0x04 /* Destination Address Register */ -#define ATC_DSCR_OFFSET 0x08 /* Descriptor Address Register */ -#define ATC_CTRLA_OFFSET 0x0C /* Control A Register */ -#define ATC_CTRLB_OFFSET 0x10 /* Control B Register */ -#define ATC_CFG_OFFSET 0x14 /* Configuration Register */ -#define ATC_SPIP_OFFSET 0x18 /* Src PIP Configuration Register */ -#define ATC_DPIP_OFFSET 0x1C /* Dst PIP Configuration Register */ - - -/* Bitfield definitions */ - -/* Bitfields in DSCR */ -#define ATC_DSCR_IF(i) (0x3 & (i)) /* Dsc feched via AHB-Lite Interface i */ - -/* Bitfields in CTRLA */ -#define ATC_BTSIZE_MAX 0xFFFFUL /* Maximum Buffer Transfer Size */ -#define ATC_BTSIZE(x) (ATC_BTSIZE_MAX & (x)) /* Buffer Transfer Size */ -#define ATC_SCSIZE_MASK (0x7 << 16) /* Source Chunk Transfer Size */ -#define ATC_SCSIZE(x) (ATC_SCSIZE_MASK & ((x) << 16)) -#define ATC_SCSIZE_1 (0x0 << 16) -#define ATC_SCSIZE_4 (0x1 << 16) -#define ATC_SCSIZE_8 (0x2 << 16) -#define ATC_SCSIZE_16 (0x3 << 16) -#define ATC_SCSIZE_32 (0x4 << 16) -#define ATC_SCSIZE_64 (0x5 << 16) -#define ATC_SCSIZE_128 (0x6 << 16) -#define ATC_SCSIZE_256 (0x7 << 
16) -#define ATC_DCSIZE_MASK (0x7 << 20) /* Destination Chunk Transfer Size */ -#define ATC_DCSIZE(x) (ATC_DCSIZE_MASK & ((x) << 20)) -#define ATC_DCSIZE_1 (0x0 << 20) -#define ATC_DCSIZE_4 (0x1 << 20) -#define ATC_DCSIZE_8 (0x2 << 20) -#define ATC_DCSIZE_16 (0x3 << 20) -#define ATC_DCSIZE_32 (0x4 << 20) -#define ATC_DCSIZE_64 (0x5 << 20) -#define ATC_DCSIZE_128 (0x6 << 20) -#define ATC_DCSIZE_256 (0x7 << 20) -#define ATC_SRC_WIDTH_MASK (0x3 << 24) /* Source Single Transfer Size */ -#define ATC_SRC_WIDTH(x) ((x) << 24) -#define ATC_SRC_WIDTH_BYTE (0x0 << 24) -#define ATC_SRC_WIDTH_HALFWORD (0x1 << 24) -#define ATC_SRC_WIDTH_WORD (0x2 << 24) -#define ATC_REG_TO_SRC_WIDTH(r) (((r) >> 24) & 0x3) -#define ATC_DST_WIDTH_MASK (0x3 << 28) /* Destination Single Transfer Size */ -#define ATC_DST_WIDTH(x) ((x) << 28) -#define ATC_DST_WIDTH_BYTE (0x0 << 28) -#define ATC_DST_WIDTH_HALFWORD (0x1 << 28) -#define ATC_DST_WIDTH_WORD (0x2 << 28) -#define ATC_DONE (0x1 << 31) /* Tx Done (only written back in descriptor) */ - -/* Bitfields in CTRLB */ -#define ATC_SIF(i) (0x3 & (i)) /* Src tx done via AHB-Lite Interface i */ -#define ATC_DIF(i) ((0x3 & (i)) << 4) /* Dst tx done via AHB-Lite Interface i */ - /* Specify AHB interfaces */ -#define AT_DMA_MEM_IF 0 /* interface 0 as memory interface */ -#define AT_DMA_PER_IF 1 /* interface 1 as peripheral interface */ - -#define ATC_SRC_PIP (0x1 << 8) /* Source Picture-in-Picture enabled */ -#define ATC_DST_PIP (0x1 << 12) /* Destination Picture-in-Picture enabled */ -#define ATC_SRC_DSCR_DIS (0x1 << 16) /* Src Descriptor fetch disable */ -#define ATC_DST_DSCR_DIS (0x1 << 20) /* Dst Descriptor fetch disable */ -#define ATC_FC_MASK (0x7 << 21) /* Choose Flow Controller */ -#define ATC_FC_MEM2MEM (0x0 << 21) /* Mem-to-Mem (DMA) */ -#define ATC_FC_MEM2PER (0x1 << 21) /* Mem-to-Periph (DMA) */ -#define ATC_FC_PER2MEM (0x2 << 21) /* Periph-to-Mem (DMA) */ -#define ATC_FC_PER2PER (0x3 << 21) /* Periph-to-Periph (DMA) */ -#define ATC_FC_PER2MEM_PER (0x4 << 21) /* Periph-to-Mem (Peripheral) */ -#define ATC_FC_MEM2PER_PER (0x5 << 21) /* Mem-to-Periph (Peripheral) */ -#define ATC_FC_PER2PER_SRCPER (0x6 << 21) /* Periph-to-Periph (Src Peripheral) */ -#define ATC_FC_PER2PER_DSTPER (0x7 << 21) /* Periph-to-Periph (Dst Peripheral) */ -#define ATC_SRC_ADDR_MODE_MASK (0x3 << 24) -#define ATC_SRC_ADDR_MODE_INCR (0x0 << 24) /* Incrementing Mode */ -#define ATC_SRC_ADDR_MODE_DECR (0x1 << 24) /* Decrementing Mode */ -#define ATC_SRC_ADDR_MODE_FIXED (0x2 << 24) /* Fixed Mode */ -#define ATC_DST_ADDR_MODE_MASK (0x3 << 28) -#define ATC_DST_ADDR_MODE_INCR (0x0 << 28) /* Incrementing Mode */ -#define ATC_DST_ADDR_MODE_DECR (0x1 << 28) /* Decrementing Mode */ -#define ATC_DST_ADDR_MODE_FIXED (0x2 << 28) /* Fixed Mode */ -#define ATC_IEN (0x1 << 30) /* BTC interrupt enable (active low) */ -#define ATC_AUTO (0x1 << 31) /* Auto multiple buffer tx enable */ - -/* Bitfields in CFG */ -#define ATC_PER_MSB(h) ((0x30U & (h)) >> 4) /* Extract most significant bits of a handshaking identifier */ - -#define ATC_SRC_PER(h) (0xFU & (h)) /* Channel src rq associated with periph handshaking ifc h */ -#define ATC_DST_PER(h) ((0xFU & (h)) << 4) /* Channel dst rq associated with periph handshaking ifc h */ -#define ATC_SRC_REP (0x1 << 8) /* Source Replay Mod */ -#define ATC_SRC_H2SEL (0x1 << 9) /* Source Handshaking Mod */ -#define ATC_SRC_H2SEL_SW (0x0 << 9) -#define ATC_SRC_H2SEL_HW (0x1 << 9) -#define ATC_SRC_PER_MSB(h) (ATC_PER_MSB(h) << 10) /* Channel src rq (most significant bits) */ -#define 
ATC_DST_REP (0x1 << 12) /* Destination Replay Mod */ -#define ATC_DST_H2SEL (0x1 << 13) /* Destination Handshaking Mod */ -#define ATC_DST_H2SEL_SW (0x0 << 13) -#define ATC_DST_H2SEL_HW (0x1 << 13) -#define ATC_DST_PER_MSB(h) (ATC_PER_MSB(h) << 14) /* Channel dst rq (most significant bits) */ -#define ATC_SOD (0x1 << 16) /* Stop On Done */ -#define ATC_LOCK_IF (0x1 << 20) /* Interface Lock */ -#define ATC_LOCK_B (0x1 << 21) /* AHB Bus Lock */ -#define ATC_LOCK_IF_L (0x1 << 22) /* Master Interface Arbiter Lock */ -#define ATC_LOCK_IF_L_CHUNK (0x0 << 22) -#define ATC_LOCK_IF_L_BUFFER (0x1 << 22) -#define ATC_AHB_PROT_MASK (0x7 << 24) /* AHB Protection */ -#define ATC_FIFOCFG_MASK (0x3 << 28) /* FIFO Request Configuration */ -#define ATC_FIFOCFG_LARGESTBURST (0x0 << 28) -#define ATC_FIFOCFG_HALFFIFO (0x1 << 28) -#define ATC_FIFOCFG_ENOUGHSPACE (0x2 << 28) - -/* Bitfields in SPIP */ -#define ATC_SPIP_HOLE(x) (0xFFFFU & (x)) -#define ATC_SPIP_BOUNDARY(x) ((0x3FF & (x)) << 16) - -/* Bitfields in DPIP */ -#define ATC_DPIP_HOLE(x) (0xFFFFU & (x)) -#define ATC_DPIP_BOUNDARY(x) ((0x3FF & (x)) << 16) - - -/*-- descriptors -----------------------------------------------------*/ - -/* LLI == Linked List Item; aka DMA buffer descriptor */ -struct at_lli { - /* values that are not changed by hardware */ - u32 saddr; - u32 daddr; - /* value that may get written back: */ - u32 ctrla; - /* more values that are not changed by hardware */ - u32 ctrlb; - u32 dscr; /* chain to next lli */ -}; - -/** - * struct at_desc - software descriptor - * @at_lli: hardware lli structure - * @txd: support for the async_tx api - * @desc_node: node on the channed descriptors list - * @len: descriptor byte count - * @total_len: total transaction byte count - */ -struct at_desc { - /* FIRST values the hardware uses */ - struct at_lli lli; - - /* THEN values for driver housekeeping */ - struct list_head tx_list; - struct dma_async_tx_descriptor txd; - struct list_head desc_node; - size_t len; - size_t total_len; - - /* Interleaved data */ - size_t boundary; - size_t dst_hole; - size_t src_hole; - - /* Memset temporary buffer */ - bool memset_buffer; - dma_addr_t memset_paddr; - int *memset_vaddr; -}; - -static inline struct at_desc * -txd_to_at_desc(struct dma_async_tx_descriptor *txd) -{ - return container_of(txd, struct at_desc, txd); -} - - -/*-- Channels --------------------------------------------------------*/ - -/** - * atc_status - information bits stored in channel status flag - * - * Manipulated with atomic operations. 
- */ -enum atc_status { - ATC_IS_ERROR = 0, - ATC_IS_PAUSED = 1, - ATC_IS_CYCLIC = 24, -}; - -/** - * struct at_dma_chan - internal representation of an Atmel HDMAC channel - * @chan_common: common dmaengine channel object members - * @device: parent device - * @ch_regs: memory mapped register base - * @mask: channel index in a mask - * @per_if: peripheral interface - * @mem_if: memory interface - * @status: transmit status information from irq/prep* functions - * to tasklet (use atomic operations) - * @tasklet: bottom half to finish transaction work - * @save_cfg: configuration register that is saved on suspend/resume cycle - * @save_dscr: for cyclic operations, preserve next descriptor address in - * the cyclic list on suspend/resume cycle - * @dma_sconfig: configuration for slave transfers, passed via - * .device_config - * @lock: serializes enqueue/dequeue operations to descriptors lists - * @active_list: list of descriptors dmaengine is being running on - * @queue: list of descriptors ready to be submitted to engine - * @free_list: list of descriptors usable by the channel - */ -struct at_dma_chan { - struct dma_chan chan_common; - struct at_dma *device; - void __iomem *ch_regs; - u8 mask; - u8 per_if; - u8 mem_if; - unsigned long status; - struct tasklet_struct tasklet; - u32 save_cfg; - u32 save_dscr; - struct dma_slave_config dma_sconfig; - - spinlock_t lock; - - /* these other elements are all protected by lock */ - struct list_head active_list; - struct list_head queue; - struct list_head free_list; -}; - -#define channel_readl(atchan, name) \ - __raw_readl((atchan)->ch_regs + ATC_##name##_OFFSET) - -#define channel_writel(atchan, name, val) \ - __raw_writel((val), (atchan)->ch_regs + ATC_##name##_OFFSET) - -static inline struct at_dma_chan *to_at_dma_chan(struct dma_chan *dchan) -{ - return container_of(dchan, struct at_dma_chan, chan_common); -} - -/* - * Fix sconfig's burst size according to at_hdmac. We need to convert them as: - * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3, 32 -> 4, 64 -> 5, 128 -> 6, 256 -> 7. - * - * This can be done by finding most significant bit set. - */ -static inline void convert_burst(u32 *maxburst) -{ - if (*maxburst > 1) - *maxburst = fls(*maxburst) - 2; - else - *maxburst = 0; -} - -/* - * Fix sconfig's bus width according to at_hdmac. - * 1 byte -> 0, 2 bytes -> 1, 4 bytes -> 2. 
- */ -static inline u8 convert_buswidth(enum dma_slave_buswidth addr_width) -{ - switch (addr_width) { - case DMA_SLAVE_BUSWIDTH_2_BYTES: - return 1; - case DMA_SLAVE_BUSWIDTH_4_BYTES: - return 2; - default: - /* For 1 byte width or fallback */ - return 0; - } -} - -/*-- Controller ------------------------------------------------------*/ - -/** - * struct at_dma - internal representation of an Atmel HDMA Controller - * @chan_common: common dmaengine dma_device object members - * @atdma_devtype: identifier of DMA controller compatibility - * @ch_regs: memory mapped register base - * @clk: dma controller clock - * @save_imr: interrupt mask register that is saved on suspend/resume cycle - * @all_chan_mask: all channels availlable in a mask - * @dma_desc_pool: base of DMA descriptor region (DMA address) - * @chan: channels table to store at_dma_chan structures - */ -struct at_dma { - struct dma_device dma_common; - void __iomem *regs; - struct clk *clk; - u32 save_imr; - - u8 all_chan_mask; - - struct dma_pool *dma_desc_pool; - struct dma_pool *memset_pool; - /* AT THE END channels table */ - struct at_dma_chan chan[]; -}; - -#define dma_readl(atdma, name) \ - __raw_readl((atdma)->regs + AT_DMA_##name) -#define dma_writel(atdma, name, val) \ - __raw_writel((val), (atdma)->regs + AT_DMA_##name) - -static inline struct at_dma *to_at_dma(struct dma_device *ddev) -{ - return container_of(ddev, struct at_dma, dma_common); -} - - -/*-- Helper functions ------------------------------------------------*/ - -static struct device *chan2dev(struct dma_chan *chan) -{ - return &chan->dev->device; -} - -#if defined(VERBOSE_DEBUG) -static void vdbg_dump_regs(struct at_dma_chan *atchan) -{ - struct at_dma *atdma = to_at_dma(atchan->chan_common.device); - - dev_err(chan2dev(&atchan->chan_common), - " channel %d : imr = 0x%x, chsr = 0x%x\n", - atchan->chan_common.chan_id, - dma_readl(atdma, EBCIMR), - dma_readl(atdma, CHSR)); - - dev_err(chan2dev(&atchan->chan_common), - " channel: s0x%x d0x%x ctrl0x%x:0x%x cfg0x%x l0x%x\n", - channel_readl(atchan, SADDR), - channel_readl(atchan, DADDR), - channel_readl(atchan, CTRLA), - channel_readl(atchan, CTRLB), - channel_readl(atchan, CFG), - channel_readl(atchan, DSCR)); -} -#else -static void vdbg_dump_regs(struct at_dma_chan *atchan) {} -#endif - -static void atc_dump_lli(struct at_dma_chan *atchan, struct at_lli *lli) -{ - dev_crit(chan2dev(&atchan->chan_common), - "desc: s%pad d%pad ctrl0x%x:0x%x l%pad\n", - &lli->saddr, &lli->daddr, - lli->ctrla, lli->ctrlb, &lli->dscr); -} - - -static void atc_setup_irq(struct at_dma *atdma, int chan_id, int on) -{ - u32 ebci; - - /* enable interrupts on buffer transfer completion & error */ - ebci = AT_DMA_BTC(chan_id) - | AT_DMA_ERR(chan_id); - if (on) - dma_writel(atdma, EBCIER, ebci); - else - dma_writel(atdma, EBCIDR, ebci); -} - -static void atc_enable_chan_irq(struct at_dma *atdma, int chan_id) -{ - atc_setup_irq(atdma, chan_id, 1); -} - -static void atc_disable_chan_irq(struct at_dma *atdma, int chan_id) -{ - atc_setup_irq(atdma, chan_id, 0); -} - - -/** - * atc_chan_is_enabled - test if given channel is enabled - * @atchan: channel we want to test status - */ -static inline int atc_chan_is_enabled(struct at_dma_chan *atchan) -{ - struct at_dma *atdma = to_at_dma(atchan->chan_common.device); - - return !!(dma_readl(atdma, CHSR) & atchan->mask); -} - -/** - * atc_chan_is_paused - test channel pause/resume status - * @atchan: channel we want to test status - */ -static inline int atc_chan_is_paused(struct at_dma_chan *atchan) 
-{ - return test_bit(ATC_IS_PAUSED, &atchan->status); -} - -/** - * atc_chan_is_cyclic - test if given channel has cyclic property set - * @atchan: channel we want to test status - */ -static inline int atc_chan_is_cyclic(struct at_dma_chan *atchan) -{ - return test_bit(ATC_IS_CYCLIC, &atchan->status); -} - -/** - * set_desc_eol - set end-of-link to descriptor so it will end transfer - * @desc: descriptor, signle or at the end of a chain, to end chain on - */ -static void set_desc_eol(struct at_desc *desc) -{ - u32 ctrlb = desc->lli.ctrlb; - - ctrlb &= ~ATC_IEN; - ctrlb |= ATC_SRC_DSCR_DIS | ATC_DST_DSCR_DIS; - - desc->lli.ctrlb = ctrlb; - desc->lli.dscr = 0; -} - -#endif /* AT_HDMAC_REGS_H */ diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c index 2a483802d9ee..9c1a6e9a9c03 100644 --- a/drivers/dma/dma-jz4780.c +++ b/drivers/dma/dma-jz4780.c @@ -1038,6 +1038,13 @@ static const struct jz4780_dma_soc_data jz4725b_dma_soc_data = { JZ_SOC_DATA_BREAK_LINKS, }; +static const struct jz4780_dma_soc_data jz4755_dma_soc_data = { + .nb_channels = 4, + .transfer_ord_max = 5, + .flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC | + JZ_SOC_DATA_BREAK_LINKS, +}; + static const struct jz4780_dma_soc_data jz4760_dma_soc_data = { .nb_channels = 5, .transfer_ord_max = 6, @@ -1101,6 +1108,7 @@ static const struct jz4780_dma_soc_data x1830_dma_soc_data = { static const struct of_device_id jz4780_dma_dt_match[] = { { .compatible = "ingenic,jz4740-dma", .data = &jz4740_dma_soc_data }, { .compatible = "ingenic,jz4725b-dma", .data = &jz4725b_dma_soc_data }, + { .compatible = "ingenic,jz4755-dma", .data = &jz4755_dma_soc_data }, { .compatible = "ingenic,jz4760-dma", .data = &jz4760_dma_soc_data }, { .compatible = "ingenic,jz4760-mdma", .data = &jz4760_mdma_soc_data }, { .compatible = "ingenic,jz4760-bdma", .data = &jz4760_bdma_soc_data }, diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c index f4c07ad3be15..c33087c5cd02 100644 --- a/drivers/dma/idma64.c +++ b/drivers/dma/idma64.c @@ -600,7 +600,7 @@ static int idma64_probe(struct idma64_chip *chip) return 0; } -static int idma64_remove(struct idma64_chip *chip) +static void idma64_remove(struct idma64_chip *chip) { struct idma64 *idma64 = chip->idma64; unsigned short i; @@ -618,8 +618,6 @@ static int idma64_remove(struct idma64_chip *chip) tasklet_kill(&idma64c->vchan.task); } - - return 0; } /* ---------------------------------------------------------------------- */ @@ -664,7 +662,9 @@ static int idma64_platform_remove(struct platform_device *pdev) { struct idma64_chip *chip = platform_get_drvdata(pdev); - return idma64_remove(chip); + idma64_remove(chip); + + return 0; } static int __maybe_unused idma64_pm_suspend(struct device *dev) diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c index 6f44fa8f78a5..06f5d3783d77 100644 --- a/drivers/dma/idxd/device.c +++ b/drivers/dma/idxd/device.c @@ -7,7 +7,6 @@ #include <linux/io-64-nonatomic-lo-hi.h> #include <linux/dmaengine.h> #include <linux/irq.h> -#include <linux/msi.h> #include <uapi/linux/idxd.h> #include "../dmaengine.h" #include "idxd.h" diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c index 7269bd54554f..3229dfc78650 100644 --- a/drivers/dma/idxd/sysfs.c +++ b/drivers/dma/idxd/sysfs.c @@ -528,6 +528,22 @@ static bool idxd_group_attr_progress_limit_invisible(struct attribute *attr, !idxd->hw.group_cap.progress_limit; } +static bool idxd_group_attr_read_buffers_invisible(struct attribute *attr, + struct idxd_device *idxd) +{ + /* + * Intel IAA does 
not support Read Buffer allocation control, + * make these attributes invisible. + */ + return (attr == &dev_attr_group_use_token_limit.attr || + attr == &dev_attr_group_use_read_buffer_limit.attr || + attr == &dev_attr_group_tokens_allowed.attr || + attr == &dev_attr_group_read_buffers_allowed.attr || + attr == &dev_attr_group_tokens_reserved.attr || + attr == &dev_attr_group_read_buffers_reserved.attr) && + idxd->data->type == IDXD_TYPE_IAX; +} + static umode_t idxd_group_attr_visible(struct kobject *kobj, struct attribute *attr, int n) { @@ -538,6 +554,9 @@ static umode_t idxd_group_attr_visible(struct kobject *kobj, if (idxd_group_attr_progress_limit_invisible(attr, idxd)) return 0; + if (idxd_group_attr_read_buffers_invisible(attr, idxd)) + return 0; + return attr->mode; } @@ -1233,6 +1252,14 @@ static bool idxd_wq_attr_op_config_invisible(struct attribute *attr, !idxd->hw.wq_cap.op_config; } +static bool idxd_wq_attr_max_batch_size_invisible(struct attribute *attr, + struct idxd_device *idxd) +{ + /* Intel IAA does not support batch processing, make it invisible */ + return attr == &dev_attr_wq_max_batch_size.attr && + idxd->data->type == IDXD_TYPE_IAX; +} + static umode_t idxd_wq_attr_visible(struct kobject *kobj, struct attribute *attr, int n) { @@ -1243,6 +1270,9 @@ static umode_t idxd_wq_attr_visible(struct kobject *kobj, if (idxd_wq_attr_op_config_invisible(attr, idxd)) return 0; + if (idxd_wq_attr_max_batch_size_invisible(attr, idxd)) + return 0; + return attr->mode; } @@ -1533,6 +1563,43 @@ static ssize_t cmd_status_store(struct device *dev, struct device_attribute *att } static DEVICE_ATTR_RW(cmd_status); +static bool idxd_device_attr_max_batch_size_invisible(struct attribute *attr, + struct idxd_device *idxd) +{ + /* Intel IAA does not support batch processing, make it invisible */ + return attr == &dev_attr_max_batch_size.attr && + idxd->data->type == IDXD_TYPE_IAX; +} + +static bool idxd_device_attr_read_buffers_invisible(struct attribute *attr, + struct idxd_device *idxd) +{ + /* + * Intel IAA does not support Read Buffer allocation control, + * make these attributes invisible. 
+ */ + return (attr == &dev_attr_max_tokens.attr || + attr == &dev_attr_max_read_buffers.attr || + attr == &dev_attr_token_limit.attr || + attr == &dev_attr_read_buffer_limit.attr) && + idxd->data->type == IDXD_TYPE_IAX; +} + +static umode_t idxd_device_attr_visible(struct kobject *kobj, + struct attribute *attr, int n) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct idxd_device *idxd = confdev_to_idxd(dev); + + if (idxd_device_attr_max_batch_size_invisible(attr, idxd)) + return 0; + + if (idxd_device_attr_read_buffers_invisible(attr, idxd)) + return 0; + + return attr->mode; +} + static struct attribute *idxd_device_attributes[] = { &dev_attr_version.attr, &dev_attr_max_groups.attr, @@ -1560,6 +1627,7 @@ static struct attribute *idxd_device_attributes[] = { static const struct attribute_group idxd_device_attribute_group = { .attrs = idxd_device_attributes, + .is_visible = idxd_device_attr_visible, }; static const struct attribute_group *idxd_attribute_groups[] = { diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index e2070df6cad2..79d244011093 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -33,7 +33,7 @@ MODULE_PARM_DESC(completion_timeout, static int idle_timeout = 2000; module_param(idle_timeout, int, 0644); MODULE_PARM_DESC(idle_timeout, - "set ioat idel timeout [msec] (default 2000 [msec])"); + "set ioat idle timeout [msec] (default 2000 [msec])"); #define IDLE_TIMEOUT msecs_to_jiffies(idle_timeout) #define COMPLETION_TIMEOUT msecs_to_jiffies(completion_timeout) diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c deleted file mode 100644 index 310b899d581f..000000000000 --- a/drivers/dma/iop-adma.c +++ /dev/null @@ -1,1554 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * offload engine driver for the Intel Xscale series of i/o processors - * Copyright © 2006, Intel Corporation. 
- */ - -/* - * This driver supports the asynchrounous DMA copy and RAID engines available - * on the Intel Xscale(R) family of I/O Processors (IOP 32x, 33x, 134x) - */ - -#include <linux/init.h> -#include <linux/module.h> -#include <linux/delay.h> -#include <linux/dma-mapping.h> -#include <linux/spinlock.h> -#include <linux/interrupt.h> -#include <linux/platform_device.h> -#include <linux/prefetch.h> -#include <linux/memory.h> -#include <linux/ioport.h> -#include <linux/raid/pq.h> -#include <linux/slab.h> - -#include "iop-adma.h" -#include "dmaengine.h" - -#define to_iop_adma_chan(chan) container_of(chan, struct iop_adma_chan, common) -#define to_iop_adma_device(dev) \ - container_of(dev, struct iop_adma_device, common) -#define tx_to_iop_adma_slot(tx) \ - container_of(tx, struct iop_adma_desc_slot, async_tx) - -/** - * iop_adma_free_slots - flags descriptor slots for reuse - * @slot: Slot to free - * Caller must hold &iop_chan->lock while calling this function - */ -static void iop_adma_free_slots(struct iop_adma_desc_slot *slot) -{ - int stride = slot->slots_per_op; - - while (stride--) { - slot->slots_per_op = 0; - slot = list_entry(slot->slot_node.next, - struct iop_adma_desc_slot, - slot_node); - } -} - -static dma_cookie_t -iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc, - struct iop_adma_chan *iop_chan, dma_cookie_t cookie) -{ - struct dma_async_tx_descriptor *tx = &desc->async_tx; - - BUG_ON(tx->cookie < 0); - if (tx->cookie > 0) { - cookie = tx->cookie; - tx->cookie = 0; - - /* call the callback (must not sleep or submit new - * operations to this channel) - */ - dmaengine_desc_get_callback_invoke(tx, NULL); - - dma_descriptor_unmap(tx); - if (desc->group_head) - desc->group_head = NULL; - } - - /* run dependent operations */ - dma_run_dependencies(tx); - - return cookie; -} - -static int -iop_adma_clean_slot(struct iop_adma_desc_slot *desc, - struct iop_adma_chan *iop_chan) -{ - /* the client is allowed to attach dependent operations - * until 'ack' is set - */ - if (!async_tx_test_ack(&desc->async_tx)) - return 0; - - /* leave the last descriptor in the chain - * so we can append to it - */ - if (desc->chain_node.next == &iop_chan->chain) - return 1; - - dev_dbg(iop_chan->device->common.dev, - "\tfree slot: %d slots_per_op: %d\n", - desc->idx, desc->slots_per_op); - - list_del(&desc->chain_node); - iop_adma_free_slots(desc); - - return 0; -} - -static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan) -{ - struct iop_adma_desc_slot *iter, *_iter, *grp_start = NULL; - dma_cookie_t cookie = 0; - u32 current_desc = iop_chan_get_current_descriptor(iop_chan); - int busy = iop_chan_is_busy(iop_chan); - int seen_current = 0, slot_cnt = 0, slots_per_op = 0; - - dev_dbg(iop_chan->device->common.dev, "%s\n", __func__); - /* free completed slots from the chain starting with - * the oldest descriptor - */ - list_for_each_entry_safe(iter, _iter, &iop_chan->chain, - chain_node) { - pr_debug("\tcookie: %d slot: %d busy: %d " - "this_desc: %pad next_desc: %#llx ack: %d\n", - iter->async_tx.cookie, iter->idx, busy, - &iter->async_tx.phys, (u64)iop_desc_get_next_desc(iter), - async_tx_test_ack(&iter->async_tx)); - prefetch(_iter); - prefetch(&_iter->async_tx); - - /* do not advance past the current descriptor loaded into the - * hardware channel, subsequent descriptors are either in - * process or have not been submitted - */ - if (seen_current) - break; - - /* stop the search if we reach the current descriptor and the - * channel is busy, or if it appears that the 
current descriptor - * needs to be re-read (i.e. has been appended to) - */ - if (iter->async_tx.phys == current_desc) { - BUG_ON(seen_current++); - if (busy || iop_desc_get_next_desc(iter)) - break; - } - - /* detect the start of a group transaction */ - if (!slot_cnt && !slots_per_op) { - slot_cnt = iter->slot_cnt; - slots_per_op = iter->slots_per_op; - if (slot_cnt <= slots_per_op) { - slot_cnt = 0; - slots_per_op = 0; - } - } - - if (slot_cnt) { - pr_debug("\tgroup++\n"); - if (!grp_start) - grp_start = iter; - slot_cnt -= slots_per_op; - } - - /* all the members of a group are complete */ - if (slots_per_op != 0 && slot_cnt == 0) { - struct iop_adma_desc_slot *grp_iter, *_grp_iter; - int end_of_chain = 0; - pr_debug("\tgroup end\n"); - - /* collect the total results */ - if (grp_start->xor_check_result) { - u32 zero_sum_result = 0; - slot_cnt = grp_start->slot_cnt; - grp_iter = grp_start; - - list_for_each_entry_from(grp_iter, - &iop_chan->chain, chain_node) { - zero_sum_result |= - iop_desc_get_zero_result(grp_iter); - pr_debug("\titer%d result: %d\n", - grp_iter->idx, zero_sum_result); - slot_cnt -= slots_per_op; - if (slot_cnt == 0) - break; - } - pr_debug("\tgrp_start->xor_check_result: %p\n", - grp_start->xor_check_result); - *grp_start->xor_check_result = zero_sum_result; - } - - /* clean up the group */ - slot_cnt = grp_start->slot_cnt; - grp_iter = grp_start; - list_for_each_entry_safe_from(grp_iter, _grp_iter, - &iop_chan->chain, chain_node) { - cookie = iop_adma_run_tx_complete_actions( - grp_iter, iop_chan, cookie); - - slot_cnt -= slots_per_op; - end_of_chain = iop_adma_clean_slot(grp_iter, - iop_chan); - - if (slot_cnt == 0 || end_of_chain) - break; - } - - /* the group should be complete at this point */ - BUG_ON(slot_cnt); - - slots_per_op = 0; - grp_start = NULL; - if (end_of_chain) - break; - else - continue; - } else if (slots_per_op) /* wait for group completion */ - continue; - - /* write back zero sum results (single descriptor case) */ - if (iter->xor_check_result && iter->async_tx.cookie) - *iter->xor_check_result = - iop_desc_get_zero_result(iter); - - cookie = iop_adma_run_tx_complete_actions( - iter, iop_chan, cookie); - - if (iop_adma_clean_slot(iter, iop_chan)) - break; - } - - if (cookie > 0) { - iop_chan->common.completed_cookie = cookie; - pr_debug("\tcompleted cookie %d\n", cookie); - } -} - -static void -iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan) -{ - spin_lock_bh(&iop_chan->lock); - __iop_adma_slot_cleanup(iop_chan); - spin_unlock_bh(&iop_chan->lock); -} - -static void iop_adma_tasklet(struct tasklet_struct *t) -{ - struct iop_adma_chan *iop_chan = from_tasklet(iop_chan, t, - irq_tasklet); - - /* lockdep will flag depedency submissions as potentially - * recursive locking, this is not the case as a dependency - * submission will never recurse a channels submit routine. - * There are checks in async_tx.c to prevent this. 
- */ - spin_lock_nested(&iop_chan->lock, SINGLE_DEPTH_NESTING); - __iop_adma_slot_cleanup(iop_chan); - spin_unlock(&iop_chan->lock); -} - -static struct iop_adma_desc_slot * -iop_adma_alloc_slots(struct iop_adma_chan *iop_chan, int num_slots, - int slots_per_op) -{ - struct iop_adma_desc_slot *iter, *_iter, *alloc_start = NULL; - LIST_HEAD(chain); - int slots_found, retry = 0; - - /* start search from the last allocated descrtiptor - * if a contiguous allocation can not be found start searching - * from the beginning of the list - */ -retry: - slots_found = 0; - if (retry == 0) - iter = iop_chan->last_used; - else - iter = list_entry(&iop_chan->all_slots, - struct iop_adma_desc_slot, - slot_node); - - list_for_each_entry_safe_continue( - iter, _iter, &iop_chan->all_slots, slot_node) { - prefetch(_iter); - prefetch(&_iter->async_tx); - if (iter->slots_per_op) { - /* give up after finding the first busy slot - * on the second pass through the list - */ - if (retry) - break; - - slots_found = 0; - continue; - } - - /* start the allocation if the slot is correctly aligned */ - if (!slots_found++) { - if (iop_desc_is_aligned(iter, slots_per_op)) - alloc_start = iter; - else { - slots_found = 0; - continue; - } - } - - if (slots_found == num_slots) { - struct iop_adma_desc_slot *alloc_tail = NULL; - struct iop_adma_desc_slot *last_used = NULL; - iter = alloc_start; - while (num_slots) { - int i; - dev_dbg(iop_chan->device->common.dev, - "allocated slot: %d " - "(desc %p phys: %#llx) slots_per_op %d\n", - iter->idx, iter->hw_desc, - (u64)iter->async_tx.phys, slots_per_op); - - /* pre-ack all but the last descriptor */ - if (num_slots != slots_per_op) - async_tx_ack(&iter->async_tx); - - list_add_tail(&iter->chain_node, &chain); - alloc_tail = iter; - iter->async_tx.cookie = 0; - iter->slot_cnt = num_slots; - iter->xor_check_result = NULL; - for (i = 0; i < slots_per_op; i++) { - iter->slots_per_op = slots_per_op - i; - last_used = iter; - iter = list_entry(iter->slot_node.next, - struct iop_adma_desc_slot, - slot_node); - } - num_slots -= slots_per_op; - } - alloc_tail->group_head = alloc_start; - alloc_tail->async_tx.cookie = -EBUSY; - list_splice(&chain, &alloc_tail->tx_list); - iop_chan->last_used = last_used; - iop_desc_clear_next_desc(alloc_start); - iop_desc_clear_next_desc(alloc_tail); - return alloc_tail; - } - } - if (!retry++) - goto retry; - - /* perform direct reclaim if the allocation fails */ - __iop_adma_slot_cleanup(iop_chan); - - return NULL; -} - -static void iop_adma_check_threshold(struct iop_adma_chan *iop_chan) -{ - dev_dbg(iop_chan->device->common.dev, "pending: %d\n", - iop_chan->pending); - - if (iop_chan->pending >= IOP_ADMA_THRESHOLD) { - iop_chan->pending = 0; - iop_chan_append(iop_chan); - } -} - -static dma_cookie_t -iop_adma_tx_submit(struct dma_async_tx_descriptor *tx) -{ - struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx); - struct iop_adma_chan *iop_chan = to_iop_adma_chan(tx->chan); - struct iop_adma_desc_slot *grp_start, *old_chain_tail; - int slot_cnt; - dma_cookie_t cookie; - dma_addr_t next_dma; - - grp_start = sw_desc->group_head; - slot_cnt = grp_start->slot_cnt; - - spin_lock_bh(&iop_chan->lock); - cookie = dma_cookie_assign(tx); - - old_chain_tail = list_entry(iop_chan->chain.prev, - struct iop_adma_desc_slot, chain_node); - list_splice_init(&sw_desc->tx_list, - &old_chain_tail->chain_node); - - /* fix up the hardware chain */ - next_dma = grp_start->async_tx.phys; - iop_desc_set_next_desc(old_chain_tail, next_dma); - 
BUG_ON(iop_desc_get_next_desc(old_chain_tail) != next_dma); /* flush */ - - /* check for pre-chained descriptors */ - iop_paranoia(iop_desc_get_next_desc(sw_desc)); - - /* increment the pending count by the number of slots - * memcpy operations have a 1:1 (slot:operation) relation - * other operations are heavier and will pop the threshold - * more often. - */ - iop_chan->pending += slot_cnt; - iop_adma_check_threshold(iop_chan); - spin_unlock_bh(&iop_chan->lock); - - dev_dbg(iop_chan->device->common.dev, "%s cookie: %d slot: %d\n", - __func__, sw_desc->async_tx.cookie, sw_desc->idx); - - return cookie; -} - -static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan); -static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan); - -/** - * iop_adma_alloc_chan_resources - returns the number of allocated descriptors - * @chan: allocate descriptor resources for this channel - * - * Note: We keep the slots for 1 operation on iop_chan->chain at all times. To - * avoid deadlock, via async_xor, num_descs_in_pool must at a minimum be - * greater than 2x the number slots needed to satisfy a device->max_xor - * request. - * */ -static int iop_adma_alloc_chan_resources(struct dma_chan *chan) -{ - char *hw_desc; - dma_addr_t dma_desc; - int idx; - struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); - struct iop_adma_desc_slot *slot = NULL; - int init = iop_chan->slots_allocated ? 0 : 1; - struct iop_adma_platform_data *plat_data = - dev_get_platdata(&iop_chan->device->pdev->dev); - int num_descs_in_pool = plat_data->pool_size/IOP_ADMA_SLOT_SIZE; - - /* Allocate descriptor slots */ - do { - idx = iop_chan->slots_allocated; - if (idx == num_descs_in_pool) - break; - - slot = kzalloc(sizeof(*slot), GFP_KERNEL); - if (!slot) { - printk(KERN_INFO "IOP ADMA Channel only initialized" - " %d descriptor slots", idx); - break; - } - hw_desc = (char *) iop_chan->device->dma_desc_pool_virt; - slot->hw_desc = (void *) &hw_desc[idx * IOP_ADMA_SLOT_SIZE]; - - dma_async_tx_descriptor_init(&slot->async_tx, chan); - slot->async_tx.tx_submit = iop_adma_tx_submit; - INIT_LIST_HEAD(&slot->tx_list); - INIT_LIST_HEAD(&slot->chain_node); - INIT_LIST_HEAD(&slot->slot_node); - dma_desc = iop_chan->device->dma_desc_pool; - slot->async_tx.phys = dma_desc + idx * IOP_ADMA_SLOT_SIZE; - slot->idx = idx; - - spin_lock_bh(&iop_chan->lock); - iop_chan->slots_allocated++; - list_add_tail(&slot->slot_node, &iop_chan->all_slots); - spin_unlock_bh(&iop_chan->lock); - } while (iop_chan->slots_allocated < num_descs_in_pool); - - if (idx && !iop_chan->last_used) - iop_chan->last_used = list_entry(iop_chan->all_slots.next, - struct iop_adma_desc_slot, - slot_node); - - dev_dbg(iop_chan->device->common.dev, - "allocated %d descriptor slots last_used: %p\n", - iop_chan->slots_allocated, iop_chan->last_used); - - /* initialize the channel and the chain with a null operation */ - if (init) { - if (dma_has_cap(DMA_MEMCPY, - iop_chan->device->common.cap_mask)) - iop_chan_start_null_memcpy(iop_chan); - else if (dma_has_cap(DMA_XOR, - iop_chan->device->common.cap_mask)) - iop_chan_start_null_xor(iop_chan); - else - BUG(); - } - - return (idx > 0) ? 
idx : -ENOMEM; -} - -static struct dma_async_tx_descriptor * -iop_adma_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags) -{ - struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); - struct iop_adma_desc_slot *sw_desc, *grp_start; - int slot_cnt, slots_per_op; - - dev_dbg(iop_chan->device->common.dev, "%s\n", __func__); - - spin_lock_bh(&iop_chan->lock); - slot_cnt = iop_chan_interrupt_slot_count(&slots_per_op, iop_chan); - sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op); - if (sw_desc) { - grp_start = sw_desc->group_head; - iop_desc_init_interrupt(grp_start, iop_chan); - sw_desc->async_tx.flags = flags; - } - spin_unlock_bh(&iop_chan->lock); - - return sw_desc ? &sw_desc->async_tx : NULL; -} - -static struct dma_async_tx_descriptor * -iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest, - dma_addr_t dma_src, size_t len, unsigned long flags) -{ - struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); - struct iop_adma_desc_slot *sw_desc, *grp_start; - int slot_cnt, slots_per_op; - - if (unlikely(!len)) - return NULL; - BUG_ON(len > IOP_ADMA_MAX_BYTE_COUNT); - - dev_dbg(iop_chan->device->common.dev, "%s len: %zu\n", - __func__, len); - - spin_lock_bh(&iop_chan->lock); - slot_cnt = iop_chan_memcpy_slot_count(len, &slots_per_op); - sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op); - if (sw_desc) { - grp_start = sw_desc->group_head; - iop_desc_init_memcpy(grp_start, flags); - iop_desc_set_byte_count(grp_start, iop_chan, len); - iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest); - iop_desc_set_memcpy_src_addr(grp_start, dma_src); - sw_desc->async_tx.flags = flags; - } - spin_unlock_bh(&iop_chan->lock); - - return sw_desc ? &sw_desc->async_tx : NULL; -} - -static struct dma_async_tx_descriptor * -iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest, - dma_addr_t *dma_src, unsigned int src_cnt, size_t len, - unsigned long flags) -{ - struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); - struct iop_adma_desc_slot *sw_desc, *grp_start; - int slot_cnt, slots_per_op; - - if (unlikely(!len)) - return NULL; - BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT); - - dev_dbg(iop_chan->device->common.dev, - "%s src_cnt: %d len: %zu flags: %lx\n", - __func__, src_cnt, len, flags); - - spin_lock_bh(&iop_chan->lock); - slot_cnt = iop_chan_xor_slot_count(len, src_cnt, &slots_per_op); - sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op); - if (sw_desc) { - grp_start = sw_desc->group_head; - iop_desc_init_xor(grp_start, src_cnt, flags); - iop_desc_set_byte_count(grp_start, iop_chan, len); - iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest); - sw_desc->async_tx.flags = flags; - while (src_cnt--) - iop_desc_set_xor_src_addr(grp_start, src_cnt, - dma_src[src_cnt]); - } - spin_unlock_bh(&iop_chan->lock); - - return sw_desc ? 
&sw_desc->async_tx : NULL; -} - -static struct dma_async_tx_descriptor * -iop_adma_prep_dma_xor_val(struct dma_chan *chan, dma_addr_t *dma_src, - unsigned int src_cnt, size_t len, u32 *result, - unsigned long flags) -{ - struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); - struct iop_adma_desc_slot *sw_desc, *grp_start; - int slot_cnt, slots_per_op; - - if (unlikely(!len)) - return NULL; - - dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %zu\n", - __func__, src_cnt, len); - - spin_lock_bh(&iop_chan->lock); - slot_cnt = iop_chan_zero_sum_slot_count(len, src_cnt, &slots_per_op); - sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op); - if (sw_desc) { - grp_start = sw_desc->group_head; - iop_desc_init_zero_sum(grp_start, src_cnt, flags); - iop_desc_set_zero_sum_byte_count(grp_start, len); - grp_start->xor_check_result = result; - pr_debug("\t%s: grp_start->xor_check_result: %p\n", - __func__, grp_start->xor_check_result); - sw_desc->async_tx.flags = flags; - while (src_cnt--) - iop_desc_set_zero_sum_src_addr(grp_start, src_cnt, - dma_src[src_cnt]); - } - spin_unlock_bh(&iop_chan->lock); - - return sw_desc ? &sw_desc->async_tx : NULL; -} - -static struct dma_async_tx_descriptor * -iop_adma_prep_dma_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, - unsigned int src_cnt, const unsigned char *scf, size_t len, - unsigned long flags) -{ - struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); - struct iop_adma_desc_slot *sw_desc, *g; - int slot_cnt, slots_per_op; - int continue_srcs; - - if (unlikely(!len)) - return NULL; - BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT); - - dev_dbg(iop_chan->device->common.dev, - "%s src_cnt: %d len: %zu flags: %lx\n", - __func__, src_cnt, len, flags); - - if (dmaf_p_disabled_continue(flags)) - continue_srcs = 1+src_cnt; - else if (dmaf_continue(flags)) - continue_srcs = 3+src_cnt; - else - continue_srcs = 0+src_cnt; - - spin_lock_bh(&iop_chan->lock); - slot_cnt = iop_chan_pq_slot_count(len, continue_srcs, &slots_per_op); - sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op); - if (sw_desc) { - int i; - - g = sw_desc->group_head; - iop_desc_set_byte_count(g, iop_chan, len); - - /* even if P is disabled its destination address (bits - * [3:0]) must match Q. It is ok if P points to an - * invalid address, it won't be written. - */ - if (flags & DMA_PREP_PQ_DISABLE_P) - dst[0] = dst[1] & 0x7; - - iop_desc_set_pq_addr(g, dst); - sw_desc->async_tx.flags = flags; - for (i = 0; i < src_cnt; i++) - iop_desc_set_pq_src_addr(g, i, src[i], scf[i]); - - /* if we are continuing a previous operation factor in - * the old p and q values, see the comment for dma_maxpq - * in include/linux/dmaengine.h - */ - if (dmaf_p_disabled_continue(flags)) - iop_desc_set_pq_src_addr(g, i++, dst[1], 1); - else if (dmaf_continue(flags)) { - iop_desc_set_pq_src_addr(g, i++, dst[0], 0); - iop_desc_set_pq_src_addr(g, i++, dst[1], 1); - iop_desc_set_pq_src_addr(g, i++, dst[1], 0); - } - iop_desc_init_pq(g, i, flags); - } - spin_unlock_bh(&iop_chan->lock); - - return sw_desc ? 
&sw_desc->async_tx : NULL; -} - -static struct dma_async_tx_descriptor * -iop_adma_prep_dma_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, - unsigned int src_cnt, const unsigned char *scf, - size_t len, enum sum_check_flags *pqres, - unsigned long flags) -{ - struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); - struct iop_adma_desc_slot *sw_desc, *g; - int slot_cnt, slots_per_op; - - if (unlikely(!len)) - return NULL; - BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT); - - dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %zu\n", - __func__, src_cnt, len); - - spin_lock_bh(&iop_chan->lock); - slot_cnt = iop_chan_pq_zero_sum_slot_count(len, src_cnt + 2, &slots_per_op); - sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op); - if (sw_desc) { - /* for validate operations p and q are tagged onto the - * end of the source list - */ - int pq_idx = src_cnt; - - g = sw_desc->group_head; - iop_desc_init_pq_zero_sum(g, src_cnt+2, flags); - iop_desc_set_pq_zero_sum_byte_count(g, len); - g->pq_check_result = pqres; - pr_debug("\t%s: g->pq_check_result: %p\n", - __func__, g->pq_check_result); - sw_desc->async_tx.flags = flags; - while (src_cnt--) - iop_desc_set_pq_zero_sum_src_addr(g, src_cnt, - src[src_cnt], - scf[src_cnt]); - iop_desc_set_pq_zero_sum_addr(g, pq_idx, src); - } - spin_unlock_bh(&iop_chan->lock); - - return sw_desc ? &sw_desc->async_tx : NULL; -} - -static void iop_adma_free_chan_resources(struct dma_chan *chan) -{ - struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); - struct iop_adma_desc_slot *iter, *_iter; - int in_use_descs = 0; - - iop_adma_slot_cleanup(iop_chan); - - spin_lock_bh(&iop_chan->lock); - list_for_each_entry_safe(iter, _iter, &iop_chan->chain, - chain_node) { - in_use_descs++; - list_del(&iter->chain_node); - } - list_for_each_entry_safe_reverse( - iter, _iter, &iop_chan->all_slots, slot_node) { - list_del(&iter->slot_node); - kfree(iter); - iop_chan->slots_allocated--; - } - iop_chan->last_used = NULL; - - dev_dbg(iop_chan->device->common.dev, "%s slots_allocated %d\n", - __func__, iop_chan->slots_allocated); - spin_unlock_bh(&iop_chan->lock); - - /* one is ok since we left it on there on purpose */ - if (in_use_descs > 1) - printk(KERN_ERR "IOP: Freeing %d in use descriptors!\n", - in_use_descs - 1); -} - -/** - * iop_adma_status - poll the status of an ADMA transaction - * @chan: ADMA channel handle - * @cookie: ADMA transaction identifier - * @txstate: a holder for the current state of the channel or NULL - */ -static enum dma_status iop_adma_status(struct dma_chan *chan, - dma_cookie_t cookie, - struct dma_tx_state *txstate) -{ - struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); - int ret; - - ret = dma_cookie_status(chan, cookie, txstate); - if (ret == DMA_COMPLETE) - return ret; - - iop_adma_slot_cleanup(iop_chan); - - return dma_cookie_status(chan, cookie, txstate); -} - -static irqreturn_t iop_adma_eot_handler(int irq, void *data) -{ - struct iop_adma_chan *chan = data; - - dev_dbg(chan->device->common.dev, "%s\n", __func__); - - tasklet_schedule(&chan->irq_tasklet); - - iop_adma_device_clear_eot_status(chan); - - return IRQ_HANDLED; -} - -static irqreturn_t iop_adma_eoc_handler(int irq, void *data) -{ - struct iop_adma_chan *chan = data; - - dev_dbg(chan->device->common.dev, "%s\n", __func__); - - tasklet_schedule(&chan->irq_tasklet); - - iop_adma_device_clear_eoc_status(chan); - - return IRQ_HANDLED; -} - -static irqreturn_t iop_adma_err_handler(int irq, void *data) -{ - struct iop_adma_chan *chan = data; - 
unsigned long status = iop_chan_get_status(chan); - - dev_err(chan->device->common.dev, - "error ( %s%s%s%s%s%s%s)\n", - iop_is_err_int_parity(status, chan) ? "int_parity " : "", - iop_is_err_mcu_abort(status, chan) ? "mcu_abort " : "", - iop_is_err_int_tabort(status, chan) ? "int_tabort " : "", - iop_is_err_int_mabort(status, chan) ? "int_mabort " : "", - iop_is_err_pci_tabort(status, chan) ? "pci_tabort " : "", - iop_is_err_pci_mabort(status, chan) ? "pci_mabort " : "", - iop_is_err_split_tx(status, chan) ? "split_tx " : ""); - - iop_adma_device_clear_err_status(chan); - - BUG(); - - return IRQ_HANDLED; -} - -static void iop_adma_issue_pending(struct dma_chan *chan) -{ - struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); - - if (iop_chan->pending) { - iop_chan->pending = 0; - iop_chan_append(iop_chan); - } -} - -/* - * Perform a transaction to verify the HW works. - */ -#define IOP_ADMA_TEST_SIZE 2000 - -static int iop_adma_memcpy_self_test(struct iop_adma_device *device) -{ - int i; - void *src, *dest; - dma_addr_t src_dma, dest_dma; - struct dma_chan *dma_chan; - dma_cookie_t cookie; - struct dma_async_tx_descriptor *tx; - int err = 0; - struct iop_adma_chan *iop_chan; - - dev_dbg(device->common.dev, "%s\n", __func__); - - src = kmalloc(IOP_ADMA_TEST_SIZE, GFP_KERNEL); - if (!src) - return -ENOMEM; - dest = kzalloc(IOP_ADMA_TEST_SIZE, GFP_KERNEL); - if (!dest) { - kfree(src); - return -ENOMEM; - } - - /* Fill in src buffer */ - for (i = 0; i < IOP_ADMA_TEST_SIZE; i++) - ((u8 *) src)[i] = (u8)i; - - /* Start copy, using first DMA channel */ - dma_chan = container_of(device->common.channels.next, - struct dma_chan, - device_node); - if (iop_adma_alloc_chan_resources(dma_chan) < 1) { - err = -ENODEV; - goto out; - } - - dest_dma = dma_map_single(dma_chan->device->dev, dest, - IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE); - src_dma = dma_map_single(dma_chan->device->dev, src, - IOP_ADMA_TEST_SIZE, DMA_TO_DEVICE); - tx = iop_adma_prep_dma_memcpy(dma_chan, dest_dma, src_dma, - IOP_ADMA_TEST_SIZE, - DMA_PREP_INTERRUPT | DMA_CTRL_ACK); - - cookie = iop_adma_tx_submit(tx); - iop_adma_issue_pending(dma_chan); - msleep(1); - - if (iop_adma_status(dma_chan, cookie, NULL) != - DMA_COMPLETE) { - dev_err(dma_chan->device->dev, - "Self-test copy timed out, disabling\n"); - err = -ENODEV; - goto free_resources; - } - - iop_chan = to_iop_adma_chan(dma_chan); - dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma, - IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE); - if (memcmp(src, dest, IOP_ADMA_TEST_SIZE)) { - dev_err(dma_chan->device->dev, - "Self-test copy failed compare, disabling\n"); - err = -ENODEV; - goto free_resources; - } - -free_resources: - iop_adma_free_chan_resources(dma_chan); -out: - kfree(src); - kfree(dest); - return err; -} - -#define IOP_ADMA_NUM_SRC_TEST 4 /* must be <= 15 */ -static int -iop_adma_xor_val_self_test(struct iop_adma_device *device) -{ - int i, src_idx; - struct page *dest; - struct page *xor_srcs[IOP_ADMA_NUM_SRC_TEST]; - struct page *zero_sum_srcs[IOP_ADMA_NUM_SRC_TEST + 1]; - dma_addr_t dma_srcs[IOP_ADMA_NUM_SRC_TEST + 1]; - dma_addr_t dest_dma; - struct dma_async_tx_descriptor *tx; - struct dma_chan *dma_chan; - dma_cookie_t cookie; - u8 cmp_byte = 0; - u32 cmp_word; - u32 zero_sum_result; - int err = 0; - struct iop_adma_chan *iop_chan; - - dev_dbg(device->common.dev, "%s\n", __func__); - - for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) { - xor_srcs[src_idx] = alloc_page(GFP_KERNEL); - if (!xor_srcs[src_idx]) { - while (src_idx--) - 
__free_page(xor_srcs[src_idx]); - return -ENOMEM; - } - } - - dest = alloc_page(GFP_KERNEL); - if (!dest) { - while (src_idx--) - __free_page(xor_srcs[src_idx]); - return -ENOMEM; - } - - /* Fill in src buffers */ - for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) { - u8 *ptr = page_address(xor_srcs[src_idx]); - for (i = 0; i < PAGE_SIZE; i++) - ptr[i] = (1 << src_idx); - } - - for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) - cmp_byte ^= (u8) (1 << src_idx); - - cmp_word = (cmp_byte << 24) | (cmp_byte << 16) | - (cmp_byte << 8) | cmp_byte; - - memset(page_address(dest), 0, PAGE_SIZE); - - dma_chan = container_of(device->common.channels.next, - struct dma_chan, - device_node); - if (iop_adma_alloc_chan_resources(dma_chan) < 1) { - err = -ENODEV; - goto out; - } - - /* test xor */ - dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, - PAGE_SIZE, DMA_FROM_DEVICE); - for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++) - dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i], - 0, PAGE_SIZE, DMA_TO_DEVICE); - tx = iop_adma_prep_dma_xor(dma_chan, dest_dma, dma_srcs, - IOP_ADMA_NUM_SRC_TEST, PAGE_SIZE, - DMA_PREP_INTERRUPT | DMA_CTRL_ACK); - - cookie = iop_adma_tx_submit(tx); - iop_adma_issue_pending(dma_chan); - msleep(8); - - if (iop_adma_status(dma_chan, cookie, NULL) != - DMA_COMPLETE) { - dev_err(dma_chan->device->dev, - "Self-test xor timed out, disabling\n"); - err = -ENODEV; - goto free_resources; - } - - iop_chan = to_iop_adma_chan(dma_chan); - dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma, - PAGE_SIZE, DMA_FROM_DEVICE); - for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) { - u32 *ptr = page_address(dest); - if (ptr[i] != cmp_word) { - dev_err(dma_chan->device->dev, - "Self-test xor failed compare, disabling\n"); - err = -ENODEV; - goto free_resources; - } - } - dma_sync_single_for_device(&iop_chan->device->pdev->dev, dest_dma, - PAGE_SIZE, DMA_TO_DEVICE); - - /* skip zero sum if the capability is not present */ - if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask)) - goto free_resources; - - /* zero sum the sources with the destintation page */ - for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++) - zero_sum_srcs[i] = xor_srcs[i]; - zero_sum_srcs[i] = dest; - - zero_sum_result = 1; - - for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++) - dma_srcs[i] = dma_map_page(dma_chan->device->dev, - zero_sum_srcs[i], 0, PAGE_SIZE, - DMA_TO_DEVICE); - tx = iop_adma_prep_dma_xor_val(dma_chan, dma_srcs, - IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE, - &zero_sum_result, - DMA_PREP_INTERRUPT | DMA_CTRL_ACK); - - cookie = iop_adma_tx_submit(tx); - iop_adma_issue_pending(dma_chan); - msleep(8); - - if (iop_adma_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { - dev_err(dma_chan->device->dev, - "Self-test zero sum timed out, disabling\n"); - err = -ENODEV; - goto free_resources; - } - - if (zero_sum_result != 0) { - dev_err(dma_chan->device->dev, - "Self-test zero sum failed compare, disabling\n"); - err = -ENODEV; - goto free_resources; - } - - /* test for non-zero parity sum */ - zero_sum_result = 0; - for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++) - dma_srcs[i] = dma_map_page(dma_chan->device->dev, - zero_sum_srcs[i], 0, PAGE_SIZE, - DMA_TO_DEVICE); - tx = iop_adma_prep_dma_xor_val(dma_chan, dma_srcs, - IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE, - &zero_sum_result, - DMA_PREP_INTERRUPT | DMA_CTRL_ACK); - - cookie = iop_adma_tx_submit(tx); - iop_adma_issue_pending(dma_chan); - msleep(8); - - if (iop_adma_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { - 
dev_err(dma_chan->device->dev, - "Self-test non-zero sum timed out, disabling\n"); - err = -ENODEV; - goto free_resources; - } - - if (zero_sum_result != 1) { - dev_err(dma_chan->device->dev, - "Self-test non-zero sum failed compare, disabling\n"); - err = -ENODEV; - goto free_resources; - } - -free_resources: - iop_adma_free_chan_resources(dma_chan); -out: - src_idx = IOP_ADMA_NUM_SRC_TEST; - while (src_idx--) - __free_page(xor_srcs[src_idx]); - __free_page(dest); - return err; -} - -#ifdef CONFIG_RAID6_PQ -static int -iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device) -{ - /* combined sources, software pq results, and extra hw pq results */ - struct page *pq[IOP_ADMA_NUM_SRC_TEST+2+2]; - /* ptr to the extra hw pq buffers defined above */ - struct page **pq_hw = &pq[IOP_ADMA_NUM_SRC_TEST+2]; - /* address conversion buffers (dma_map / page_address) */ - void *pq_sw[IOP_ADMA_NUM_SRC_TEST+2]; - dma_addr_t pq_src[IOP_ADMA_NUM_SRC_TEST+2]; - dma_addr_t *pq_dest = &pq_src[IOP_ADMA_NUM_SRC_TEST]; - - int i; - struct dma_async_tx_descriptor *tx; - struct dma_chan *dma_chan; - dma_cookie_t cookie; - u32 zero_sum_result; - int err = 0; - struct device *dev; - - dev_dbg(device->common.dev, "%s\n", __func__); - - for (i = 0; i < ARRAY_SIZE(pq); i++) { - pq[i] = alloc_page(GFP_KERNEL); - if (!pq[i]) { - while (i--) - __free_page(pq[i]); - return -ENOMEM; - } - } - - /* Fill in src buffers */ - for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++) { - pq_sw[i] = page_address(pq[i]); - memset(pq_sw[i], 0x11111111 * (1<<i), PAGE_SIZE); - } - pq_sw[i] = page_address(pq[i]); - pq_sw[i+1] = page_address(pq[i+1]); - - dma_chan = container_of(device->common.channels.next, - struct dma_chan, - device_node); - if (iop_adma_alloc_chan_resources(dma_chan) < 1) { - err = -ENODEV; - goto out; - } - - dev = dma_chan->device->dev; - - /* initialize the dests */ - memset(page_address(pq_hw[0]), 0 , PAGE_SIZE); - memset(page_address(pq_hw[1]), 0 , PAGE_SIZE); - - /* test pq */ - pq_dest[0] = dma_map_page(dev, pq_hw[0], 0, PAGE_SIZE, DMA_FROM_DEVICE); - pq_dest[1] = dma_map_page(dev, pq_hw[1], 0, PAGE_SIZE, DMA_FROM_DEVICE); - for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++) - pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE, - DMA_TO_DEVICE); - - tx = iop_adma_prep_dma_pq(dma_chan, pq_dest, pq_src, - IOP_ADMA_NUM_SRC_TEST, (u8 *)raid6_gfexp, - PAGE_SIZE, - DMA_PREP_INTERRUPT | - DMA_CTRL_ACK); - - cookie = iop_adma_tx_submit(tx); - iop_adma_issue_pending(dma_chan); - msleep(8); - - if (iop_adma_status(dma_chan, cookie, NULL) != - DMA_COMPLETE) { - dev_err(dev, "Self-test pq timed out, disabling\n"); - err = -ENODEV; - goto free_resources; - } - - raid6_call.gen_syndrome(IOP_ADMA_NUM_SRC_TEST+2, PAGE_SIZE, pq_sw); - - if (memcmp(pq_sw[IOP_ADMA_NUM_SRC_TEST], - page_address(pq_hw[0]), PAGE_SIZE) != 0) { - dev_err(dev, "Self-test p failed compare, disabling\n"); - err = -ENODEV; - goto free_resources; - } - if (memcmp(pq_sw[IOP_ADMA_NUM_SRC_TEST+1], - page_address(pq_hw[1]), PAGE_SIZE) != 0) { - dev_err(dev, "Self-test q failed compare, disabling\n"); - err = -ENODEV; - goto free_resources; - } - - /* test correct zero sum using the software generated pq values */ - for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 2; i++) - pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE, - DMA_TO_DEVICE); - - zero_sum_result = ~0; - tx = iop_adma_prep_dma_pq_val(dma_chan, &pq_src[IOP_ADMA_NUM_SRC_TEST], - pq_src, IOP_ADMA_NUM_SRC_TEST, - raid6_gfexp, PAGE_SIZE, &zero_sum_result, - DMA_PREP_INTERRUPT|DMA_CTRL_ACK); - - cookie = 
iop_adma_tx_submit(tx); - iop_adma_issue_pending(dma_chan); - msleep(8); - - if (iop_adma_status(dma_chan, cookie, NULL) != - DMA_COMPLETE) { - dev_err(dev, "Self-test pq-zero-sum timed out, disabling\n"); - err = -ENODEV; - goto free_resources; - } - - if (zero_sum_result != 0) { - dev_err(dev, "Self-test pq-zero-sum failed to validate: %x\n", - zero_sum_result); - err = -ENODEV; - goto free_resources; - } - - /* test incorrect zero sum */ - i = IOP_ADMA_NUM_SRC_TEST; - memset(pq_sw[i] + 100, 0, 100); - memset(pq_sw[i+1] + 200, 0, 200); - for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 2; i++) - pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE, - DMA_TO_DEVICE); - - zero_sum_result = 0; - tx = iop_adma_prep_dma_pq_val(dma_chan, &pq_src[IOP_ADMA_NUM_SRC_TEST], - pq_src, IOP_ADMA_NUM_SRC_TEST, - raid6_gfexp, PAGE_SIZE, &zero_sum_result, - DMA_PREP_INTERRUPT|DMA_CTRL_ACK); - - cookie = iop_adma_tx_submit(tx); - iop_adma_issue_pending(dma_chan); - msleep(8); - - if (iop_adma_status(dma_chan, cookie, NULL) != - DMA_COMPLETE) { - dev_err(dev, "Self-test !pq-zero-sum timed out, disabling\n"); - err = -ENODEV; - goto free_resources; - } - - if (zero_sum_result != (SUM_CHECK_P_RESULT | SUM_CHECK_Q_RESULT)) { - dev_err(dev, "Self-test !pq-zero-sum failed to validate: %x\n", - zero_sum_result); - err = -ENODEV; - goto free_resources; - } - -free_resources: - iop_adma_free_chan_resources(dma_chan); -out: - i = ARRAY_SIZE(pq); - while (i--) - __free_page(pq[i]); - return err; -} -#endif - -static int iop_adma_remove(struct platform_device *dev) -{ - struct iop_adma_device *device = platform_get_drvdata(dev); - struct dma_chan *chan, *_chan; - struct iop_adma_chan *iop_chan; - struct iop_adma_platform_data *plat_data = dev_get_platdata(&dev->dev); - - dma_async_device_unregister(&device->common); - - dma_free_coherent(&dev->dev, plat_data->pool_size, - device->dma_desc_pool_virt, device->dma_desc_pool); - - list_for_each_entry_safe(chan, _chan, &device->common.channels, - device_node) { - iop_chan = to_iop_adma_chan(chan); - list_del(&chan->device_node); - kfree(iop_chan); - } - kfree(device); - - return 0; -} - -static int iop_adma_probe(struct platform_device *pdev) -{ - struct resource *res; - int ret = 0, i; - struct iop_adma_device *adev; - struct iop_adma_chan *iop_chan; - struct dma_device *dma_dev; - struct iop_adma_platform_data *plat_data = dev_get_platdata(&pdev->dev); - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) - return -ENODEV; - - if (!devm_request_mem_region(&pdev->dev, res->start, - resource_size(res), pdev->name)) - return -EBUSY; - - adev = kzalloc(sizeof(*adev), GFP_KERNEL); - if (!adev) - return -ENOMEM; - dma_dev = &adev->common; - - /* allocate coherent memory for hardware descriptors - * note: writecombine gives slightly better performance, but - * requires that we explicitly flush the writes - */ - adev->dma_desc_pool_virt = dma_alloc_wc(&pdev->dev, - plat_data->pool_size, - &adev->dma_desc_pool, - GFP_KERNEL); - if (!adev->dma_desc_pool_virt) { - ret = -ENOMEM; - goto err_free_adev; - } - - dev_dbg(&pdev->dev, "%s: allocated descriptor pool virt %p phys %pad\n", - __func__, adev->dma_desc_pool_virt, &adev->dma_desc_pool); - - adev->id = plat_data->hw_id; - - /* discover transaction capabilites from the platform data */ - dma_dev->cap_mask = plat_data->cap_mask; - - adev->pdev = pdev; - platform_set_drvdata(pdev, adev); - - INIT_LIST_HEAD(&dma_dev->channels); - - /* set base routines */ - dma_dev->device_alloc_chan_resources = iop_adma_alloc_chan_resources; - 
dma_dev->device_free_chan_resources = iop_adma_free_chan_resources; - dma_dev->device_tx_status = iop_adma_status; - dma_dev->device_issue_pending = iop_adma_issue_pending; - dma_dev->dev = &pdev->dev; - - /* set prep routines based on capability */ - if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) - dma_dev->device_prep_dma_memcpy = iop_adma_prep_dma_memcpy; - if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { - dma_dev->max_xor = iop_adma_get_max_xor(); - dma_dev->device_prep_dma_xor = iop_adma_prep_dma_xor; - } - if (dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask)) - dma_dev->device_prep_dma_xor_val = - iop_adma_prep_dma_xor_val; - if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) { - dma_set_maxpq(dma_dev, iop_adma_get_max_pq(), 0); - dma_dev->device_prep_dma_pq = iop_adma_prep_dma_pq; - } - if (dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask)) - dma_dev->device_prep_dma_pq_val = - iop_adma_prep_dma_pq_val; - if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask)) - dma_dev->device_prep_dma_interrupt = - iop_adma_prep_dma_interrupt; - - iop_chan = kzalloc(sizeof(*iop_chan), GFP_KERNEL); - if (!iop_chan) { - ret = -ENOMEM; - goto err_free_dma; - } - iop_chan->device = adev; - - iop_chan->mmr_base = devm_ioremap(&pdev->dev, res->start, - resource_size(res)); - if (!iop_chan->mmr_base) { - ret = -ENOMEM; - goto err_free_iop_chan; - } - tasklet_setup(&iop_chan->irq_tasklet, iop_adma_tasklet); - - /* clear errors before enabling interrupts */ - iop_adma_device_clear_err_status(iop_chan); - - for (i = 0; i < 3; i++) { - static const irq_handler_t handler[] = { - iop_adma_eot_handler, - iop_adma_eoc_handler, - iop_adma_err_handler - }; - int irq = platform_get_irq(pdev, i); - if (irq < 0) { - ret = -ENXIO; - goto err_free_iop_chan; - } else { - ret = devm_request_irq(&pdev->dev, irq, - handler[i], 0, pdev->name, iop_chan); - if (ret) - goto err_free_iop_chan; - } - } - - spin_lock_init(&iop_chan->lock); - INIT_LIST_HEAD(&iop_chan->chain); - INIT_LIST_HEAD(&iop_chan->all_slots); - iop_chan->common.device = dma_dev; - dma_cookie_init(&iop_chan->common); - list_add_tail(&iop_chan->common.device_node, &dma_dev->channels); - - if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) { - ret = iop_adma_memcpy_self_test(adev); - dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret); - if (ret) - goto err_free_iop_chan; - } - - if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { - ret = iop_adma_xor_val_self_test(adev); - dev_dbg(&pdev->dev, "xor self test returned %d\n", ret); - if (ret) - goto err_free_iop_chan; - } - - if (dma_has_cap(DMA_PQ, dma_dev->cap_mask) && - dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask)) { - #ifdef CONFIG_RAID6_PQ - ret = iop_adma_pq_zero_sum_self_test(adev); - dev_dbg(&pdev->dev, "pq self test returned %d\n", ret); - #else - /* can not test raid6, so do not publish capability */ - dma_cap_clear(DMA_PQ, dma_dev->cap_mask); - dma_cap_clear(DMA_PQ_VAL, dma_dev->cap_mask); - ret = 0; - #endif - if (ret) - goto err_free_iop_chan; - } - - dev_info(&pdev->dev, "Intel(R) IOP: ( %s%s%s%s%s%s)\n", - dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "", - dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask) ? "pq_val " : "", - dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "", - dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask) ? "xor_val " : "", - dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "", - dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? 
"intr " : ""); - - dma_async_device_register(dma_dev); - goto out; - - err_free_iop_chan: - kfree(iop_chan); - err_free_dma: - dma_free_coherent(&adev->pdev->dev, plat_data->pool_size, - adev->dma_desc_pool_virt, adev->dma_desc_pool); - err_free_adev: - kfree(adev); - out: - return ret; -} - -static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan) -{ - struct iop_adma_desc_slot *sw_desc, *grp_start; - dma_cookie_t cookie; - int slot_cnt, slots_per_op; - - dev_dbg(iop_chan->device->common.dev, "%s\n", __func__); - - spin_lock_bh(&iop_chan->lock); - slot_cnt = iop_chan_memcpy_slot_count(0, &slots_per_op); - sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op); - if (sw_desc) { - grp_start = sw_desc->group_head; - - list_splice_init(&sw_desc->tx_list, &iop_chan->chain); - async_tx_ack(&sw_desc->async_tx); - iop_desc_init_memcpy(grp_start, 0); - iop_desc_set_byte_count(grp_start, iop_chan, 0); - iop_desc_set_dest_addr(grp_start, iop_chan, 0); - iop_desc_set_memcpy_src_addr(grp_start, 0); - - cookie = dma_cookie_assign(&sw_desc->async_tx); - - /* initialize the completed cookie to be less than - * the most recently used cookie - */ - iop_chan->common.completed_cookie = cookie - 1; - - /* channel should not be busy */ - BUG_ON(iop_chan_is_busy(iop_chan)); - - /* clear any prior error-status bits */ - iop_adma_device_clear_err_status(iop_chan); - - /* disable operation */ - iop_chan_disable(iop_chan); - - /* set the descriptor address */ - iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys); - - /* 1/ don't add pre-chained descriptors - * 2/ dummy read to flush next_desc write - */ - BUG_ON(iop_desc_get_next_desc(sw_desc)); - - /* run the descriptor */ - iop_chan_enable(iop_chan); - } else - dev_err(iop_chan->device->common.dev, - "failed to allocate null descriptor\n"); - spin_unlock_bh(&iop_chan->lock); -} - -static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan) -{ - struct iop_adma_desc_slot *sw_desc, *grp_start; - dma_cookie_t cookie; - int slot_cnt, slots_per_op; - - dev_dbg(iop_chan->device->common.dev, "%s\n", __func__); - - spin_lock_bh(&iop_chan->lock); - slot_cnt = iop_chan_xor_slot_count(0, 2, &slots_per_op); - sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op); - if (sw_desc) { - grp_start = sw_desc->group_head; - list_splice_init(&sw_desc->tx_list, &iop_chan->chain); - async_tx_ack(&sw_desc->async_tx); - iop_desc_init_null_xor(grp_start, 2, 0); - iop_desc_set_byte_count(grp_start, iop_chan, 0); - iop_desc_set_dest_addr(grp_start, iop_chan, 0); - iop_desc_set_xor_src_addr(grp_start, 0, 0); - iop_desc_set_xor_src_addr(grp_start, 1, 0); - - cookie = dma_cookie_assign(&sw_desc->async_tx); - - /* initialize the completed cookie to be less than - * the most recently used cookie - */ - iop_chan->common.completed_cookie = cookie - 1; - - /* channel should not be busy */ - BUG_ON(iop_chan_is_busy(iop_chan)); - - /* clear any prior error-status bits */ - iop_adma_device_clear_err_status(iop_chan); - - /* disable operation */ - iop_chan_disable(iop_chan); - - /* set the descriptor address */ - iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys); - - /* 1/ don't add pre-chained descriptors - * 2/ dummy read to flush next_desc write - */ - BUG_ON(iop_desc_get_next_desc(sw_desc)); - - /* run the descriptor */ - iop_chan_enable(iop_chan); - } else - dev_err(iop_chan->device->common.dev, - "failed to allocate null descriptor\n"); - spin_unlock_bh(&iop_chan->lock); -} - -static struct platform_driver iop_adma_driver = { - 
.probe = iop_adma_probe, - .remove = iop_adma_remove, - .driver = { - .name = "iop-adma", - }, -}; - -module_platform_driver(iop_adma_driver); - -MODULE_AUTHOR("Intel Corporation"); -MODULE_DESCRIPTION("IOP ADMA Engine Driver"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:iop-adma"); diff --git a/drivers/dma/iop-adma.h b/drivers/dma/iop-adma.h deleted file mode 100644 index d44eabb6f5eb..000000000000 --- a/drivers/dma/iop-adma.h +++ /dev/null @@ -1,914 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright © 2006, Intel Corporation. - */ -#ifndef _ADMA_H -#define _ADMA_H -#include <linux/types.h> -#include <linux/io.h> -#include <linux/platform_data/dma-iop32x.h> - -/* Memory copy units */ -#define DMA_CCR(chan) (chan->mmr_base + 0x0) -#define DMA_CSR(chan) (chan->mmr_base + 0x4) -#define DMA_DAR(chan) (chan->mmr_base + 0xc) -#define DMA_NDAR(chan) (chan->mmr_base + 0x10) -#define DMA_PADR(chan) (chan->mmr_base + 0x14) -#define DMA_PUADR(chan) (chan->mmr_base + 0x18) -#define DMA_LADR(chan) (chan->mmr_base + 0x1c) -#define DMA_BCR(chan) (chan->mmr_base + 0x20) -#define DMA_DCR(chan) (chan->mmr_base + 0x24) - -/* Application accelerator unit */ -#define AAU_ACR(chan) (chan->mmr_base + 0x0) -#define AAU_ASR(chan) (chan->mmr_base + 0x4) -#define AAU_ADAR(chan) (chan->mmr_base + 0x8) -#define AAU_ANDAR(chan) (chan->mmr_base + 0xc) -#define AAU_SAR(src, chan) (chan->mmr_base + (0x10 + ((src) << 2))) -#define AAU_DAR(chan) (chan->mmr_base + 0x20) -#define AAU_ABCR(chan) (chan->mmr_base + 0x24) -#define AAU_ADCR(chan) (chan->mmr_base + 0x28) -#define AAU_SAR_EDCR(src_edc) (chan->mmr_base + (0x02c + ((src_edc-4) << 2))) -#define AAU_EDCR0_IDX 8 -#define AAU_EDCR1_IDX 17 -#define AAU_EDCR2_IDX 26 - -struct iop3xx_aau_desc_ctrl { - unsigned int int_en:1; - unsigned int blk1_cmd_ctrl:3; - unsigned int blk2_cmd_ctrl:3; - unsigned int blk3_cmd_ctrl:3; - unsigned int blk4_cmd_ctrl:3; - unsigned int blk5_cmd_ctrl:3; - unsigned int blk6_cmd_ctrl:3; - unsigned int blk7_cmd_ctrl:3; - unsigned int blk8_cmd_ctrl:3; - unsigned int blk_ctrl:2; - unsigned int dual_xor_en:1; - unsigned int tx_complete:1; - unsigned int zero_result_err:1; - unsigned int zero_result_en:1; - unsigned int dest_write_en:1; -}; - -struct iop3xx_aau_e_desc_ctrl { - unsigned int reserved:1; - unsigned int blk1_cmd_ctrl:3; - unsigned int blk2_cmd_ctrl:3; - unsigned int blk3_cmd_ctrl:3; - unsigned int blk4_cmd_ctrl:3; - unsigned int blk5_cmd_ctrl:3; - unsigned int blk6_cmd_ctrl:3; - unsigned int blk7_cmd_ctrl:3; - unsigned int blk8_cmd_ctrl:3; - unsigned int reserved2:7; -}; - -struct iop3xx_dma_desc_ctrl { - unsigned int pci_transaction:4; - unsigned int int_en:1; - unsigned int dac_cycle_en:1; - unsigned int mem_to_mem_en:1; - unsigned int crc_data_tx_en:1; - unsigned int crc_gen_en:1; - unsigned int crc_seed_dis:1; - unsigned int reserved:21; - unsigned int crc_tx_complete:1; -}; - -struct iop3xx_desc_dma { - u32 next_desc; - union { - u32 pci_src_addr; - u32 pci_dest_addr; - u32 src_addr; - }; - union { - u32 upper_pci_src_addr; - u32 upper_pci_dest_addr; - }; - union { - u32 local_pci_src_addr; - u32 local_pci_dest_addr; - u32 dest_addr; - }; - u32 byte_count; - union { - u32 desc_ctrl; - struct iop3xx_dma_desc_ctrl desc_ctrl_field; - }; - u32 crc_addr; -}; - -struct iop3xx_desc_aau { - u32 next_desc; - u32 src[4]; - u32 dest_addr; - u32 byte_count; - union { - u32 desc_ctrl; - struct iop3xx_aau_desc_ctrl desc_ctrl_field; - }; - union { - u32 src_addr; - u32 e_desc_ctrl; - struct iop3xx_aau_e_desc_ctrl 
e_desc_ctrl_field; - } src_edc[31]; -}; - -struct iop3xx_aau_gfmr { - unsigned int gfmr1:8; - unsigned int gfmr2:8; - unsigned int gfmr3:8; - unsigned int gfmr4:8; -}; - -struct iop3xx_desc_pq_xor { - u32 next_desc; - u32 src[3]; - union { - u32 data_mult1; - struct iop3xx_aau_gfmr data_mult1_field; - }; - u32 dest_addr; - u32 byte_count; - union { - u32 desc_ctrl; - struct iop3xx_aau_desc_ctrl desc_ctrl_field; - }; - union { - u32 src_addr; - u32 e_desc_ctrl; - struct iop3xx_aau_e_desc_ctrl e_desc_ctrl_field; - u32 data_multiplier; - struct iop3xx_aau_gfmr data_mult_field; - u32 reserved; - } src_edc_gfmr[19]; -}; - -struct iop3xx_desc_dual_xor { - u32 next_desc; - u32 src0_addr; - u32 src1_addr; - u32 h_src_addr; - u32 d_src_addr; - u32 h_dest_addr; - u32 byte_count; - union { - u32 desc_ctrl; - struct iop3xx_aau_desc_ctrl desc_ctrl_field; - }; - u32 d_dest_addr; -}; - -union iop3xx_desc { - struct iop3xx_desc_aau *aau; - struct iop3xx_desc_dma *dma; - struct iop3xx_desc_pq_xor *pq_xor; - struct iop3xx_desc_dual_xor *dual_xor; - void *ptr; -}; - -/* No support for p+q operations */ -static inline int -iop_chan_pq_slot_count(size_t len, int src_cnt, int *slots_per_op) -{ - BUG(); - return 0; -} - -static inline void -iop_desc_init_pq(struct iop_adma_desc_slot *desc, int src_cnt, - unsigned long flags) -{ - BUG(); -} - -static inline void -iop_desc_set_pq_addr(struct iop_adma_desc_slot *desc, dma_addr_t *addr) -{ - BUG(); -} - -static inline void -iop_desc_set_pq_src_addr(struct iop_adma_desc_slot *desc, int src_idx, - dma_addr_t addr, unsigned char coef) -{ - BUG(); -} - -static inline int -iop_chan_pq_zero_sum_slot_count(size_t len, int src_cnt, int *slots_per_op) -{ - BUG(); - return 0; -} - -static inline void -iop_desc_init_pq_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt, - unsigned long flags) -{ - BUG(); -} - -static inline void -iop_desc_set_pq_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len) -{ - BUG(); -} - -#define iop_desc_set_pq_zero_sum_src_addr iop_desc_set_pq_src_addr - -static inline void -iop_desc_set_pq_zero_sum_addr(struct iop_adma_desc_slot *desc, int pq_idx, - dma_addr_t *src) -{ - BUG(); -} - -static inline int iop_adma_get_max_xor(void) -{ - return 32; -} - -static inline int iop_adma_get_max_pq(void) -{ - BUG(); - return 0; -} - -static inline u32 iop_chan_get_current_descriptor(struct iop_adma_chan *chan) -{ - int id = chan->device->id; - - switch (id) { - case DMA0_ID: - case DMA1_ID: - return __raw_readl(DMA_DAR(chan)); - case AAU_ID: - return __raw_readl(AAU_ADAR(chan)); - default: - BUG(); - } - return 0; -} - -static inline void iop_chan_set_next_descriptor(struct iop_adma_chan *chan, - u32 next_desc_addr) -{ - int id = chan->device->id; - - switch (id) { - case DMA0_ID: - case DMA1_ID: - __raw_writel(next_desc_addr, DMA_NDAR(chan)); - break; - case AAU_ID: - __raw_writel(next_desc_addr, AAU_ANDAR(chan)); - break; - } - -} - -#define IOP_ADMA_STATUS_BUSY (1 << 10) -#define IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT (1024) -#define IOP_ADMA_XOR_MAX_BYTE_COUNT (16 * 1024 * 1024) -#define IOP_ADMA_MAX_BYTE_COUNT (16 * 1024 * 1024) - -static inline int iop_chan_is_busy(struct iop_adma_chan *chan) -{ - u32 status = __raw_readl(DMA_CSR(chan)); - return (status & IOP_ADMA_STATUS_BUSY) ? 1 : 0; -} - -static inline int iop_desc_is_aligned(struct iop_adma_desc_slot *desc, - int num_slots) -{ - /* num_slots will only ever be 1, 2, 4, or 8 */ - return (desc->idx & (num_slots - 1)) ? 0 : 1; -} - -/* to do: support large (i.e. 
> hw max) buffer sizes */ -static inline int iop_chan_memcpy_slot_count(size_t len, int *slots_per_op) -{ - *slots_per_op = 1; - return 1; -} - -/* to do: support large (i.e. > hw max) buffer sizes */ -static inline int iop_chan_memset_slot_count(size_t len, int *slots_per_op) -{ - *slots_per_op = 1; - return 1; -} - -static inline int iop3xx_aau_xor_slot_count(size_t len, int src_cnt, - int *slots_per_op) -{ - static const char slot_count_table[] = { - 1, 1, 1, 1, /* 01 - 04 */ - 2, 2, 2, 2, /* 05 - 08 */ - 4, 4, 4, 4, /* 09 - 12 */ - 4, 4, 4, 4, /* 13 - 16 */ - 8, 8, 8, 8, /* 17 - 20 */ - 8, 8, 8, 8, /* 21 - 24 */ - 8, 8, 8, 8, /* 25 - 28 */ - 8, 8, 8, 8, /* 29 - 32 */ - }; - *slots_per_op = slot_count_table[src_cnt - 1]; - return *slots_per_op; -} - -static inline int -iop_chan_interrupt_slot_count(int *slots_per_op, struct iop_adma_chan *chan) -{ - switch (chan->device->id) { - case DMA0_ID: - case DMA1_ID: - return iop_chan_memcpy_slot_count(0, slots_per_op); - case AAU_ID: - return iop3xx_aau_xor_slot_count(0, 2, slots_per_op); - default: - BUG(); - } - return 0; -} - -static inline int iop_chan_xor_slot_count(size_t len, int src_cnt, - int *slots_per_op) -{ - int slot_cnt = iop3xx_aau_xor_slot_count(len, src_cnt, slots_per_op); - - if (len <= IOP_ADMA_XOR_MAX_BYTE_COUNT) - return slot_cnt; - - len -= IOP_ADMA_XOR_MAX_BYTE_COUNT; - while (len > IOP_ADMA_XOR_MAX_BYTE_COUNT) { - len -= IOP_ADMA_XOR_MAX_BYTE_COUNT; - slot_cnt += *slots_per_op; - } - - slot_cnt += *slots_per_op; - - return slot_cnt; -} - -/* zero sum on iop3xx is limited to 1k at a time so it requires multiple - * descriptors - */ -static inline int iop_chan_zero_sum_slot_count(size_t len, int src_cnt, - int *slots_per_op) -{ - int slot_cnt = iop3xx_aau_xor_slot_count(len, src_cnt, slots_per_op); - - if (len <= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) - return slot_cnt; - - len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT; - while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) { - len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT; - slot_cnt += *slots_per_op; - } - - slot_cnt += *slots_per_op; - - return slot_cnt; -} - -static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc, - struct iop_adma_chan *chan) -{ - union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, }; - - switch (chan->device->id) { - case DMA0_ID: - case DMA1_ID: - return hw_desc.dma->byte_count; - case AAU_ID: - return hw_desc.aau->byte_count; - default: - BUG(); - } - return 0; -} - -/* translate the src_idx to a descriptor word index */ -static inline int __desc_idx(int src_idx) -{ - static const int desc_idx_table[] = { 0, 0, 0, 0, - 0, 1, 2, 3, - 5, 6, 7, 8, - 9, 10, 11, 12, - 14, 15, 16, 17, - 18, 19, 20, 21, - 23, 24, 25, 26, - 27, 28, 29, 30, - }; - - return desc_idx_table[src_idx]; -} - -static inline u32 iop_desc_get_src_addr(struct iop_adma_desc_slot *desc, - struct iop_adma_chan *chan, - int src_idx) -{ - union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, }; - - switch (chan->device->id) { - case DMA0_ID: - case DMA1_ID: - return hw_desc.dma->src_addr; - case AAU_ID: - break; - default: - BUG(); - } - - if (src_idx < 4) - return hw_desc.aau->src[src_idx]; - else - return hw_desc.aau->src_edc[__desc_idx(src_idx)].src_addr; -} - -static inline void iop3xx_aau_desc_set_src_addr(struct iop3xx_desc_aau *hw_desc, - int src_idx, dma_addr_t addr) -{ - if (src_idx < 4) - hw_desc->src[src_idx] = addr; - else - hw_desc->src_edc[__desc_idx(src_idx)].src_addr = addr; -} - -static inline void -iop_desc_init_memcpy(struct iop_adma_desc_slot *desc, unsigned long flags) -{ - 
	struct iop3xx_desc_dma *hw_desc = desc->hw_desc;
-	union {
-		u32 value;
-		struct iop3xx_dma_desc_ctrl field;
-	} u_desc_ctrl;
-
-	u_desc_ctrl.value = 0;
-	u_desc_ctrl.field.mem_to_mem_en = 1;
-	u_desc_ctrl.field.pci_transaction = 0xe; /* memory read block */
-	u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
-	hw_desc->desc_ctrl = u_desc_ctrl.value;
-	hw_desc->upper_pci_src_addr = 0;
-	hw_desc->crc_addr = 0;
-}
-
-static inline void
-iop_desc_init_memset(struct iop_adma_desc_slot *desc, unsigned long flags)
-{
-	struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
-	union {
-		u32 value;
-		struct iop3xx_aau_desc_ctrl field;
-	} u_desc_ctrl;
-
-	u_desc_ctrl.value = 0;
-	u_desc_ctrl.field.blk1_cmd_ctrl = 0x2; /* memory block fill */
-	u_desc_ctrl.field.dest_write_en = 1;
-	u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
-	hw_desc->desc_ctrl = u_desc_ctrl.value;
-}
-
-static inline u32
-iop3xx_desc_init_xor(struct iop3xx_desc_aau *hw_desc, int src_cnt,
-		     unsigned long flags)
-{
-	int i, shift;
-	u32 edcr;
-	union {
-		u32 value;
-		struct iop3xx_aau_desc_ctrl field;
-	} u_desc_ctrl;
-
-	u_desc_ctrl.value = 0;
-	switch (src_cnt) {
-	case 25 ... 32:
-		u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
-		edcr = 0;
-		shift = 1;
-		for (i = 24; i < src_cnt; i++) {
-			edcr |= (1 << shift);
-			shift += 3;
-		}
-		hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = edcr;
-		src_cnt = 24;
-		fallthrough;
-	case 17 ... 24:
-		if (!u_desc_ctrl.field.blk_ctrl) {
-			hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
-			u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
-		}
-		edcr = 0;
-		shift = 1;
-		for (i = 16; i < src_cnt; i++) {
-			edcr |= (1 << shift);
-			shift += 3;
-		}
-		hw_desc->src_edc[AAU_EDCR1_IDX].e_desc_ctrl = edcr;
-		src_cnt = 16;
-		fallthrough;
-	case 9 ... 16:
-		if (!u_desc_ctrl.field.blk_ctrl)
-			u_desc_ctrl.field.blk_ctrl = 0x2; /* use EDCR0 */
-		edcr = 0;
-		shift = 1;
-		for (i = 8; i < src_cnt; i++) {
-			edcr |= (1 << shift);
-			shift += 3;
-		}
-		hw_desc->src_edc[AAU_EDCR0_IDX].e_desc_ctrl = edcr;
-		src_cnt = 8;
-		fallthrough;
-	case 2 ... 8:
-		shift = 1;
-		for (i = 0; i < src_cnt; i++) {
-			u_desc_ctrl.value |= (1 << shift);
-			shift += 3;
-		}
-
-		if (!u_desc_ctrl.field.blk_ctrl && src_cnt > 4)
-			u_desc_ctrl.field.blk_ctrl = 0x1; /* use mini-desc */
-	}
-
-	u_desc_ctrl.field.dest_write_en = 1;
-	u_desc_ctrl.field.blk1_cmd_ctrl = 0x7; /* direct fill */
-	u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
-	hw_desc->desc_ctrl = u_desc_ctrl.value;
-
-	return u_desc_ctrl.value;
-}
-
-static inline void
-iop_desc_init_xor(struct iop_adma_desc_slot *desc, int src_cnt,
-		  unsigned long flags)
-{
-	iop3xx_desc_init_xor(desc->hw_desc, src_cnt, flags);
-}
-
-/* return the number of operations */
-static inline int
-iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt,
-		       unsigned long flags)
-{
-	int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
-	struct iop3xx_desc_aau *hw_desc, *prev_hw_desc, *iter;
-	union {
-		u32 value;
-		struct iop3xx_aau_desc_ctrl field;
-	} u_desc_ctrl;
-	int i, j;
-
-	hw_desc = desc->hw_desc;
-
-	for (i = 0, j = 0; (slot_cnt -= slots_per_op) >= 0;
-		i += slots_per_op, j++) {
-		iter = iop_hw_desc_slot_idx(hw_desc, i);
-		u_desc_ctrl.value = iop3xx_desc_init_xor(iter, src_cnt, flags);
-		u_desc_ctrl.field.dest_write_en = 0;
-		u_desc_ctrl.field.zero_result_en = 1;
-		u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
-		iter->desc_ctrl = u_desc_ctrl.value;
-
-		/* for the subsequent descriptors preserve the store queue
-		 * and chain them together
-		 */
-		if (i) {
-			prev_hw_desc =
-				iop_hw_desc_slot_idx(hw_desc, i - slots_per_op);
-			prev_hw_desc->next_desc =
-				(u32) (desc->async_tx.phys + (i << 5));
-		}
-	}
-
-	return j;
-}
-
-static inline void
-iop_desc_init_null_xor(struct iop_adma_desc_slot *desc, int src_cnt,
-		       unsigned long flags)
-{
-	struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
-	union {
-		u32 value;
-		struct iop3xx_aau_desc_ctrl field;
-	} u_desc_ctrl;
-
-	u_desc_ctrl.value = 0;
-	switch (src_cnt) {
-	case 25 ... 32:
-		u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
-		hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
-		fallthrough;
-	case 17 ... 24:
-		if (!u_desc_ctrl.field.blk_ctrl) {
-			hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
-			u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
-		}
-		hw_desc->src_edc[AAU_EDCR1_IDX].e_desc_ctrl = 0;
-		fallthrough;
-	case 9 ... 16:
-		if (!u_desc_ctrl.field.blk_ctrl)
-			u_desc_ctrl.field.blk_ctrl = 0x2; /* use EDCR0 */
-		hw_desc->src_edc[AAU_EDCR0_IDX].e_desc_ctrl = 0;
-		fallthrough;
-	case 1 ... 8:
-		if (!u_desc_ctrl.field.blk_ctrl && src_cnt > 4)
-			u_desc_ctrl.field.blk_ctrl = 0x1; /* use mini-desc */
-	}
-
-	u_desc_ctrl.field.dest_write_en = 0;
-	u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
-	hw_desc->desc_ctrl = u_desc_ctrl.value;
-}
-
-static inline void iop_desc_set_byte_count(struct iop_adma_desc_slot *desc,
-					    struct iop_adma_chan *chan,
-					    u32 byte_count)
-{
-	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
-
-	switch (chan->device->id) {
-	case DMA0_ID:
-	case DMA1_ID:
-		hw_desc.dma->byte_count = byte_count;
-		break;
-	case AAU_ID:
-		hw_desc.aau->byte_count = byte_count;
-		break;
-	default:
-		BUG();
-	}
-}
-
-static inline void
-iop_desc_init_interrupt(struct iop_adma_desc_slot *desc,
-			struct iop_adma_chan *chan)
-{
-	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
-
-	switch (chan->device->id) {
-	case DMA0_ID:
-	case DMA1_ID:
-		iop_desc_init_memcpy(desc, 1);
-		hw_desc.dma->byte_count = 0;
-		hw_desc.dma->dest_addr = 0;
-		hw_desc.dma->src_addr = 0;
-		break;
-	case AAU_ID:
-		iop_desc_init_null_xor(desc, 2, 1);
-		hw_desc.aau->byte_count = 0;
-		hw_desc.aau->dest_addr = 0;
-		hw_desc.aau->src[0] = 0;
-		hw_desc.aau->src[1] = 0;
-		break;
-	default:
-		BUG();
-	}
-}
-
-static inline void
-iop_desc_set_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len)
-{
-	int slots_per_op = desc->slots_per_op;
-	struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
-	int i = 0;
-
-	if (len <= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
-		hw_desc->byte_count = len;
-	} else {
-		do {
-			iter = iop_hw_desc_slot_idx(hw_desc, i);
-			iter->byte_count = IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
-			len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
-			i += slots_per_op;
-		} while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT);
-
-		iter = iop_hw_desc_slot_idx(hw_desc, i);
-		iter->byte_count = len;
-	}
-}
-
-static inline void iop_desc_set_dest_addr(struct iop_adma_desc_slot *desc,
-					  struct iop_adma_chan *chan,
-					  dma_addr_t addr)
-{
-	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
-
-	switch (chan->device->id) {
-	case DMA0_ID:
-	case DMA1_ID:
-		hw_desc.dma->dest_addr = addr;
-		break;
-	case AAU_ID:
-		hw_desc.aau->dest_addr = addr;
-		break;
-	default:
-		BUG();
-	}
-}
-
-static inline void iop_desc_set_memcpy_src_addr(struct iop_adma_desc_slot *desc,
-						dma_addr_t addr)
-{
-	struct iop3xx_desc_dma *hw_desc = desc->hw_desc;
-	hw_desc->src_addr = addr;
-}
-
-static inline void
-iop_desc_set_zero_sum_src_addr(struct iop_adma_desc_slot *desc, int src_idx,
-			       dma_addr_t addr)
-{
-
-	struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
-	int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
-	int i;
-
-	for (i = 0; (slot_cnt -= slots_per_op) >= 0;
-		i += slots_per_op, addr += IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
-		iter = iop_hw_desc_slot_idx(hw_desc, i);
-		iop3xx_aau_desc_set_src_addr(iter, src_idx, addr);
-	}
-}
-
-static inline void iop_desc_set_xor_src_addr(struct iop_adma_desc_slot *desc,
-					     int src_idx, dma_addr_t addr)
-{
-
-	struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
-	int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
-	int i;
-
-	for (i = 0; (slot_cnt -= slots_per_op) >= 0;
-		i += slots_per_op, addr += IOP_ADMA_XOR_MAX_BYTE_COUNT) {
-		iter = iop_hw_desc_slot_idx(hw_desc, i);
-		iop3xx_aau_desc_set_src_addr(iter, src_idx, addr);
-	}
-}
-
-static inline void iop_desc_set_next_desc(struct iop_adma_desc_slot *desc,
-					  u32 next_desc_addr)
-{
-	/* hw_desc->next_desc is the same location for all channels */
-	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
-
-	iop_paranoia(hw_desc.dma->next_desc);
-	hw_desc.dma->next_desc = next_desc_addr;
-}
-
-static inline u32 iop_desc_get_next_desc(struct iop_adma_desc_slot *desc)
-{
-	/* hw_desc->next_desc is the same location for all channels */
-	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
-	return hw_desc.dma->next_desc;
-}
-
-static inline void iop_desc_clear_next_desc(struct iop_adma_desc_slot *desc)
-{
-	/* hw_desc->next_desc is the same location for all channels */
-	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
-	hw_desc.dma->next_desc = 0;
-}
-
-static inline void iop_desc_set_block_fill_val(struct iop_adma_desc_slot *desc,
-					       u32 val)
-{
-	struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
-	hw_desc->src[0] = val;
-}
-
-static inline enum sum_check_flags
-iop_desc_get_zero_result(struct iop_adma_desc_slot *desc)
-{
-	struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
-	struct iop3xx_aau_desc_ctrl desc_ctrl = hw_desc->desc_ctrl_field;
-
-	iop_paranoia(!(desc_ctrl.tx_complete && desc_ctrl.zero_result_en));
-	return desc_ctrl.zero_result_err << SUM_CHECK_P;
-}
-
-static inline void iop_chan_append(struct iop_adma_chan *chan)
-{
-	u32 dma_chan_ctrl;
-
-	dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
-	dma_chan_ctrl |= 0x2;
-	__raw_writel(dma_chan_ctrl, DMA_CCR(chan));
-}
-
-static inline u32 iop_chan_get_status(struct iop_adma_chan *chan)
-{
-	return __raw_readl(DMA_CSR(chan));
-}
-
-static inline void iop_chan_disable(struct iop_adma_chan *chan)
-{
-	u32 dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
-	dma_chan_ctrl &= ~1;
-	__raw_writel(dma_chan_ctrl, DMA_CCR(chan));
-}
-
-static inline void iop_chan_enable(struct iop_adma_chan *chan)
-{
-	u32 dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
-
-	dma_chan_ctrl |= 1;
-	__raw_writel(dma_chan_ctrl, DMA_CCR(chan));
-}
-
-static inline void iop_adma_device_clear_eot_status(struct iop_adma_chan *chan)
-{
-	u32 status = __raw_readl(DMA_CSR(chan));
-	status &= (1 << 9);
-	__raw_writel(status, DMA_CSR(chan));
-}
-
-static inline void iop_adma_device_clear_eoc_status(struct iop_adma_chan *chan)
-{
-	u32 status = __raw_readl(DMA_CSR(chan));
-	status &= (1 << 8);
-	__raw_writel(status, DMA_CSR(chan));
-}
-
-static inline void iop_adma_device_clear_err_status(struct iop_adma_chan *chan)
-{
-	u32 status = __raw_readl(DMA_CSR(chan));
-
-	switch (chan->device->id) {
-	case DMA0_ID:
-	case DMA1_ID:
-		status &= (1 << 5) | (1 << 3) | (1 << 2) | (1 << 1);
-		break;
-	case AAU_ID:
-		status &= (1 << 5);
-		break;
-	default:
-		BUG();
-	}
-
-	__raw_writel(status, DMA_CSR(chan));
-}
-
-static inline int
-iop_is_err_int_parity(unsigned long status, struct iop_adma_chan *chan)
-{
-	return 0;
-}
-
-static inline int
-iop_is_err_mcu_abort(unsigned long status, struct iop_adma_chan *chan)
-{
-	return 0;
-}
-
-static inline int
-iop_is_err_int_tabort(unsigned long status, struct iop_adma_chan *chan)
-{
-	return 0;
-}
-
-static inline int
-iop_is_err_int_mabort(unsigned long status, struct iop_adma_chan *chan)
-{
-	return test_bit(5, &status);
-}
-
-static inline int
-iop_is_err_pci_tabort(unsigned long status, struct iop_adma_chan *chan)
-{
-	switch (chan->device->id) {
-	case DMA0_ID:
-	case DMA1_ID:
-		return test_bit(2, &status);
-	default:
-		return 0;
-	}
-}
-
-static inline int
-iop_is_err_pci_mabort(unsigned long status, struct iop_adma_chan *chan)
-{
-	switch (chan->device->id) {
-	case DMA0_ID:
-	case DMA1_ID:
-		return test_bit(3, &status);
-	default:
-		return 0;
-	}
-}
-
-static inline int
-iop_is_err_split_tx(unsigned long status, struct iop_adma_chan *chan)
-{
-	switch (chan->device->id) {
-	case DMA0_ID:
-	case DMA1_ID:
-		return test_bit(1, &status);
-	default:
-		return 0;
-	}
-}
-#endif /* _ADMA_H */
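Every helper in the header removed above builds its hardware control word the same way: a C bitfield struct is overlaid on a u32 through a union, individual fields are set, and the raw 32-bit value is written into the descriptor. A minimal sketch of that pattern, using a purely hypothetical field layout rather than the real IOP3xx one:

#include <stdint.h>

/* Hypothetical control-word layout, for illustration only */
struct demo_desc_ctrl {
	uint32_t int_en:1;		/* raise an interrupt on completion */
	uint32_t dest_write_en:1;	/* write the result to the destination */
	uint32_t blk_ctrl:2;		/* block-control mode selector */
	uint32_t reserved:28;
};

static inline uint32_t demo_build_ctrl(int want_irq)
{
	union {
		uint32_t value;
		struct demo_desc_ctrl field;
	} u = { .value = 0 };

	u.field.dest_write_en = 1;
	u.field.int_en = want_irq ? 1 : 0;
	return u.value;		/* raw word programmed into the descriptor */
}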
diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
index 3f56514bbef8..061add832295 100644
--- a/drivers/dma/qcom/gpi.c
+++ b/drivers/dma/qcom/gpi.c
@@ -2286,9 +2286,14 @@ static int gpi_probe(struct platform_device *pdev)
 }
 
 static const struct of_device_id gpi_of_match[] = {
-	{ .compatible = "qcom,sc7280-gpi-dma", .data = (void *)0x10000 },
 	{ .compatible = "qcom,sdm845-gpi-dma", .data = (void *)0x0 },
 	{ .compatible = "qcom,sm6350-gpi-dma", .data = (void *)0x10000 },
+	/*
+	 * Do not grow the list for compatible devices. Instead use
+	 * qcom,sdm845-gpi-dma (for ee_offset = 0x0) or qcom,sm6350-gpi-dma
+	 * (for ee_offset = 0x10000).
+	 */
+	{ .compatible = "qcom,sc7280-gpi-dma", .data = (void *)0x10000 },
 	{ .compatible = "qcom,sm8150-gpi-dma", .data = (void *)0x0 },
 	{ .compatible = "qcom,sm8250-gpi-dma", .data = (void *)0x0 },
 	{ .compatible = "qcom,sm8350-gpi-dma", .data = (void *)0x10000 },
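The .data value carried by each gpi_of_match entry above is the per-SoC ee_offset, which is why new SoCs with an offset of 0x0 or 0x10000 are expected to reuse the qcom,sdm845-gpi-dma or qcom,sm6350-gpi-dma compatibles instead of adding rows. A minimal sketch of how a probe routine can recover such match data; the function and variable names below are illustrative, not necessarily the exact code in gpi.c:

#include <linux/of_device.h>
#include <linux/platform_device.h>

static int demo_gpi_probe(struct platform_device *pdev)
{
	/* of_device_id.data stores the ee_offset as a plain integer */
	u32 ee_offset = (uintptr_t)of_device_get_match_data(&pdev->dev);

	dev_info(&pdev->dev, "using ee_offset %#x\n", ee_offset);
	return 0;
}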
diff --git a/drivers/dma/sh/shdma-arm.h b/drivers/dma/sh/shdma-arm.h
deleted file mode 100644
index 7459f9a13b5b..000000000000
--- a/drivers/dma/sh/shdma-arm.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Renesas SuperH DMA Engine support
- *
- * Copyright (C) 2013 Renesas Electronics, Inc.
- */
-
-#ifndef SHDMA_ARM_H
-#define SHDMA_ARM_H
-
-#include "shdma.h"
-
-/* Transmit sizes and respective CHCR register values */
-enum {
-	XMIT_SZ_8BIT		= 0,
-	XMIT_SZ_16BIT		= 1,
-	XMIT_SZ_32BIT		= 2,
-	XMIT_SZ_64BIT		= 7,
-	XMIT_SZ_128BIT		= 3,
-	XMIT_SZ_256BIT		= 4,
-	XMIT_SZ_512BIT		= 5,
-};
-
-/* log2(size / 8) - used to calculate number of transfers */
-#define SH_DMAE_TS_SHIFT {		\
-	[XMIT_SZ_8BIT]		= 0,	\
-	[XMIT_SZ_16BIT]		= 1,	\
-	[XMIT_SZ_32BIT]		= 2,	\
-	[XMIT_SZ_64BIT]		= 3,	\
-	[XMIT_SZ_128BIT]	= 4,	\
-	[XMIT_SZ_256BIT]	= 5,	\
-	[XMIT_SZ_512BIT]	= 6,	\
-}
-
-#define TS_LOW_BIT	0x3 /* --xx */
-#define TS_HI_BIT	0xc /* xx-- */
-
-#define TS_LOW_SHIFT	(3)
-#define TS_HI_SHIFT	(20 - 2)	/* 2 bits for shifted low TS */
-
-#define TS_INDEX2VAL(i) \
-	((((i) & TS_LOW_BIT) << TS_LOW_SHIFT) |\
-	 (((i) & TS_HI_BIT)  << TS_HI_SHIFT))
-
-#define CHCR_TX(xmit_sz) (DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL((xmit_sz)))
-#define CHCR_RX(xmit_sz) (DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL((xmit_sz)))
-
-#endif
diff --git a/drivers/dma/tegra186-gpc-dma.c b/drivers/dma/tegra186-gpc-dma.c
index fa9bda4a2bc6..1d1180db6d4e 100644
--- a/drivers/dma/tegra186-gpc-dma.c
+++ b/drivers/dma/tegra186-gpc-dma.c
@@ -161,7 +161,10 @@
 #define TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT	5000 /* 5 msec */
 
 /* Channel base address offset from GPCDMA base address */
-#define TEGRA_GPCDMA_CHANNEL_BASE_ADD_OFFSET	0x20000
+#define TEGRA_GPCDMA_CHANNEL_BASE_ADDR_OFFSET	0x10000
+
+/* Default channel mask reserving channel0 */
+#define TEGRA_GPCDMA_DEFAULT_CHANNEL_MASK	0xfffffffe
 
 struct tegra_dma;
 struct tegra_dma_channel;
@@ -246,6 +249,7 @@ struct tegra_dma {
 	const struct tegra_dma_chip_data *chip_data;
 	unsigned long sid_m2d_reserved;
 	unsigned long sid_d2m_reserved;
+	u32 chan_mask;
 	void __iomem *base_addr;
 	struct device *dev;
 	struct dma_device dma_dev;
@@ -1288,7 +1292,7 @@ static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
 }
 
 static const struct tegra_dma_chip_data tegra186_dma_chip_data = {
-	.nr_channels = 31,
+	.nr_channels = 32,
 	.channel_reg_size = SZ_64K,
 	.max_dma_count = SZ_1G,
 	.hw_support_pause = false,
@@ -1296,7 +1300,7 @@ static const struct tegra_dma_chip_data tegra186_dma_chip_data = {
 };
 
 static const struct tegra_dma_chip_data tegra194_dma_chip_data = {
-	.nr_channels = 31,
+	.nr_channels = 32,
 	.channel_reg_size = SZ_64K,
 	.max_dma_count = SZ_1G,
 	.hw_support_pause = true,
@@ -1304,7 +1308,7 @@ static const struct tegra_dma_chip_data tegra194_dma_chip_data = {
 };
 
 static const struct tegra_dma_chip_data tegra234_dma_chip_data = {
-	.nr_channels = 31,
+	.nr_channels = 32,
 	.channel_reg_size = SZ_64K,
 	.max_dma_count = SZ_1G,
 	.hw_support_pause = true,
@@ -1380,15 +1384,28 @@ static int tegra_dma_probe(struct platform_device *pdev)
 	}
 	stream_id = iommu_spec->ids[0] & 0xffff;
 
+	ret = device_property_read_u32(&pdev->dev, "dma-channel-mask",
+				       &tdma->chan_mask);
+	if (ret) {
+		dev_warn(&pdev->dev,
+			 "Missing dma-channel-mask property, using default channel mask %#x\n",
+			 TEGRA_GPCDMA_DEFAULT_CHANNEL_MASK);
+		tdma->chan_mask = TEGRA_GPCDMA_DEFAULT_CHANNEL_MASK;
+	}
+
 	INIT_LIST_HEAD(&tdma->dma_dev.channels);
 	for (i = 0; i < cdata->nr_channels; i++) {
 		struct tegra_dma_channel *tdc = &tdma->channels[i];
 
+		/* Check for channel mask */
+		if (!(tdma->chan_mask & BIT(i)))
+			continue;
+
 		tdc->irq = platform_get_irq(pdev, i);
 		if (tdc->irq < 0)
 			return tdc->irq;
 
-		tdc->chan_base_offset = TEGRA_GPCDMA_CHANNEL_BASE_ADD_OFFSET +
+		tdc->chan_base_offset = TEGRA_GPCDMA_CHANNEL_BASE_ADDR_OFFSET +
 					i * cdata->channel_reg_size;
 		snprintf(tdc->name, sizeof(tdc->name), "gpcdma.%d", i);
 		tdc->tdma = tdma;
@@ -1449,8 +1466,8 @@ static int tegra_dma_probe(struct platform_device *pdev)
 		return ret;
 	}
 
-	dev_info(&pdev->dev, "GPC DMA driver register %d channels\n",
-		 cdata->nr_channels);
+	dev_info(&pdev->dev, "GPC DMA driver register %lu channels\n",
+		 hweight_long(tdma->chan_mask));
 
 	return 0;
 }
@@ -1473,6 +1490,9 @@ static int __maybe_unused tegra_dma_pm_suspend(struct device *dev)
 	for (i = 0; i < tdma->chip_data->nr_channels; i++) {
 		struct tegra_dma_channel *tdc = &tdma->channels[i];
 
+		if (!(tdma->chan_mask & BIT(i)))
+			continue;
+
 		if (tdc->dma_desc) {
 			dev_err(tdma->dev, "channel %u busy\n", i);
 			return -EBUSY;
@@ -1492,6 +1512,9 @@ static int __maybe_unused tegra_dma_pm_resume(struct device *dev)
 	for (i = 0; i < tdma->chip_data->nr_channels; i++) {
 		struct tegra_dma_channel *tdc = &tdma->channels[i];
 
+		if (!(tdma->chan_mask & BIT(i)))
+			continue;
+
 		tegra_dma_program_sid(tdc, tdc->stream_id);
 	}
 
diff --git a/drivers/dma/ti/Kconfig b/drivers/dma/ti/Kconfig
index 79618fac119a..2adc2cca10e9 100644
--- a/drivers/dma/ti/Kconfig
+++ b/drivers/dma/ti/Kconfig
@@ -35,7 +35,7 @@ config DMA_OMAP
 	  DMA engine is found on OMAP and DRA7xx parts.
 
 config TI_K3_UDMA
-	bool "Texas Instruments UDMA support"
+	tristate "Texas Instruments UDMA support"
 	depends on ARCH_K3
 	depends on TI_SCI_PROTOCOL
 	depends on TI_SCI_INTA_IRQCHIP
@@ -48,7 +48,7 @@ config TI_K3_UDMA
 	  DMA engine is used in AM65x and j721e.
 
 config TI_K3_UDMA_GLUE_LAYER
-	bool "Texas Instruments UDMA Glue layer for non DMAengine users"
+	tristate "Texas Instruments UDMA Glue layer for non DMAengine users"
 	depends on ARCH_K3
 	depends on TI_K3_UDMA
 	help
@@ -56,7 +56,8 @@ config TI_K3_UDMA_GLUE_LAYER
 	  If unsure, say N.
 
 config TI_K3_PSIL
-	bool
+	tristate
+	default TI_K3_UDMA
 
 config TI_DMA_CROSSBAR
 	bool
diff --git a/drivers/dma/ti/Makefile b/drivers/dma/ti/Makefile
index d3a303f0d7c6..b53d05b11ca5 100644
--- a/drivers/dma/ti/Makefile
+++ b/drivers/dma/ti/Makefile
@@ -4,11 +4,12 @@ obj-$(CONFIG_TI_EDMA) += edma.o
 obj-$(CONFIG_DMA_OMAP) += omap-dma.o
 obj-$(CONFIG_TI_K3_UDMA) += k3-udma.o
 obj-$(CONFIG_TI_K3_UDMA_GLUE_LAYER) += k3-udma-glue.o
-obj-$(CONFIG_TI_K3_PSIL) += k3-psil.o \
-			    k3-psil-am654.o \
-			    k3-psil-j721e.o \
-			    k3-psil-j7200.o \
-			    k3-psil-am64.o \
-			    k3-psil-j721s2.o \
-			    k3-psil-am62.o
+k3-psil-lib-objs := k3-psil.o \
+		    k3-psil-am654.o \
+		    k3-psil-j721e.o \
+		    k3-psil-j7200.o \
+		    k3-psil-am64.o \
+		    k3-psil-j721s2.o \
+		    k3-psil-am62.o
+obj-$(CONFIG_TI_K3_PSIL) += k3-psil-lib.o
 obj-$(CONFIG_TI_DMA_CROSSBAR) += dma-crossbar.o
diff --git a/drivers/dma/ti/k3-psil.c b/drivers/dma/ti/k3-psil.c
index 761a384093d2..8b6533a1eeeb 100644
--- a/drivers/dma/ti/k3-psil.c
+++ b/drivers/dma/ti/k3-psil.c
@@ -5,6 +5,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/device.h>
 #include <linux/init.h>
 #include <linux/mutex.h>
@@ -101,3 +102,4 @@ int psil_set_new_ep_config(struct device *dev, const char *name,
 	return 0;
 }
 EXPORT_SYMBOL_GPL(psil_set_new_ep_config);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/ti/k3-udma-glue.c b/drivers/dma/ti/k3-udma-glue.c
index 4f1aeb81e9c7..789193ed0386 100644
--- a/drivers/dma/ti/k3-udma-glue.c
+++ b/drivers/dma/ti/k3-udma-glue.c
@@ -6,6 +6,7 @@
  *
  */
 
+#include <linux/module.h>
 #include <linux/atomic.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
@@ -1436,4 +1437,6 @@ static int __init k3_udma_glue_class_init(void)
 {
 	return class_register(&k3_udma_glue_devclass);
 }
-arch_initcall(k3_udma_glue_class_init);
+
+module_init(k3_udma_glue_class_init);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index 7b5081989b3d..ce8b80bb34d7 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -5,6 +5,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
@@ -4335,18 +4336,10 @@ static const struct of_device_id udma_of_match[] = {
 		.compatible = "ti,j721e-navss-mcu-udmap",
 		.data = &j721e_mcu_data,
 	},
-	{ /* Sentinel */ },
-};
-
-static const struct of_device_id bcdma_of_match[] = {
 	{
 		.compatible = "ti,am64-dmss-bcdma",
 		.data = &am64_bcdma_data,
 	},
-	{ /* Sentinel */ },
-};
-
-static const struct of_device_id pktdma_of_match[] = {
 	{
 		.compatible = "ti,am64-dmss-pktdma",
 		.data = &am64_pktdma_data,
@@ -5271,14 +5264,9 @@ static int udma_probe(struct platform_device *pdev)
 		return -ENOMEM;
 
 	match = of_match_node(udma_of_match, dev->of_node);
-	if (!match)
-		match = of_match_node(bcdma_of_match, dev->of_node);
 	if (!match) {
-		match = of_match_node(pktdma_of_match, dev->of_node);
-		if (!match) {
-			dev_err(dev, "No compatible match found\n");
-			return -ENODEV;
-		}
+		dev_err(dev, "No compatible match found\n");
+		return -ENODEV;
 	}
 	ud->match_data = match->data;
 
@@ -5511,27 +5499,9 @@ static struct platform_driver udma_driver = {
 	},
 	.probe		= udma_probe,
 };
-builtin_platform_driver(udma_driver);
 
-static struct platform_driver bcdma_driver = {
-	.driver = {
-		.name	= "ti-bcdma",
-		.of_match_table = bcdma_of_match,
-		.suppress_bind_attrs = true,
-	},
-	.probe		= udma_probe,
-};
-builtin_platform_driver(bcdma_driver);
-
-static struct platform_driver pktdma_driver = {
-	.driver = {
-		.name	= "ti-pktdma",
-		.of_match_table = pktdma_of_match,
-		.suppress_bind_attrs = true,
-	},
-	.probe		= udma_probe,
-};
-builtin_platform_driver(pktdma_driver);
+module_platform_driver(udma_driver);
+MODULE_LICENSE("GPL v2");
 
 /* Private interfaces to UDMA */
 #include "k3-udma-private.c"
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 8cd4e69dc7b4..a8d23cdf883e 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -1659,6 +1659,8 @@ static void xilinx_dma_issue_pending(struct dma_chan *dchan)
  * xilinx_dma_device_config - Configure the DMA channel
  * @dchan: DMA channel
  * @config: channel configuration
+ *
+ * Return: 0 always.
  */
 static int xilinx_dma_device_config(struct dma_chan *dchan,
 				    struct dma_slave_config *config)
@@ -2924,7 +2926,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
  * @xdev: Driver specific device structure
  * @node: Device node
  *
- * Return: 0 always.
+ * Return: '0' on success and failure value on error.
  */
 static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
 				    struct device_node *node)
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 2bac44f09554..e9bf5236ed89 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -730,6 +730,7 @@ struct irq_domain *of_msi_get_domain(struct device *dev,
 
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(of_msi_get_domain);
 
 /**
  * of_msi_configure - Set the msi_domain field of a device
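The new export of of_msi_get_domain() above is what lets modular drivers resolve their MSI parent domain; without the EXPORT_SYMBOL_GPL the function was only reachable from built-in code. A short sketch of a caller, with the device pointer and bus token chosen purely for illustration:

#include <linux/device.h>
#include <linux/irqdomain.h>
#include <linux/of_irq.h>

static struct irq_domain *demo_find_msi_parent(struct device *dev)
{
	/* Resolve the MSI controller referenced by this device's OF node */
	return of_msi_get_domain(dev, dev->of_node, DOMAIN_BUS_PLATFORM_MSI);
}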