author		Vinod Koul <vinod.koul@intel.com>	2016-07-16 20:10:54 +0530
committer	Vinod Koul <vinod.koul@intel.com>	2016-07-16 20:10:54 +0530
commit		ad31aa8fedafdd0b9854035fe71eb37994c2d2ce (patch)
tree		2733f9d27f4ed6229b4bdbe0d43375cf239d8bf7 /drivers/dma
parent		3b3fb1a19963a3b735960b0b7e1cce4e53a3e79b (diff)
parent		7cdd3587b8628215f377d5d73a39540d94f33dc1 (diff)
Merge branch 'topic/xilinx' into for-linus
Diffstat (limited to 'drivers/dma')
-rw-r--r--	drivers/dma/Kconfig	18
-rw-r--r--	drivers/dma/xilinx/Makefile	3
-rw-r--r--	drivers/dma/xilinx/xilinx_dma.c (renamed from drivers/dma/xilinx/xilinx_vdma.c)	489
-rw-r--r--	drivers/dma/xilinx/zynqmp_dma.c	1145
4 files changed, 1596 insertions(+), 59 deletions(-)
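Editor's note: the merge below renames the AXI VDMA driver to xilinx_dma, adds cyclic (DMA_CYCLIC), interleaved and multi-channel support to it, and introduces a new ZynqMP DMA driver. As a minimal sketch (not part of this commit), this is roughly how a dmaengine consumer would exercise the new cyclic capability; the channel name "axidma0" and the buffer parameters are hypothetical, and error handling is trimmed to the essentials.

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

/* Invoked once per period while the cyclic transfer keeps looping */
static void period_done(void *param)
{
}

static int start_cyclic_rx(struct device *dev, dma_addr_t buf,
			   size_t buf_len, size_t period_len)
{
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *desc;

	/* "axidma0" is a hypothetical dma-names entry in the client's DT node */
	chan = dma_request_slave_channel(dev, "axidma0");
	if (!chan)
		return -ENODEV;

	/* One descriptor covering buf_len, split into buf_len/period_len periods */
	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!desc) {
		dma_release_channel(chan);
		return -EIO;
	}

	desc->callback = period_done;
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	/* The transfer now loops until dmaengine_terminate_all(chan) */
	return 0;
}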
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index f6c46d06cef7..739f797b40d9 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -533,19 +533,31 @@ config XGENE_DMA help Enable support for the APM X-Gene SoC DMA engine. -config XILINX_VDMA - tristate "Xilinx AXI VDMA Engine" +config XILINX_DMA + tristate "Xilinx AXI DMA Engine" depends on (ARCH_ZYNQ || MICROBLAZE || ARM64) select DMA_ENGINE help Enable support for Xilinx AXI VDMA Soft IP. - This engine provides high-bandwidth direct memory access + AXI VDMA engine provides high-bandwidth direct memory access between memory and AXI4-Stream video type target peripherals including peripherals which support AXI4- Stream Video Protocol. It has two stream interfaces/ channels, Memory Mapped to Stream (MM2S) and Stream to Memory Mapped (S2MM) for the data transfers. + AXI CDMA engine provides high-bandwidth direct memory access + between a memory-mapped source address and a memory-mapped + destination address. + AXI DMA engine provides high-bandwidth one dimensional direct + memory access between memory and AXI4-Stream target peripherals. + +config XILINX_ZYNQMP_DMA + tristate "Xilinx ZynqMP DMA Engine" + depends on (ARCH_ZYNQ || MICROBLAZE || ARM64) + select DMA_ENGINE + help + Enable support for Xilinx ZynqMP DMA controller. config ZX_DMA tristate "ZTE ZX296702 DMA support" diff --git a/drivers/dma/xilinx/Makefile b/drivers/dma/xilinx/Makefile index 3c4e9f2fea28..9e91f8f5b087 100644 --- a/drivers/dma/xilinx/Makefile +++ b/drivers/dma/xilinx/Makefile @@ -1 +1,2 @@ -obj-$(CONFIG_XILINX_VDMA) += xilinx_vdma.o +obj-$(CONFIG_XILINX_DMA) += xilinx_dma.o +obj-$(CONFIG_XILINX_ZYNQMP_DMA) += zynqmp_dma.o diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_dma.c index df9118540b91..4e223d094433 100644 --- a/drivers/dma/xilinx/xilinx_vdma.c +++ b/drivers/dma/xilinx/xilinx_dma.c @@ -45,6 +45,7 @@ #include <linux/of_irq.h> #include <linux/slab.h> #include <linux/clk.h> +#include <linux/io-64-nonatomic-lo-hi.h> #include "../dmaengine.h" @@ -113,7 +114,7 @@ #define XILINX_VDMA_REG_START_ADDRESS_64(n) (0x000c + 8 * (n)) /* HW specific definitions */ -#define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x2 +#define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x20 #define XILINX_DMA_DMAXR_ALL_IRQ_MASK \ (XILINX_DMA_DMASR_FRM_CNT_IRQ | \ @@ -157,12 +158,25 @@ /* AXI DMA Specific Masks/Bit fields */ #define XILINX_DMA_MAX_TRANS_LEN GENMASK(22, 0) #define XILINX_DMA_CR_COALESCE_MAX GENMASK(23, 16) +#define XILINX_DMA_CR_CYCLIC_BD_EN_MASK BIT(4) #define XILINX_DMA_CR_COALESCE_SHIFT 16 #define XILINX_DMA_BD_SOP BIT(27) #define XILINX_DMA_BD_EOP BIT(26) #define XILINX_DMA_COALESCE_MAX 255 #define XILINX_DMA_NUM_APP_WORDS 5 +/* Multi-Channel DMA Descriptor offsets */ +#define XILINX_DMA_MCRX_CDESC(x) (0x40 + (x-1) * 0x20) +#define XILINX_DMA_MCRX_TDESC(x) (0x48 + (x-1) * 0x20) + +/* Multi-Channel DMA Masks/Shifts */ +#define XILINX_DMA_BD_HSIZE_MASK GENMASK(15, 0) +#define XILINX_DMA_BD_STRIDE_MASK GENMASK(15, 0) +#define XILINX_DMA_BD_VSIZE_MASK GENMASK(31, 19) +#define XILINX_DMA_BD_TDEST_MASK GENMASK(4, 0) +#define XILINX_DMA_BD_STRIDE_SHIFT 0 +#define XILINX_DMA_BD_VSIZE_SHIFT 19 + /* AXI CDMA Specific Registers/Offsets */ #define XILINX_CDMA_REG_SRCADDR 0x18 #define XILINX_CDMA_REG_DSTADDR 0x20 @@ -194,22 +208,22 @@ struct xilinx_vdma_desc_hw { /** * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA * @next_desc: Next Descriptor Pointer @0x00 - * @pad1: Reserved @0x04 + * @next_desc_msb: MSB of Next Descriptor Pointer @0x04 *
@buf_addr: Buffer address @0x08 - * @pad2: Reserved @0x0C - * @pad3: Reserved @0x10 - * @pad4: Reserved @0x14 + * @buf_addr_msb: MSB of Buffer address @0x0C + * @mcdma_control: Control field for mcdma @0x10 + * @vsize_stride: Vsize and Stride field @0x14 * @control: Control field @0x18 * @status: Status field @0x1C * @app: APP Fields @0x20 - 0x30 */ struct xilinx_axidma_desc_hw { u32 next_desc; - u32 pad1; + u32 next_desc_msb; u32 buf_addr; - u32 pad2; - u32 pad3; - u32 pad4; + u32 buf_addr_msb; + u32 mcdma_control; + u32 vsize_stride; u32 control; u32 status; u32 app[XILINX_DMA_NUM_APP_WORDS]; @@ -218,21 +232,21 @@ struct xilinx_axidma_desc_hw { /** * struct xilinx_cdma_desc_hw - Hardware Descriptor * @next_desc: Next Descriptor Pointer @0x00 - * @pad1: Reserved @0x04 + * @next_desc_msb: Next Descriptor Pointer MSB @0x04 * @src_addr: Source address @0x08 - * @pad2: Reserved @0x0C + * @src_addr_msb: Source address MSB @0x0C * @dest_addr: Destination address @0x10 - * @pad3: Reserved @0x14 + * @dest_addr_msb: Destination address MSB @0x14 * @control: Control field @0x18 * @status: Status field @0x1C */ struct xilinx_cdma_desc_hw { u32 next_desc; - u32 pad1; + u32 next_desc_msb; u32 src_addr; - u32 pad2; + u32 src_addr_msb; u32 dest_addr; - u32 pad3; + u32 dest_addr_msb; u32 control; u32 status; } __aligned(64); @@ -278,11 +292,13 @@ struct xilinx_cdma_tx_segment { * @async_tx: Async transaction descriptor * @segments: TX segments list * @node: Node in the channel descriptors list + * @cyclic: Check for cyclic transfers. */ struct xilinx_dma_tx_descriptor { struct dma_async_tx_descriptor async_tx; struct list_head segments; struct list_head node; + bool cyclic; }; /** @@ -302,6 +318,7 @@ struct xilinx_dma_tx_descriptor { * @direction: Transfer direction * @num_frms: Number of frames * @has_sg: Support scatter transfers + * @cyclic: Check for cyclic transfers.
* @genlock: Support genlock mode * @err: Channel has errors * @tasklet: Cleanup work after irq @@ -312,6 +329,7 @@ struct xilinx_dma_tx_descriptor { * @desc_submitcount: Descriptor h/w submitted count * @residue: Residue for AXI DMA * @seg_v: Statically allocated segments base + * @cyclic_seg_v: Statically allocated segment base for cyclic transfers * @start_transfer: Differentiate b/w DMA IP's transfer */ struct xilinx_dma_chan { @@ -330,6 +348,7 @@ struct xilinx_dma_chan { enum dma_transfer_direction direction; int num_frms; bool has_sg; + bool cyclic; bool genlock; bool err; struct tasklet_struct tasklet; @@ -340,7 +359,9 @@ struct xilinx_dma_chan { u32 desc_submitcount; u32 residue; struct xilinx_axidma_tx_segment *seg_v; + struct xilinx_axidma_tx_segment *cyclic_seg_v; void (*start_transfer)(struct xilinx_dma_chan *chan); + u16 tdest; }; struct xilinx_dma_config { @@ -357,6 +378,7 @@ struct xilinx_dma_config { * @common: DMA device structure * @chan: Driver specific DMA channel * @has_sg: Specifies whether Scatter-Gather is present or not + * @mcdma: Specifies whether Multi-Channel is present or not * @flush_on_fsync: Flush on frame sync * @ext_addr: Indicates 64 bit addressing is supported by dma device * @pdev: Platform device structure pointer @@ -366,6 +388,8 @@ struct xilinx_dma_config { * @txs_clk: DMA mm2s stream clock * @rx_clk: DMA s2mm clock * @rxs_clk: DMA s2mm stream clock + * @nr_channels: Number of channels DMA device supports + * @chan_id: DMA channel identifier */ struct xilinx_dma_device { void __iomem *regs; @@ -373,6 +397,7 @@ struct xilinx_dma_device { struct dma_device common; struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE]; bool has_sg; + bool mcdma; u32 flush_on_fsync; bool ext_addr; struct platform_device *pdev; @@ -382,6 +407,8 @@ struct xilinx_dma_device { struct clk *txs_clk; struct clk *rx_clk; struct clk *rxs_clk; + u32 nr_channels; + u32 chan_id; }; /* Macros */ @@ -454,6 +481,34 @@ static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg, writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4); } +static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value) +{ + lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg); +} + +static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg, + dma_addr_t addr) +{ + if (chan->ext_addr) + dma_writeq(chan, reg, addr); + else + dma_ctrl_write(chan, reg, addr); +} + +static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan, + struct xilinx_axidma_desc_hw *hw, + dma_addr_t buf_addr, size_t sg_used, + size_t period_len) +{ + if (chan->ext_addr) { + hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len); + hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used + + period_len); + } else { + hw->buf_addr = buf_addr + sg_used + period_len; + } +} + /* ----------------------------------------------------------------------------- * Descriptors and segments alloc and free */ @@ -491,11 +546,10 @@ xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan) struct xilinx_cdma_tx_segment *segment; dma_addr_t phys; - segment = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &phys); + segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys); if (!segment) return NULL; - memset(segment, 0, sizeof(*segment)); segment->phys = phys; return segment; @@ -513,11 +567,10 @@ xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan) struct xilinx_axidma_tx_segment *segment; dma_addr_t phys; - segment = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, 
&phys); + segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys); if (!segment) return NULL; - memset(segment, 0, sizeof(*segment)); segment->phys = phys; return segment; @@ -660,13 +713,37 @@ static void xilinx_dma_free_chan_resources(struct dma_chan *dchan) dev_dbg(chan->dev, "Free all channel resources.\n"); xilinx_dma_free_descriptors(chan); - if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) + if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { + xilinx_dma_free_tx_segment(chan, chan->cyclic_seg_v); xilinx_dma_free_tx_segment(chan, chan->seg_v); + } dma_pool_destroy(chan->desc_pool); chan->desc_pool = NULL; } /** + * xilinx_dma_chan_handle_cyclic - Cyclic dma callback + * @chan: Driver specific dma channel + * @desc: dma transaction descriptor + * @flags: flags for spin lock + */ +static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan, + struct xilinx_dma_tx_descriptor *desc, + unsigned long *flags) +{ + dma_async_tx_callback callback; + void *callback_param; + + callback = desc->async_tx.callback; + callback_param = desc->async_tx.callback_param; + if (callback) { + spin_unlock_irqrestore(&chan->lock, *flags); + callback(callback_param); + spin_lock_irqsave(&chan->lock, *flags); + } +} + +/** * xilinx_dma_chan_desc_cleanup - Clean channel descriptors * @chan: Driver specific DMA channel */ @@ -681,6 +758,11 @@ static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan) dma_async_tx_callback callback; void *callback_param; + if (desc->cyclic) { + xilinx_dma_chan_handle_cyclic(chan, desc, &flags); + break; + } + /* Remove from the list of running transactions */ list_del(&desc->node); @@ -757,7 +839,7 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan) return -ENOMEM; } - if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) + if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { /* * For AXI DMA case after submitting a pending_list, keep * an extra segment allocated so that the "next descriptor" @@ -768,6 +850,15 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan) */ chan->seg_v = xilinx_axidma_alloc_tx_segment(chan); + /* + * For cyclic DMA mode we need to program the tail Descriptor + * register with a value which is not a part of the BD chain + * so allocating a desc segment during channel allocation for + * programming tail descriptor. 
+ */ + chan->cyclic_seg_v = xilinx_axidma_alloc_tx_segment(chan); + } + dma_cookie_init(dchan); if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { @@ -1065,12 +1156,12 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan) } if (chan->has_sg) { - dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC, - head_desc->async_tx.phys); + xilinx_write(chan, XILINX_DMA_REG_CURDESC, + head_desc->async_tx.phys); /* Update tail ptr register which will start the transfer */ - dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, - tail_segment->phys); + xilinx_write(chan, XILINX_DMA_REG_TAILDESC, + tail_segment->phys); } else { /* In simple mode */ struct xilinx_cdma_tx_segment *segment; @@ -1082,8 +1173,8 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan) hw = &segment->hw; - dma_ctrl_write(chan, XILINX_CDMA_REG_SRCADDR, hw->src_addr); - dma_ctrl_write(chan, XILINX_CDMA_REG_DSTADDR, hw->dest_addr); + xilinx_write(chan, XILINX_CDMA_REG_SRCADDR, hw->src_addr); + xilinx_write(chan, XILINX_CDMA_REG_DSTADDR, hw->dest_addr); /* Start the transfer */ dma_ctrl_write(chan, XILINX_DMA_REG_BTT, @@ -1124,18 +1215,20 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan) tail_segment = list_last_entry(&tail_desc->segments, struct xilinx_axidma_tx_segment, node); - old_head = list_first_entry(&head_desc->segments, - struct xilinx_axidma_tx_segment, node); - new_head = chan->seg_v; - /* Copy Buffer Descriptor fields. */ - new_head->hw = old_head->hw; + if (chan->has_sg && !chan->xdev->mcdma) { + old_head = list_first_entry(&head_desc->segments, + struct xilinx_axidma_tx_segment, node); + new_head = chan->seg_v; + /* Copy Buffer Descriptor fields. */ + new_head->hw = old_head->hw; - /* Swap and save new reserve */ - list_replace_init(&old_head->node, &new_head->node); - chan->seg_v = old_head; + /* Swap and save new reserve */ + list_replace_init(&old_head->node, &new_head->node); + chan->seg_v = old_head; - tail_segment->hw.next_desc = chan->seg_v->phys; - head_desc->async_tx.phys = new_head->phys; + tail_segment->hw.next_desc = chan->seg_v->phys; + head_desc->async_tx.phys = new_head->phys; + } reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); @@ -1146,9 +1239,25 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan) dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); } - if (chan->has_sg) - dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC, - head_desc->async_tx.phys); + if (chan->has_sg && !chan->xdev->mcdma) + xilinx_write(chan, XILINX_DMA_REG_CURDESC, + head_desc->async_tx.phys); + + if (chan->has_sg && chan->xdev->mcdma) { + if (chan->direction == DMA_MEM_TO_DEV) { + dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC, + head_desc->async_tx.phys); + } else { + if (!chan->tdest) { + dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC, + head_desc->async_tx.phys); + } else { + dma_ctrl_write(chan, + XILINX_DMA_MCRX_CDESC(chan->tdest), + head_desc->async_tx.phys); + } + } + } xilinx_dma_start(chan); @@ -1156,9 +1265,27 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan) return; /* Start the transfer */ - if (chan->has_sg) { - dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, + if (chan->has_sg && !chan->xdev->mcdma) { + if (chan->cyclic) + xilinx_write(chan, XILINX_DMA_REG_TAILDESC, + chan->cyclic_seg_v->phys); + else + xilinx_write(chan, XILINX_DMA_REG_TAILDESC, + tail_segment->phys); + } else if (chan->has_sg && chan->xdev->mcdma) { + if (chan->direction == DMA_MEM_TO_DEV) { + dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, tail_segment->phys); + } else { + if 
(!chan->tdest) { + dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, + tail_segment->phys); + } else { + dma_ctrl_write(chan, + XILINX_DMA_MCRX_TDESC(chan->tdest), + tail_segment->phys); + } + } } else { struct xilinx_axidma_tx_segment *segment; struct xilinx_axidma_desc_hw *hw; @@ -1168,7 +1295,7 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan) node); hw = &segment->hw; - dma_ctrl_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr); + xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr); /* Start the transfer */ dma_ctrl_write(chan, XILINX_DMA_REG_BTT, @@ -1209,7 +1336,8 @@ static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan) list_for_each_entry_safe(desc, next, &chan->active_list, node) { list_del(&desc->node); - dma_cookie_complete(&desc->async_tx); + if (!desc->cyclic) + dma_cookie_complete(&desc->async_tx); list_add_tail(&desc->node, &chan->done_list); } } @@ -1397,6 +1525,11 @@ static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx) unsigned long flags; int err; + if (chan->cyclic) { + xilinx_dma_free_tx_descriptor(chan, desc); + return -EBUSY; + } + if (chan->err) { /* * If reset fails, need to hard reset the system. @@ -1414,6 +1547,9 @@ static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx) /* Put this transaction onto the tail of the pending queue */ append_desc_queue(chan, desc); + if (desc->cyclic) + chan->cyclic = true; + spin_unlock_irqrestore(&chan->lock, flags); return cookie; @@ -1541,6 +1677,10 @@ xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst, hw->control = len; hw->src_addr = dma_src; hw->dest_addr = dma_dst; + if (chan->ext_addr) { + hw->src_addr_msb = upper_32_bits(dma_src); + hw->dest_addr_msb = upper_32_bits(dma_dst); + } /* Fill the previous next descriptor with current */ prev = list_last_entry(&desc->segments, @@ -1623,7 +1763,8 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg( hw = &segment->hw; /* Fill in the descriptor */ - hw->buf_addr = sg_dma_address(sg) + sg_used; + xilinx_axidma_buf(chan, hw, sg_dma_address(sg), + sg_used, 0); hw->control = copy; @@ -1669,12 +1810,204 @@ error: } /** + * xilinx_dma_prep_dma_cyclic - prepare descriptors for a cyclic DMA transaction + * @dchan: DMA channel + * @buf_addr: Physical address of the buffer + * @buf_len: Total length of the cyclic buffers + * @period_len: Period length of each cyclic buffer + * @direction: DMA direction + * @flags: transfer ack flags + */ +static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic( + struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len, + size_t period_len, enum dma_transfer_direction direction, + unsigned long flags) +{ + struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); + struct xilinx_dma_tx_descriptor *desc; + struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL; + size_t copy, sg_used; + unsigned int num_periods; + int i; + u32 reg; + + if (!period_len) + return NULL; + + num_periods = buf_len / period_len; + + if (!num_periods) + return NULL; + + if (!is_slave_direction(direction)) + return NULL; + + /* Allocate a transaction descriptor.
*/ + desc = xilinx_dma_alloc_tx_descriptor(chan); + if (!desc) + return NULL; + + chan->direction = direction; + dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); + desc->async_tx.tx_submit = xilinx_dma_tx_submit; + + for (i = 0; i < num_periods; ++i) { + sg_used = 0; + + while (sg_used < period_len) { + struct xilinx_axidma_desc_hw *hw; + + /* Get a free segment */ + segment = xilinx_axidma_alloc_tx_segment(chan); + if (!segment) + goto error; + + /* + * Calculate the maximum number of bytes to transfer, + * making sure it is less than the hw limit + */ + copy = min_t(size_t, period_len - sg_used, + XILINX_DMA_MAX_TRANS_LEN); + hw = &segment->hw; + xilinx_axidma_buf(chan, hw, buf_addr, sg_used, + period_len * i); + hw->control = copy; + + if (prev) + prev->hw.next_desc = segment->phys; + + prev = segment; + sg_used += copy; + + /* + * Insert the segment into the descriptor segments + * list. + */ + list_add_tail(&segment->node, &desc->segments); + } + } + + head_segment = list_first_entry(&desc->segments, + struct xilinx_axidma_tx_segment, node); + desc->async_tx.phys = head_segment->phys; + + desc->cyclic = true; + reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); + reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK; + dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); + + segment = list_last_entry(&desc->segments, + struct xilinx_axidma_tx_segment, + node); + segment->hw.next_desc = (u32) head_segment->phys; + + /* For the last DMA_MEM_TO_DEV transfer, set EOP */ + if (direction == DMA_MEM_TO_DEV) { + head_segment->hw.control |= XILINX_DMA_BD_SOP; + segment->hw.control |= XILINX_DMA_BD_EOP; + } + + return &desc->async_tx; + +error: + xilinx_dma_free_tx_descriptor(chan, desc); + return NULL; +} + +/** + * xilinx_dma_prep_interleaved - prepare a descriptor for a + * DMA_SLAVE transaction + * @dchan: DMA channel + * @xt: Interleaved template pointer + * @flags: transfer ack flags + * + * Return: Async transaction descriptor on success and NULL on failure + */ +static struct dma_async_tx_descriptor * +xilinx_dma_prep_interleaved(struct dma_chan *dchan, + struct dma_interleaved_template *xt, + unsigned long flags) +{ + struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); + struct xilinx_dma_tx_descriptor *desc; + struct xilinx_axidma_tx_segment *segment; + struct xilinx_axidma_desc_hw *hw; + + if (!is_slave_direction(xt->dir)) + return NULL; + + if (!xt->numf || !xt->sgl[0].size) + return NULL; + + if (xt->frame_size != 1) + return NULL; + + /* Allocate a transaction descriptor. */ + desc = xilinx_dma_alloc_tx_descriptor(chan); + if (!desc) + return NULL; + + chan->direction = xt->dir; + dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); + desc->async_tx.tx_submit = xilinx_dma_tx_submit; + + /* Get a free segment */ + segment = xilinx_axidma_alloc_tx_segment(chan); + if (!segment) + goto error; + + hw = &segment->hw; + + /* Fill in the descriptor */ + if (xt->dir != DMA_MEM_TO_DEV) + hw->buf_addr = xt->dst_start; + else + hw->buf_addr = xt->src_start; + + hw->mcdma_control = chan->tdest & XILINX_DMA_BD_TDEST_MASK; + hw->vsize_stride = (xt->numf << XILINX_DMA_BD_VSIZE_SHIFT) & + XILINX_DMA_BD_VSIZE_MASK; + hw->vsize_stride |= (xt->sgl[0].icg + xt->sgl[0].size) & + XILINX_DMA_BD_STRIDE_MASK; + hw->control = xt->sgl[0].size & XILINX_DMA_BD_HSIZE_MASK; + + /* + * Insert the segment into the descriptor segments + * list. 
+ */ + list_add_tail(&segment->node, &desc->segments); + + + segment = list_first_entry(&desc->segments, + struct xilinx_axidma_tx_segment, node); + desc->async_tx.phys = segment->phys; + + /* For the last DMA_MEM_TO_DEV transfer, set EOP */ + if (xt->dir == DMA_MEM_TO_DEV) { + segment->hw.control |= XILINX_DMA_BD_SOP; + segment = list_last_entry(&desc->segments, + struct xilinx_axidma_tx_segment, + node); + segment->hw.control |= XILINX_DMA_BD_EOP; + } + + return &desc->async_tx; + +error: + xilinx_dma_free_tx_descriptor(chan, desc); + return NULL; +} + +/** * xilinx_dma_terminate_all - Halt the channel and free descriptors * @chan: Driver specific DMA Channel pointer */ static int xilinx_dma_terminate_all(struct dma_chan *dchan) { struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); + u32 reg; + + if (chan->cyclic) + xilinx_dma_chan_reset(chan); /* Halt the DMA engine */ xilinx_dma_halt(chan); @@ -1682,6 +2015,13 @@ static int xilinx_dma_terminate_all(struct dma_chan *dchan) /* Remove and free all of the descriptors in the lists */ xilinx_dma_free_descriptors(chan); + if (chan->cyclic) { + reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); + reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK; + dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); + chan->cyclic = false; + } + return 0; } @@ -1972,7 +2312,7 @@ static void xdma_disable_allclks(struct xilinx_dma_device *xdev) * Return: '0' on success and failure value on error */ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, - struct device_node *node) + struct device_node *node, int chan_id) { struct xilinx_dma_chan *chan; bool has_dre = false; @@ -2014,9 +2354,12 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, if (!has_dre) xdev->common.copy_align = fls(width - 1); - if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel")) { + if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") || + of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") || + of_device_is_compatible(node, "xlnx,axi-cdma-channel")) { chan->direction = DMA_MEM_TO_DEV; - chan->id = 0; + chan->id = chan_id; + chan->tdest = chan_id; chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET; if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { @@ -2027,9 +2370,12 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, chan->flush_on_fsync = true; } } else if (of_device_is_compatible(node, - "xlnx,axi-vdma-s2mm-channel")) { + "xlnx,axi-vdma-s2mm-channel") || + of_device_is_compatible(node, + "xlnx,axi-dma-s2mm-channel")) { chan->direction = DMA_DEV_TO_MEM; - chan->id = 1; + chan->id = chan_id; + chan->tdest = chan_id - xdev->nr_channels; chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET; if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { @@ -2084,6 +2430,32 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, } /** + * xilinx_dma_child_probe - Per child node probe + * It get number of dma-channels per child node from + * device-tree and initializes all the channels. + * + * @xdev: Driver specific device structure + * @node: Device node + * + * Return: 0 always. 
+ */ +static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev, + struct device_node *node) { + int ret, i, nr_channels = 1; + + ret = of_property_read_u32(node, "dma-channels", &nr_channels); + if ((ret < 0) && xdev->mcdma) + dev_warn(xdev->dev, "missing dma-channels property\n"); + + for (i = 0; i < nr_channels; i++) + xilinx_dma_chan_probe(xdev, node, xdev->chan_id++); + + xdev->nr_channels += nr_channels; + + return 0; +} + +/** * of_dma_xilinx_xlate - Translation function * @dma_spec: Pointer to DMA specifier as found in the device tree * @ofdma: Pointer to DMA controller data @@ -2096,7 +2468,7 @@ static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec, struct xilinx_dma_device *xdev = ofdma->of_dma_data; int chan_id = dma_spec->args[0]; - if (chan_id >= XILINX_DMA_MAX_CHANS_PER_DEVICE || !xdev->chan[chan_id]) + if (chan_id >= xdev->nr_channels || !xdev->chan[chan_id]) return NULL; return dma_get_slave_channel(&xdev->chan[chan_id]->common); @@ -2172,6 +2544,8 @@ static int xilinx_dma_probe(struct platform_device *pdev) /* Retrieve the DMA engine properties from the device tree */ xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg"); + if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) + xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma"); if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { err = of_property_read_u32(node, "xlnx,num-fstores", @@ -2218,7 +2592,12 @@ static int xilinx_dma_probe(struct platform_device *pdev) xdev->common.device_tx_status = xilinx_dma_tx_status; xdev->common.device_issue_pending = xilinx_dma_issue_pending; if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { + dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask); xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg; + xdev->common.device_prep_dma_cyclic = + xilinx_dma_prep_dma_cyclic; + xdev->common.device_prep_interleaved_dma = + xilinx_dma_prep_interleaved; /* Residue calculation is supported by only AXI DMA */ xdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; @@ -2234,13 +2613,13 @@ static int xilinx_dma_probe(struct platform_device *pdev) /* Initialize the channels */ for_each_child_of_node(node, child) { - err = xilinx_dma_chan_probe(xdev, child); + err = xilinx_dma_child_probe(xdev, child); if (err < 0) goto disable_clks; } if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { - for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++) + for (i = 0; i < xdev->nr_channels; i++) if (xdev->chan[i]) xdev->chan[i]->num_frms = num_frames; } @@ -2263,7 +2642,7 @@ static int xilinx_dma_probe(struct platform_device *pdev) disable_clks: xdma_disable_allclks(xdev); error: - for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++) + for (i = 0; i < xdev->nr_channels; i++) if (xdev->chan[i]) xilinx_dma_chan_remove(xdev->chan[i]); @@ -2285,7 +2664,7 @@ static int xilinx_dma_remove(struct platform_device *pdev) dma_async_device_unregister(&xdev->common); - for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++) + for (i = 0; i < xdev->nr_channels; i++) if (xdev->chan[i]) xilinx_dma_chan_remove(xdev->chan[i]); diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c new file mode 100644 index 000000000000..f777a5bc0db8 --- /dev/null +++ b/drivers/dma/xilinx/zynqmp_dma.c @@ -0,0 +1,1145 @@ +/* + * DMA driver for Xilinx ZynqMP DMA Engine + * + * Copyright (C) 2016 Xilinx, Inc. All rights reserved. 
+ * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/bitops.h> +#include <linux/dmapool.h> +#include <linux/dma/xilinx_dma.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/of_dma.h> +#include <linux/of_irq.h> +#include <linux/of_platform.h> +#include <linux/slab.h> +#include <linux/clk.h> +#include <linux/io-64-nonatomic-lo-hi.h> + +#include "../dmaengine.h" + +/* Register Offsets */ +#define ZYNQMP_DMA_ISR 0x100 +#define ZYNQMP_DMA_IMR 0x104 +#define ZYNQMP_DMA_IER 0x108 +#define ZYNQMP_DMA_IDS 0x10C +#define ZYNQMP_DMA_CTRL0 0x110 +#define ZYNQMP_DMA_CTRL1 0x114 +#define ZYNQMP_DMA_DATA_ATTR 0x120 +#define ZYNQMP_DMA_DSCR_ATTR 0x124 +#define ZYNQMP_DMA_SRC_DSCR_WRD0 0x128 +#define ZYNQMP_DMA_SRC_DSCR_WRD1 0x12C +#define ZYNQMP_DMA_SRC_DSCR_WRD2 0x130 +#define ZYNQMP_DMA_SRC_DSCR_WRD3 0x134 +#define ZYNQMP_DMA_DST_DSCR_WRD0 0x138 +#define ZYNQMP_DMA_DST_DSCR_WRD1 0x13C +#define ZYNQMP_DMA_DST_DSCR_WRD2 0x140 +#define ZYNQMP_DMA_DST_DSCR_WRD3 0x144 +#define ZYNQMP_DMA_SRC_START_LSB 0x158 +#define ZYNQMP_DMA_SRC_START_MSB 0x15C +#define ZYNQMP_DMA_DST_START_LSB 0x160 +#define ZYNQMP_DMA_DST_START_MSB 0x164 +#define ZYNQMP_DMA_RATE_CTRL 0x18C +#define ZYNQMP_DMA_IRQ_SRC_ACCT 0x190 +#define ZYNQMP_DMA_IRQ_DST_ACCT 0x194 +#define ZYNQMP_DMA_CTRL2 0x200 + +/* Interrupt registers bit field definitions */ +#define ZYNQMP_DMA_DONE BIT(10) +#define ZYNQMP_DMA_AXI_WR_DATA BIT(9) +#define ZYNQMP_DMA_AXI_RD_DATA BIT(8) +#define ZYNQMP_DMA_AXI_RD_DST_DSCR BIT(7) +#define ZYNQMP_DMA_AXI_RD_SRC_DSCR BIT(6) +#define ZYNQMP_DMA_IRQ_DST_ACCT_ERR BIT(5) +#define ZYNQMP_DMA_IRQ_SRC_ACCT_ERR BIT(4) +#define ZYNQMP_DMA_BYTE_CNT_OVRFL BIT(3) +#define ZYNQMP_DMA_DST_DSCR_DONE BIT(2) +#define ZYNQMP_DMA_INV_APB BIT(0) + +/* Control 0 register bit field definitions */ +#define ZYNQMP_DMA_OVR_FETCH BIT(7) +#define ZYNQMP_DMA_POINT_TYPE_SG BIT(6) +#define ZYNQMP_DMA_RATE_CTRL_EN BIT(3) + +/* Control 1 register bit field definitions */ +#define ZYNQMP_DMA_SRC_ISSUE GENMASK(4, 0) + +/* Data Attribute register bit field definitions */ +#define ZYNQMP_DMA_ARBURST GENMASK(27, 26) +#define ZYNQMP_DMA_ARCACHE GENMASK(25, 22) +#define ZYNQMP_DMA_ARCACHE_OFST 22 +#define ZYNQMP_DMA_ARQOS GENMASK(21, 18) +#define ZYNQMP_DMA_ARQOS_OFST 18 +#define ZYNQMP_DMA_ARLEN GENMASK(17, 14) +#define ZYNQMP_DMA_ARLEN_OFST 14 +#define ZYNQMP_DMA_AWBURST GENMASK(13, 12) +#define ZYNQMP_DMA_AWCACHE GENMASK(11, 8) +#define ZYNQMP_DMA_AWCACHE_OFST 8 +#define ZYNQMP_DMA_AWQOS GENMASK(7, 4) +#define ZYNQMP_DMA_AWQOS_OFST 4 +#define ZYNQMP_DMA_AWLEN GENMASK(3, 0) +#define ZYNQMP_DMA_AWLEN_OFST 0 + +/* Descriptor Attribute register bit field definitions */ +#define ZYNQMP_DMA_AXCOHRNT BIT(8) +#define ZYNQMP_DMA_AXCACHE GENMASK(7, 4) +#define ZYNQMP_DMA_AXCACHE_OFST 4 +#define ZYNQMP_DMA_AXQOS GENMASK(3, 0) +#define ZYNQMP_DMA_AXQOS_OFST 0 + +/* Control register 2 bit field definitions */ +#define ZYNQMP_DMA_ENABLE BIT(0) + +/* Buffer Descriptor definitions */ +#define ZYNQMP_DMA_DESC_CTRL_STOP 0x10 +#define ZYNQMP_DMA_DESC_CTRL_COMP_INT 0x4 +#define ZYNQMP_DMA_DESC_CTRL_SIZE_256 0x2 +#define ZYNQMP_DMA_DESC_CTRL_COHRNT 0x1 + +/* Interrupt Mask specific definitions */ +#define ZYNQMP_DMA_INT_ERR 
(ZYNQMP_DMA_AXI_RD_DATA | \ + ZYNQMP_DMA_AXI_WR_DATA | \ + ZYNQMP_DMA_AXI_RD_DST_DSCR | \ + ZYNQMP_DMA_AXI_RD_SRC_DSCR | \ + ZYNQMP_DMA_INV_APB) +#define ZYNQMP_DMA_INT_OVRFL (ZYNQMP_DMA_BYTE_CNT_OVRFL | \ + ZYNQMP_DMA_IRQ_SRC_ACCT_ERR | \ + ZYNQMP_DMA_IRQ_DST_ACCT_ERR) +#define ZYNQMP_DMA_INT_DONE (ZYNQMP_DMA_DONE | ZYNQMP_DMA_DST_DSCR_DONE) +#define ZYNQMP_DMA_INT_EN_DEFAULT_MASK (ZYNQMP_DMA_INT_DONE | \ + ZYNQMP_DMA_INT_ERR | \ + ZYNQMP_DMA_INT_OVRFL | \ + ZYNQMP_DMA_DST_DSCR_DONE) + +/* Max number of descriptors per channel */ +#define ZYNQMP_DMA_NUM_DESCS 32 + +/* Max transfer size per descriptor */ +#define ZYNQMP_DMA_MAX_TRANS_LEN 0x40000000 + +/* Reset values for data attributes */ +#define ZYNQMP_DMA_AXCACHE_VAL 0xF +#define ZYNQMP_DMA_ARLEN_RST_VAL 0xF +#define ZYNQMP_DMA_AWLEN_RST_VAL 0xF + +#define ZYNQMP_DMA_SRC_ISSUE_RST_VAL 0x1F + +#define ZYNQMP_DMA_IDS_DEFAULT_MASK 0xFFF + +/* Bus width in bits */ +#define ZYNQMP_DMA_BUS_WIDTH_64 64 +#define ZYNQMP_DMA_BUS_WIDTH_128 128 + +#define ZYNQMP_DMA_DESC_SIZE(chan) (chan->desc_size) + +#define to_chan(chan) container_of(chan, struct zynqmp_dma_chan, \ + common) +#define tx_to_desc(tx) container_of(tx, struct zynqmp_dma_desc_sw, \ + async_tx) + +/** + * struct zynqmp_dma_desc_ll - Hw linked list descriptor + * @addr: Buffer address + * @size: Size of the buffer + * @ctrl: Control word + * @nxtdscraddr: Next descriptor base address + * @rsvd: Reserved field and for Hw internal use. + */ +struct zynqmp_dma_desc_ll { + u64 addr; + u32 size; + u32 ctrl; + u64 nxtdscraddr; + u64 rsvd; +} __aligned(64); + +/** + * struct zynqmp_dma_desc_sw - Per Transaction structure + * @src: Source address for simple mode dma + * @dst: Destination address for simple mode dma + * @len: Transfer length for simple mode dma + * @node: Node in the channel descriptor list + * @tx_list: List head for the current transfer + * @async_tx: Async transaction descriptor + * @src_v: Virtual address of the src descriptor + * @src_p: Physical address of the src descriptor + * @dst_v: Virtual address of the dst descriptor + * @dst_p: Physical address of the dst descriptor + */ +struct zynqmp_dma_desc_sw { + u64 src; + u64 dst; + u32 len; + struct list_head node; + struct list_head tx_list; + struct dma_async_tx_descriptor async_tx; + struct zynqmp_dma_desc_ll *src_v; + dma_addr_t src_p; + struct zynqmp_dma_desc_ll *dst_v; + dma_addr_t dst_p; +}; + +/** + * struct zynqmp_dma_chan - Driver specific DMA channel structure + * @zdev: Driver specific device structure + * @regs: Control registers offset + * @lock: Descriptor operation lock + * @pending_list: Descriptors waiting + * @free_list: Descriptors free + * @active_list: Descriptors active + * @sw_desc_pool: SW descriptor pool + * @done_list: Complete descriptors + * @common: DMA common channel + * @desc_pool_v: Statically allocated descriptor base + * @desc_pool_p: Physical allocated descriptor base + * @desc_free_cnt: Descriptor available count + * @dev: The dma device + * @irq: Channel IRQ + * @is_dmacoherent: Tells whether dma operations are coherent or not + * @tasklet: Cleanup work after irq + * @idle: Channel status + * @desc_size: Size of the low level descriptor + * @err: Channel has errors + * @bus_width: Bus width + * @src_burst_len: Source burst length + * @dst_burst_len: Dest burst length + * @clk_main: Pointer to main clock + * @clk_apb: Pointer to apb clock + */ +struct zynqmp_dma_chan { + struct zynqmp_dma_device *zdev; + void __iomem *regs; + spinlock_t lock; + struct list_head pending_list; + struct
list_head free_list; + struct list_head active_list; + struct zynqmp_dma_desc_sw *sw_desc_pool; + struct list_head done_list; + struct dma_chan common; + void *desc_pool_v; + dma_addr_t desc_pool_p; + u32 desc_free_cnt; + struct device *dev; + int irq; + bool is_dmacoherent; + struct tasklet_struct tasklet; + bool idle; + u32 desc_size; + bool err; + u32 bus_width; + u32 src_burst_len; + u32 dst_burst_len; + struct clk *clk_main; + struct clk *clk_apb; +}; + +/** + * struct zynqmp_dma_device - DMA device structure + * @dev: Device Structure + * @common: DMA device structure + * @chan: Driver specific DMA channel + */ +struct zynqmp_dma_device { + struct device *dev; + struct dma_device common; + struct zynqmp_dma_chan *chan; +}; + +static inline void zynqmp_dma_writeq(struct zynqmp_dma_chan *chan, u32 reg, + u64 value) +{ + lo_hi_writeq(value, chan->regs + reg); +} + +/** + * zynqmp_dma_update_desc_to_ctrlr - Updates descriptor to the controller + * @chan: ZynqMP DMA DMA channel pointer + * @desc: Transaction descriptor pointer + */ +static void zynqmp_dma_update_desc_to_ctrlr(struct zynqmp_dma_chan *chan, + struct zynqmp_dma_desc_sw *desc) +{ + dma_addr_t addr; + + addr = desc->src_p; + zynqmp_dma_writeq(chan, ZYNQMP_DMA_SRC_START_LSB, addr); + addr = desc->dst_p; + zynqmp_dma_writeq(chan, ZYNQMP_DMA_DST_START_LSB, addr); +} + +/** + * zynqmp_dma_desc_config_eod - Mark the descriptor as end descriptor + * @chan: ZynqMP DMA channel pointer + * @desc: Hw descriptor pointer + */ +static void zynqmp_dma_desc_config_eod(struct zynqmp_dma_chan *chan, + void *desc) +{ + struct zynqmp_dma_desc_ll *hw = (struct zynqmp_dma_desc_ll *)desc; + + hw->ctrl |= ZYNQMP_DMA_DESC_CTRL_STOP; + hw++; + hw->ctrl |= ZYNQMP_DMA_DESC_CTRL_COMP_INT | ZYNQMP_DMA_DESC_CTRL_STOP; +} + +/** + * zynqmp_dma_config_sg_ll_desc - Configure the linked list descriptor + * @chan: ZynqMP DMA channel pointer + * @sdesc: Hw descriptor pointer + * @src: Source buffer address + * @dst: Destination buffer address + * @len: Transfer length + * @prev: Previous hw descriptor pointer + */ +static void zynqmp_dma_config_sg_ll_desc(struct zynqmp_dma_chan *chan, + struct zynqmp_dma_desc_ll *sdesc, + dma_addr_t src, dma_addr_t dst, size_t len, + struct zynqmp_dma_desc_ll *prev) +{ + struct zynqmp_dma_desc_ll *ddesc = sdesc + 1; + + sdesc->size = ddesc->size = len; + sdesc->addr = src; + ddesc->addr = dst; + + sdesc->ctrl = ddesc->ctrl = ZYNQMP_DMA_DESC_CTRL_SIZE_256; + if (chan->is_dmacoherent) { + sdesc->ctrl |= ZYNQMP_DMA_DESC_CTRL_COHRNT; + ddesc->ctrl |= ZYNQMP_DMA_DESC_CTRL_COHRNT; + } + + if (prev) { + dma_addr_t addr = chan->desc_pool_p + + ((uintptr_t)sdesc - (uintptr_t)chan->desc_pool_v); + ddesc = prev + 1; + prev->nxtdscraddr = addr; + ddesc->nxtdscraddr = addr + ZYNQMP_DMA_DESC_SIZE(chan); + } +} + +/** + * zynqmp_dma_init - Initialize the channel + * @chan: ZynqMP DMA channel pointer + */ +static void zynqmp_dma_init(struct zynqmp_dma_chan *chan) +{ + u32 val; + + writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS); + val = readl(chan->regs + ZYNQMP_DMA_ISR); + writel(val, chan->regs + ZYNQMP_DMA_ISR); + + if (chan->is_dmacoherent) { + val = ZYNQMP_DMA_AXCOHRNT; + val = (val & ~ZYNQMP_DMA_AXCACHE) | + (ZYNQMP_DMA_AXCACHE_VAL << ZYNQMP_DMA_AXCACHE_OFST); + writel(val, chan->regs + ZYNQMP_DMA_DSCR_ATTR); + } + + val = readl(chan->regs + ZYNQMP_DMA_DATA_ATTR); + if (chan->is_dmacoherent) { + val = (val & ~ZYNQMP_DMA_ARCACHE) | + (ZYNQMP_DMA_AXCACHE_VAL << ZYNQMP_DMA_ARCACHE_OFST); + val = (val & ~ZYNQMP_DMA_AWCACHE) | 
+ (ZYNQMP_DMA_AXCACHE_VAL << ZYNQMP_DMA_AWCACHE_OFST); + } + writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR); + + /* Clear the interrupt accounting registers */ + val = readl(chan->regs + ZYNQMP_DMA_IRQ_SRC_ACCT); + val = readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT); + + chan->idle = true; +} + +/** + * zynqmp_dma_tx_submit - Submit DMA transaction + * @tx: Async transaction descriptor pointer + * + * Return: cookie value + */ +static dma_cookie_t zynqmp_dma_tx_submit(struct dma_async_tx_descriptor *tx) +{ + struct zynqmp_dma_chan *chan = to_chan(tx->chan); + struct zynqmp_dma_desc_sw *desc, *new; + dma_cookie_t cookie; + + new = tx_to_desc(tx); + spin_lock_bh(&chan->lock); + cookie = dma_cookie_assign(tx); + + if (!list_empty(&chan->pending_list)) { + desc = list_last_entry(&chan->pending_list, + struct zynqmp_dma_desc_sw, node); + if (!list_empty(&desc->tx_list)) + desc = list_last_entry(&desc->tx_list, + struct zynqmp_dma_desc_sw, node); + desc->src_v->nxtdscraddr = new->src_p; + desc->src_v->ctrl &= ~ZYNQMP_DMA_DESC_CTRL_STOP; + desc->dst_v->nxtdscraddr = new->dst_p; + desc->dst_v->ctrl &= ~ZYNQMP_DMA_DESC_CTRL_STOP; + } + + list_add_tail(&new->node, &chan->pending_list); + spin_unlock_bh(&chan->lock); + + return cookie; +} + +/** + * zynqmp_dma_get_descriptor - Get the sw descriptor from the pool + * @chan: ZynqMP DMA channel pointer + * + * Return: The sw descriptor + */ +static struct zynqmp_dma_desc_sw * +zynqmp_dma_get_descriptor(struct zynqmp_dma_chan *chan) +{ + struct zynqmp_dma_desc_sw *desc; + + spin_lock_bh(&chan->lock); + desc = list_first_entry(&chan->free_list, + struct zynqmp_dma_desc_sw, node); + list_del(&desc->node); + spin_unlock_bh(&chan->lock); + + INIT_LIST_HEAD(&desc->tx_list); + /* Clear the src and dst descriptor memory */ + memset((void *)desc->src_v, 0, ZYNQMP_DMA_DESC_SIZE(chan)); + memset((void *)desc->dst_v, 0, ZYNQMP_DMA_DESC_SIZE(chan)); + + return desc; +} + +/** + * zynqmp_dma_free_descriptor - Free descriptor + * @chan: ZynqMP DMA channel pointer + * @sdesc: Transaction descriptor pointer + */ +static void zynqmp_dma_free_descriptor(struct zynqmp_dma_chan *chan, + struct zynqmp_dma_desc_sw *sdesc) +{ + struct zynqmp_dma_desc_sw *child, *next; + + chan->desc_free_cnt++; + list_add_tail(&sdesc->node, &chan->free_list); + list_for_each_entry_safe(child, next, &sdesc->tx_list, node) { + chan->desc_free_cnt++; + list_move_tail(&child->node, &chan->free_list); + } +} + +/** + * zynqmp_dma_free_desc_list - Free descriptors list + * @chan: ZynqMP DMA channel pointer + * @list: List to parse and delete the descriptor + */ +static void zynqmp_dma_free_desc_list(struct zynqmp_dma_chan *chan, + struct list_head *list) +{ + struct zynqmp_dma_desc_sw *desc, *next; + + list_for_each_entry_safe(desc, next, list, node) + zynqmp_dma_free_descriptor(chan, desc); +} + +/** + * zynqmp_dma_alloc_chan_resources - Allocate channel resources + * @dchan: DMA channel + * + * Return: Number of descriptors on success and failure value on error + */ +static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan) +{ + struct zynqmp_dma_chan *chan = to_chan(dchan); + struct zynqmp_dma_desc_sw *desc; + int i; + + chan->sw_desc_pool = kzalloc(sizeof(*desc) * ZYNQMP_DMA_NUM_DESCS, + GFP_KERNEL); + if (!chan->sw_desc_pool) + return -ENOMEM; + + chan->idle = true; + chan->desc_free_cnt = ZYNQMP_DMA_NUM_DESCS; + + INIT_LIST_HEAD(&chan->free_list); + + for (i = 0; i < ZYNQMP_DMA_NUM_DESCS; i++) { + desc = chan->sw_desc_pool + i; + dma_async_tx_descriptor_init(&desc->async_tx,
&chan->common); + desc->async_tx.tx_submit = zynqmp_dma_tx_submit; + list_add_tail(&desc->node, &chan->free_list); + } + + chan->desc_pool_v = dma_zalloc_coherent(chan->dev, + (2 * chan->desc_size * ZYNQMP_DMA_NUM_DESCS), + &chan->desc_pool_p, GFP_KERNEL); + if (!chan->desc_pool_v) + return -ENOMEM; + + for (i = 0; i < ZYNQMP_DMA_NUM_DESCS; i++) { + desc = chan->sw_desc_pool + i; + desc->src_v = (struct zynqmp_dma_desc_ll *) (chan->desc_pool_v + + (i * ZYNQMP_DMA_DESC_SIZE(chan) * 2)); + desc->dst_v = (struct zynqmp_dma_desc_ll *) (desc->src_v + 1); + desc->src_p = chan->desc_pool_p + + (i * ZYNQMP_DMA_DESC_SIZE(chan) * 2); + desc->dst_p = desc->src_p + ZYNQMP_DMA_DESC_SIZE(chan); + } + + return ZYNQMP_DMA_NUM_DESCS; +} + +/** + * zynqmp_dma_start - Start DMA channel + * @chan: ZynqMP DMA channel pointer + */ +static void zynqmp_dma_start(struct zynqmp_dma_chan *chan) +{ + writel(ZYNQMP_DMA_INT_EN_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IER); + chan->idle = false; + writel(ZYNQMP_DMA_ENABLE, chan->regs + ZYNQMP_DMA_CTRL2); +} + +/** + * zynqmp_dma_handle_ovfl_int - Process the overflow interrupt + * @chan: ZynqMP DMA channel pointer + * @status: Interrupt status value + */ +static void zynqmp_dma_handle_ovfl_int(struct zynqmp_dma_chan *chan, u32 status) +{ + u32 val; + + if (status & ZYNQMP_DMA_IRQ_DST_ACCT_ERR) + val = readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT); + if (status & ZYNQMP_DMA_IRQ_SRC_ACCT_ERR) + val = readl(chan->regs + ZYNQMP_DMA_IRQ_SRC_ACCT); +} + +static void zynqmp_dma_config(struct zynqmp_dma_chan *chan) +{ + u32 val; + + val = readl(chan->regs + ZYNQMP_DMA_CTRL0); + val |= ZYNQMP_DMA_POINT_TYPE_SG; + writel(val, chan->regs + ZYNQMP_DMA_CTRL0); + + val = readl(chan->regs + ZYNQMP_DMA_DATA_ATTR); + val = (val & ~ZYNQMP_DMA_ARLEN) | + (chan->src_burst_len << ZYNQMP_DMA_ARLEN_OFST); + val = (val & ~ZYNQMP_DMA_AWLEN) | + (chan->dst_burst_len << ZYNQMP_DMA_AWLEN_OFST); + writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR); +} + +/** + * zynqmp_dma_device_config - Zynqmp dma device configuration + * @dchan: DMA channel + * @config: DMA device config + */ +static int zynqmp_dma_device_config(struct dma_chan *dchan, + struct dma_slave_config *config) +{ + struct zynqmp_dma_chan *chan = to_chan(dchan); + + chan->src_burst_len = config->src_maxburst; + chan->dst_burst_len = config->dst_maxburst; + + return 0; +} + +/** + * zynqmp_dma_start_transfer - Initiate the new transfer + * @chan: ZynqMP DMA channel pointer + */ +static void zynqmp_dma_start_transfer(struct zynqmp_dma_chan *chan) +{ + struct zynqmp_dma_desc_sw *desc; + + if (!chan->idle) + return; + + zynqmp_dma_config(chan); + + desc = list_first_entry_or_null(&chan->pending_list, + struct zynqmp_dma_desc_sw, node); + if (!desc) + return; + + list_splice_tail_init(&chan->pending_list, &chan->active_list); + zynqmp_dma_update_desc_to_ctrlr(chan, desc); + zynqmp_dma_start(chan); +} + + +/** + * zynqmp_dma_chan_desc_cleanup - Cleanup the completed descriptors + * @chan: ZynqMP DMA channel + */ +static void zynqmp_dma_chan_desc_cleanup(struct zynqmp_dma_chan *chan) +{ + struct zynqmp_dma_desc_sw *desc, *next; + + list_for_each_entry_safe(desc, next, &chan->done_list, node) { + dma_async_tx_callback callback; + void *callback_param; + + list_del(&desc->node); + + callback = desc->async_tx.callback; + callback_param = desc->async_tx.callback_param; + if (callback) { + spin_unlock(&chan->lock); + callback(callback_param); + spin_lock(&chan->lock); + } + + /* Run any dependencies, then free the descriptor */ + 
zynqmp_dma_free_descriptor(chan, desc); + } +} + +/** + * zynqmp_dma_complete_descriptor - Mark the active descriptor as complete + * @chan: ZynqMP DMA channel pointer + */ +static void zynqmp_dma_complete_descriptor(struct zynqmp_dma_chan *chan) +{ + struct zynqmp_dma_desc_sw *desc; + + desc = list_first_entry_or_null(&chan->active_list, + struct zynqmp_dma_desc_sw, node); + if (!desc) + return; + list_del(&desc->node); + dma_cookie_complete(&desc->async_tx); + list_add_tail(&desc->node, &chan->done_list); +} + +/** + * zynqmp_dma_issue_pending - Issue pending transactions + * @dchan: DMA channel pointer + */ +static void zynqmp_dma_issue_pending(struct dma_chan *dchan) +{ + struct zynqmp_dma_chan *chan = to_chan(dchan); + + spin_lock_bh(&chan->lock); + zynqmp_dma_start_transfer(chan); + spin_unlock_bh(&chan->lock); +} + +/** + * zynqmp_dma_free_descriptors - Free channel descriptors + * @dchan: DMA channel pointer + */ +static void zynqmp_dma_free_descriptors(struct zynqmp_dma_chan *chan) +{ + zynqmp_dma_free_desc_list(chan, &chan->active_list); + zynqmp_dma_free_desc_list(chan, &chan->pending_list); + zynqmp_dma_free_desc_list(chan, &chan->done_list); +} + +/** + * zynqmp_dma_free_chan_resources - Free channel resources + * @dchan: DMA channel pointer + */ +static void zynqmp_dma_free_chan_resources(struct dma_chan *dchan) +{ + struct zynqmp_dma_chan *chan = to_chan(dchan); + + spin_lock_bh(&chan->lock); + zynqmp_dma_free_descriptors(chan); + spin_unlock_bh(&chan->lock); + dma_free_coherent(chan->dev, + (2 * ZYNQMP_DMA_DESC_SIZE(chan) * ZYNQMP_DMA_NUM_DESCS), + chan->desc_pool_v, chan->desc_pool_p); + kfree(chan->sw_desc_pool); +} + +/** + * zynqmp_dma_reset - Reset the channel + * @chan: ZynqMP DMA channel pointer + */ +static void zynqmp_dma_reset(struct zynqmp_dma_chan *chan) +{ + writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS); + + zynqmp_dma_complete_descriptor(chan); + zynqmp_dma_chan_desc_cleanup(chan); + zynqmp_dma_free_descriptors(chan); + zynqmp_dma_init(chan); +} + +/** + * zynqmp_dma_irq_handler - ZynqMP DMA Interrupt handler + * @irq: IRQ number + * @data: Pointer to the ZynqMP DMA channel structure + * + * Return: IRQ_HANDLED/IRQ_NONE + */ +static irqreturn_t zynqmp_dma_irq_handler(int irq, void *data) +{ + struct zynqmp_dma_chan *chan = (struct zynqmp_dma_chan *)data; + u32 isr, imr, status; + irqreturn_t ret = IRQ_NONE; + + isr = readl(chan->regs + ZYNQMP_DMA_ISR); + imr = readl(chan->regs + ZYNQMP_DMA_IMR); + status = isr & ~imr; + + writel(isr, chan->regs + ZYNQMP_DMA_ISR); + if (status & ZYNQMP_DMA_INT_DONE) { + tasklet_schedule(&chan->tasklet); + ret = IRQ_HANDLED; + } + + if (status & ZYNQMP_DMA_DONE) + chan->idle = true; + + if (status & ZYNQMP_DMA_INT_ERR) { + chan->err = true; + tasklet_schedule(&chan->tasklet); + dev_err(chan->dev, "Channel %p has errors\n", chan); + ret = IRQ_HANDLED; + } + + if (status & ZYNQMP_DMA_INT_OVRFL) { + zynqmp_dma_handle_ovfl_int(chan, status); + dev_info(chan->dev, "Channel %p overflow interrupt\n", chan); + ret = IRQ_HANDLED; + } + + return ret; +} + +/** + * zynqmp_dma_do_tasklet - Schedule completion tasklet + * @data: Pointer to the ZynqMP DMA channel structure + */ +static void zynqmp_dma_do_tasklet(unsigned long data) +{ + struct zynqmp_dma_chan *chan = (struct zynqmp_dma_chan *)data; + u32 count; + + spin_lock(&chan->lock); + + if (chan->err) { + zynqmp_dma_reset(chan); + chan->err = false; + goto unlock; + } + + count = readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT); + + while (count) { + 
zynqmp_dma_complete_descriptor(chan); + zynqmp_dma_chan_desc_cleanup(chan); + count--; + } + + if (chan->idle) + zynqmp_dma_start_transfer(chan); + +unlock: + spin_unlock(&chan->lock); +} + +/** + * zynqmp_dma_device_terminate_all - Aborts all transfers on a channel + * @dchan: DMA channel pointer + * + * Return: Always '0' + */ +static int zynqmp_dma_device_terminate_all(struct dma_chan *dchan) +{ + struct zynqmp_dma_chan *chan = to_chan(dchan); + + spin_lock_bh(&chan->lock); + writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS); + zynqmp_dma_free_descriptors(chan); + spin_unlock_bh(&chan->lock); + + return 0; +} + +/** + * zynqmp_dma_prep_memcpy - prepare descriptors for memcpy transaction + * @dchan: DMA channel + * @dma_dst: Destination buffer address + * @dma_src: Source buffer address + * @len: Transfer length + * @flags: transfer ack flags + * + * Return: Async transaction descriptor on success and NULL on failure + */ +static struct dma_async_tx_descriptor *zynqmp_dma_prep_memcpy( + struct dma_chan *dchan, dma_addr_t dma_dst, + dma_addr_t dma_src, size_t len, ulong flags) +{ + struct zynqmp_dma_chan *chan; + struct zynqmp_dma_desc_sw *new, *first = NULL; + void *desc = NULL, *prev = NULL; + size_t copy; + u32 desc_cnt; + + chan = to_chan(dchan); + + if (len > ZYNQMP_DMA_MAX_TRANS_LEN) + return NULL; + + desc_cnt = DIV_ROUND_UP(len, ZYNQMP_DMA_MAX_TRANS_LEN); + + spin_lock_bh(&chan->lock); + if (desc_cnt > chan->desc_free_cnt) { + spin_unlock_bh(&chan->lock); + dev_dbg(chan->dev, "chan %p descs are not available\n", chan); + return NULL; + } + chan->desc_free_cnt = chan->desc_free_cnt - desc_cnt; + spin_unlock_bh(&chan->lock); + + do { + /* Allocate and populate the descriptor */ + new = zynqmp_dma_get_descriptor(chan); + + copy = min_t(size_t, len, ZYNQMP_DMA_MAX_TRANS_LEN); + desc = (struct zynqmp_dma_desc_ll *)new->src_v; + zynqmp_dma_config_sg_ll_desc(chan, desc, dma_src, + dma_dst, copy, prev); + prev = desc; + len -= copy; + dma_src += copy; + dma_dst += copy; + if (!first) + first = new; + else + list_add_tail(&new->node, &first->tx_list); + } while (len); + + zynqmp_dma_desc_config_eod(chan, desc); + async_tx_ack(&first->async_tx); + first->async_tx.flags = flags; + return &first->async_tx; +} + +/** + * zynqmp_dma_prep_sg - prepare descriptors for a memory sg transaction + * @dchan: DMA channel + * @dst_sg: Destination scatter list + * @dst_sg_len: Number of entries in destination scatter list + * @src_sg: Source scatter list + * @src_sg_len: Number of entries in source scatter list + * @flags: transfer ack flags + * + * Return: Async transaction descriptor on success and NULL on failure + */ +static struct dma_async_tx_descriptor *zynqmp_dma_prep_sg( + struct dma_chan *dchan, struct scatterlist *dst_sg, + unsigned int dst_sg_len, struct scatterlist *src_sg, + unsigned int src_sg_len, unsigned long flags) +{ + struct zynqmp_dma_desc_sw *new, *first = NULL; + struct zynqmp_dma_chan *chan = to_chan(dchan); + void *desc = NULL, *prev = NULL; + size_t len, dst_avail, src_avail; + dma_addr_t dma_dst, dma_src; + u32 desc_cnt = 0, i; + struct scatterlist *sg; + + for_each_sg(src_sg, sg, src_sg_len, i) + desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), + ZYNQMP_DMA_MAX_TRANS_LEN); + + spin_lock_bh(&chan->lock); + if (desc_cnt > chan->desc_free_cnt) { + spin_unlock_bh(&chan->lock); + dev_dbg(chan->dev, "chan %p descs are not available\n", chan); + return NULL; + } + chan->desc_free_cnt = chan->desc_free_cnt - desc_cnt; + spin_unlock_bh(&chan->lock); + + dst_avail =
sg_dma_len(dst_sg); + src_avail = sg_dma_len(src_sg); + + /* Run until we are out of scatterlist entries */ + while (true) { + /* Allocate and populate the descriptor */ + new = zynqmp_dma_get_descriptor(chan); + desc = (struct zynqmp_dma_desc_ll *)new->src_v; + len = min_t(size_t, src_avail, dst_avail); + len = min_t(size_t, len, ZYNQMP_DMA_MAX_TRANS_LEN); + if (len == 0) + goto fetch; + dma_dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - + dst_avail; + dma_src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - + src_avail; + + zynqmp_dma_config_sg_ll_desc(chan, desc, dma_src, dma_dst, + len, prev); + prev = desc; + dst_avail -= len; + src_avail -= len; + + if (!first) + first = new; + else + list_add_tail(&new->node, &first->tx_list); +fetch: + /* Fetch the next dst scatterlist entry */ + if (dst_avail == 0) { + if (dst_sg_len == 0) + break; + dst_sg = sg_next(dst_sg); + if (dst_sg == NULL) + break; + dst_sg_len--; + dst_avail = sg_dma_len(dst_sg); + } + /* Fetch the next src scatterlist entry */ + if (src_avail == 0) { + if (src_sg_len == 0) + break; + src_sg = sg_next(src_sg); + if (src_sg == NULL) + break; + src_sg_len--; + src_avail = sg_dma_len(src_sg); + } + } + + zynqmp_dma_desc_config_eod(chan, desc); + first->async_tx.flags = flags; + return &first->async_tx; +} + +/** + * zynqmp_dma_chan_remove - Channel remove function + * @chan: ZynqMP DMA channel pointer + */ +static void zynqmp_dma_chan_remove(struct zynqmp_dma_chan *chan) +{ + if (!chan) + return; + + devm_free_irq(chan->zdev->dev, chan->irq, chan); + tasklet_kill(&chan->tasklet); + list_del(&chan->common.device_node); + clk_disable_unprepare(chan->clk_apb); + clk_disable_unprepare(chan->clk_main); +} + +/** + * zynqmp_dma_chan_probe - Per Channel Probing + * @zdev: Driver specific device structure + * @pdev: Pointer to the platform_device structure + * + * Return: '0' on success and failure value on error + */ +static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev, + struct platform_device *pdev) +{ + struct zynqmp_dma_chan *chan; + struct resource *res; + struct device_node *node = pdev->dev.of_node; + int err; + + chan = devm_kzalloc(zdev->dev, sizeof(*chan), GFP_KERNEL); + if (!chan) + return -ENOMEM; + chan->dev = zdev->dev; + chan->zdev = zdev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + chan->regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(chan->regs)) + return PTR_ERR(chan->regs); + + chan->bus_width = ZYNQMP_DMA_BUS_WIDTH_64; + chan->dst_burst_len = ZYNQMP_DMA_AWLEN_RST_VAL; + chan->src_burst_len = ZYNQMP_DMA_ARLEN_RST_VAL; + err = of_property_read_u32(node, "xlnx,bus-width", &chan->bus_width); + if ((err < 0) && ((chan->bus_width != ZYNQMP_DMA_BUS_WIDTH_64) || + (chan->bus_width != ZYNQMP_DMA_BUS_WIDTH_128))) { + dev_err(zdev->dev, "invalid bus-width value"); + return err; + } + + chan->is_dmacoherent = of_property_read_bool(node, "dma-coherent"); + zdev->chan = chan; + tasklet_init(&chan->tasklet, zynqmp_dma_do_tasklet, (ulong)chan); + spin_lock_init(&chan->lock); + INIT_LIST_HEAD(&chan->active_list); + INIT_LIST_HEAD(&chan->pending_list); + INIT_LIST_HEAD(&chan->done_list); + INIT_LIST_HEAD(&chan->free_list); + + dma_cookie_init(&chan->common); + chan->common.device = &zdev->common; + list_add_tail(&chan->common.device_node, &zdev->common.channels); + + zynqmp_dma_init(chan); + chan->irq = platform_get_irq(pdev, 0); + if (chan->irq < 0) + return -ENXIO; + err = devm_request_irq(&pdev->dev, chan->irq, zynqmp_dma_irq_handler, 0, + "zynqmp-dma", chan); + if (err) + return 
err; + chan->clk_main = devm_clk_get(&pdev->dev, "clk_main"); + if (IS_ERR(chan->clk_main)) { + dev_err(&pdev->dev, "main clock not found.\n"); + return PTR_ERR(chan->clk_main); + } + + chan->clk_apb = devm_clk_get(&pdev->dev, "clk_apb"); + if (IS_ERR(chan->clk_apb)) { + dev_err(&pdev->dev, "apb clock not found.\n"); + return PTR_ERR(chan->clk_apb); + } + + err = clk_prepare_enable(chan->clk_main); + if (err) { + dev_err(&pdev->dev, "Unable to enable main clock.\n"); + return err; + } + + err = clk_prepare_enable(chan->clk_apb); + if (err) { + clk_disable_unprepare(chan->clk_main); + dev_err(&pdev->dev, "Unable to enable apb clock.\n"); + return err; + } + + chan->desc_size = sizeof(struct zynqmp_dma_desc_ll); + chan->idle = true; + return 0; +} + +/** + * of_zynqmp_dma_xlate - Translation function + * @dma_spec: Pointer to DMA specifier as found in the device tree + * @ofdma: Pointer to DMA controller data + * + * Return: DMA channel pointer on success and NULL on error + */ +static struct dma_chan *of_zynqmp_dma_xlate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + struct zynqmp_dma_device *zdev = ofdma->of_dma_data; + + return dma_get_slave_channel(&zdev->chan->common); +} + +/** + * zynqmp_dma_probe - Driver probe function + * @pdev: Pointer to the platform_device structure + * + * Return: '0' on success and failure value on error + */ +static int zynqmp_dma_probe(struct platform_device *pdev) +{ + struct zynqmp_dma_device *zdev; + struct dma_device *p; + int ret; + + zdev = devm_kzalloc(&pdev->dev, sizeof(*zdev), GFP_KERNEL); + if (!zdev) + return -ENOMEM; + + zdev->dev = &pdev->dev; + INIT_LIST_HEAD(&zdev->common.channels); + + dma_set_mask(&pdev->dev, DMA_BIT_MASK(44)); + dma_cap_set(DMA_SG, zdev->common.cap_mask); + dma_cap_set(DMA_MEMCPY, zdev->common.cap_mask); + + p = &zdev->common; + p->device_prep_dma_sg = zynqmp_dma_prep_sg; + p->device_prep_dma_memcpy = zynqmp_dma_prep_memcpy; + p->device_terminate_all = zynqmp_dma_device_terminate_all; + p->device_issue_pending = zynqmp_dma_issue_pending; + p->device_alloc_chan_resources = zynqmp_dma_alloc_chan_resources; + p->device_free_chan_resources = zynqmp_dma_free_chan_resources; + p->device_tx_status = dma_cookie_status; + p->device_config = zynqmp_dma_device_config; + p->dev = &pdev->dev; + + platform_set_drvdata(pdev, zdev); + + ret = zynqmp_dma_chan_probe(zdev, pdev); + if (ret) { + dev_err(&pdev->dev, "Probing channel failed\n"); + goto free_chan_resources; + } + + p->dst_addr_widths = BIT(zdev->chan->bus_width / 8); + p->src_addr_widths = BIT(zdev->chan->bus_width / 8); + + dma_async_device_register(&zdev->common); + + ret = of_dma_controller_register(pdev->dev.of_node, + of_zynqmp_dma_xlate, zdev); + if (ret) { + dev_err(&pdev->dev, "Unable to register DMA to DT\n"); + dma_async_device_unregister(&zdev->common); + goto free_chan_resources; + } + + dev_info(&pdev->dev, "ZynqMP DMA driver Probe success\n"); + + return 0; + +free_chan_resources: + zynqmp_dma_chan_remove(zdev->chan); + return ret; +} + +/** + * zynqmp_dma_remove - Driver remove function + * @pdev: Pointer to the platform_device structure + * + * Return: Always '0' + */ +static int zynqmp_dma_remove(struct platform_device *pdev) +{ + struct zynqmp_dma_device *zdev = platform_get_drvdata(pdev); + + of_dma_controller_free(pdev->dev.of_node); + dma_async_device_unregister(&zdev->common); + + zynqmp_dma_chan_remove(zdev->chan); + + return 0; +} + +static const struct of_device_id zynqmp_dma_of_match[] = { + { .compatible = "xlnx,zynqmp-dma-1.0", }, + {} 
+}; +MODULE_DEVICE_TABLE(of, zynqmp_dma_of_match); + +static struct platform_driver zynqmp_dma_driver = { + .driver = { + .name = "xilinx-zynqmp-dma", + .of_match_table = zynqmp_dma_of_match, + }, + .probe = zynqmp_dma_probe, + .remove = zynqmp_dma_remove, +}; + +module_platform_driver(zynqmp_dma_driver); + +MODULE_AUTHOR("Xilinx, Inc."); +MODULE_DESCRIPTION("Xilinx ZynqMP DMA driver");
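Editor's note: the new ZynqMP driver registers DMA_MEMCPY with the dmaengine core. A minimal, hypothetical consumer sketch using the standard dmaengine client API (not code from this commit; a real client would normally use a completion callback rather than polling):

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

static int zynqmp_memcpy_example(dma_addr_t dst, dma_addr_t src, size_t len)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	/* Grab any channel advertising DMA_MEMCPY, e.g. the one probed above */
	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	desc = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
	if (!desc) {
		dma_release_channel(chan);
		return -EIO;
	}

	cookie = dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	/* Busy-wait for completion; fine for a sketch, not for production */
	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) != DMA_COMPLETE)
		cpu_relax();

	dma_release_channel(chan);
	return 0;
}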