author | Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com> | 2013-10-18 19:35:33 +0200
committer | Dan Williams <dan.j.williams@intel.com> | 2013-11-14 11:04:38 -0800
commit | 0776ae7b89782124ddd72eafe0b1e0fdcdabe32e
tree | f16e917b66a8a60a7341937a40021d683f3e27f0 /crypto/async_tx
parent | 54f8d501e842879143e867e70996574a54d1e130
dmaengine: remove DMA unmap flags
Remove the no-longer-needed DMA unmap flags:
- DMA_COMPL_SKIP_SRC_UNMAP
- DMA_COMPL_SKIP_DEST_UNMAP
- DMA_COMPL_SRC_UNMAP_SINGLE
- DMA_COMPL_DEST_UNMAP_SINGLE
Cc: Vinod Koul <vinod.koul@intel.com>
Cc: Tomasz Figa <t.figa@samsung.com>
Cc: Dave Jiang <dave.jiang@intel.com>
Signed-off-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
Acked-by: Jon Mason <jon.mason@intel.com>
Acked-by: Mark Brown <broonie@linaro.org>
[djbw: clean up straggling skip unmap flags in ntb]
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
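These flags were hints to a driver's completion path about which buffers to skip unmapping; with the unmap bookkeeping now carried in struct dmaengine_unmap_data (see the dmaengine_get_unmap_data() calls visible in the context of the diff below), clients simply stop setting them. As a rough illustration only, here is a minimal sketch of the post-change client pattern, modeled on the async_memcpy() hunk below. The channel lookup, submission, and most error handling are assumed to happen elsewhere, and prep_copy_sketch() is a hypothetical helper, not code from this patch:

```c
/*
 * Sketch only (not from the patch): preparing a DMA memcpy after the
 * unmap rework.  The caller supplies a memcpy-capable channel; error
 * paths are trimmed to the parts relevant to the flag removal.
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

static struct dma_async_tx_descriptor *
prep_copy_sketch(struct dma_chan *chan, struct page *dst, struct page *src,
		 unsigned int off, size_t len)
{
	struct dma_device *device = chan->device;
	struct dmaengine_unmap_data *unmap;
	struct dma_async_tx_descriptor *tx;
	unsigned long flags = DMA_PREP_INTERRUPT;	/* no skip-unmap flags anymore */

	unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOIO);
	if (!unmap)
		return NULL;

	/* Record the mappings; the dmaengine core unmaps them on completion. */
	unmap->to_cnt = 1;
	unmap->addr[0] = dma_map_page(device->dev, src, off, len, DMA_TO_DEVICE);
	unmap->from_cnt = 1;
	unmap->addr[1] = dma_map_page(device->dev, dst, off, len, DMA_FROM_DEVICE);
	unmap->len = len;

	tx = device->device_prep_dma_memcpy(chan, unmap->addr[1],
					    unmap->addr[0], len, flags);
	if (tx)
		dma_set_unmap(tx, unmap);	/* descriptor takes its own reference */

	dmaengine_unmap_put(unmap);		/* drop ours either way */
	return tx;
}
```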
Diffstat (limited to 'crypto/async_tx')
-rw-r--r-- | crypto/async_tx/async_memcpy.c | 3
-rw-r--r-- | crypto/async_tx/async_pq.c | 1
-rw-r--r-- | crypto/async_tx/async_raid6_recov.c | 8
-rw-r--r-- | crypto/async_tx/async_xor.c | 6
4 files changed, 5 insertions(+), 13 deletions(-)
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index 72750214f779..f8c0b8dbeb75 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -56,8 +56,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 	unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOIO);
 
 	if (unmap && is_dma_copy_aligned(device, src_offset, dest_offset, len)) {
-		unsigned long dma_prep_flags = DMA_COMPL_SKIP_SRC_UNMAP |
-					       DMA_COMPL_SKIP_DEST_UNMAP;
+		unsigned long dma_prep_flags = 0;
 
 		if (submit->cb_fn)
 			dma_prep_flags |= DMA_PREP_INTERRUPT;
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index 4126b56fbc01..d05327caf69d 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -62,7 +62,6 @@ do_async_gen_syndrome(struct dma_chan *chan,
 	dma_addr_t dma_dest[2];
 	int src_off = 0;
 
-	dma_flags |= DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP;
 	if (submit->flags & ASYNC_TX_FENCE)
 		dma_flags |= DMA_PREP_FENCE;
 
diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c
index a3a72a784421..934a84981495 100644
--- a/crypto/async_tx/async_raid6_recov.c
+++ b/crypto/async_tx/async_raid6_recov.c
@@ -47,9 +47,7 @@ async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef,
 		struct device *dev = dma->dev;
 		dma_addr_t pq[2];
 		struct dma_async_tx_descriptor *tx;
-		enum dma_ctrl_flags dma_flags = DMA_COMPL_SKIP_SRC_UNMAP |
-						DMA_COMPL_SKIP_DEST_UNMAP |
-						DMA_PREP_PQ_DISABLE_P;
+		enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P;
 
 		if (submit->flags & ASYNC_TX_FENCE)
 			dma_flags |= DMA_PREP_FENCE;
@@ -113,9 +111,7 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len,
 		dma_addr_t dma_dest[2];
 		struct device *dev = dma->dev;
 		struct dma_async_tx_descriptor *tx;
-		enum dma_ctrl_flags dma_flags = DMA_COMPL_SKIP_SRC_UNMAP |
-						DMA_COMPL_SKIP_DEST_UNMAP |
-						DMA_PREP_PQ_DISABLE_P;
+		enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P;
 
 		if (submit->flags & ASYNC_TX_FENCE)
 			dma_flags |= DMA_PREP_FENCE;
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index d2cc77d501c7..3c562f5a60bb 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -41,7 +41,7 @@ do_async_xor(struct dma_chan *chan, struct dmaengine_unmap_data *unmap,
 	dma_async_tx_callback cb_fn_orig = submit->cb_fn;
 	void *cb_param_orig = submit->cb_param;
 	enum async_tx_flags flags_orig = submit->flags;
-	enum dma_ctrl_flags dma_flags;
+	enum dma_ctrl_flags dma_flags = 0;
 	int src_cnt = unmap->to_cnt;
 	int xor_src_cnt;
 	dma_addr_t dma_dest = unmap->addr[unmap->to_cnt];
@@ -55,7 +55,6 @@ do_async_xor(struct dma_chan *chan, struct dmaengine_unmap_data *unmap,
 	/* if we are submitting additional xors, leave the chain open
 	 * and clear the callback parameters
 	 */
-	dma_flags = DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP;
 	if (src_cnt > xor_src_cnt) {
 		submit->flags &= ~ASYNC_TX_ACK;
 		submit->flags |= ASYNC_TX_FENCE;
@@ -284,8 +283,7 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
 
 	if (unmap && src_cnt <= device->max_xor &&
 	    is_dma_xor_aligned(device, offset, 0, len)) {
-		unsigned long dma_prep_flags = DMA_COMPL_SKIP_SRC_UNMAP |
-					       DMA_COMPL_SKIP_DEST_UNMAP;
+		unsigned long dma_prep_flags = 0;
 		int i;
 
 		pr_debug("%s: (async) len: %zu\n", __func__, len);
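Taken together, the hunks are mechanical: each call site either drops the DMA_COMPL_SKIP_SRC_UNMAP / DMA_COMPL_SKIP_DEST_UNMAP bits entirely (leaving dma_flags or dma_prep_flags at 0) or keeps only the flags the prep routines still act on (DMA_PREP_PQ_DISABLE_P, DMA_PREP_FENCE, DMA_PREP_INTERRUPT). No unmap calls need to be added here because, as the surrounding context shows, these async_tx helpers already describe their mappings through the dmaengine_unmap_data obtained from dmaengine_get_unmap_data().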