author    Hanna Hawa <hannah@marvell.com>    2018-07-17 13:30:02 +0300
committer Vinod Koul <vkoul@kernel.org>      2018-07-20 15:01:58 +0530
commit    c3a272c7b0c8995bab2116436b03e0e44b480c73 (patch)
tree      73bff070872fb05284309989d2783f8a76acdc31 /drivers/dma
parent    5a80aff92ad28c5e3045b542576c1d08260606db (diff)
dmaengine: mv_xor_v2: move unmap to before callback
The completion callback should only happen after dma_descriptor_unmap() has been called. This allows the cache invalidation to take place and ensures that the data accessed by the upper layer is the data written by the DMA rather than stale data. On some architectures this is done by the hardware; however, the code should be kept consistent to avoid confusion.

Signed-off-by: Hanna Hawa <hannah@marvell.com>
Reviewed-by: Thomas Petazzoni <thomas.petazzoni@bootlin.com>
Signed-off-by: Vinod Koul <vkoul@kernel.org>
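
For reference, the ordering the patch enforces looks roughly like the sketch below. This is a minimal, hypothetical completion helper, not the driver's exact code: the function name example_complete_desc and the bare tx argument are illustrative, while dma_cookie_complete(), dma_descriptor_unmap(), dmaengine_desc_get_callback_invoke() and dma_run_dependencies() are the existing in-kernel dmaengine helpers that appear in the hunk below.

#include <linux/dmaengine.h>
#include "dmaengine.h"	/* internal drivers/dma header for cookie/callback helpers */

/*
 * Hypothetical completion path illustrating the ordering this patch
 * enforces: complete the cookie, unmap the descriptor's buffers (which
 * performs the cache maintenance), and only then invoke the client
 * callback, so the callback never observes stale data.
 */
static void example_complete_desc(struct dma_async_tx_descriptor *tx)
{
	/* Mark the transaction complete for cookie-status queries. */
	dma_cookie_complete(tx);

	/* Unmap before the callback so cache invalidation has happened. */
	dma_descriptor_unmap(tx);

	/* The client callback now sees the data written by the DMA. */
	dmaengine_desc_get_callback_invoke(tx, NULL);

	/* Start any transactions that depend on this one. */
	dma_run_dependencies(tx);
}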
Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/mv_xor_v2.c | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
index 14e2a7a2e80b..d41d916f40fa 100644
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -589,10 +589,9 @@ static void mv_xor_v2_tasklet(unsigned long data)
 			 */
 			dma_cookie_complete(&next_pending_sw_desc->async_tx);
 
+			dma_descriptor_unmap(&next_pending_sw_desc->async_tx);
 			dmaengine_desc_get_callback_invoke(
 					&next_pending_sw_desc->async_tx, NULL);
-
-			dma_descriptor_unmap(&next_pending_sw_desc->async_tx);
 		}
 
 		dma_run_dependencies(&next_pending_sw_desc->async_tx);