summary refs log tree commit diff stats
path: root/arch/arm/mm/cache-v6.S
diff options
context:
space:
mode:
author	Russell King <rmk+kernel@arm.linux.org.uk>	2009-10-31 16:52:16 +0000
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2010-02-15 15:22:25 +0000
commit	2ffe2da3e71652d4f4cae19539b5c78c2a239136 (patch)
tree	1b69404360a47369c858e54643bab6836015ddbd /arch/arm/mm/cache-v6.S
parent	702b94bff3c50542a6e4ab9a4f4cef093262fe65 (diff)
download	linux-2ffe2da3e71652d4f4cae19539b5c78c2a239136.tar.gz
	linux-2ffe2da3e71652d4f4cae19539b5c78c2a239136.tar.bz2
	linux-2ffe2da3e71652d4f4cae19539b5c78c2a239136.zip
ARM: dma-mapping: fix for speculative prefetching
ARMv6 and ARMv7 CPUs can perform speculative prefetching, which makes DMA cache coherency handling slightly more interesting. Rather than being able to rely upon the CPU not accessing the DMA buffer until DMA has completed, we now must expect that the cache could be loaded with possibly stale data from the DMA buffer. Where DMA involves data being transferred to the device, we clean the cache before handing it over for DMA, otherwise we invalidate the buffer to get rid of potential writebacks. On DMA completion, if data was transferred from the device, we invalidate the buffer to get rid of any stale speculative prefetches. Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk> Tested-By: Santosh Shilimkar <santosh.shilimkar@ti.com>
Diffstat (limited to 'arch/arm/mm/cache-v6.S')
-rw-r--r--	arch/arm/mm/cache-v6.S	| 10
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index a11934e53fbd..9d89c67a1cc3 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -271,10 +271,9 @@ ENTRY(v6_dma_flush_range)
*/
ENTRY(v6_dma_map_area)
add r1, r1, r0
- cmp r2, #DMA_TO_DEVICE
- beq v6_dma_clean_range
- bcs v6_dma_inv_range
- b v6_dma_flush_range
+ teq r2, #DMA_FROM_DEVICE
+ beq v6_dma_inv_range
+ b v6_dma_clean_range
ENDPROC(v6_dma_map_area)
/*
@@ -284,6 +283,9 @@ ENDPROC(v6_dma_map_area)
* - dir - DMA direction
*/
ENTRY(v6_dma_unmap_area)
+ add r1, r1, r0
+ teq r2, #DMA_TO_DEVICE
+ bne v6_dma_inv_range
mov pc, lr
ENDPROC(v6_dma_unmap_area)