Diffstat (limited to 'src/arch/arm/armv7/cpu.S')
 src/arch/arm/armv7/cpu.S | 72 +++++++++++++++++++++++++++++++++++++++++++++---------------------------
 1 file changed, 45 insertions(+), 27 deletions(-)
diff --git a/src/arch/arm/armv7/cpu.S b/src/arch/arm/armv7/cpu.S
index 42e2354d44ec..29a19e76df83 100644
--- a/src/arch/arm/armv7/cpu.S
+++ b/src/arch/arm/armv7/cpu.S
@@ -30,25 +30,39 @@
* SUCH DAMAGE.
*/
+#include <arch/asm.h>
+
/*
- * These work very hard to not push registers onto the stack and to limit themselves
- * to use r0-r3 and ip.
+ * Dcache invalidations by set/way work by passing a [way:sbz:set:sbz:level:0]
+ * bitfield in a register to the appropriate MCR instruction. This algorithm
+ * works by initializing a bitfield with the highest-numbered set and way, and
+ * generating a "set decrement" and a "way decrement". The former just contains
+ * the LSB of the set field, but the latter contains the LSB of the way field
+ * minus the highest valid set field... such that when you subtract it from a
+ * [way:0:level] field you end up with a [way - 1:highest_set:level] field
+ * through the magic of double subtraction. It's quite ingenious, really.
+ * Takes care to only use r0-r3 and ip so it's perfectly ABI-compatible without
+ * needing to write to memory.
*/
-/* * LINTSTUB: void armv7_dcache_wbinv_all(void); */
-ENTRY_NP(armv7_dcache_wbinv_all)
+.macro dcache_apply_all crm
+ dsb
+ mov r3, #-2 @ initialize level so that we start at 0
+
+1: @next_level
+ add r3, r3, #2 @ increment level
+
mrc p15, 1, r0, c0, c0, 1 @ read CLIDR
- ands r3, r0, #0x07000000
- beq .Ldone_wbinv
- lsr r3, r3, #23 @ left align loc (low 4 bits)
+ and ip, r0, #0x07000000 @ narrow to LoC
+ lsr ip, ip, #23 @ left align LoC (low 4 bits)
+ cmp r3, ip @ compare
+ bge 3f @done @ else fall through (r0 == CLIDR)
- mov r1, #0
-.Lstart_wbinv:
- add r2, r3, r3, lsr #1 @ r2 = level * 3 / 2
+ add r2, r3, r3, lsr #1 @ r2 = (level << 1) * 3 / 2
mov r1, r0, lsr r2 @ r1 = cache type
bfc r1, #3, #28
cmp r1, #2 @ is it data or i&d?
- blt .Lnext_level_wbinv @ nope, skip level
+ blt 1b @next_level @ nope, skip level
mcr p15, 2, r3, c0, c0, 0 @ select cache level
isb
@@ -65,7 +79,7 @@ ENTRY_NP(armv7_dcache_wbinv_all)
ubfx ip, r0, #3, #10 @ get numways - 1 from [to be discarded] CCSIDR
clz r2, ip @ number of bits to MSB of way
lsl ip, ip, r2 @ shift by that into way position
- mov r0, #1 @
+ mov r0, #1
lsl r2, r0, r2 @ r2 now contains the way decr
mov r0, r3 @ get sets/level (no way yet)
orr r3, r3, ip @ merge way into way/set/level
@@ -73,27 +87,31 @@ ENTRY_NP(armv7_dcache_wbinv_all)
sub r2, r2, r0 @ subtract from way decr
/* r3 = ways/sets/level, r2 = way decr, r1 = set decr, r0 and ip are free */
-1: mcr p15, 0, r3, c7, c14, 2 @ writeback and invalidate line
+2: mcr p15, 0, r3, c7, \crm, 2 @ writeback and/or invalidate line
cmp r3, #15 @ are we done with this level (way/set == 0)
- bls .Lnext_level_wbinv @ yes, go to next level
- lsl r0, r3, #10 @ clear way bits leaving only set/level bits
- lsr r0, r0, #4 @ clear level bits leaving only set bits
+ bls 1b @next_level @ yes, go to next level
+ lsr r0, r3, #4 @ clear level bits leaving only way/set bits
+ lsls r0, r0, #14 @ clear way bits leaving only set bits
subne r3, r3, r1 @ non-zero?, decrement set #
subeq r3, r3, r2 @ zero?, decrement way # and restore set count
- b 1b
+ b 2b
-.Lnext_level_wbinv:
- mrc p15, 1, r0, c0, c0, 1 @ read CLIDR
- and ip, r0, #0x07000000 @ narrow to LoC
- lsr ip, ip, #23 @ left align LoC (low 4 bits)
- add r3, r3, #2 @ go to next level
- cmp r3, ip @ compare
- blt .Lstart_wbinv @ not done, next level (r0 == CLIDR)
-
-.Ldone_wbinv:
+3: @done
mov r0, #0 @ default back to cache level 0
mcr p15, 2, r0, c0, c0, 0 @ select cache level
dsb
isb
bx lr
-END(armv7_dcache_wbinv_all)
+.endm
+
+ENTRY(dcache_invalidate_all)
+ dcache_apply_all crm=c6
+ENDPROC(dcache_invalidate_all)
+
+ENTRY(dcache_clean_all)
+ dcache_apply_all crm=c10
+ENDPROC(dcache_clean_all)
+
+ENTRY(dcache_clean_invalidate_all)
+ dcache_apply_all crm=c14
+ENDPROC(dcache_clean_invalidate_all)
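
The three entry points differ only in the CRm operand they pass to the macro: c6 selects DCISW (invalidate data/unified cache line by set/way), c10 selects DCCSW (clean by set/way), and c14 selects DCCISW (clean and invalidate by set/way). On the C side, callers would presumably see nothing more than three void prototypes along these lines (hypothetical declarations, not part of this diff):

void dcache_invalidate_all(void);
void dcache_clean_all(void);
void dcache_clean_invalidate_all(void);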
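
As a companion to the comment added at the top of the file, here is a minimal C model of the [way:set:level] word the dcache_apply_all macro feeds to the MCR on each loop iteration. The geometry constants (LINE_SHIFT, NUM_SETS, NUM_WAYS) and the printf are illustrative assumptions only; the real code derives the geometry from CLIDR/CCSIDR with MRC reads and performs the cache-maintenance MCR instead of printing.

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative model of the [way:set:level] word built by dcache_apply_all.
 * LINE_SHIFT/NUM_SETS/NUM_WAYS are made-up example values; the assembly
 * derives them from CLIDR/CCSIDR at run time.
 */
#define LINE_SHIFT	6u	/* log2(line size in bytes): set field starts here */
#define NUM_SETS	128u	/* sets per way (hypothetical) */
#define NUM_WAYS	4u	/* ways (hypothetical) */

int main(void)
{
	uint32_t level     = 0;					/* L1; lives in bits [3:1] */
	uint32_t way_shift = __builtin_clz(NUM_WAYS - 1);	/* way field sits at the top */
	uint32_t set_decr  = 1u << LINE_SHIFT;			/* LSB of the set field */
	uint32_t max_set   = (NUM_SETS - 1) << LINE_SHIFT;
	/*
	 * "Way decrement": LSB of the way field minus the highest set field,
	 * so one subtraction drops the way by one AND restores the set count.
	 */
	uint32_t way_decr  = (1u << way_shift) - max_set;

	uint32_t word = ((NUM_WAYS - 1) << way_shift) | max_set | (level << 1);

	for (;;) {
		/* the assembly does: mcr p15, 0, word, c7, \crm, 2 */
		printf("set/way op on %#010x (way %u, set %u)\n",
		       (unsigned)word,
		       (unsigned)(word >> way_shift),
		       (unsigned)((word >> LINE_SHIFT) & (NUM_SETS - 1)));
		if (word <= 15)			/* way == 0 && set == 0: level done */
			break;
		if (word & max_set)		/* set field non-zero: step to next set */
			word -= set_decr;
		else				/* set wrapped: next way, reload set count */
			word -= way_decr;
	}
	return 0;
}

Note how a single subtraction of way_decr both steps the way field down by one and reloads the set field with its maximum value; that is the "double subtraction" the comment describes.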