author    Nicolas Pitre <nico@cam.org>  2005-11-11 21:51:49 +0000
committer Russell King <rmk+kernel@arm.linux.org.uk>  2005-11-11 21:51:49 +0000
commit    8adbb3718d6cead304f84f7dd60ad65274df0b15 (patch)
tree      5d39d25a7d2a6e6606c6b2ec4ce23d45cb6b4a2a /arch/arm/lib/csumpartialcopygeneric.S
parent    a9c4814d8db200052c07d8b68e76c134682c4569 (diff)
[ARM] 3152/1: make various assembly local labels actually local (the rest)
Patch from Nicolas Pitre

For assembly labels to actually be local they must start with ".L" and not only ".", otherwise they still remain visible in the final link, clutter kallsyms needlessly, and possibly make for unclear symbolic backtraces. This patch simply inserts an "L" where appropriate. The code itself is unchanged.

Signed-off-by: Nicolas Pitre <nico@cam.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
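For illustration only (this sketch is not part of the patch), here is the distinction the commit message describes, in GNU as / ARM syntax on an ELF target: a label spelled with a bare "." prefix is an ordinary symbol that is emitted to the object file's symbol table, so it survives the final link and shows up in kallsyms; the ".L" prefix marks it as assembler-local, so it is never emitted at all. The routine below is hypothetical, not taken from the patched file:

        @ hypothetical example, not from csumpartialcopygeneric.S
                .text
                .global sum_words
        sum_words:                      @ global entry point, intentionally visible
                mov     r2, #0
        .Lloop:                         @ ".L" prefix: assembler-local, never emitted
                ldr     r3, [r0], #4    @ to the symbol table, hence invisible to
                add     r2, r2, r3      @ the linker and to kallsyms
                subs    r1, r1, #1
                bne     .Lloop
                mov     r0, r2
                mov     pc, lr          @ pre-ARMv4T return, as used in this file

        .done:  mov     pc, lr          @ "." alone is NOT enough: this symbol is
                                        @ still emitted and clutters the final link

Assuming a GNU toolchain targeting ELF, running nm on the assembled object shows the difference: .done appears as a local text symbol (t) while .Lloop is absent entirely.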
Diffstat (limited to 'arch/arm/lib/csumpartialcopygeneric.S')
-rw-r--r--  arch/arm/lib/csumpartialcopygeneric.S | 70
1 file changed, 36 insertions(+), 34 deletions(-)
diff --git a/arch/arm/lib/csumpartialcopygeneric.S b/arch/arm/lib/csumpartialcopygeneric.S
index d3a2f4667db4..4a4609c19095 100644
--- a/arch/arm/lib/csumpartialcopygeneric.S
+++ b/arch/arm/lib/csumpartialcopygeneric.S
@@ -22,7 +22,7 @@ dst .req r1
len .req r2
sum .req r3
-.zero: mov r0, sum
+.Lzero: mov r0, sum
load_regs ea
/*
@@ -31,8 +31,9 @@ sum .req r3
* the length. Note that the source pointer hasn't been
* aligned yet.
*/
-.dst_unaligned: tst dst, #1
- beq .dst_16bit
+.Ldst_unaligned:
+ tst dst, #1
+ beq .Ldst_16bit
load1b ip
sub len, len, #1
@@ -41,7 +42,7 @@ sum .req r3
tst dst, #2
moveq pc, lr @ dst is now 32bit aligned
-.dst_16bit: load2b r8, ip
+.Ldst_16bit: load2b r8, ip
sub len, len, #2
adcs sum, sum, r8, put_byte_0
strb r8, [dst], #1
@@ -53,12 +54,12 @@ sum .req r3
* Handle 0 to 7 bytes, with any alignment of source and
* destination pointers. Note that when we get here, C = 0
*/
-.less8: teq len, #0 @ check for zero count
- beq .zero
+.Lless8: teq len, #0 @ check for zero count
+ beq .Lzero
/* we must have at least one byte. */
tst dst, #1 @ dst 16-bit aligned
- beq .less8_aligned
+ beq .Lless8_aligned
/* Align dst */
load1b ip
@@ -66,7 +67,7 @@ sum .req r3
adcs sum, sum, ip, put_byte_1 @ update checksum
strb ip, [dst], #1
tst len, #6
- beq .less8_byteonly
+ beq .Lless8_byteonly
1: load2b r8, ip
sub len, len, #2
@@ -74,15 +75,16 @@ sum .req r3
strb r8, [dst], #1
adcs sum, sum, ip, put_byte_1
strb ip, [dst], #1
-.less8_aligned: tst len, #6
+.Lless8_aligned:
+ tst len, #6
bne 1b
-.less8_byteonly:
+.Lless8_byteonly:
tst len, #1
- beq .done
+ beq .Ldone
load1b r8
adcs sum, sum, r8, put_byte_0 @ update checksum
strb r8, [dst], #1
- b .done
+ b .Ldone
FN_ENTRY
mov ip, sp
@@ -90,11 +92,11 @@ FN_ENTRY
sub fp, ip, #4
cmp len, #8 @ Ensure that we have at least
- blo .less8 @ 8 bytes to copy.
+ blo .Lless8 @ 8 bytes to copy.
adds sum, sum, #0 @ C = 0
tst dst, #3 @ Test destination alignment
- blne .dst_unaligned @ align destination, return here
+ blne .Ldst_unaligned @ align destination, return here
/*
* Ok, the dst pointer is now 32bit aligned, and we know
@@ -103,7 +105,7 @@ FN_ENTRY
*/
tst src, #3 @ Test source alignment
- bne .src_not_aligned
+ bne .Lsrc_not_aligned
/* Routine for src & dst aligned */
@@ -136,17 +138,17 @@ FN_ENTRY
adcs sum, sum, r4
4: ands len, len, #3
- beq .done
+ beq .Ldone
load1l r4
tst len, #2
mov r5, r4, get_byte_0
- beq .exit
+ beq .Lexit
adcs sum, sum, r4, push #16
strb r5, [dst], #1
mov r5, r4, get_byte_1
strb r5, [dst], #1
mov r5, r4, get_byte_2
-.exit: tst len, #1
+.Lexit: tst len, #1
strneb r5, [dst], #1
andne r5, r5, #255
adcnes sum, sum, r5, put_byte_0
@@ -157,20 +159,20 @@ FN_ENTRY
* the inefficient byte manipulations in the
* architecture independent code.
*/
-.done: adc r0, sum, #0
+.Ldone: adc r0, sum, #0
ldr sum, [sp, #0] @ dst
tst sum, #1
movne r0, r0, ror #8
load_regs ea
-.src_not_aligned:
+.Lsrc_not_aligned:
adc sum, sum, #0 @ include C from dst alignment
and ip, src, #3
bic src, src, #3
load1l r5
cmp ip, #2
- beq .src2_aligned
- bhi .src3_aligned
+ beq .Lsrc2_aligned
+ bhi .Lsrc3_aligned
mov r4, r5, pull #8 @ C = 0
bics ip, len, #15
beq 2f
@@ -211,18 +213,18 @@ FN_ENTRY
adcs sum, sum, r4
mov r4, r5, pull #8
4: ands len, len, #3
- beq .done
+ beq .Ldone
mov r5, r4, get_byte_0
tst len, #2
- beq .exit
+ beq .Lexit
adcs sum, sum, r4, push #16
strb r5, [dst], #1
mov r5, r4, get_byte_1
strb r5, [dst], #1
mov r5, r4, get_byte_2
- b .exit
+ b .Lexit
-.src2_aligned: mov r4, r5, pull #16
+.Lsrc2_aligned: mov r4, r5, pull #16
adds sum, sum, #0
bics ip, len, #15
beq 2f
@@ -263,20 +265,20 @@ FN_ENTRY
adcs sum, sum, r4
mov r4, r5, pull #16
4: ands len, len, #3
- beq .done
+ beq .Ldone
mov r5, r4, get_byte_0
tst len, #2
- beq .exit
+ beq .Lexit
adcs sum, sum, r4
strb r5, [dst], #1
mov r5, r4, get_byte_1
strb r5, [dst], #1
tst len, #1
- beq .done
+ beq .Ldone
load1b r5
- b .exit
+ b .Lexit
-.src3_aligned: mov r4, r5, pull #24
+.Lsrc3_aligned: mov r4, r5, pull #24
adds sum, sum, #0
bics ip, len, #15
beq 2f
@@ -317,10 +319,10 @@ FN_ENTRY
adcs sum, sum, r4
mov r4, r5, pull #24
4: ands len, len, #3
- beq .done
+ beq .Ldone
mov r5, r4, get_byte_0
tst len, #2
- beq .exit
+ beq .Lexit
strb r5, [dst], #1
adcs sum, sum, r4
load1l r4
@@ -328,4 +330,4 @@ FN_ENTRY
strb r5, [dst], #1
adcs sum, sum, r4, push #24
mov r5, r4, get_byte_1
- b .exit
+ b .Lexit