author     Catalin Marinas <catalin.marinas@arm.com>   2009-02-16 11:42:09 +0100
committer  Russell King <rmk+kernel@arm.linux.org.uk>  2009-02-19 11:27:35 +0000
commit     c4c5716e16c4ee971dec446a7e4801fbb8a1066b (patch)
tree       a1fb65c5b694bdc0b61bcd06371c2204968eb3b3 /arch
parent     2e1926e7b5d39eb31880152d636e8d8d011888cb (diff)
[ARM] 5385/2: unwind: Add unwinding information to exception entry points
This is needed to allow or stop the unwinding at certain points in the
kernel, such as the exception entry points.
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
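
[Editor's note] For context, the UNWIND() macro used throughout this patch comes from the <asm/unwind.h> header that both files now include; it emits the ARM EHABI unwind directives only when CONFIG_ARM_UNWIND is enabled and compiles them out otherwise. A minimal sketch of that conditional definition (not the verbatim header) looks like this:

/* Sketch: UNWIND() passes its argument through to the assembler only when
 * the kernel unwinder is configured; otherwise the annotation vanishes. */
#ifdef __ASSEMBLY__
#ifdef CONFIG_ARM_UNWIND
#define UNWIND(code...)		code
#else
#define UNWIND(code...)
#endif
#endif	/* __ASSEMBLY__ */

Of the directives added below, .fnstart/.fnend bracket a region the unwinder may traverse (with .save describing the registers saved on entry), while .cantunwind marks frames, such as the user-space entry points, at which unwinding must stop.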
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/kernel/entry-armv.S   | 19
-rw-r--r--  arch/arm/kernel/entry-common.S |  4
2 files changed, 23 insertions(+), 0 deletions(-)
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 85040cfeb5e5..d662a2f1fd85 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -20,6 +20,7 @@
 #include <asm/vfpmacros.h>
 #include <mach/entry-macro.S>
 #include <asm/thread_notify.h>
+#include <asm/unwind.h>
 
 #include "entry-header.S"
 
@@ -123,6 +124,8 @@ ENDPROC(__und_invalid)
 #endif
 
 	.macro	svc_entry, stack_hole=0
+ UNWIND(.fnstart		)
+ UNWIND(.save {r0 - pc}		)
 	sub	sp, sp, #(S_FRAME_SIZE + \stack_hole)
  SPFIX(	tst	sp, #4		)
  SPFIX(	bicne	sp, sp, #4	)
@@ -196,6 +199,7 @@ __dabt_svc:
 	ldr	r0, [sp, #S_PSR]
 	msr	spsr_cxsf, r0
 	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
+ UNWIND(.fnend		)
ENDPROC(__dabt_svc)
 
 	.align	5
@@ -228,6 +232,7 @@ __irq_svc:
 	bleq	trace_hardirqs_on
 #endif
 	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
+ UNWIND(.fnend		)
ENDPROC(__irq_svc)
 
 	.ltorg
@@ -278,6 +283,7 @@ __und_svc:
 	ldr	lr, [sp, #S_PSR]		@ Get SVC cpsr
 	msr	spsr_cxsf, lr
 	ldmia	sp, {r0 - pc}^			@ Restore SVC registers
+ UNWIND(.fnend		)
ENDPROC(__und_svc)
 
 	.align	5
@@ -320,6 +326,7 @@ __pabt_svc:
 	ldr	r0, [sp, #S_PSR]
 	msr	spsr_cxsf, r0
 	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
+ UNWIND(.fnend		)
ENDPROC(__pabt_svc)
 
 	.align	5
@@ -343,6 +350,8 @@ ENDPROC(__pabt_svc)
 #endif
 
 	.macro	usr_entry
+ UNWIND(.fnstart	)
+ UNWIND(.cantunwind	)	@ don't unwind the user space
 	sub	sp, sp, #S_FRAME_SIZE
 	stmib	sp, {r1 - r12}
 
@@ -420,6 +429,7 @@ __dabt_usr:
 	mov	r2, sp
 	adr	lr, ret_from_exception
 	b	do_DataAbort
+ UNWIND(.fnend		)
ENDPROC(__dabt_usr)
 
 	.align	5
@@ -450,6 +460,7 @@ __irq_usr:
 
 	mov	why, #0
 	b	ret_to_user
+ UNWIND(.fnend		)
ENDPROC(__irq_usr)
 
 	.ltorg
@@ -484,6 +495,7 @@ __und_usr:
 #else
 	b	__und_usr_unknown
 #endif
+ UNWIND(.fnend		)
ENDPROC(__und_usr)
 
 	@
@@ -671,14 +683,18 @@ __pabt_usr:
 	enable_irq				@ Enable interrupts
 	mov	r1, sp				@ regs
 	bl	do_PrefetchAbort		@ call abort handler
+ UNWIND(.fnend		)
 	/* fall through */
 
 /*
  * This is the return code to user mode for abort handlers
  */
ENTRY(ret_from_exception)
+ UNWIND(.fnstart	)
+ UNWIND(.cantunwind	)
 	get_thread_info tsk
 	mov	why, #0
 	b	ret_to_user
+ UNWIND(.fnend		)
ENDPROC(__pabt_usr)
ENDPROC(ret_from_exception)
 
@@ -688,6 +704,8 @@ ENDPROC(ret_from_exception)
  * previous and next are guaranteed not to be the same.
  */
ENTRY(__switch_to)
+ UNWIND(.fnstart	)
+ UNWIND(.cantunwind	)
 	add	ip, r1, #TI_CPU_SAVE
 	ldr	r3, [r2, #TI_TP_VALUE]
 	stmia	ip!, {r4 - sl, fp, sp, lr}	@ Store most regs on stack
@@ -717,6 +735,7 @@ ENTRY(__switch_to)
 	bl	atomic_notifier_call_chain
 	mov	r0, r5
 	ldmia	r4, {r4 - sl, fp, sp, pc}	@ Load all regs saved previously
+ UNWIND(.fnend		)
ENDPROC(__switch_to)
 
 	__INIT
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 49a6ba926c2b..b8c1f1411440 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -11,6 +11,7 @@
 #include <asm/unistd.h>
 #include <asm/ftrace.h>
 #include <mach/entry-macro.S>
+#include <asm/unwind.h>
 
 #include "entry-header.S"
 
@@ -22,6 +23,8 @@
  * stack.
  */
ret_fast_syscall:
+ UNWIND(.fnstart	)
+ UNWIND(.cantunwind	)
 	disable_irq				@ disable interrupts
 	ldr	r1, [tsk, #TI_FLAGS]
 	tst	r1, #_TIF_WORK_MASK
@@ -38,6 +41,7 @@ ret_fast_syscall:
 	mov	r0, r0
 	add	sp, sp, #S_FRAME_SIZE - S_PC
 	movs	pc, lr				@ return & move spsr_svc into cpsr
+ UNWIND(.fnend		)
 
 /*
  * Ok, we need to do extra processing, enter the slow path.