author     Catalin Marinas <catalin.marinas@arm.com>  2017-08-09 15:37:49 +0100
committer  Catalin Marinas <catalin.marinas@arm.com>  2017-08-09 15:37:49 +0100
commit     0553896787353e2526078064ff1cf21ff7bc34ce (patch)
tree       bf9f6490d4a74991653da4054cfc4c1b7c647074 /arch/arm64
parent     739586951b8abe381a98797a5e27a0a9336333d6 (diff)
parent     31e43ad3b74a5d7b282023b72f25fc677c14c727 (diff)
Merge branch 'arm64/exception-stack' of git://git.kernel.org/pub/scm/linux/kernel/git/mark/linux into for-next/core
* 'arm64/exception-stack' of git://git.kernel.org/pub/scm/linux/kernel/git/mark/linux:
  arm64: unwind: remove sp from struct stackframe
  arm64: unwind: reference pt_regs via embedded stack frame
  arm64: unwind: disregard frame.sp when validating frame pointer
  arm64: unwind: avoid percpu indirection for irq stack
  arm64: move non-entry code out of .entry.text
  arm64: consistently use bl for C exception entry
  arm64: Add ASM_BUG()
Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/include/asm/asm-bug.h      54
-rw-r--r--  arch/arm64/include/asm/assembler.h    11
-rw-r--r--  arch/arm64/include/asm/bug.h          35
-rw-r--r--  arch/arm64/include/asm/irq.h          39
-rw-r--r--  arch/arm64/include/asm/ptrace.h        1
-rw-r--r--  arch/arm64/include/asm/stacktrace.h    1
-rw-r--r--  arch/arm64/include/asm/traps.h         5
-rw-r--r--  arch/arm64/kernel/asm-offsets.c        1
-rw-r--r--  arch/arm64/kernel/entry.S            122
-rw-r--r--  arch/arm64/kernel/head.S               4
-rw-r--r--  arch/arm64/kernel/perf_callchain.c     1
-rw-r--r--  arch/arm64/kernel/process.c            5
-rw-r--r--  arch/arm64/kernel/ptrace.c             2
-rw-r--r--  arch/arm64/kernel/return_address.c     1
-rw-r--r--  arch/arm64/kernel/stacktrace.c        57
-rw-r--r--  arch/arm64/kernel/time.c               1
-rw-r--r--  arch/arm64/kernel/traps.c             34
17 files changed, 176 insertions, 198 deletions
diff --git a/arch/arm64/include/asm/asm-bug.h b/arch/arm64/include/asm/asm-bug.h
new file mode 100644
index 000000000000..636e755bcdca
--- /dev/null
+++ b/arch/arm64/include/asm/asm-bug.h
@@ -0,0 +1,54 @@
+#ifndef __ASM_ASM_BUG_H
+/*
+ * Copyright (C) 2017 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#define __ASM_ASM_BUG_H
+
+#include <asm/brk-imm.h>
+
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+#define _BUGVERBOSE_LOCATION(file, line) __BUGVERBOSE_LOCATION(file, line)
+#define __BUGVERBOSE_LOCATION(file, line) \
+ .pushsection .rodata.str,"aMS",@progbits,1; \
+ 2: .string file; \
+ .popsection; \
+ \
+ .long 2b - 0b; \
+ .short line;
+#else
+#define _BUGVERBOSE_LOCATION(file, line)
+#endif
+
+#ifdef CONFIG_GENERIC_BUG
+
+#define __BUG_ENTRY(flags) \
+ .pushsection __bug_table,"aw"; \
+ .align 2; \
+ 0: .long 1f - 0b; \
+_BUGVERBOSE_LOCATION(__FILE__, __LINE__) \
+ .short flags; \
+ .popsection; \
+ 1:
+#else
+#define __BUG_ENTRY(flags)
+#endif
+
+#define ASM_BUG_FLAGS(flags) \
+ __BUG_ENTRY(flags) \
+ brk BUG_BRK_IMM
+
+#define ASM_BUG() ASM_BUG_FLAGS(0)
+
+#endif /* __ASM_ASM_BUG_H */
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 5d8903c45031..1ef56837cc6f 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -409,6 +409,17 @@ alternative_endif
.size __pi_##x, . - x; \
ENDPROC(x)
+/*
+ * Annotate a function as being unsuitable for kprobes.
+ */
+#ifdef CONFIG_KPROBES
+#define NOKPROBE(x) \
+ .pushsection "_kprobe_blacklist", "aw"; \
+ .quad x; \
+ .popsection;
+#else
+#define NOKPROBE(x)
+#endif
/*
* Emit a 64-bit absolute little endian symbol reference in a way that
* ensures that it will be resolved at build time, even when building a
diff --git a/arch/arm64/include/asm/bug.h b/arch/arm64/include/asm/bug.h
index a02a57186f56..d7dc43752705 100644
--- a/arch/arm64/include/asm/bug.h
+++ b/arch/arm64/include/asm/bug.h
@@ -18,41 +18,12 @@
#ifndef _ARCH_ARM64_ASM_BUG_H
#define _ARCH_ARM64_ASM_BUG_H
-#include <asm/brk-imm.h>
+#include <linux/stringify.h>
-#ifdef CONFIG_DEBUG_BUGVERBOSE
-#define _BUGVERBOSE_LOCATION(file, line) __BUGVERBOSE_LOCATION(file, line)
-#define __BUGVERBOSE_LOCATION(file, line) \
- ".pushsection .rodata.str,\"aMS\",@progbits,1\n" \
- "2: .string \"" file "\"\n\t" \
- ".popsection\n\t" \
- \
- ".long 2b - 0b\n\t" \
- ".short " #line "\n\t"
-#else
-#define _BUGVERBOSE_LOCATION(file, line)
-#endif
-
-#ifdef CONFIG_GENERIC_BUG
-
-#define __BUG_ENTRY(flags) \
- ".pushsection __bug_table,\"aw\"\n\t" \
- ".align 2\n\t" \
- "0: .long 1f - 0b\n\t" \
-_BUGVERBOSE_LOCATION(__FILE__, __LINE__) \
- ".short " #flags "\n\t" \
- ".popsection\n" \
- "1: "
-#else
-#define __BUG_ENTRY(flags) ""
-#endif
+#include <asm/asm-bug.h>
#define __BUG_FLAGS(flags) \
- asm volatile ( \
- __BUG_ENTRY(flags) \
- "brk %[imm]" :: [imm] "i" (BUG_BRK_IMM) \
- );
-
+ asm volatile (__stringify(ASM_BUG_FLAGS(flags)));
#define BUG() do { \
__BUG_FLAGS(0); \
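
For illustration only (not part of the patch): the refactoring above relies on __stringify() from <linux/stringify.h> to turn the assembler-level ASM_BUG_FLAGS() macro into an inline-asm template, so a single definition serves both .S files and C. A minimal, hypothetical user-space sketch of the same pattern (all names below are invented):

  /* two-level stringify, as defined in linux/stringify.h, so the
   * argument is macro-expanded before it is turned into a string */
  #define __stringify_1(x...)  #x
  #define __stringify(x...)    __stringify_1(x)

  /* assembler-level macro: two architectural no-ops */
  #define ASM_TWO_NOPS  nop; nop

  static inline void two_nops(void)
  {
      /* the macro body, stringified into the template "nop; nop" */
      asm volatile (__stringify(ASM_TWO_NOPS));
  }

  int main(void)
  {
      two_nops();
      return 0;
  }
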
diff --git a/arch/arm64/include/asm/irq.h b/arch/arm64/include/asm/irq.h
index b77197d941fc..8ba89c4ca183 100644
--- a/arch/arm64/include/asm/irq.h
+++ b/arch/arm64/include/asm/irq.h
@@ -7,6 +7,7 @@
#ifndef __ASSEMBLY__
#include <linux/percpu.h>
+#include <linux/sched/task_stack.h>
#include <asm-generic/irq.h>
#include <asm/thread_info.h>
@@ -15,31 +16,6 @@ struct pt_regs;
DECLARE_PER_CPU(unsigned long [IRQ_STACK_SIZE/sizeof(long)], irq_stack);
-/*
- * The highest address on the stack, and the first to be used. Used to
- * find the dummy-stack frame put down by el?_irq() in entry.S, which
- * is structured as follows:
- *
- * ------------
- * | | <- irq_stack_ptr
- * top ------------
- * | x19 | <- irq_stack_ptr - 0x08
- * ------------
- * | x29 | <- irq_stack_ptr - 0x10
- * ------------
- *
- * where x19 holds a copy of the task stack pointer where the struct pt_regs
- * from kernel_entry can be found.
- *
- */
-#define IRQ_STACK_PTR(cpu) ((unsigned long)per_cpu(irq_stack, cpu) + IRQ_STACK_START_SP)
-
-/*
- * The offset from irq_stack_ptr where entry.S will store the original
- * stack pointer. Used by unwind_frame() and dump_backtrace().
- */
-#define IRQ_STACK_TO_TASK_STACK(ptr) (*((unsigned long *)((ptr) - 0x08)))
-
extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
static inline int nr_legacy_irqs(void)
@@ -47,14 +23,21 @@ static inline int nr_legacy_irqs(void)
return 0;
}
-static inline bool on_irq_stack(unsigned long sp, int cpu)
+static inline bool on_irq_stack(unsigned long sp)
{
- /* variable names the same as kernel/stacktrace.c */
- unsigned long low = (unsigned long)per_cpu(irq_stack, cpu);
+ unsigned long low = (unsigned long)raw_cpu_ptr(irq_stack);
unsigned long high = low + IRQ_STACK_START_SP;
return (low <= sp && sp <= high);
}
+static inline bool on_task_stack(struct task_struct *tsk, unsigned long sp)
+{
+ unsigned long low = (unsigned long)task_stack_page(tsk);
+ unsigned long high = low + THREAD_SIZE;
+
+ return (low <= sp && sp < high);
+}
+
+#endif /* !__ASSEMBLY__ */
#endif
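
For illustration only (not part of the patch): both helpers above are simple range checks: a stack is just a [low, high) byte range, and a pointer is "on" that stack when it falls inside the range (on_irq_stack keeps an inclusive upper bound). A hypothetical user-space sketch with invented names:

  #include <stdbool.h>
  #include <stddef.h>
  #include <stdint.h>

  /* invented stand-in for THREAD_SIZE / IRQ_STACK_START_SP */
  #define FAKE_STACK_SIZE  16384UL

  struct fake_stack {
      uintptr_t low;   /* lowest address of the stack area */
      size_t    size;  /* usable bytes above 'low' */
  };

  /* true when sp points into [low, low + size) */
  static bool on_fake_stack(const struct fake_stack *stk, uintptr_t sp)
  {
      return stk->low <= sp && sp < stk->low + stk->size;
  }
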
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index 4f64373b84fd..6069d66e0bc2 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -137,6 +137,7 @@ struct pt_regs {
u64 orig_addr_limit;
u64 unused; // maintain 16 byte alignment
+ u64 stackframe[2];
};
static inline bool in_syscall(struct pt_regs const *regs)
diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
index 5b6eafccc5d8..3bebab378c72 100644
--- a/arch/arm64/include/asm/stacktrace.h
+++ b/arch/arm64/include/asm/stacktrace.h
@@ -20,7 +20,6 @@ struct task_struct;
struct stackframe {
unsigned long fp;
- unsigned long sp;
unsigned long pc;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
unsigned int graph;
diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h
index 02e9035b0685..41361684580d 100644
--- a/arch/arm64/include/asm/traps.h
+++ b/arch/arm64/include/asm/traps.h
@@ -60,4 +60,9 @@ static inline int in_exception_text(unsigned long ptr)
return in ? : __in_irqentry_text(ptr);
}
+static inline int in_entry_text(unsigned long ptr)
+{
+ return ptr >= (unsigned long)&__entry_text_start &&
+ ptr < (unsigned long)&__entry_text_end;
+}
#endif
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index b3bb7ef97bc8..71bf088f1e4b 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -75,6 +75,7 @@ int main(void)
DEFINE(S_ORIG_X0, offsetof(struct pt_regs, orig_x0));
DEFINE(S_SYSCALLNO, offsetof(struct pt_regs, syscallno));
DEFINE(S_ORIG_ADDR_LIMIT, offsetof(struct pt_regs, orig_addr_limit));
+ DEFINE(S_STACKFRAME, offsetof(struct pt_regs, stackframe));
DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs));
BLANK();
DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id.counter));
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index cace76d17535..0b8461158c56 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -111,6 +111,18 @@
mrs x23, spsr_el1
stp lr, x21, [sp, #S_LR]
+ /*
+ * In order to be able to dump the contents of struct pt_regs at the
+ * time the exception was taken (in case we attempt to walk the call
+ * stack later), chain it together with the stack frames.
+ */
+ .if \el == 0
+ stp xzr, xzr, [sp, #S_STACKFRAME]
+ .else
+ stp x29, x22, [sp, #S_STACKFRAME]
+ .endif
+ add x29, sp, #S_STACKFRAME
+
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
/*
* Set the TTBR0 PAN bit in SPSR. When the exception is taken from
@@ -263,14 +275,6 @@ alternative_else_nop_endif
/* switch to the irq stack */
mov sp, x26
-
- /*
- * Add a dummy stack frame, this non-standard format is fixed up
- * by unwind_frame()
- */
- stp x29, x19, [sp, #-16]!
- mov x29, sp
-
9998:
.endm
@@ -350,7 +354,8 @@ END(vectors)
mov x0, sp
mov x1, #\reason
mrs x2, esr_el1
- b bad_mode
+ bl bad_mode
+ ASM_BUG()
.endm
el0_sync_invalid:
@@ -447,14 +452,16 @@ el1_sp_pc:
mrs x0, far_el1
enable_dbg
mov x2, sp
- b do_sp_pc_abort
+ bl do_sp_pc_abort
+ ASM_BUG()
el1_undef:
/*
* Undefined instruction
*/
enable_dbg
mov x0, sp
- b do_undefinstr
+ bl do_undefinstr
+ ASM_BUG()
el1_dbg:
/*
* Debug exception handling
@@ -472,7 +479,8 @@ el1_inv:
mov x0, sp
mov x2, x1
mov x1, #BAD_SYNC
- b bad_mode
+ bl bad_mode
+ ASM_BUG()
ENDPROC(el1_sync)
.align 6
@@ -706,38 +714,6 @@ el0_irq_naked:
ENDPROC(el0_irq)
/*
- * Register switch for AArch64. The callee-saved registers need to be saved
- * and restored. On entry:
- * x0 = previous task_struct (must be preserved across the switch)
- * x1 = next task_struct
- * Previous and next are guaranteed not to be the same.
- *
- */
-ENTRY(cpu_switch_to)
- mov x10, #THREAD_CPU_CONTEXT
- add x8, x0, x10
- mov x9, sp
- stp x19, x20, [x8], #16 // store callee-saved registers
- stp x21, x22, [x8], #16
- stp x23, x24, [x8], #16
- stp x25, x26, [x8], #16
- stp x27, x28, [x8], #16
- stp x29, x9, [x8], #16
- str lr, [x8]
- add x8, x1, x10
- ldp x19, x20, [x8], #16 // restore callee-saved registers
- ldp x21, x22, [x8], #16
- ldp x23, x24, [x8], #16
- ldp x25, x26, [x8], #16
- ldp x27, x28, [x8], #16
- ldp x29, x9, [x8], #16
- ldr lr, [x8]
- mov sp, x9
- msr sp_el0, x1
- ret
-ENDPROC(cpu_switch_to)
-
-/*
* This is the fast syscall return path. We do as little as possible here,
* and this includes saving x0 back into the kernel stack.
*/
@@ -780,18 +756,6 @@ finish_ret_to_user:
ENDPROC(ret_to_user)
/*
- * This is how we return from a fork.
- */
-ENTRY(ret_from_fork)
- bl schedule_tail
- cbz x19, 1f // not a kernel thread
- mov x0, x20
- blr x19
-1: get_thread_info tsk
- b ret_to_user
-ENDPROC(ret_from_fork)
-
-/*
* SVC handler.
*/
.align 6
@@ -863,3 +827,49 @@ ENTRY(sys_rt_sigreturn_wrapper)
mov x0, sp
b sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)
+
+/*
+ * Register switch for AArch64. The callee-saved registers need to be saved
+ * and restored. On entry:
+ * x0 = previous task_struct (must be preserved across the switch)
+ * x1 = next task_struct
+ * Previous and next are guaranteed not to be the same.
+ *
+ */
+ENTRY(cpu_switch_to)
+ mov x10, #THREAD_CPU_CONTEXT
+ add x8, x0, x10
+ mov x9, sp
+ stp x19, x20, [x8], #16 // store callee-saved registers
+ stp x21, x22, [x8], #16
+ stp x23, x24, [x8], #16
+ stp x25, x26, [x8], #16
+ stp x27, x28, [x8], #16
+ stp x29, x9, [x8], #16
+ str lr, [x8]
+ add x8, x1, x10
+ ldp x19, x20, [x8], #16 // restore callee-saved registers
+ ldp x21, x22, [x8], #16
+ ldp x23, x24, [x8], #16
+ ldp x25, x26, [x8], #16
+ ldp x27, x28, [x8], #16
+ ldp x29, x9, [x8], #16
+ ldr lr, [x8]
+ mov sp, x9
+ msr sp_el0, x1
+ ret
+ENDPROC(cpu_switch_to)
+NOKPROBE(cpu_switch_to)
+
+/*
+ * This is how we return from a fork.
+ */
+ENTRY(ret_from_fork)
+ bl schedule_tail
+ cbz x19, 1f // not a kernel thread
+ mov x0, x20
+ blr x19
+1: get_thread_info tsk
+ b ret_to_user
+ENDPROC(ret_from_fork)
+NOKPROBE(ret_from_fork)
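
For illustration only (not part of the patch): a tail branch ('b') into a non-returning C handler leaves no return address, so the entry asm is invisible to a frame-pointer unwinder; 'bl' followed by ASM_BUG() records a return address for backtraces and traps if the handler unexpectedly returns. A hypothetical C analogue of that pattern (names invented):

  #include <stdlib.h>

  /* invented stand-in for a handler that must not return to its caller */
  static void fake_bad_mode(int reason)
  {
      exit(reason);
  }

  static void fake_vector_stub(void)
  {
      /* a call (like 'bl') leaves a return address on the frame chain;
       * the trap after it (like ASM_BUG()) fires only if the handler
       * unexpectedly returns */
      fake_bad_mode(1);
      __builtin_trap();
  }

  int main(void)
  {
      fake_vector_stub();
      return 0;
  }
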
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 73a0531e0187..d3015172c136 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -362,6 +362,9 @@ __primary_switched:
ret // to __primary_switch()
0:
#endif
+ add sp, sp, #16
+ mov x29, #0
+ mov x30, #0
b start_kernel
ENDPROC(__primary_switched)
@@ -617,6 +620,7 @@ __secondary_switched:
ldr x2, [x0, #CPU_BOOT_TASK]
msr sp_el0, x2
mov x29, #0
+ mov x30, #0
b secondary_start_kernel
ENDPROC(__secondary_switched)
diff --git a/arch/arm64/kernel/perf_callchain.c b/arch/arm64/kernel/perf_callchain.c
index 713ca824f266..bcafd7dcfe8b 100644
--- a/arch/arm64/kernel/perf_callchain.c
+++ b/arch/arm64/kernel/perf_callchain.c
@@ -162,7 +162,6 @@ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
}
frame.fp = regs->regs[29];
- frame.sp = regs->sp;
frame.pc = regs->pc;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
frame.graph = current->curr_ret_stack;
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 659ae8094ed5..85b953dd023a 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -382,15 +382,12 @@ unsigned long get_wchan(struct task_struct *p)
return 0;
frame.fp = thread_saved_fp(p);
- frame.sp = thread_saved_sp(p);
frame.pc = thread_saved_pc(p);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
frame.graph = p->curr_ret_stack;
#endif
do {
- if (frame.sp < stack_page ||
- frame.sp >= stack_page + THREAD_SIZE ||
- unwind_frame(p, &frame))
+ if (unwind_frame(p, &frame))
goto out;
if (!in_sched_functions(frame.pc)) {
ret = frame.pc;
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 28619b5b6746..320df70c11e6 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -127,7 +127,7 @@ static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
return ((addr & ~(THREAD_SIZE - 1)) ==
(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) ||
- on_irq_stack(addr, raw_smp_processor_id());
+ on_irq_stack(addr);
}
/**
diff --git a/arch/arm64/kernel/return_address.c b/arch/arm64/kernel/return_address.c
index 12a87f2600f2..933adbc0f654 100644
--- a/arch/arm64/kernel/return_address.c
+++ b/arch/arm64/kernel/return_address.c
@@ -42,7 +42,6 @@ void *return_address(unsigned int level)
data.addr = NULL;
frame.fp = (unsigned long)__builtin_frame_address(0);
- frame.sp = current_stack_pointer;
frame.pc = (unsigned long)return_address; /* dummy */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
frame.graph = current->curr_ret_stack;
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 09d37d66b630..35588caad9d0 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -42,9 +42,10 @@
*/
int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
{
- unsigned long high, low;
unsigned long fp = frame->fp;
- unsigned long irq_stack_ptr;
+
+ if (fp & 0xf)
+ return -EINVAL;
if (!tsk)
tsk = current;
@@ -53,22 +54,10 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
* Switching between stacks is valid when tracing current and in
* non-preemptible context.
*/
- if (tsk == current && !preemptible())
- irq_stack_ptr = IRQ_STACK_PTR(smp_processor_id());
- else
- irq_stack_ptr = 0;
-
- low = frame->sp;
- /* irq stacks are not THREAD_SIZE aligned */
- if (on_irq_stack(frame->sp, raw_smp_processor_id()))
- high = irq_stack_ptr;
- else
- high = ALIGN(low, THREAD_SIZE) - 0x20;
-
- if (fp < low || fp > high || fp & 0xf)
+ if (!(tsk == current && !preemptible() && on_irq_stack(fp)) &&
+ !on_task_stack(tsk, fp))
return -EINVAL;
- frame->sp = fp + 0x10;
frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8));
@@ -86,34 +75,13 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
/*
- * Check whether we are going to walk through from interrupt stack
- * to task stack.
- * If we reach the end of the stack - and its an interrupt stack,
- * unpack the dummy frame to find the original elr.
- *
- * Check the frame->fp we read from the bottom of the irq_stack,
- * and the original task stack pointer are both in current->stack.
+ * Frames created upon entry from EL0 have NULL FP and PC values, so
+ * don't bother reporting these. Frames created by __noreturn functions
+ * might have a valid FP even if PC is bogus, so only terminate where
+ * both are NULL.
*/
- if (frame->sp == irq_stack_ptr) {
- struct pt_regs *irq_args;
- unsigned long orig_sp = IRQ_STACK_TO_TASK_STACK(irq_stack_ptr);
-
- if (object_is_on_stack((void *)orig_sp) &&
- object_is_on_stack((void *)frame->fp)) {
- frame->sp = orig_sp;
-
- /* orig_sp is the saved pt_regs, find the elr */
- irq_args = (struct pt_regs *)orig_sp;
- frame->pc = irq_args->pc;
- } else {
- /*
- * This frame has a non-standard format, and we
- * didn't fix it, because the data looked wrong.
- * Refuse to output this frame.
- */
- return -EINVAL;
- }
- }
+ if (!frame->fp && !frame->pc)
+ return -EINVAL;
return 0;
}
@@ -167,7 +135,6 @@ void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
data.no_sched_functions = 0;
frame.fp = regs->regs[29];
- frame.sp = regs->sp;
frame.pc = regs->pc;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
frame.graph = current->curr_ret_stack;
@@ -192,12 +159,10 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
if (tsk != current) {
data.no_sched_functions = 1;
frame.fp = thread_saved_fp(tsk);
- frame.sp = thread_saved_sp(tsk);
frame.pc = thread_saved_pc(tsk);
} else {
data.no_sched_functions = 0;
frame.fp = (unsigned long)__builtin_frame_address(0);
- frame.sp = current_stack_pointer;
frame.pc = (unsigned long)save_stack_trace_tsk;
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
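
For illustration only (not part of the patch): with frame.sp gone, unwind_frame() above reduces to walking AAPCS64 frame records: each record is a {previous fp, return address} pair, the frame pointer must stay 16-byte aligned and on a known stack, and the walk ends at the all-zero record planted in pt_regs->stackframe for EL0 entries. A hypothetical user-space sketch of that walk (names invented):

  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  /* AAPCS64 frame record: {previous frame pointer, return address} */
  struct frame_record {
      uint64_t fp;
      uint64_t lr;
  };

  /* 'on_known_stack' stands in for the on_task_stack()/on_irq_stack()
   * validation performed by the real unwinder */
  static void walk_frames(uint64_t fp, bool (*on_known_stack)(uint64_t))
  {
      while (fp && !(fp & 0xf) && on_known_stack(fp)) {
          const struct frame_record *rec =
              (const struct frame_record *)(uintptr_t)fp;

          if (!rec->fp && !rec->lr)  /* terminal record from kernel_entry */
              break;

          printf("return address: %#llx\n", (unsigned long long)rec->lr);
          fp = rec->fp;
      }
  }
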
diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c
index da33c90248e9..a4391280fba9 100644
--- a/arch/arm64/kernel/time.c
+++ b/arch/arm64/kernel/time.c
@@ -50,7 +50,6 @@ unsigned long profile_pc(struct pt_regs *regs)
return regs->pc;
frame.fp = regs->regs[29];
- frame.sp = regs->sp;
frame.pc = regs->pc;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
frame.graph = -1; /* no task info */
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index ccb9727d67b2..9f023d128c8c 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -143,7 +143,6 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
struct stackframe frame;
- unsigned long irq_stack_ptr;
int skip;
pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
@@ -154,25 +153,14 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
if (!try_get_task_stack(tsk))
return;
- /*
- * Switching between stacks is valid when tracing current and in
- * non-preemptible context.
- */
- if (tsk == current && !preemptible())
- irq_stack_ptr = IRQ_STACK_PTR(smp_processor_id());
- else
- irq_stack_ptr = 0;
-
if (tsk == current) {
frame.fp = (unsigned long)__builtin_frame_address(0);
- frame.sp = current_stack_pointer;
frame.pc = (unsigned long)dump_backtrace;
} else {
/*
* task blocked in __switch_to
*/
frame.fp = thread_saved_fp(tsk);
- frame.sp = thread_saved_sp(tsk);
frame.pc = thread_saved_pc(tsk);
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -182,13 +170,12 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
skip = !!regs;
printk("Call trace:\n");
while (1) {
- unsigned long where = frame.pc;
unsigned long stack;
int ret;
/* skip until specified stack frame */
if (!skip) {
- dump_backtrace_entry(where);
+ dump_backtrace_entry(frame.pc);
} else if (frame.fp == regs->regs[29]) {
skip = 0;
/*
@@ -203,20 +190,13 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
ret = unwind_frame(tsk, &frame);
if (ret < 0)
break;
- stack = frame.sp;
- if (in_exception_text(where)) {
- /*
- * If we switched to the irq_stack before calling this
- * exception handler, then the pt_regs will be on the
- * task stack. The easiest way to tell is if the large
- * pt_regs would overlap with the end of the irq_stack.
- */
- if (stack < irq_stack_ptr &&
- (stack + sizeof(struct pt_regs)) > irq_stack_ptr)
- stack = IRQ_STACK_TO_TASK_STACK(irq_stack_ptr);
+ if (in_entry_text(frame.pc)) {
+ stack = frame.fp - offsetof(struct pt_regs, stackframe);
- dump_mem("", "Exception stack", stack,
- stack + sizeof(struct pt_regs));
+ if (on_task_stack(tsk, stack) ||
+ (tsk == current && !preemptible() && on_irq_stack(stack)))
+ dump_mem("", "Exception stack", stack,
+ stack + sizeof(struct pt_regs));
}
}
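
For illustration only (not part of the patch): the 'stack = frame.fp - offsetof(struct pt_regs, stackframe)' step above works because kernel_entry now chains the frame pointer through the stackframe member embedded in pt_regs, so the enclosing pt_regs can be recovered by subtracting that member's offset, which is the same arithmetic container_of() performs. A hypothetical user-space sketch with an invented register-frame layout:

  #include <stddef.h>
  #include <stdint.h>

  /* invented stand-in for struct pt_regs; what matters is that the
   * {fp, lr} frame record sits at a known member offset */
  struct fake_regs {
      uint64_t regs[31];
      uint64_t sp;
      uint64_t pc;
      uint64_t pstate;
      uint64_t stackframe[2];  /* chained by the entry code */
  };

  /* given a frame pointer that addresses 'stackframe', recover the
   * enclosing register frame by subtracting the member offset */
  static struct fake_regs *regs_of_frame(uint64_t fp)
  {
      return (struct fake_regs *)(uintptr_t)
             (fp - offsetof(struct fake_regs, stackframe));
  }
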