author		Heiko Carstens <hca@linux.ibm.com>	2024-04-29 14:28:48 +0200
committer	Alexander Gordeev <agordeev@linux.ibm.com>	2024-05-14 13:37:07 +0200
commit		62b672c4ba90e726cc39b5c3d6dffd1ca817e143 (patch)
tree		897868b63b1b6ae04a48a9f002bf25e830fb1056
parent		be72ea09c1a5273abf8c6c52ef53e36c701cbf6a (diff)
s390/stacktrace: Detect vdso stack frames
Clear the backchain of the extra stack frame added by the vdso user
wrapper code. This allows the user stack walker to detect and skip the
non-standard stack frame. Without this an incorrect instruction pointer
would be added to stack traces, and stack frame walking would continue
with a more or less random back chain.

Fixes: aa44433ac4ee ("s390: add USER_STACKTRACE support")
Reviewed-by: Jens Remus <jremus@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
-rw-r--r--	arch/s390/include/asm/processor.h           |  1 +
-rw-r--r--	arch/s390/kernel/asm-offsets.c              |  1 +
-rw-r--r--	arch/s390/kernel/stacktrace.c               | 28 ++++++++++++++++++++++++----
-rw-r--r--	arch/s390/kernel/vdso.c                     | 13 +++++++++----
-rw-r--r--	arch/s390/kernel/vdso64/vdso_user_wrapper.S |  1 +
5 files changed, 36 insertions(+), 8 deletions(-)
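For reference, the two user stack frame layouts involved look roughly as follows. This is a sketch based on the definitions in arch/s390/include/asm/stacktrace.h; the padding member names are assumptions and not part of this patch. The vdso wrapper pushes a standard frame plus one extra slot for the saved r14, and after this patch the wrapper frame's back chain is always zero:

/* Sketch of the s390 user stack frame layouts (assumed member names). */
struct stack_frame_user {
	unsigned long back_chain;	/* zero in the vdso wrapper frame */
	unsigned long empty1[5];
	unsigned long gprs[10];		/* gprs[8] holds the saved r14 (return address) */
	unsigned long empty2[4];
};

struct stack_frame_vdso_wrapper {
	struct stack_frame_user sf;
	unsigned long return_address;	/* r14 saved by the wrapper code */
};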
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index db9982f0e8cd..bbbdc5abe2b2 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -98,6 +98,7 @@ void cpu_detect_mhz_feature(void);
extern const struct seq_operations cpuinfo_op;
extern void execve_tail(void);
+unsigned long vdso_text_size(void);
unsigned long vdso_size(void);
/*
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 28017c418442..2f65bca2f3f1 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -66,6 +66,7 @@ int main(void)
OFFSET(__SF_SIE_CONTROL_PHYS, stack_frame, sie_control_block_phys);
DEFINE(STACK_FRAME_OVERHEAD, sizeof(struct stack_frame));
BLANK();
+ OFFSET(__SFUSER_BACKCHAIN, stack_frame_user, back_chain);
DEFINE(STACK_FRAME_USER_OVERHEAD, sizeof(struct stack_frame_user));
OFFSET(__SFVDSO_RETURN_ADDRESS, stack_frame_vdso_wrapper, return_address);
DEFINE(STACK_FRAME_VDSO_OVERHEAD, sizeof(struct stack_frame_vdso_wrapper));
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
index b4485b0c7f06..640363b2a105 100644
--- a/arch/s390/kernel/stacktrace.c
+++ b/arch/s390/kernel/stacktrace.c
@@ -92,10 +92,16 @@ static inline bool ip_invalid(unsigned long ip)
return false;
}
+static inline bool ip_within_vdso(unsigned long ip)
+{
+ return in_range(ip, current->mm->context.vdso_base, vdso_text_size());
+}
+
void arch_stack_walk_user_common(stack_trace_consume_fn consume_entry, void *cookie,
struct perf_callchain_entry_ctx *entry,
const struct pt_regs *regs, bool perf)
{
+ struct stack_frame_vdso_wrapper __user *sf_vdso;
struct stack_frame_user __user *sf;
unsigned long ip, sp;
bool first = true;
@@ -112,11 +118,25 @@ void arch_stack_walk_user_common(stack_trace_consume_fn consume_entry, void *cookie,
while (1) {
if (__get_user(sp, &sf->back_chain))
break;
+ /*
+ * VDSO entry code has a non-standard stack frame layout.
+ * See VDSO user wrapper code for details.
+ */
+ if (!sp && ip_within_vdso(ip)) {
+ sf_vdso = (void __user *)sf;
+ if (__get_user(ip, &sf_vdso->return_address))
+ break;
+ sp = (unsigned long)sf + STACK_FRAME_VDSO_OVERHEAD;
+ sf = (void __user *)sp;
+ if (__get_user(sp, &sf->back_chain))
+ break;
+ } else {
+ sf = (void __user *)sp;
+ if (__get_user(ip, &sf->gprs[8]))
+ break;
+ }
/* Sanity check: ABI requires SP to be 8 byte aligned. */
- if (!sp || sp & 0x7)
- break;
- sf = (void __user *)sp;
- if (__get_user(ip, &sf->gprs[8]))
+ if (sp & 0x7)
break;
if (ip_invalid(ip)) {
/*
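A note on the helper introduced above: in_range(ip, base, size) tests the half-open interval [base, base + size). An equivalent open-coded check is shown below; the single unsigned comparison also behaves sanely if base + size would wrap around (a sketch, not kernel code):

/* Sketch: what ip_within_vdso() checks, open-coded. */
static inline bool ip_within_vdso_equiv(unsigned long ip, unsigned long base,
					unsigned long size)
{
	return (ip - base) < size;	/* unsigned: base <= ip < base + size */
}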
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index a45b3a4c91db..2f967ac2b8e3 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -210,17 +210,22 @@ static unsigned long vdso_addr(unsigned long start, unsigned long len)
return addr;
}
-unsigned long vdso_size(void)
+unsigned long vdso_text_size(void)
{
- unsigned long size = VVAR_NR_PAGES * PAGE_SIZE;
+ unsigned long size;
if (is_compat_task())
- size += vdso32_end - vdso32_start;
+ size = vdso32_end - vdso32_start;
else
- size += vdso64_end - vdso64_start;
+ size = vdso64_end - vdso64_start;
return PAGE_ALIGN(size);
}
+unsigned long vdso_size(void)
+{
+ return vdso_text_size() + VVAR_NR_PAGES * PAGE_SIZE;
+}
+
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
unsigned long addr = VDSO_BASE;
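The split matters for the stack walker: only the vdso text mapping can contain a valid user instruction pointer, while the vvar data pages, which vdso_size() still accounts for when the vdso is mapped, are never executed. The relationship between the two helpers, spelled out (illustrative only, 64-bit case):

/* Illustrative only, not kernel code: */
unsigned long text  = PAGE_ALIGN(vdso64_end - vdso64_start);	/* vdso_text_size() */
unsigned long total = text + VVAR_NR_PAGES * PAGE_SIZE;	/* vdso_size() */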
diff --git a/arch/s390/kernel/vdso64/vdso_user_wrapper.S b/arch/s390/kernel/vdso64/vdso_user_wrapper.S
index deee8ca9cdbf..e26e68675c08 100644
--- a/arch/s390/kernel/vdso64/vdso_user_wrapper.S
+++ b/arch/s390/kernel/vdso64/vdso_user_wrapper.S
@@ -23,6 +23,7 @@ __kernel_\func:
CFI_VAL_OFFSET 15,-STACK_FRAME_USER_OVERHEAD
stg %r14,__SFVDSO_RETURN_ADDRESS(%r15)
CFI_REL_OFFSET 14,__SFVDSO_RETURN_ADDRESS
+ xc __SFUSER_BACKCHAIN(8,%r15),__SFUSER_BACKCHAIN(%r15)
brasl %r14,__s390_vdso_\func
lg %r14,__SFVDSO_RETURN_ADDRESS(%r15)
CFI_RESTORE 14
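The added xc (exclusive or character) instruction XORs the 8-byte back-chain slot with itself, storing zero without needing a scratch register. In C terms it does roughly the following, using the frame layout sketched earlier:

/* Sketch: C equivalent of the added xc. sf points at the wrapper frame. */
static inline void clear_vdso_back_chain(struct stack_frame_user *sf)
{
	sf->back_chain = 0;	/* x XOR x == 0: marks the frame for the walker */
}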