author     Linus Torvalds <torvalds@linux-foundation.org>  2015-06-25 13:54:37 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-06-25 13:54:37 -0700
commit     77d431641e2b402fe98af3540e8fb1c77bf92c25 (patch)
tree       e12e03874dab30dc42247fa130dcd09d719a9bb1 /arch
parent     55a7d4b85ca1f723d26b8956e8faeff730d0d240 (diff)
parent     f01cae4e1a9bc845ee125cdb763ccf18298d3fdc (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc
Pull sparc fixes from David Miller:
 "Sparc perf stack traversal fixes from David Ahern"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc:
  sparc64: perf: Use UREG_FP rather than UREG_I6
  sparc64: perf: Add sanity checking on addresses in user stack
  sparc64: Convert BUG_ON to warning
  sparc: perf: Disable pagefaults while walking userspace stacks
Diffstat (limited to 'arch')
-rw-r--r--  arch/sparc/include/asm/uaccess_64.h   22
-rw-r--r--  arch/sparc/kernel/perf_event.c        24
-rw-r--r--  arch/sparc/mm/fault_64.c               5
3 files changed, 46 insertions, 5 deletions
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
index a35194b7dba0..ea6e9a20f3ff 100644
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -49,6 +49,28 @@ do { \
        __asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "r" ((val).seg)); \
} while(0)
+/*
+ * Test whether a block of memory is a valid user space address.
+ * Returns 0 if the range is valid, nonzero otherwise.
+ */
+static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
+{
+       if (__builtin_constant_p(size))
+               return addr > limit - size;
+
+       addr += size;
+       if (addr < size)
+               return true;
+
+       return addr > limit;
+}
+
+#define __range_not_ok(addr, size, limit)                               \
+({                                                                      \
+       __chk_user_ptr(addr);                                            \
+       __chk_range_not_ok((unsigned long __force)(addr), size, limit);  \
+})
+
static inline int __access_ok(const void __user * addr, unsigned long size)
{
        return 1;
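
For reference, __chk_range_not_ok() above flags a block as invalid either when addr + size wraps past zero or when the end of the block lands above the limit (TASK_SIZE at the perf call site further down); the __builtin_constant_p() branch is just a shortcut for compile-time-constant sizes. Below is a minimal user-space sketch of the same overflow-safe comparison; range_not_ok() and USER_LIMIT are made-up stand-ins for illustration, not kernel symbols.

/*
 * Standalone illustration of the overflow-safe range check added above.
 * USER_LIMIT is a made-up stand-in for TASK_SIZE, not a kernel constant.
 */
#include <stdbool.h>
#include <stdio.h>

#define USER_LIMIT 0x0000800000000000UL  /* hypothetical upper bound */

static bool range_not_ok(unsigned long addr, unsigned long size,
                         unsigned long limit)
{
        /* addr + size may wrap around zero; detect that before comparing */
        addr += size;
        if (addr < size)
                return true;            /* overflow => invalid */

        return addr > limit;            /* end of block past the limit? */
}

int main(void)
{
        printf("%d\n", range_not_ok(0x1000, 64, USER_LIMIT));     /* 0: valid   */
        printf("%d\n", range_not_ok(~0UL - 8, 64, USER_LIMIT));   /* 1: wraps   */
        printf("%d\n", range_not_ok(USER_LIMIT, 1, USER_LIMIT));  /* 1: too big */
        return 0;
}
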
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 59cf917a77b5..689db65f8529 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -21,7 +21,7 @@
#include <asm/stacktrace.h>
#include <asm/cpudata.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/atomic.h>
#include <asm/nmi.h>
#include <asm/pcr.h>
@@ -1741,18 +1741,31 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry,
        } while (entry->nr < PERF_MAX_STACK_DEPTH);
}
+static inline int
+valid_user_frame(const void __user *fp, unsigned long size)
+{
+       /* addresses should be at least 4-byte aligned */
+       if (((unsigned long) fp) & 3)
+               return 0;
+
+       return (__range_not_ok(fp, size, TASK_SIZE) == 0);
+}
+
static void perf_callchain_user_64(struct perf_callchain_entry *entry,
struct pt_regs *regs)
{
        unsigned long ufp;
-       ufp = regs->u_regs[UREG_I6] + STACK_BIAS;
+       ufp = regs->u_regs[UREG_FP] + STACK_BIAS;
        do {
                struct sparc_stackf __user *usf;
                struct sparc_stackf sf;
                unsigned long pc;
                usf = (struct sparc_stackf __user *)ufp;
+               if (!valid_user_frame(usf, sizeof(sf)))
+                       break;
+
                if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
                        break;
@@ -1767,7 +1780,7 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
                                   struct pt_regs *regs)
{
        unsigned long ufp;
-       ufp = regs->u_regs[UREG_I6] & 0xffffffffUL;
+       ufp = regs->u_regs[UREG_FP] & 0xffffffffUL;
        do {
                unsigned long pc;
@@ -1803,8 +1816,13 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
                return;
        flushw_user();
+
+       pagefault_disable();
+
        if (test_thread_flag(TIF_32BIT))
                perf_callchain_user_32(entry, regs);
        else
                perf_callchain_user_64(entry, regs);
+
+       pagefault_enable();
}
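
Taken together, the perf_event.c changes make the user stack walk defensive: the starting frame comes from UREG_FP, each candidate frame pointer is checked for alignment and a plausible user address before it is read, and the whole walk sits between pagefault_disable()/pagefault_enable() so a faulting access fails immediately instead of being serviced from the sampling path. A rough user-space sketch of the validate-before-dereference loop follows; fake_frame, frame_ok() and FAKE_LIMIT are invented for illustration and are not the sparc structures.

/*
 * User-space sketch of the guarded stack walk above: validate each frame
 * pointer (alignment + range) before dereferencing it, and stop on the
 * first bad frame.  The frame layout is a made-up two-word record.
 */
#include <stdint.h>
#include <stdio.h>

struct fake_frame {
        struct fake_frame *fp;   /* saved frame pointer of the caller */
        uintptr_t          pc;   /* saved return address */
};

#define FAKE_LIMIT ((uintptr_t)-4096)    /* made-up upper bound, not TASK_SIZE */

/* Alignment + range check before each dereference, like valid_user_frame(). */
static int frame_ok(const void *fp, size_t size)
{
        uintptr_t addr = (uintptr_t)fp;

        if (addr == 0 || (addr & 3))     /* NULL or not 4-byte aligned */
                return 0;
        return addr + size >= addr && addr + size <= FAKE_LIMIT;
}

static void walk(const struct fake_frame *fp, int max_depth)
{
        while (max_depth-- > 0 && frame_ok(fp, sizeof(*fp))) {
                printf("pc = %#lx\n", (unsigned long)fp->pc);
                fp = fp->fp;             /* follow the caller's frame */
        }
}

int main(void)
{
        /* Hand-built three-frame chain terminated by a NULL frame pointer. */
        struct fake_frame f3 = { NULL, 0x3000 };
        struct fake_frame f2 = { &f3,  0x2000 };
        struct fake_frame f1 = { &f2,  0x1000 };

        walk(&f1, 16);
        return 0;
}

In the kernel the direct dereference is replaced by __copy_from_user_inatomic(), which simply returns an error for an unreadable address once page faults are disabled.
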
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index e9268ea1a68d..dbabe5713a15 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -413,8 +413,9 @@ good_area:
         * that here.
         */
        if ((fault_code & FAULT_CODE_ITLB) && !(vma->vm_flags & VM_EXEC)) {
-               BUG_ON(address != regs->tpc);
-               BUG_ON(regs->tstate & TSTATE_PRIV);
+               WARN(address != regs->tpc,
+                    "address (%lx) != regs->tpc (%lx)\n", address, regs->tpc);
+               WARN_ON(regs->tstate & TSTATE_PRIV);
                goto bad_area;
}
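
The fault_64.c hunk trades a hard BUG_ON() for WARN()/WARN_ON(): the inconsistency is still reported, with the offending address and regs->tpc in the message, but the handler then continues to the existing bad_area path instead of stopping the kernel. A toy user-space sketch of that report-and-keep-going pattern follows; warn_like() and handle_bad_exec_fault() are made up for illustration and are not kernel APIs.

/*
 * Toy illustration of "report it, then take the normal error path".
 * warn_like() is a made-up stand-in for the kernel's WARN().
 */
#include <stdbool.h>
#include <stdio.h>

static bool warn_like(bool condition, const char *msg)
{
        if (condition)
                fprintf(stderr, "WARNING: %s\n", msg);   /* log, don't abort */
        return condition;
}

/* Returns an error to the caller instead of terminating the process. */
static int handle_bad_exec_fault(unsigned long address, unsigned long tpc)
{
        warn_like(address != tpc, "address != tpc on ITLB fault");

        /* equivalent of "goto bad_area": fall through to the error exit */
        return -1;
}

int main(void)
{
        printf("fault handled, rc = %d\n", handle_bad_exec_fault(0x1000, 0x2000));
        return 0;
}
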