author    Michael Ellerman <mpe@ellerman.id.au>  2015-03-28 21:35:16 +1100
committer Michael Ellerman <mpe@ellerman.id.au>  2015-03-28 22:03:40 +1100
commit    529d235a0e190ded1d21ccc80a73e625ebcad09b
tree      c807f7526f29b79a7ce0b233daf8a1c3030d8dd0 /arch/powerpc/kernel
parent    c03e73740d24fbe990291cd9ac2d6ae0d95b975f
powerpc: Add a proper syscall for switching endianness
We currently have a "special" syscall for switching endianness: syscall number 0x1ebe, which is handled explicitly in the 64-bit syscall exception entry.

That has a few problems. Firstly, the syscall number is outside the usual range, which confuses various tools; strace, for example, doesn't recognise the syscall at all. Secondly, it's handled as a special case in the syscall exception entry, which is complicated enough without it.

As a first step toward removing the special syscall, we need to add a regular syscall that implements the same functionality. The logic is simple: it toggles the MSR_LE bit in the userspace MSR. That matches the special syscall, with the caveat that the special syscall clobbers fewer registers; this version clobbers r9-r12, XER, CTR, and CR0-1,5-7.

Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
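For context, a minimal userspace sketch of invoking the new syscall follows. It is hypothetical, not part of this patch: the syscall number 363 is an assumption (the unistd.h change falls outside this diffstat), and because the instruction after the sc is fetched in the new endianness, the caller has to follow the syscall with pre-byte-reversed instructions that switch straight back; the kernel's switch_endian selftest uses the same trick.

	/*
	 * Hypothetical sketch, not part of this patch.  Assumes
	 * __NR_switch_endian == 363 and a powerpc64 toolchain.
	 */
	static void endian_round_trip(void)
	{
		asm volatile(
			"li	0,363\n\t"	/* r0 = __NR_switch_endian (assumed) */
			"sc\n\t"		/* returns in the opposite endianness */
			/*
			 * Instruction fetch is now byte-reversed, so emit the
			 * "switch back" pair with each word's bytes swapped:
			 */
			".long	0x6b010038\n\t"	/* li r0,363 (0x3800016b reversed) */
			".long	0x02000044\n\t"	/* sc        (0x44000002 reversed) */
			: : : "r0", "r3", "r9", "r10", "r11", "r12",
			      "xer", "ctr", "cr0", "cr1", "cr5", "cr6", "cr7");
	}

The clobber list mirrors the registers the commit message says this version clobbers, plus r0 and r3 from the syscall ABI.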
Diffstat (limited to 'arch/powerpc/kernel')
 arch/powerpc/kernel/entry_64.S   |  5 +++++
 arch/powerpc/kernel/syscalls.c   | 17 +++++++++++++++++
 arch/powerpc/kernel/systbl.S     |  2 ++
 arch/powerpc/kernel/systbl_chk.c |  2 ++
 4 files changed, 26 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index d180caf2d6de..afbc20019c2e 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -356,6 +356,11 @@ _GLOBAL(ppc64_swapcontext)
 	bl	sys_swapcontext
 	b	.Lsyscall_exit
 
+_GLOBAL(ppc_switch_endian)
+	bl	save_nvgprs
+	bl	sys_switch_endian
+	b	.Lsyscall_exit
+
 _GLOBAL(ret_from_fork)
 	bl	schedule_tail
 	REST_NVGPRS(r1)
diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c
index b2702e87db0d..5fa92706444b 100644
--- a/arch/powerpc/kernel/syscalls.c
+++ b/arch/powerpc/kernel/syscalls.c
@@ -121,3 +121,20 @@ long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low,
 	return sys_fadvise64(fd, (u64)offset_high << 32 | offset_low,
 			     (u64)len_high << 32 | len_low, advice);
 }
+
+long sys_switch_endian(void)
+{
+	struct thread_info *ti;
+
+	current->thread.regs->msr ^= MSR_LE;
+
+	/*
+	 * Set TIF_RESTOREALL so that r3 isn't clobbered on return to
+	 * userspace. That also has the effect of restoring the non-volatile
+	 * GPRs, so we saved them on the way in here.
+	 */
+	ti = current_thread_info();
+	ti->flags |= _TIF_RESTOREALL;
+
+	return 0;
+}
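To make the MSR_LE toggle concrete: the bit selects the byte order the CPU applies to loads, stores and instruction fetch. A small plain-C illustration of the data effect (runs on any host, nothing from this patch assumed):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		const uint8_t bytes[4] = { 0x12, 0x34, 0x56, 0x78 };
		uint32_t word;

		/* Same four bytes, two readings: a big-endian load sees
		 * 0x12345678, a little-endian load 0x78563412.  MSR_LE
		 * picks which interpretation the CPU applies. */
		memcpy(&word, bytes, sizeof(word));
		printf("this host reads the word as 0x%08x\n", word);
		return 0;
	}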
diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S
index 7ab5d434e2ee..4d6b1d3a747f 100644
--- a/arch/powerpc/kernel/systbl.S
+++ b/arch/powerpc/kernel/systbl.S
@@ -22,6 +22,7 @@
 #define PPC_SYS(func)		.llong	DOTSYM(ppc_##func),DOTSYM(ppc_##func)
 #define OLDSYS(func)		.llong	DOTSYM(sys_ni_syscall),DOTSYM(sys_ni_syscall)
 #define SYS32ONLY(func)		.llong	DOTSYM(sys_ni_syscall),DOTSYM(compat_sys_##func)
+#define PPC64ONLY(func)		.llong	DOTSYM(ppc_##func),DOTSYM(sys_ni_syscall)
 #define SYSX(f, f3264, f32)	.llong	DOTSYM(f),DOTSYM(f3264)
 #else
 #define SYSCALL(func)		.long	sys_##func
@@ -29,6 +30,7 @@
 #define PPC_SYS(func)		.long	ppc_##func
 #define OLDSYS(func)		.long	sys_##func
 #define SYS32ONLY(func)		.long	sys_##func
+#define PPC64ONLY(func)		.long	sys_ni_syscall
 #define SYSX(f, f3264, f32)	.long	f32
 #endif
 #define SYSCALL_SPU(func)	SYSCALL(func)
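On 64-bit kernels each row of the syscall table is a pair of entries, native 64-bit and 32-bit compat. The new PPC64ONLY() macro wires the native slot to the ppc_ assembly wrapper and leaves compat pointing at sys_ni_syscall, so 32-bit tasks get ENOSYS. A hypothetical use for this syscall (the actual systbl.h hunk is outside this diffstat) would expand as:

	PPC64ONLY(switch_endian)
	/* CONFIG_PPC64: .llong DOTSYM(ppc_switch_endian),DOTSYM(sys_ni_syscall) */
	/* 32-bit:       .long  sys_ni_syscall */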
diff --git a/arch/powerpc/kernel/systbl_chk.c b/arch/powerpc/kernel/systbl_chk.c
index 238aa63ced8f..2384129f5893 100644
--- a/arch/powerpc/kernel/systbl_chk.c
+++ b/arch/powerpc/kernel/systbl_chk.c
@@ -21,9 +21,11 @@
 #ifdef CONFIG_PPC64
 #define OLDSYS(func)		-1
 #define SYS32ONLY(func)		-1
+#define PPC64ONLY(func)		__NR_##func
 #else
 #define OLDSYS(func)		__NR_old##func
 #define SYS32ONLY(func)		__NR_##func
+#define PPC64ONLY(func)		-1
 #endif
 #define SYSX(f, f3264, f32)	-1
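systbl_chk.c feeds the same table entries through the preprocessor so each one becomes a bare syscall number, which the accompanying systbl_chk.sh script then checks for ordering. Under the new macro the checker would see, assuming __NR_switch_endian is 363:

	PPC64ONLY(switch_endian)
	/* CONFIG_PPC64: expands to __NR_switch_endian (363, assumed) */
	/* 32-bit:       expands to -1, a slot the check script skips */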