author     Linus Torvalds <torvalds@linux-foundation.org>  2022-03-25 10:01:34 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2022-03-25 10:01:34 -0700
commit     d710d370c4911e83da5d2bc43d4a2c3b56bd27e7 (patch)
tree       9e7a702654feb88e2555c1bf41f71ef4a58b25aa /arch/s390/kernel
parent     744465da705f7d8cd893e97738a47b91f3321ce2 (diff)
parent     c65f677b62d6180cc174e06f953f7fe860adf6d1 (diff)
Merge tag 's390-5.18-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 updates from Vasily Gorbik:

 - Raise minimum supported machine generation to z10, which comes with
   various cleanups and code simplifications (usercopy/spectre
   mitigation/etc).

 - Rework extables and get rid of anonymous out-of-line fixups.

 - Page table helpers cleanup. Add set_pXd()/set_pte() helper
   functions. Convert pte_val()/pXd_val() macros to functions.

 - Optimize kretprobe handling by avoiding extra kprobe on
   __kretprobe_trampoline.

 - Add support for CEX8 crypto cards.

 - Allow triggering AP bus rescan via writing to /sys/bus/ap/scans.

 - Add CONFIG_EXPOLINE_EXTERN option to build the kernel without COMDAT
   group sections, which simplifies kpatch support.

 - Always use the packed stack layout and extend kernel unwinder tests.

 - Add sanity checks for ftrace code patching.

 - Add s390dbf debug log for the vfio_ap device driver.

 - Various virtual vs physical address confusion fixes.

 - Various small fixes and improvements all over the code.

* tag 's390-5.18-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (69 commits)
  s390/test_unwind: add kretprobe tests
  s390/kprobes: Avoid additional kprobe in kretprobe handling
  s390: convert ".insn" encoding to instruction names
  s390: assume stckf is always present
  s390/nospec: move to single register thunks
  s390: raise minimum supported machine generation to z10
  s390/uaccess: Add copy_from/to_user_key functions
  s390/nospec: align and size extern thunks
  s390/nospec: add an option to use thunk-extern
  s390/nospec: generate single register thunks if possible
  s390/pci: make zpci_set_irq()/zpci_clear_irq() static
  s390: remove unused expoline to BC instructions
  s390/irq: use assignment instead of cast
  s390/traps: get rid of magic cast for per code
  s390/traps: get rid of magic cast for program interruption code
  s390/signal: fix typo in comments
  s390/asm-offsets: remove unused defines
  s390/test_unwind: avoid build warning with W=1
  s390: remove .fixup section
  s390/bpf: encode register within extable entry
  ...
Diffstat (limited to 'arch/s390/kernel')
-rw-r--r--  arch/s390/kernel/Makefile           |   4
-rw-r--r--  arch/s390/kernel/asm-offsets.c      |   8
-rw-r--r--  arch/s390/kernel/base.S             |  25
-rw-r--r--  arch/s390/kernel/cache.c            |   7
-rw-r--r--  arch/s390/kernel/compat_signal.c    |   2
-rw-r--r--  arch/s390/kernel/crash_dump.c       |  58
-rw-r--r--  arch/s390/kernel/diag.c             |   1
-rw-r--r--  arch/s390/kernel/early.c            |  24
-rw-r--r--  arch/s390/kernel/entry.S            |  18
-rw-r--r--  arch/s390/kernel/entry.h            |   1
-rw-r--r--  arch/s390/kernel/ftrace.c           |  65
-rw-r--r--  arch/s390/kernel/ftrace.h           |   2
-rw-r--r--  arch/s390/kernel/head64.S           |   1
-rw-r--r--  arch/s390/kernel/ipl.c              |   2
-rw-r--r--  arch/s390/kernel/irq.c              |   2
-rw-r--r--  arch/s390/kernel/kprobes.c          |  43
-rw-r--r--  arch/s390/kernel/lgr.c              |   3
-rw-r--r--  arch/s390/kernel/machine_kexec.c    |   1
-rw-r--r--  arch/s390/kernel/mcount.S           |  72
-rw-r--r--  arch/s390/kernel/module.c           |  12
-rw-r--r--  arch/s390/kernel/nmi.c              |   1
-rw-r--r--  arch/s390/kernel/nospec-branch.c    |  31
-rw-r--r--  arch/s390/kernel/os_info.c          |  12
-rw-r--r--  arch/s390/kernel/perf_cpum_cf.c     |  11
-rw-r--r--  arch/s390/kernel/processor.c        |  22
-rw-r--r--  arch/s390/kernel/ptrace.c           | 164
-rw-r--r--  arch/s390/kernel/relocate_kernel.S  |   3
-rw-r--r--  arch/s390/kernel/setup.c            |   1
-rw-r--r--  arch/s390/kernel/signal.c           |   2
-rw-r--r--  arch/s390/kernel/smp.c              |  10
-rw-r--r--  arch/s390/kernel/stacktrace.c       |   1
-rw-r--r--  arch/s390/kernel/sysinfo.c          |   1
-rw-r--r--  arch/s390/kernel/text_amode31.S     |   1
-rw-r--r--  arch/s390/kernel/topology.c         |   1
-rw-r--r--  arch/s390/kernel/traps.c            |  16
-rw-r--r--  arch/s390/kernel/uprobes.c          |  16
-rw-r--r--  arch/s390/kernel/vmlinux.lds.S      |   1
-rw-r--r--  arch/s390/kernel/vtime.c            |  13
38 files changed, 284 insertions, 374 deletions
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index be8007f367aa..c8d1b6aa823e 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -57,7 +57,9 @@ obj-$(CONFIG_COMPAT) += $(compat-obj-y)
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_KPROBES) += kprobes_insn_page.o
-obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o
+obj-$(CONFIG_KPROBES) += mcount.o
+obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o
+obj-$(CONFIG_FUNCTION_TRACER) += mcount.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_UPROBES) += uprobes.o
obj-$(CONFIG_JUMP_LABEL) += jump_label.o
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 8e00bb228662..7c74f0e17e5a 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -50,9 +50,7 @@ int main(void)
BLANK();
/* idle data offsets */
OFFSET(__CLOCK_IDLE_ENTER, s390_idle_data, clock_idle_enter);
- OFFSET(__CLOCK_IDLE_EXIT, s390_idle_data, clock_idle_exit);
OFFSET(__TIMER_IDLE_ENTER, s390_idle_data, timer_idle_enter);
- OFFSET(__TIMER_IDLE_EXIT, s390_idle_data, timer_idle_exit);
OFFSET(__MT_CYCLES_ENTER, s390_idle_data, mt_cycles_enter);
BLANK();
/* hardware defined lowcore locations 0x000 - 0x1ff */
@@ -123,14 +121,12 @@ int main(void)
OFFSET(__LC_USER_ASCE, lowcore, user_asce);
OFFSET(__LC_LPP, lowcore, lpp);
OFFSET(__LC_CURRENT_PID, lowcore, current_pid);
- OFFSET(__LC_PERCPU_OFFSET, lowcore, percpu_offset);
- OFFSET(__LC_MACHINE_FLAGS, lowcore, machine_flags);
- OFFSET(__LC_PREEMPT_COUNT, lowcore, preempt_count);
OFFSET(__LC_GMAP, lowcore, gmap);
- OFFSET(__LC_BR_R1, lowcore, br_r1_trampoline);
OFFSET(__LC_LAST_BREAK, lowcore, last_break);
/* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */
OFFSET(__LC_DUMP_REIPL, lowcore, ipib);
+ OFFSET(__LC_VMCORE_INFO, lowcore, vmcore_info);
+ OFFSET(__LC_OS_INFO, lowcore, os_info);
/* hardware defined lowcore locations 0x1000 - 0x18ff */
OFFSET(__LC_MCESAD, lowcore, mcesad);
OFFSET(__LC_EXT_PARAMS2, lowcore, ext_params2);
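
The OFFSET() entries above are consumed at build time: asm-offsets.c is compiled to assembly and the constants are scraped into a generated header so .S files can address C struct members. A minimal sketch of the mechanism, following the DEFINE()/OFFSET() macros in include/linux/kbuild.h (the lowcore layout below is an illustrative stand-in, not the real one):

#include <stddef.h>

/* Each DEFINE() emits a marker line into the generated assembly; the
 * kbuild script greps those markers into asm-offsets.h. */
#define DEFINE(sym, val) \
	asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))
#define OFFSET(sym, str, mem) \
	DEFINE(sym, offsetof(struct str, mem))

struct lowcore {			/* illustrative stand-in layout */
	unsigned long ipib;
	unsigned long vmcore_info;
	unsigned long os_info;
};

int main(void)
{
	OFFSET(__LC_VMCORE_INFO, lowcore, vmcore_info);
	OFFSET(__LC_OS_INFO, lowcore, os_info);
	return 0;
}
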
diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S
index d255c69c1779..172c23c8ca00 100644
--- a/arch/s390/kernel/base.S
+++ b/arch/s390/kernel/base.S
@@ -3,8 +3,7 @@
* arch/s390/kernel/base.S
*
* Copyright IBM Corp. 2006, 2007
- * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
- * Michael Holzheu <holzheu@de.ibm.com>
+ * Author(s): Michael Holzheu <holzheu@de.ibm.com>
*/
#include <linux/linkage.h>
@@ -15,18 +14,28 @@
GEN_BR_THUNK %r9
GEN_BR_THUNK %r14
+__PT_R0 = __PT_GPRS
+__PT_R8 = __PT_GPRS + 64
+
ENTRY(s390_base_pgm_handler)
- stmg %r0,%r15,__LC_SAVE_AREA_SYNC
- basr %r13,0
-0: aghi %r15,-STACK_FRAME_OVERHEAD
+ stmg %r8,%r15,__LC_SAVE_AREA_SYNC
+ aghi %r15,-(STACK_FRAME_OVERHEAD+__PT_SIZE)
+ la %r11,STACK_FRAME_OVERHEAD(%r15)
+ xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+ stmg %r0,%r7,__PT_R0(%r11)
+ mvc __PT_PSW(16,%r11),__LC_PGM_OLD_PSW
+ mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
+ lgr %r2,%r11
larl %r1,s390_base_pgm_handler_fn
lg %r9,0(%r1)
ltgr %r9,%r9
jz 1f
BASR_EX %r14,%r9
- lmg %r0,%r15,__LC_SAVE_AREA_SYNC
- lpswe __LC_PGM_OLD_PSW
-1: lpswe disabled_wait_psw-0b(%r13)
+ mvc __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
+ lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
+ lpswe __LC_RETURN_PSW
+1: larl %r13,disabled_wait_psw
+ lpswe 0(%r13)
ENDPROC(s390_base_pgm_handler)
.align 8
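
With this rework, s390_base_pgm_handler builds a struct pt_regs on the stack and passes it to the registered C callback in %r2. A hedged sketch of the C side after the change (the example handler body is illustrative):

struct pt_regs;

/* matches s390_base_pgm_handler_fn loaded by the assembly above */
void (*s390_base_pgm_handler_fn)(struct pt_regs *regs);

static void example_pgm_handler(struct pt_regs *regs)
{
	/* full register state is available; e.g. a fixup could rewrite
	 * regs->psw.addr before the lpswe return path restores it */
}
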
diff --git a/arch/s390/kernel/cache.c b/arch/s390/kernel/cache.c
index d66825e53fce..7ee3651d00ab 100644
--- a/arch/s390/kernel/cache.c
+++ b/arch/s390/kernel/cache.c
@@ -3,7 +3,6 @@
* Extract CPU cache information and expose them via sysfs.
*
* Copyright IBM Corp. 2012
- * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
*/
#include <linux/seq_file.h>
@@ -71,8 +70,6 @@ void show_cacheinfo(struct seq_file *m)
struct cacheinfo *cache;
int idx;
- if (!test_facility(34))
- return;
this_cpu_ci = get_cpu_cacheinfo(cpumask_any(cpu_online_mask));
for (idx = 0; idx < this_cpu_ci->num_leaves; idx++) {
cache = this_cpu_ci->info_list + idx;
@@ -132,8 +129,6 @@ int init_cache_level(unsigned int cpu)
union cache_topology ct;
enum cache_type ctype;
- if (!test_facility(34))
- return -EOPNOTSUPP;
if (!this_cpu_ci)
return -EINVAL;
ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
@@ -157,8 +152,6 @@ int populate_cache_leaves(unsigned int cpu)
union cache_topology ct;
enum cache_type ctype;
- if (!test_facility(34))
- return -EOPNOTSUPP;
ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
for (idx = 0, level = 0; level < this_cpu_ci->num_levels &&
idx < this_cpu_ci->num_leaves; idx++, level++) {
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index cca142fbb516..eee1ad3e1b29 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -89,7 +89,7 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs)
_sigregs32 user_sregs;
int i;
- /* Alwys make any pending restarted system call return -EINTR */
+ /* Always make any pending restarted system call return -EINTR */
current->restart_block.fn = do_no_restart_syscall;
if (__copy_from_user(&user_sregs, &sregs->regs, sizeof(user_sregs)))
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index af8202121642..69819b765250 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -132,28 +132,27 @@ static inline void *load_real_addr(void *addr)
/*
* Copy memory of the old, dumped system to a kernel space virtual address
*/
-int copy_oldmem_kernel(void *dst, void *src, size_t count)
+int copy_oldmem_kernel(void *dst, unsigned long src, size_t count)
{
- unsigned long from, len;
+ unsigned long len;
void *ra;
int rc;
while (count) {
- from = __pa(src);
- if (!oldmem_data.start && from < sclp.hsa_size) {
+ if (!oldmem_data.start && src < sclp.hsa_size) {
/* Copy from zfcp/nvme dump HSA area */
- len = min(count, sclp.hsa_size - from);
- rc = memcpy_hsa_kernel(dst, from, len);
+ len = min(count, sclp.hsa_size - src);
+ rc = memcpy_hsa_kernel(dst, src, len);
if (rc)
return rc;
} else {
/* Check for swapped kdump oldmem areas */
- if (oldmem_data.start && from - oldmem_data.start < oldmem_data.size) {
- from -= oldmem_data.start;
- len = min(count, oldmem_data.size - from);
- } else if (oldmem_data.start && from < oldmem_data.size) {
- len = min(count, oldmem_data.size - from);
- from += oldmem_data.start;
+ if (oldmem_data.start && src - oldmem_data.start < oldmem_data.size) {
+ src -= oldmem_data.start;
+ len = min(count, oldmem_data.size - src);
+ } else if (oldmem_data.start && src < oldmem_data.size) {
+ len = min(count, oldmem_data.size - src);
+ src += oldmem_data.start;
} else {
len = count;
}
@@ -163,7 +162,7 @@ int copy_oldmem_kernel(void *dst, void *src, size_t count)
} else {
ra = dst;
}
- if (memcpy_real(ra, (void *) from, len))
+ if (memcpy_real(ra, src, len))
return -EFAULT;
}
dst += len;
@@ -176,31 +175,30 @@ int copy_oldmem_kernel(void *dst, void *src, size_t count)
/*
* Copy memory of the old, dumped system to a user space virtual address
*/
-static int copy_oldmem_user(void __user *dst, void *src, size_t count)
+static int copy_oldmem_user(void __user *dst, unsigned long src, size_t count)
{
- unsigned long from, len;
+ unsigned long len;
int rc;
while (count) {
- from = __pa(src);
- if (!oldmem_data.start && from < sclp.hsa_size) {
+ if (!oldmem_data.start && src < sclp.hsa_size) {
/* Copy from zfcp/nvme dump HSA area */
- len = min(count, sclp.hsa_size - from);
- rc = memcpy_hsa_user(dst, from, len);
+ len = min(count, sclp.hsa_size - src);
+ rc = memcpy_hsa_user(dst, src, len);
if (rc)
return rc;
} else {
/* Check for swapped kdump oldmem areas */
- if (oldmem_data.start && from - oldmem_data.start < oldmem_data.size) {
- from -= oldmem_data.start;
- len = min(count, oldmem_data.size - from);
- } else if (oldmem_data.start && from < oldmem_data.size) {
- len = min(count, oldmem_data.size - from);
- from += oldmem_data.start;
+ if (oldmem_data.start && src - oldmem_data.start < oldmem_data.size) {
+ src -= oldmem_data.start;
+ len = min(count, oldmem_data.size - src);
+ } else if (oldmem_data.start && src < oldmem_data.size) {
+ len = min(count, oldmem_data.size - src);
+ src += oldmem_data.start;
} else {
len = count;
}
- rc = copy_to_user_real(dst, (void *) from, count);
+ rc = copy_to_user_real(dst, src, count);
if (rc)
return rc;
}
@@ -217,12 +215,12 @@ static int copy_oldmem_user(void __user *dst, void *src, size_t count)
ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
unsigned long offset, int userbuf)
{
- void *src;
+ unsigned long src;
int rc;
if (!csize)
return 0;
- src = (void *) (pfn << PAGE_SHIFT) + offset;
+ src = pfn_to_phys(pfn) + offset;
if (userbuf)
rc = copy_oldmem_user((void __force __user *) buf, src, csize);
else
@@ -429,10 +427,10 @@ static void *nt_prpsinfo(void *ptr)
static void *get_vmcoreinfo_old(unsigned long *size)
{
char nt_name[11], *vmcoreinfo;
+ unsigned long addr;
Elf64_Nhdr note;
- void *addr;
- if (copy_oldmem_kernel(&addr, &S390_lowcore.vmcore_info, sizeof(addr)))
+ if (copy_oldmem_kernel(&addr, __LC_VMCORE_INFO, sizeof(addr)))
return NULL;
memset(nt_name, 0, sizeof(nt_name));
if (copy_oldmem_kernel(&note, addr, sizeof(note)))
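
The signature change from void *src to unsigned long src makes the source an explicit physical address in the dumped system, part of this merge's virtual vs physical cleanup. A minimal usage sketch under that assumption (helper name and buffer are illustrative):

#include <stddef.h>

int copy_oldmem_kernel(void *dst, unsigned long src, size_t count);

/* read one word from a physical address of the crashed kernel */
static int read_old_word(unsigned long paddr, unsigned long *val)
{
	return copy_oldmem_kernel(val, paddr, sizeof(*val));
}
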
diff --git a/arch/s390/kernel/diag.c b/arch/s390/kernel/diag.c
index 76a656b2146f..a778714e4d8b 100644
--- a/arch/s390/kernel/diag.c
+++ b/arch/s390/kernel/diag.c
@@ -11,6 +11,7 @@
#include <linux/cpu.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
+#include <asm/asm-extable.h>
#include <asm/diag.h>
#include <asm/trace/diag.h>
#include <asm/sections.h>
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 3cdf68c53614..08cc86a0db90 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -2,7 +2,6 @@
/*
* Copyright IBM Corp. 2007, 2009
* Author(s): Hongjie Yang <hongjie@us.ibm.com>,
- * Heiko Carstens <heiko.carstens@de.ibm.com>
*/
#define KMSG_COMPONENT "setup"
@@ -18,6 +17,7 @@
#include <linux/pfn.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
+#include <asm/asm-extable.h>
#include <asm/diag.h>
#include <asm/ebcdic.h>
#include <asm/ipl.h>
@@ -149,22 +149,10 @@ static __init void setup_topology(void)
topology_max_mnest = max_mnest;
}
-static void early_pgm_check_handler(void)
+static void early_pgm_check_handler(struct pt_regs *regs)
{
- const struct exception_table_entry *fixup;
- unsigned long cr0, cr0_new;
- unsigned long addr;
-
- addr = S390_lowcore.program_old_psw.addr;
- fixup = s390_search_extables(addr);
- if (!fixup)
+ if (!fixup_exception(regs))
disabled_wait();
- /* Disable low address protection before storing into lowcore. */
- __ctl_store(cr0, 0, 0);
- cr0_new = cr0 & ~(1UL << 28);
- __ctl_load(cr0_new, 0, 0);
- S390_lowcore.program_old_psw.addr = extable_fixup(fixup);
- __ctl_load(cr0, 0, 0);
}
static noinline __init void setup_lowcore_early(void)
@@ -294,6 +282,11 @@ static void __init check_image_bootable(void)
disabled_wait();
}
+static void __init sort_amode31_extable(void)
+{
+ sort_extable(__start_amode31_ex_table, __stop_amode31_ex_table);
+}
+
void __init startup_init(void)
{
sclp_early_adjust_va();
@@ -302,6 +295,7 @@ void __init startup_init(void)
time_early_init();
init_kernel_storage_key();
lockdep_off();
+ sort_amode31_extable();
setup_lowcore_early();
setup_facility_list();
detect_machine_type();
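
sort_amode31_extable() is needed because fixup_exception() binary-searches the exception table by faulting instruction address, so the amode31 section's table must be sorted once before the first program check can hit it. A simplified sketch of the idea (real entries use relative 32-bit offsets rather than absolute addresses):

#include <stdlib.h>

struct ex_entry { unsigned long insn, fixup; };	/* simplified layout */

static int cmp_ex(const void *a, const void *b)
{
	const struct ex_entry *x = a, *y = b;

	return (x->insn > y->insn) - (x->insn < y->insn);
}

static void sort_table(struct ex_entry *tab, size_t n)
{
	qsort(tab, n, sizeof(*tab), cmp_ex);
}
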
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 01bae1d51113..a601a518b569 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -6,11 +6,11 @@
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
* Hartmut Penner (hp@de.ibm.com),
* Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
- * Heiko Carstens <heiko.carstens@de.ibm.com>
*/
#include <linux/init.h>
#include <linux/linkage.h>
+#include <asm/asm-extable.h>
#include <asm/alternative-asm.h>
#include <asm/processor.h>
#include <asm/cache.h>
@@ -98,11 +98,6 @@ _LPP_OFFSET = __LC_LPP
#endif
.endm
- .macro STCK savearea
- ALTERNATIVE ".insn s,0xb2050000,\savearea", \
- ".insn s,0xb27c0000,\savearea", 25
- .endm
-
/*
* The TSTMSK macro generates a test-under-mask instruction by
* calculating the memory offset for the specified mask value.
@@ -191,7 +186,6 @@ _LPP_OFFSET = __LC_LPP
#endif
GEN_BR_THUNK %r14
- GEN_BR_THUNK %r14,%r13
.section .kprobes.text, "ax"
.Ldummy:
@@ -232,7 +226,7 @@ ENTRY(__switch_to)
aghi %r3,__TASK_pid
mvc __LC_CURRENT_PID(4,%r0),0(%r3) # store pid of next
lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
- ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
+ ALTERNATIVE "", "lpp _LPP_OFFSET", 40
BR_EX %r14
ENDPROC(__switch_to)
@@ -443,7 +437,7 @@ ENDPROC(pgm_check_handler)
*/
.macro INT_HANDLER name,lc_old_psw,handler
ENTRY(\name)
- STCK __LC_INT_CLOCK
+ stckf __LC_INT_CLOCK
stpt __LC_SYS_ENTER_TIMER
STBEAR __LC_LAST_BREAK
BPOFF
@@ -515,7 +509,7 @@ ENTRY(psw_idle)
.Lpsw_idle_stcctm:
oi __LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
BPON
- STCK __CLOCK_IDLE_ENTER(%r2)
+ stckf __CLOCK_IDLE_ENTER(%r2)
stpt __TIMER_IDLE_ENTER(%r2)
lpswe __SF_EMPTY(%r15)
.globl psw_idle_exit
@@ -527,7 +521,7 @@ ENDPROC(psw_idle)
* Machine check handler routines
*/
ENTRY(mcck_int_handler)
- STCK __LC_MCCK_CLOCK
+ stckf __LC_MCCK_CLOCK
BPOFF
la %r1,4095 # validate r1
spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # validate cpu timer
@@ -654,7 +648,7 @@ ENTRY(mcck_int_handler)
ENDPROC(mcck_int_handler)
ENTRY(restart_int_handler)
- ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
+ ALTERNATIVE "", "lpp _LPP_OFFSET", 40
stg %r15,__LC_SAVE_AREA_RESTART
TSTMSK __LC_RESTART_FLAGS,RESTART_FLAG_CTLREGS,4
jz 0f
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index 6083090be1f4..56e5e3712fbb 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -5,6 +5,7 @@
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/signal.h>
+#include <asm/extable.h>
#include <asm/ptrace.h>
#include <asm/idle.h>
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 89c0870d5679..1852d46babb1 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -4,8 +4,7 @@
*
* Copyright IBM Corp. 2009,2014
*
- * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
- * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#include <linux/moduleloader.h>
@@ -61,18 +60,9 @@ asm(
#ifdef CONFIG_EXPOLINE
asm(
" .align 16\n"
- "ftrace_shared_hotpatch_trampoline_ex:\n"
- " lmg %r0,%r1,2(%r1)\n"
- " ex %r0," __stringify(__LC_BR_R1) "(%r0)\n"
- " j .\n"
- "ftrace_shared_hotpatch_trampoline_ex_end:\n"
-);
-
-asm(
- " .align 16\n"
"ftrace_shared_hotpatch_trampoline_exrl:\n"
" lmg %r0,%r1,2(%r1)\n"
- " .insn ril,0xc60000000000,%r0,0f\n" /* exrl */
+ " exrl %r0,0f\n"
" j .\n"
"0: br %r1\n"
"ftrace_shared_hotpatch_trampoline_exrl_end:\n"
@@ -91,12 +81,8 @@ static const char *ftrace_shared_hotpatch_trampoline(const char **end)
tend = ftrace_shared_hotpatch_trampoline_br_end;
#ifdef CONFIG_EXPOLINE
if (!nospec_disable) {
- tstart = ftrace_shared_hotpatch_trampoline_ex;
- tend = ftrace_shared_hotpatch_trampoline_ex_end;
- if (test_facility(35)) { /* exrl */
- tstart = ftrace_shared_hotpatch_trampoline_exrl;
- tend = ftrace_shared_hotpatch_trampoline_exrl_end;
- }
+ tstart = ftrace_shared_hotpatch_trampoline_exrl;
+ tend = ftrace_shared_hotpatch_trampoline_exrl_end;
}
#endif /* CONFIG_EXPOLINE */
if (end)
@@ -194,25 +180,26 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
return 0;
}
-static void brcl_disable(void *brcl)
+static int ftrace_patch_branch_mask(void *addr, u16 expected, bool enable)
{
- u8 op = 0x04; /* set mask field to zero */
+ u16 old;
+ u8 op;
- s390_kernel_write((char *)brcl + 1, &op, sizeof(op));
+ if (get_kernel_nofault(old, addr))
+ return -EFAULT;
+ if (old != expected)
+ return -EINVAL;
+ /* set mask field to all ones or zeroes */
+ op = enable ? 0xf4 : 0x04;
+ s390_kernel_write((char *)addr + 1, &op, sizeof(op));
+ return 0;
}
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
unsigned long addr)
{
- brcl_disable((void *)rec->ip);
- return 0;
-}
-
-static void brcl_enable(void *brcl)
-{
- u8 op = 0xf4; /* set mask field to all ones */
-
- s390_kernel_write((char *)brcl + 1, &op, sizeof(op));
+ /* Expect brcl 0xf,... */
+ return ftrace_patch_branch_mask((void *)rec->ip, 0xc0f4, false);
}
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
@@ -223,8 +210,8 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
if (IS_ERR(trampoline))
return PTR_ERR(trampoline);
s390_kernel_write(&trampoline->interceptor, &addr, sizeof(addr));
- brcl_enable((void *)rec->ip);
- return 0;
+ /* Expect brcl 0x0,... */
+ return ftrace_patch_branch_mask((void *)rec->ip, 0xc004, true);
}
int ftrace_update_ftrace_func(ftrace_func_t func)
@@ -297,14 +284,24 @@ NOKPROBE_SYMBOL(prepare_ftrace_return);
*/
int ftrace_enable_ftrace_graph_caller(void)
{
- brcl_disable(ftrace_graph_caller);
+ int rc;
+
+ /* Expect brc 0xf,... */
+ rc = ftrace_patch_branch_mask(ftrace_graph_caller, 0xa7f4, false);
+ if (rc)
+ return rc;
text_poke_sync_lock();
return 0;
}
int ftrace_disable_ftrace_graph_caller(void)
{
- brcl_enable(ftrace_graph_caller);
+ int rc;
+
+ /* Expect brc 0x0,... */
+ rc = ftrace_patch_branch_mask(ftrace_graph_caller, 0xa704, true);
+ if (rc)
+ return rc;
text_poke_sync_lock();
return 0;
}
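
ftrace_patch_branch_mask() works because brcl/brc encode their condition mask in the high nibble of the second instruction byte: 0xc004 is "brcl 0,..." (never taken, effectively a nop) and 0xc0f4 is "brcl 15,..." (always taken). The expected-value check turns silent mispatches into -EINVAL. A sketch of the byte-level toggle:

#include <stdint.h>

/* flip a 6-byte brcl between "never taken" and "always taken" */
static void toggle_brcl(uint8_t insn[6], int enable)
{
	/* byte 0 stays 0xc0; byte 1 is (mask << 4) | 0x4 */
	insn[1] = enable ? 0xf4 : 0x04;
}
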
diff --git a/arch/s390/kernel/ftrace.h b/arch/s390/kernel/ftrace.h
index 69e416f4c6b0..7f75a9616406 100644
--- a/arch/s390/kernel/ftrace.h
+++ b/arch/s390/kernel/ftrace.h
@@ -16,8 +16,6 @@ extern struct ftrace_hotpatch_trampoline __ftrace_hotpatch_trampolines_start[];
extern struct ftrace_hotpatch_trampoline __ftrace_hotpatch_trampolines_end[];
extern const char ftrace_shared_hotpatch_trampoline_br[];
extern const char ftrace_shared_hotpatch_trampoline_br_end[];
-extern const char ftrace_shared_hotpatch_trampoline_ex[];
-extern const char ftrace_shared_hotpatch_trampoline_ex_end[];
extern const char ftrace_shared_hotpatch_trampoline_exrl[];
extern const char ftrace_shared_hotpatch_trampoline_exrl_end[];
extern const char ftrace_plt_template[];
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 42f9a325a257..d7b8b6ad574d 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -5,7 +5,6 @@
* Author(s): Hartmut Penner <hp@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Rob van der Heij <rvdhei@iae.nl>
- * Heiko Carstens <heiko.carstens@de.ibm.com>
*
*/
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 5ad1dde23dc5..28ae7df26c4a 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -4,7 +4,6 @@
*
* Copyright IBM Corp. 2005, 2012
* Author(s): Michael Holzheu <holzheu@de.ibm.com>
- * Heiko Carstens <heiko.carstens@de.ibm.com>
* Volker Sameske <sameske@de.ibm.com>
*/
@@ -20,6 +19,7 @@
#include <linux/gfp.h>
#include <linux/crash_dump.h>
#include <linux/debug_locks.h>
+#include <asm/asm-extable.h>
#include <asm/diag.h>
#include <asm/ipl.h>
#include <asm/smp.h>
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index cb7099682340..3033f616e256 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -342,7 +342,7 @@ static irqreturn_t do_ext_interrupt(int irq, void *dummy)
struct ext_int_info *p;
int index;
- ext_code = *(struct ext_code *) &regs->int_code;
+ ext_code.int_code = regs->int_code;
if (ext_code.code != EXT_IRQ_CLK_COMP)
set_cpu_flag(CIF_NOHZ_DELAY);
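
The plain assignment works because struct ext_code overlays its code/subcode fields with an int_code word, so no pointer cast is needed. A hedged sketch of that layout (field order is illustrative, not the exact s390 definition):

struct ext_code {
	union {
		struct {
			unsigned short subcode;
			unsigned short code;
		};
		unsigned int int_code;
	};
};
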
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index e27a7d3b0364..e32c14fd1282 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -372,33 +372,26 @@ static int kprobe_handler(struct pt_regs *regs)
}
NOKPROBE_SYMBOL(kprobe_handler);
-/*
- * Function return probe trampoline:
- * - init_kprobes() establishes a probepoint here
- * - When the probed function returns, this probe
- * causes the handlers to fire
- */
-static void __used kretprobe_trampoline_holder(void)
+void arch_kretprobe_fixup_return(struct pt_regs *regs,
+ kprobe_opcode_t *correct_ret_addr)
{
- asm volatile(".global __kretprobe_trampoline\n"
- "__kretprobe_trampoline: bcr 0,0\n");
+ /* Replace fake return address with real one. */
+ regs->gprs[14] = (unsigned long)correct_ret_addr;
}
+NOKPROBE_SYMBOL(arch_kretprobe_fixup_return);
/*
- * Called when the probe at kretprobe trampoline is hit
+ * Called from __kretprobe_trampoline
*/
-static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
+void trampoline_probe_handler(struct pt_regs *regs)
{
- regs->psw.addr = __kretprobe_trampoline_handler(regs, NULL);
- /*
- * By returning a non-zero value, we are telling
- * kprobe_handler() that we don't want the post_handler
- * to run (and have re-enabled preemption)
- */
- return 1;
+ kretprobe_trampoline_handler(regs, NULL);
}
NOKPROBE_SYMBOL(trampoline_probe_handler);
+/* assembler function that handles the kretprobes must not be probed itself */
+NOKPROBE_SYMBOL(__kretprobe_trampoline);
+
/*
* Called after single-stepping. p->addr is the address of the
* instruction whose first byte has been replaced by the "breakpoint"
@@ -465,7 +458,6 @@ static int kprobe_trap_handler(struct pt_regs *regs, int trapnr)
{
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
struct kprobe *p = kprobe_running();
- const struct exception_table_entry *entry;
switch(kcb->kprobe_status) {
case KPROBE_HIT_SS:
@@ -487,10 +479,8 @@ static int kprobe_trap_handler(struct pt_regs *regs, int trapnr)
* In case the user-specified fault handler returned
* zero, try to fix up.
*/
- entry = s390_search_extables(regs->psw.addr);
- if (entry && ex_handle(entry, regs))
+ if (fixup_exception(regs))
return 1;
-
/*
* fixup_exception() could not handle it,
* Let do_page_fault() fix it.
@@ -554,18 +544,13 @@ int kprobe_exceptions_notify(struct notifier_block *self,
}
NOKPROBE_SYMBOL(kprobe_exceptions_notify);
-static struct kprobe trampoline = {
- .addr = (kprobe_opcode_t *) &__kretprobe_trampoline,
- .pre_handler = trampoline_probe_handler
-};
-
int __init arch_init_kprobes(void)
{
- return register_kprobe(&trampoline);
+ return 0;
}
int arch_trampoline_kprobe(struct kprobe *p)
{
- return p->addr == (kprobe_opcode_t *) &__kretprobe_trampoline;
+ return 0;
}
NOKPROBE_SYMBOL(arch_trampoline_kprobe);
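
Users of the generic kretprobe API are unaffected by dropping the extra kprobe: the trampoline now calls the handler directly. A minimal consumer, unchanged by this rework (probed symbol and handler body are illustrative):

#include <linux/kprobes.h>

static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	pr_info("probed function returned %ld\n", regs_return_value(regs));
	return 0;
}

static struct kretprobe my_kretprobe = {
	.handler	= ret_handler,
	.kp.symbol_name	= "kernel_clone",	/* example target */
};

/* register_kretprobe(&my_kretprobe) in module init,
 * unregister_kretprobe(&my_kretprobe) on exit */
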
diff --git a/arch/s390/kernel/lgr.c b/arch/s390/kernel/lgr.c
index 3b895971c3d0..6652e54cf3db 100644
--- a/arch/s390/kernel/lgr.c
+++ b/arch/s390/kernel/lgr.c
@@ -88,8 +88,7 @@ static void lgr_stsi_2_2_2(struct lgr_info *lgr_info)
if (stsi(si, 2, 2, 2))
return;
cpascii(lgr_info->name, si->name, sizeof(si->name));
- memcpy(&lgr_info->lpar_number, &si->lpar_number,
- sizeof(lgr_info->lpar_number));
+ lgr_info->lpar_number = si->lpar_number;
}
/*
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index a16467b3825e..088d57a3083f 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -3,7 +3,6 @@
* Copyright IBM Corp. 2005, 2011
*
* Author(s): Rolf Adelsberger,
- * Heiko Carstens <heiko.carstens@de.ibm.com>
* Michael Holzheu <holzheu@linux.vnet.ibm.com>
*/
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index a24177dcd12a..4786bfe02144 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -2,8 +2,6 @@
/*
* Copyright IBM Corp. 2008, 2009
*
- * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
- *
*/
#include <linux/linkage.h>
@@ -13,14 +11,6 @@
#include <asm/ptrace.h>
#include <asm/export.h>
- GEN_BR_THUNK %r1
- GEN_BR_THUNK %r14
-
- .section .kprobes.text, "ax"
-
-ENTRY(ftrace_stub)
- BR_EX %r14
-ENDPROC(ftrace_stub)
#define STACK_FRAME_SIZE (STACK_FRAME_OVERHEAD + __PT_SIZE)
#define STACK_PTREGS (STACK_FRAME_OVERHEAD)
@@ -28,20 +18,28 @@ ENDPROC(ftrace_stub)
#define STACK_PTREGS_PSW (STACK_PTREGS + __PT_PSW)
#define STACK_PTREGS_ORIG_GPR2 (STACK_PTREGS + __PT_ORIG_GPR2)
#define STACK_PTREGS_FLAGS (STACK_PTREGS + __PT_FLAGS)
-#ifdef __PACK_STACK
-/* allocate just enough for r14, r15 and backchain */
+/* packed stack: allocate just enough for r14, r15 and backchain */
#define TRACED_FUNC_FRAME_SIZE 24
-#else
-#define TRACED_FUNC_FRAME_SIZE STACK_FRAME_OVERHEAD
-#endif
+
+#ifdef CONFIG_FUNCTION_TRACER
+
+ GEN_BR_THUNK %r1
+ GEN_BR_THUNK %r14
+
+ .section .kprobes.text, "ax"
+
+ENTRY(ftrace_stub)
+ BR_EX %r14
+ENDPROC(ftrace_stub)
.macro ftrace_regs_entry, allregs=0
stg %r14,(__SF_GPRS+8*8)(%r15) # save traced function caller
.if \allregs == 1
- lghi %r14,0 # save condition code
- ipm %r14 # don't put any instructions
- sllg %r14,%r14,16 # clobbering CC before this point
+ # save psw mask
+ # don't put any instructions clobbering CC before this point
+ epsw %r1,%r14
+ risbg %r14,%r1,0,31,32
.endif
lgr %r1,%r15
@@ -57,13 +55,7 @@ ENDPROC(ftrace_stub)
.if \allregs == 1
stg %r14,(STACK_PTREGS_PSW)(%r15)
- stosm (STACK_PTREGS_PSW)(%r15),0
-#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
mvghi STACK_PTREGS_FLAGS(%r15),_PIF_FTRACE_FULL_REGS
-#else
- lghi %r14,_PIF_FTRACE_FULL_REGS
- stg %r14,STACK_PTREGS_FLAGS(%r15)
-#endif
.else
xc STACK_PTREGS_FLAGS(8,%r15),STACK_PTREGS_FLAGS(%r15)
.endif
@@ -141,3 +133,35 @@ SYM_FUNC_START(return_to_handler)
SYM_FUNC_END(return_to_handler)
#endif
+#endif /* CONFIG_FUNCTION_TRACER */
+
+#ifdef CONFIG_KPROBES
+
+SYM_FUNC_START(__kretprobe_trampoline)
+
+ stg %r14,(__SF_GPRS+8*8)(%r15)
+ lay %r15,-STACK_FRAME_SIZE(%r15)
+ stmg %r0,%r14,STACK_PTREGS_GPRS(%r15)
+
+ # store original stack pointer in backchain and pt_regs
+ lay %r7,STACK_FRAME_SIZE(%r15)
+ stg %r7,__SF_BACKCHAIN(%r15)
+ stg %r7,STACK_PTREGS_GPRS+(15*8)(%r15)
+
+ # store full psw
+ epsw %r2,%r3
+ risbg %r3,%r2,0,31,32
+ stg %r3,STACK_PTREGS_PSW(%r15)
+ larl %r1,__kretprobe_trampoline
+ stg %r1,STACK_PTREGS_PSW+8(%r15)
+
+ lay %r2,STACK_PTREGS(%r15)
+ brasl %r14,trampoline_probe_handler
+
+ mvc __SF_EMPTY(16,%r7),STACK_PTREGS_PSW(%r15)
+ lmg %r0,%r15,STACK_PTREGS_GPRS(%r15)
+ lpswe __SF_EMPTY(%r15)
+
+SYM_FUNC_END(__kretprobe_trampoline)
+
+#endif /* CONFIG_KPROBES */
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index b032e556eeb7..c0dd72db77b8 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -517,15 +517,9 @@ int module_finalize(const Elf_Ehdr *hdr,
ij = me->core_layout.base + me->arch.plt_offset +
me->arch.plt_size - PLT_ENTRY_SIZE;
- if (test_facility(35)) {
- ij[0] = 0xc6000000; /* exrl %r0,.+10 */
- ij[1] = 0x0005a7f4; /* j . */
- ij[2] = 0x000007f1; /* br %r1 */
- } else {
- ij[0] = 0x44000000 | (unsigned int)
- offsetof(struct lowcore, br_r1_trampoline);
- ij[1] = 0xa7f40000; /* j . */
- }
+ ij[0] = 0xc6000000; /* exrl %r0,.+10 */
+ ij[1] = 0x0005a7f4; /* j . */
+ ij[2] = 0x000007f1; /* br %r1 */
}
secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 651a51914e34..fc60e29b8690 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -6,7 +6,6 @@
* Author(s): Ingo Adlung <adlung@de.ibm.com>,
* Martin Schwidefsky <schwidefsky@de.ibm.com>,
* Cornelia Huck <cornelia.huck@de.ibm.com>,
- * Heiko Carstens <heiko.carstens@de.ibm.com>,
*/
#include <linux/kernel_stat.h>
diff --git a/arch/s390/kernel/nospec-branch.c b/arch/s390/kernel/nospec-branch.c
index 60e6fec27bba..717bbcc056e5 100644
--- a/arch/s390/kernel/nospec-branch.c
+++ b/arch/s390/kernel/nospec-branch.c
@@ -105,6 +105,7 @@ static void __init_or_module __nospec_revert(s32 *start, s32 *end)
s32 *epo;
/* Second part of the instruction replace is always a nop */
+ memcpy(insnbuf + 2, branch, sizeof(branch));
for (epo = start; epo < end; epo++) {
instr = (u8 *) epo + *epo;
if (instr[0] == 0xc0 && (instr[1] & 0x0f) == 0x04)
@@ -117,42 +118,20 @@ static void __init_or_module __nospec_revert(s32 *start, s32 *end)
if (thunk[0] == 0xc6 && thunk[1] == 0x00)
/* exrl %r0,<target-br> */
br = thunk + (*(int *)(thunk + 2)) * 2;
- else if (thunk[0] == 0xc0 && (thunk[1] & 0x0f) == 0x00 &&
- thunk[6] == 0x44 && thunk[7] == 0x00 &&
- (thunk[8] & 0x0f) == 0x00 && thunk[9] == 0x00 &&
- (thunk[1] & 0xf0) == (thunk[8] & 0xf0))
- /* larl %rx,<target br> + ex %r0,0(%rx) */
- br = thunk + (*(int *)(thunk + 2)) * 2;
else
continue;
- /* Check for unconditional branch 0x07f? or 0x47f???? */
- if ((br[0] & 0xbf) != 0x07 || (br[1] & 0xf0) != 0xf0)
+ if (br[0] != 0x07 || (br[1] & 0xf0) != 0xf0)
continue;
-
- memcpy(insnbuf + 2, branch, sizeof(branch));
switch (type) {
case BRCL_EXPOLINE:
+ /* brcl to thunk, replace with br + nop */
insnbuf[0] = br[0];
insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f);
- if (br[0] == 0x47) {
- /* brcl to b, replace with bc + nopr */
- insnbuf[2] = br[2];
- insnbuf[3] = br[3];
- } else {
- /* brcl to br, replace with bcr + nop */
- }
break;
case BRASL_EXPOLINE:
+ /* brasl to thunk, replace with basr + nop */
+ insnbuf[0] = 0x0d;
insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f);
- if (br[0] == 0x47) {
- /* brasl to b, replace with bas + nopr */
- insnbuf[0] = 0x4d;
- insnbuf[2] = br[2];
- insnbuf[3] = br[3];
- } else {
- /* brasl to br, replace with basr + nop */
- insnbuf[0] = 0x0d;
- }
break;
}
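
With the BC-based thunks gone, the revert path only has to turn a 6-byte brcl/brasl into a 2-byte br/basr followed by a 4-byte nop (0x47000000, i.e. "bc 0,0"), so the instruction stream keeps its length. A sketch of the brasl case, with register fields extracted as in the loop above (register numbers 0-15):

#include <stdint.h>

static void revert_brasl(uint8_t insnbuf[6], uint8_t link_reg,
			 uint8_t target_reg)
{
	insnbuf[0] = 0x0d;				/* basr opcode */
	insnbuf[1] = (uint8_t)((link_reg << 4) | target_reg);
	insnbuf[2] = 0x47;				/* nop: bc 0,0 */
	insnbuf[3] = insnbuf[4] = insnbuf[5] = 0x00;
}
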
diff --git a/arch/s390/kernel/os_info.c b/arch/s390/kernel/os_info.c
index 4bef35b79b93..6b5b64e67eee 100644
--- a/arch/s390/kernel/os_info.c
+++ b/arch/s390/kernel/os_info.c
@@ -15,6 +15,7 @@
#include <asm/checksum.h>
#include <asm/lowcore.h>
#include <asm/os_info.h>
+#include <asm/asm-offsets.h>
/*
* OS info structure has to be page aligned
@@ -45,7 +46,7 @@ void os_info_crashkernel_add(unsigned long base, unsigned long size)
*/
void os_info_entry_add(int nr, void *ptr, u64 size)
{
- os_info.entry[nr].addr = (u64)(unsigned long)ptr;
+ os_info.entry[nr].addr = __pa(ptr);
os_info.entry[nr].size = size;
os_info.entry[nr].csum = (__force u32)csum_partial(ptr, size, 0);
os_info.csum = os_info_csum(&os_info);
@@ -62,7 +63,7 @@ void __init os_info_init(void)
os_info.version_minor = OS_INFO_VERSION_MINOR;
os_info.magic = OS_INFO_MAGIC;
os_info.csum = os_info_csum(&os_info);
- mem_assign_absolute(S390_lowcore.os_info, (unsigned long) ptr);
+ mem_assign_absolute(S390_lowcore.os_info, __pa(ptr));
}
#ifdef CONFIG_CRASH_DUMP
@@ -90,7 +91,7 @@ static void os_info_old_alloc(int nr, int align)
goto fail;
}
buf_align = PTR_ALIGN(buf, align);
- if (copy_oldmem_kernel(buf_align, (void *) addr, size)) {
+ if (copy_oldmem_kernel(buf_align, addr, size)) {
msg = "copy failed";
goto fail_free;
}
@@ -123,15 +124,14 @@ static void os_info_old_init(void)
return;
if (!oldmem_data.start)
goto fail;
- if (copy_oldmem_kernel(&addr, &S390_lowcore.os_info, sizeof(addr)))
+ if (copy_oldmem_kernel(&addr, __LC_OS_INFO, sizeof(addr)))
goto fail;
if (addr == 0 || addr % PAGE_SIZE)
goto fail;
os_info_old = kzalloc(sizeof(*os_info_old), GFP_KERNEL);
if (!os_info_old)
goto fail;
- if (copy_oldmem_kernel(os_info_old, (void *) addr,
- sizeof(*os_info_old)))
+ if (copy_oldmem_kernel(os_info_old, addr, sizeof(*os_info_old)))
goto fail_free;
if (os_info_old->magic != OS_INFO_MAGIC)
goto fail_free;
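
Both hunks apply the same rule as the crash_dump.c changes: values handed to the dump tooling must be physical addresses, hence __pa() on the write side and the __LC_OS_INFO lowcore offset constant on the read side. A short sketch of the invariant being relied on (s390 directly mapped memory is why the old virtual addresses happened to work):

#include <asm/page.h>

static void addr_invariant(void *ptr)
{
	unsigned long phys = __pa(ptr);

	/* __va(__pa(x)) == x holds for directly mapped kernel memory */
	BUG_ON(__va(phys) != ptr);
}
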
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index ee8707abdb6a..483ab5e10164 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -1451,6 +1451,8 @@ static size_t cfdiag_maxsize(struct cpumf_ctr_info *info)
/* Get the CPU speed, try sampling facility first and CPU attributes second. */
static void cfdiag_get_cpu_speed(void)
{
+ unsigned long mhz;
+
if (cpum_sf_avail()) { /* Sampling facility first */
struct hws_qsi_info_block si;
@@ -1464,12 +1466,9 @@ static void cfdiag_get_cpu_speed(void)
/* Fallback: CPU speed extract static part. Used in case
* CPU Measurement Sampling Facility is turned off.
*/
- if (test_facility(34)) {
- unsigned long mhz = __ecag(ECAG_CPU_ATTRIBUTE, 0);
-
- if (mhz != -1UL)
- cfdiag_cpu_speed = mhz & 0xffffffff;
- }
+ mhz = __ecag(ECAG_CPU_ATTRIBUTE, 0);
+ if (mhz != -1UL)
+ cfdiag_cpu_speed = mhz & 0xffffffff;
}
static int cfset_init(void)
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index d9d4a806979e..7a74ea5f7531 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -172,8 +172,7 @@ static void show_cpu_summary(struct seq_file *m, void *v)
static int __init setup_hwcaps(void)
{
/* instructions named N3, "backported" to esa-mode */
- if (test_facility(0))
- elf_hwcap |= HWCAP_ESAN3;
+ elf_hwcap |= HWCAP_ESAN3;
/* z/Architecture mode active */
elf_hwcap |= HWCAP_ZARCH;
@@ -191,8 +190,7 @@ static int __init setup_hwcaps(void)
elf_hwcap |= HWCAP_LDISP;
/* extended-immediate */
- if (test_facility(21))
- elf_hwcap |= HWCAP_EIMM;
+ elf_hwcap |= HWCAP_EIMM;
/* extended-translation facility 3 enhancement */
if (test_facility(22) && test_facility(30))
@@ -262,21 +260,7 @@ static int __init setup_elf_platform(void)
get_cpu_id(&cpu_id);
add_device_randomness(&cpu_id, sizeof(cpu_id));
switch (cpu_id.machine) {
- case 0x2064:
- case 0x2066:
- default: /* Use "z900" as default for 64 bit kernels. */
- strcpy(elf_platform, "z900");
- break;
- case 0x2084:
- case 0x2086:
- strcpy(elf_platform, "z990");
- break;
- case 0x2094:
- case 0x2096:
- strcpy(elf_platform, "z9-109");
- break;
- case 0x2097:
- case 0x2098:
+ default: /* Use "z10" as default. */
strcpy(elf_platform, "z10");
break;
case 0x2817:
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 0ea3d02b378d..ed3439515bb2 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -147,38 +147,36 @@ void ptrace_disable(struct task_struct *task)
static inline unsigned long __peek_user_per(struct task_struct *child,
addr_t addr)
{
- struct per_struct_kernel *dummy = NULL;
-
- if (addr == (addr_t) &dummy->cr9)
+ if (addr == offsetof(struct per_struct_kernel, cr9))
/* Control bits of the active per set. */
return test_thread_flag(TIF_SINGLE_STEP) ?
PER_EVENT_IFETCH : child->thread.per_user.control;
- else if (addr == (addr_t) &dummy->cr10)
+ else if (addr == offsetof(struct per_struct_kernel, cr10))
/* Start address of the active per set. */
return test_thread_flag(TIF_SINGLE_STEP) ?
0 : child->thread.per_user.start;
- else if (addr == (addr_t) &dummy->cr11)
+ else if (addr == offsetof(struct per_struct_kernel, cr11))
/* End address of the active per set. */
return test_thread_flag(TIF_SINGLE_STEP) ?
-1UL : child->thread.per_user.end;
- else if (addr == (addr_t) &dummy->bits)
+ else if (addr == offsetof(struct per_struct_kernel, bits))
/* Single-step bit. */
return test_thread_flag(TIF_SINGLE_STEP) ?
(1UL << (BITS_PER_LONG - 1)) : 0;
- else if (addr == (addr_t) &dummy->starting_addr)
+ else if (addr == offsetof(struct per_struct_kernel, starting_addr))
/* Start address of the user specified per set. */
return child->thread.per_user.start;
- else if (addr == (addr_t) &dummy->ending_addr)
+ else if (addr == offsetof(struct per_struct_kernel, ending_addr))
/* End address of the user specified per set. */
return child->thread.per_user.end;
- else if (addr == (addr_t) &dummy->perc_atmid)
+ else if (addr == offsetof(struct per_struct_kernel, perc_atmid))
/* PER code, ATMID and AI of the last PER trap */
return (unsigned long)
child->thread.per_event.cause << (BITS_PER_LONG - 16);
- else if (addr == (addr_t) &dummy->address)
+ else if (addr == offsetof(struct per_struct_kernel, address))
/* Address of the last PER trap */
return child->thread.per_event.address;
- else if (addr == (addr_t) &dummy->access_id)
+ else if (addr == offsetof(struct per_struct_kernel, access_id))
/* Access id of the last PER trap */
return (unsigned long)
child->thread.per_event.paid << (BITS_PER_LONG - 8);
@@ -196,61 +194,60 @@ static inline unsigned long __peek_user_per(struct task_struct *child,
*/
static unsigned long __peek_user(struct task_struct *child, addr_t addr)
{
- struct user *dummy = NULL;
addr_t offset, tmp;
- if (addr < (addr_t) &dummy->regs.acrs) {
+ if (addr < offsetof(struct user, regs.acrs)) {
/*
* psw and gprs are stored on the stack
*/
tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr);
- if (addr == (addr_t) &dummy->regs.psw.mask) {
+ if (addr == offsetof(struct user, regs.psw.mask)) {
/* Return a clean psw mask. */
tmp &= PSW_MASK_USER | PSW_MASK_RI;
tmp |= PSW_USER_BITS;
}
- } else if (addr < (addr_t) &dummy->regs.orig_gpr2) {
+ } else if (addr < offsetof(struct user, regs.orig_gpr2)) {
/*
* access registers are stored in the thread structure
*/
- offset = addr - (addr_t) &dummy->regs.acrs;
+ offset = addr - offsetof(struct user, regs.acrs);
/*
* Very special case: old & broken 64 bit gdb reading
* from acrs[15]. Result is a 64 bit value. Read the
* 32 bit acrs[15] value and shift it by 32. Sick...
*/
- if (addr == (addr_t) &dummy->regs.acrs[15])
+ if (addr == offsetof(struct user, regs.acrs[15]))
tmp = ((unsigned long) child->thread.acrs[15]) << 32;
else
tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);
- } else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
+ } else if (addr == offsetof(struct user, regs.orig_gpr2)) {
/*
* orig_gpr2 is stored on the kernel stack
*/
tmp = (addr_t) task_pt_regs(child)->orig_gpr2;
- } else if (addr < (addr_t) &dummy->regs.fp_regs) {
+ } else if (addr < offsetof(struct user, regs.fp_regs)) {
/*
* prevent reads of padding hole between
* orig_gpr2 and fp_regs on s390.
*/
tmp = 0;
- } else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) {
+ } else if (addr == offsetof(struct user, regs.fp_regs.fpc)) {
/*
* floating point control reg. is in the thread structure
*/
tmp = child->thread.fpu.fpc;
tmp <<= BITS_PER_LONG - 32;
- } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
+ } else if (addr < offsetof(struct user, regs.fp_regs) + sizeof(s390_fp_regs)) {
/*
* floating point regs. are either in child->thread.fpu
* or the child->thread.fpu.vxrs array
*/
- offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
+ offset = addr - offsetof(struct user, regs.fp_regs.fprs);
if (MACHINE_HAS_VX)
tmp = *(addr_t *)
((addr_t) child->thread.fpu.vxrs + 2*offset);
@@ -258,11 +255,11 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr)
tmp = *(addr_t *)
((addr_t) child->thread.fpu.fprs + offset);
- } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
+ } else if (addr < offsetof(struct user, regs.per_info) + sizeof(per_struct)) {
/*
* Handle access to the per_info structure.
*/
- addr -= (addr_t) &dummy->regs.per_info;
+ addr -= offsetof(struct user, regs.per_info);
tmp = __peek_user_per(child, addr);
} else
@@ -281,8 +278,8 @@ peek_user(struct task_struct *child, addr_t addr, addr_t data)
* an alignment of 4. Programmers from hell...
*/
mask = __ADDR_MASK;
- if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
- addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
+ if (addr >= offsetof(struct user, regs.acrs) &&
+ addr < offsetof(struct user, regs.orig_gpr2))
mask = 3;
if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
return -EIO;
@@ -294,8 +291,6 @@ peek_user(struct task_struct *child, addr_t addr, addr_t data)
static inline void __poke_user_per(struct task_struct *child,
addr_t addr, addr_t data)
{
- struct per_struct_kernel *dummy = NULL;
-
/*
* There are only three fields in the per_info struct that the
* debugger user can write to.
@@ -308,14 +303,14 @@ static inline void __poke_user_per(struct task_struct *child,
* addresses are used only if single stepping is not in effect.
* Writes to any other field in per_info are ignored.
*/
- if (addr == (addr_t) &dummy->cr9)
+ if (addr == offsetof(struct per_struct_kernel, cr9))
/* PER event mask of the user specified per set. */
child->thread.per_user.control =
data & (PER_EVENT_MASK | PER_CONTROL_MASK);
- else if (addr == (addr_t) &dummy->starting_addr)
+ else if (addr == offsetof(struct per_struct_kernel, starting_addr))
/* Starting address of the user specified per set. */
child->thread.per_user.start = data;
- else if (addr == (addr_t) &dummy->ending_addr)
+ else if (addr == offsetof(struct per_struct_kernel, ending_addr))
/* Ending address of the user specified per set. */
child->thread.per_user.end = data;
}
@@ -328,16 +323,15 @@ static inline void __poke_user_per(struct task_struct *child,
*/
static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
- struct user *dummy = NULL;
addr_t offset;
- if (addr < (addr_t) &dummy->regs.acrs) {
+ if (addr < offsetof(struct user, regs.acrs)) {
struct pt_regs *regs = task_pt_regs(child);
/*
* psw and gprs are stored on the stack
*/
- if (addr == (addr_t) &dummy->regs.psw.mask) {
+ if (addr == offsetof(struct user, regs.psw.mask)) {
unsigned long mask = PSW_MASK_USER;
mask |= is_ri_task(child) ? PSW_MASK_RI : 0;
@@ -359,36 +353,36 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
regs->int_code = 0x20000 | (data & 0xffff);
}
*(addr_t *)((addr_t) &regs->psw + addr) = data;
- } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
+ } else if (addr < offsetof(struct user, regs.orig_gpr2)) {
/*
* access registers are stored in the thread structure
*/
- offset = addr - (addr_t) &dummy->regs.acrs;
+ offset = addr - offsetof(struct user, regs.acrs);
/*
* Very special case: old & broken 64 bit gdb writing
* to acrs[15] with a 64 bit value. Ignore the lower
* half of the value and write the upper 32 bit to
* acrs[15]. Sick...
*/
- if (addr == (addr_t) &dummy->regs.acrs[15])
+ if (addr == offsetof(struct user, regs.acrs[15]))
child->thread.acrs[15] = (unsigned int) (data >> 32);
else
*(addr_t *)((addr_t) &child->thread.acrs + offset) = data;
- } else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
+ } else if (addr == offsetof(struct user, regs.orig_gpr2)) {
/*
* orig_gpr2 is stored on the kernel stack
*/
task_pt_regs(child)->orig_gpr2 = data;
- } else if (addr < (addr_t) &dummy->regs.fp_regs) {
+ } else if (addr < offsetof(struct user, regs.fp_regs)) {
/*
* prevent writes of padding hole between
* orig_gpr2 and fp_regs on s390.
*/
return 0;
- } else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) {
+ } else if (addr == offsetof(struct user, regs.fp_regs.fpc)) {
/*
* floating point control reg. is in the thread structure
*/
@@ -397,12 +391,12 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
return -EINVAL;
child->thread.fpu.fpc = data >> (BITS_PER_LONG - 32);
- } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
+ } else if (addr < offsetof(struct user, regs.fp_regs) + sizeof(s390_fp_regs)) {
/*
* floating point regs. are either in child->thread.fpu
* or the child->thread.fpu.vxrs array
*/
- offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
+ offset = addr - offsetof(struct user, regs.fp_regs.fprs);
if (MACHINE_HAS_VX)
*(addr_t *)((addr_t)
child->thread.fpu.vxrs + 2*offset) = data;
@@ -410,11 +404,11 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
*(addr_t *)((addr_t)
child->thread.fpu.fprs + offset) = data;
- } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
+ } else if (addr < offsetof(struct user, regs.per_info) + sizeof(per_struct)) {
/*
* Handle access to the per_info structure.
*/
- addr -= (addr_t) &dummy->regs.per_info;
+ addr -= offsetof(struct user, regs.per_info);
__poke_user_per(child, addr, data);
}
@@ -431,8 +425,8 @@ static int poke_user(struct task_struct *child, addr_t addr, addr_t data)
* an alignment of 4. Programmers from hell indeed...
*/
mask = __ADDR_MASK;
- if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
- addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
+ if (addr >= offsetof(struct user, regs.acrs) &&
+ addr < offsetof(struct user, regs.orig_gpr2))
mask = 3;
if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
return -EIO;
@@ -540,37 +534,35 @@ long arch_ptrace(struct task_struct *child, long request,
static inline __u32 __peek_user_per_compat(struct task_struct *child,
addr_t addr)
{
- struct compat_per_struct_kernel *dummy32 = NULL;
-
- if (addr == (addr_t) &dummy32->cr9)
+ if (addr == offsetof(struct compat_per_struct_kernel, cr9))
/* Control bits of the active per set. */
return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
PER_EVENT_IFETCH : child->thread.per_user.control;
- else if (addr == (addr_t) &dummy32->cr10)
+ else if (addr == offsetof(struct compat_per_struct_kernel, cr10))
/* Start address of the active per set. */
return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
0 : child->thread.per_user.start;
- else if (addr == (addr_t) &dummy32->cr11)
+ else if (addr == offsetof(struct compat_per_struct_kernel, cr11))
/* End address of the active per set. */
return test_thread_flag(TIF_SINGLE_STEP) ?
PSW32_ADDR_INSN : child->thread.per_user.end;
- else if (addr == (addr_t) &dummy32->bits)
+ else if (addr == offsetof(struct compat_per_struct_kernel, bits))
/* Single-step bit. */
return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
0x80000000 : 0;
- else if (addr == (addr_t) &dummy32->starting_addr)
+ else if (addr == offsetof(struct compat_per_struct_kernel, starting_addr))
/* Start address of the user specified per set. */
return (__u32) child->thread.per_user.start;
- else if (addr == (addr_t) &dummy32->ending_addr)
+ else if (addr == offsetof(struct compat_per_struct_kernel, ending_addr))
/* End address of the user specified per set. */
return (__u32) child->thread.per_user.end;
- else if (addr == (addr_t) &dummy32->perc_atmid)
+ else if (addr == offsetof(struct compat_per_struct_kernel, perc_atmid))
/* PER code, ATMID and AI of the last PER trap */
return (__u32) child->thread.per_event.cause << 16;
- else if (addr == (addr_t) &dummy32->address)
+ else if (addr == offsetof(struct compat_per_struct_kernel, address))
/* Address of the last PER trap */
return (__u32) child->thread.per_event.address;
- else if (addr == (addr_t) &dummy32->access_id)
+ else if (addr == offsetof(struct compat_per_struct_kernel, access_id))
/* Access id of the last PER trap */
return (__u32) child->thread.per_event.paid << 24;
return 0;
@@ -581,21 +573,20 @@ static inline __u32 __peek_user_per_compat(struct task_struct *child,
*/
static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
{
- struct compat_user *dummy32 = NULL;
addr_t offset;
__u32 tmp;
- if (addr < (addr_t) &dummy32->regs.acrs) {
+ if (addr < offsetof(struct compat_user, regs.acrs)) {
struct pt_regs *regs = task_pt_regs(child);
/*
* psw and gprs are stored on the stack
*/
- if (addr == (addr_t) &dummy32->regs.psw.mask) {
+ if (addr == offsetof(struct compat_user, regs.psw.mask)) {
/* Fake a 31 bit psw mask. */
tmp = (__u32)(regs->psw.mask >> 32);
tmp &= PSW32_MASK_USER | PSW32_MASK_RI;
tmp |= PSW32_USER_BITS;
- } else if (addr == (addr_t) &dummy32->regs.psw.addr) {
+ } else if (addr == offsetof(struct compat_user, regs.psw.addr)) {
/* Fake a 31 bit psw address. */
tmp = (__u32) regs->psw.addr |
(__u32)(regs->psw.mask & PSW_MASK_BA);
@@ -603,38 +594,38 @@ static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
/* gpr 0-15 */
tmp = *(__u32 *)((addr_t) &regs->psw + addr*2 + 4);
}
- } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
+ } else if (addr < offsetof(struct compat_user, regs.orig_gpr2)) {
/*
* access registers are stored in the thread structure
*/
- offset = addr - (addr_t) &dummy32->regs.acrs;
+ offset = addr - offsetof(struct compat_user, regs.acrs);
tmp = *(__u32*)((addr_t) &child->thread.acrs + offset);
- } else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
+ } else if (addr == offsetof(struct compat_user, regs.orig_gpr2)) {
/*
* orig_gpr2 is stored on the kernel stack
*/
tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4);
- } else if (addr < (addr_t) &dummy32->regs.fp_regs) {
+ } else if (addr < offsetof(struct compat_user, regs.fp_regs)) {
/*
* prevent reads of padding hole between
* orig_gpr2 and fp_regs on s390.
*/
tmp = 0;
- } else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) {
+ } else if (addr == offsetof(struct compat_user, regs.fp_regs.fpc)) {
/*
* floating point control reg. is in the thread structure
*/
tmp = child->thread.fpu.fpc;
- } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
+ } else if (addr < offsetof(struct compat_user, regs.fp_regs) + sizeof(s390_fp_regs)) {
/*
* floating point regs. are either in child->thread.fpu
* or the child->thread.fpu.vxrs array
*/
- offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
+ offset = addr - offsetof(struct compat_user, regs.fp_regs.fprs);
if (MACHINE_HAS_VX)
tmp = *(__u32 *)
((addr_t) child->thread.fpu.vxrs + 2*offset);
@@ -642,11 +633,11 @@ static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
tmp = *(__u32 *)
((addr_t) child->thread.fpu.fprs + offset);
- } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
+ } else if (addr < offsetof(struct compat_user, regs.per_info) + sizeof(struct compat_per_struct_kernel)) {
/*
* Handle access to the per_info structure.
*/
- addr -= (addr_t) &dummy32->regs.per_info;
+ addr -= offsetof(struct compat_user, regs.per_info);
tmp = __peek_user_per_compat(child, addr);
} else
@@ -673,16 +664,14 @@ static int peek_user_compat(struct task_struct *child,
static inline void __poke_user_per_compat(struct task_struct *child,
addr_t addr, __u32 data)
{
- struct compat_per_struct_kernel *dummy32 = NULL;
-
- if (addr == (addr_t) &dummy32->cr9)
+ if (addr == offsetof(struct compat_per_struct_kernel, cr9))
/* PER event mask of the user specified per set. */
child->thread.per_user.control =
data & (PER_EVENT_MASK | PER_CONTROL_MASK);
- else if (addr == (addr_t) &dummy32->starting_addr)
+ else if (addr == offsetof(struct compat_per_struct_kernel, starting_addr))
/* Starting address of the user specified per set. */
child->thread.per_user.start = data;
- else if (addr == (addr_t) &dummy32->ending_addr)
+ else if (addr == offsetof(struct compat_per_struct_kernel, ending_addr))
/* Ending address of the user specified per set. */
child->thread.per_user.end = data;
}
@@ -693,16 +682,15 @@ static inline void __poke_user_per_compat(struct task_struct *child,
static int __poke_user_compat(struct task_struct *child,
addr_t addr, addr_t data)
{
- struct compat_user *dummy32 = NULL;
__u32 tmp = (__u32) data;
addr_t offset;
- if (addr < (addr_t) &dummy32->regs.acrs) {
+ if (addr < offsetof(struct compat_user, regs.acrs)) {
struct pt_regs *regs = task_pt_regs(child);
/*
* psw, gprs, acrs and orig_gpr2 are stored on the stack
*/
- if (addr == (addr_t) &dummy32->regs.psw.mask) {
+ if (addr == offsetof(struct compat_user, regs.psw.mask)) {
__u32 mask = PSW32_MASK_USER;
mask |= is_ri_task(child) ? PSW32_MASK_RI : 0;
@@ -716,7 +704,7 @@ static int __poke_user_compat(struct task_struct *child,
regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
(regs->psw.mask & PSW_MASK_BA) |
(__u64)(tmp & mask) << 32;
- } else if (addr == (addr_t) &dummy32->regs.psw.addr) {
+ } else if (addr == offsetof(struct compat_user, regs.psw.addr)) {
/* Build a 64 bit psw address from 31 bit address. */
regs->psw.addr = (__u64) tmp & PSW32_ADDR_INSN;
/* Transfer 31 bit amode bit to psw mask. */
@@ -732,27 +720,27 @@ static int __poke_user_compat(struct task_struct *child,
/* gpr 0-15 */
*(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp;
}
- } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
+ } else if (addr < offsetof(struct compat_user, regs.orig_gpr2)) {
/*
* access registers are stored in the thread structure
*/
- offset = addr - (addr_t) &dummy32->regs.acrs;
+ offset = addr - offsetof(struct compat_user, regs.acrs);
*(__u32*)((addr_t) &child->thread.acrs + offset) = tmp;
- } else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
+ } else if (addr == offsetof(struct compat_user, regs.orig_gpr2)) {
/*
* orig_gpr2 is stored on the kernel stack
*/
*(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp;
- } else if (addr < (addr_t) &dummy32->regs.fp_regs) {
+ } else if (addr < offsetof(struct compat_user, regs.fp_regs)) {
/*
* prevent writess of padding hole between
* orig_gpr2 and fp_regs on s390.
*/
return 0;
- } else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) {
+ } else if (addr == offsetof(struct compat_user, regs.fp_regs.fpc)) {
/*
* floating point control reg. is in the thread structure
*/
@@ -760,12 +748,12 @@ static int __poke_user_compat(struct task_struct *child,
return -EINVAL;
child->thread.fpu.fpc = data;
- } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
+ } else if (addr < offsetof(struct compat_user, regs.fp_regs) + sizeof(s390_fp_regs)) {
/*
* floating point regs. are either in child->thread.fpu
* or the child->thread.fpu.vxrs array
*/
- offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
+ offset = addr - offsetof(struct compat_user, regs.fp_regs.fprs);
if (MACHINE_HAS_VX)
*(__u32 *)((addr_t)
child->thread.fpu.vxrs + 2*offset) = tmp;
@@ -773,11 +761,11 @@ static int __poke_user_compat(struct task_struct *child,
*(__u32 *)((addr_t)
child->thread.fpu.fprs + offset) = tmp;
- } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
+ } else if (addr < offsetof(struct compat_user, regs.per_info) + sizeof(struct compat_per_struct_kernel)) {
/*
* Handle access to the per_info structure.
*/
- addr -= (addr_t) &dummy32->regs.per_info;
+ addr -= offsetof(struct compat_user, regs.per_info);
__poke_user_per_compat(child, addr, data);
}
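/*
 * Aside: the ptrace.c hunks above replace the old "NULL dummy pointer"
 * idiom for computing field offsets with offsetof(), which expresses
 * the same offset without formally undefined pointer arithmetic. A
 * minimal user-space sketch of the transformation; the struct layout
 * here is illustrative, not the real compat_user layout:
 */
#include <stddef.h>
#include <stdio.h>

struct demo_user {
	unsigned long psw_mask;
	unsigned long gprs[16];
	unsigned int acrs[16];
};

int main(void)
{
	struct demo_user *dummy = NULL;

	/* old style: no dereference at runtime, but undefined behaviour */
	size_t old_way = (size_t) &dummy->acrs;
	/* new style: the same offset, computed with well-defined C */
	size_t new_way = offsetof(struct demo_user, acrs);

	printf("%zu == %zu\n", old_way, new_way);
	return 0;
}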
diff --git a/arch/s390/kernel/relocate_kernel.S b/arch/s390/kernel/relocate_kernel.S
index fe396673e8a6..9438368c3632 100644
--- a/arch/s390/kernel/relocate_kernel.S
+++ b/arch/s390/kernel/relocate_kernel.S
@@ -2,8 +2,7 @@
/*
* Copyright IBM Corp. 2005
*
- * Author(s): Rolf Adelsberger,
- * Heiko Carstens <heiko.carstens@de.ibm.com>
+ * Author(s): Rolf Adelsberger
*
*/
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 05327be3a982..84e23fcc1106 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -490,7 +490,6 @@ static void __init setup_lowcore_dat_off(void)
lc->spinlock_lockval = arch_spin_lockval(0);
lc->spinlock_index = 0;
arch_spin_lock_setup(0);
- lc->br_r1_trampoline = 0x07f1; /* br %r1 */
lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
lc->preempt_count = PREEMPT_DISABLED;
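/*
 * Aside: 0x07f1, removed above, is the halfword encoding of "br %r1"
 * (BCR with mask 15 and register %r1); the per-lowcore trampoline is
 * obsolete now that the nospec rework emits single register thunks
 * directly. A quick user-space sanity check of the encoding, purely
 * illustrative:
 */
#include <assert.h>

int main(void)
{
	/* BCR is opcode 0x07, followed by 4-bit mask and 4-bit register */
	unsigned short bcr_15_r1 = (0x07 << 8) | (15 << 4) | 1;

	assert(bcr_15_r1 == 0x07f1);	/* br %r1 */
	return 0;
}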
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 307f5d99514d..5ff8d915ec7a 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -141,7 +141,7 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
{
_sigregs user_sregs;
- /* Alwys make any pending restarted system call return -EINTR */
+ /* Always make any pending restarted system call return -EINTR */
current->restart_block.fn = do_no_restart_syscall;
if (__copy_from_user(&user_sregs, sregs, sizeof(user_sregs)))
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 2bad902d8437..127da1850b06 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -5,7 +5,6 @@
* Copyright IBM Corp. 1999, 2012
* Author(s): Denis Joseph Barrow,
* Martin Schwidefsky <schwidefsky@de.ibm.com>,
- * Heiko Carstens <heiko.carstens@de.ibm.com>,
*
* based on other smp stuff by
* (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net>
@@ -208,7 +207,6 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
lc->cpu_nr = cpu;
lc->spinlock_lockval = arch_spin_lockval(cpu);
lc->spinlock_index = 0;
- lc->br_r1_trampoline = 0x07f1; /* br %r1 */
lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
lc->preempt_count = PREEMPT_DISABLED;
@@ -671,7 +669,7 @@ static __init void smp_save_cpu_regs(struct save_area *sa, u16 addr,
bool is_boot_cpu, void *regs)
{
if (is_boot_cpu)
- copy_oldmem_kernel(regs, (void *) __LC_FPREGS_SAVE_AREA, 512);
+ copy_oldmem_kernel(regs, __LC_FPREGS_SAVE_AREA, 512);
else
__pcpu_sigp_relax(addr, SIGP_STORE_STATUS_AT_ADDRESS, __pa(regs));
save_area_add_regs(sa, regs);
@@ -1253,7 +1251,7 @@ static __always_inline void set_new_lowcore(struct lowcore *lc)
src.odd = sizeof(S390_lowcore);
dst.even = (unsigned long) lc;
dst.odd = sizeof(*lc);
- pfx = (unsigned long) lc;
+ pfx = __pa(lc);
asm volatile(
" mvcl %[dst],%[src]\n"
@@ -1293,8 +1291,8 @@ static int __init smp_reinit_ipl_cpu(void)
local_irq_restore(flags);
free_pages(lc_ipl->async_stack - STACK_INIT_OFFSET, THREAD_SIZE_ORDER);
- memblock_free_late(lc_ipl->mcck_stack - STACK_INIT_OFFSET, THREAD_SIZE);
- memblock_free_late((unsigned long) lc_ipl, sizeof(*lc_ipl));
+ memblock_free_late(__pa(lc_ipl->mcck_stack - STACK_INIT_OFFSET), THREAD_SIZE);
+ memblock_free_late(__pa(lc_ipl), sizeof(*lc_ipl));
return 0;
}
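/*
 * Aside: the smp.c hunks above belong to the "virtual vs physical
 * address confusion" fixes: memblock_free_late() and the prefix
 * register take physical addresses, while lowcore pointers in the
 * kernel are virtual; the two only coincide while the identity
 * mapping holds. A hedged kernel-style sketch of the rule (not the
 * actual smp.c code):
 */
#include <linux/memblock.h>
#include <asm/lowcore.h>
#include <asm/page.h>

static void free_early_lowcore(struct lowcore *lc)
{
	/* wrong in general: would pass a virtual address */
	/* memblock_free_late((unsigned long) lc, sizeof(*lc)); */

	/* correct: translate to a physical address first */
	memblock_free_late(__pa(lc), sizeof(*lc));
}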
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
index b7bb1981e9ee..7ee455e8e3d5 100644
--- a/arch/s390/kernel/stacktrace.c
+++ b/arch/s390/kernel/stacktrace.c
@@ -3,7 +3,6 @@
* Stack trace management functions
*
* Copyright IBM Corp. 2006
- * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
*/
#include <linux/stacktrace.h>
diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c
index ef3f2659876c..b5e364358ce4 100644
--- a/arch/s390/kernel/sysinfo.c
+++ b/arch/s390/kernel/sysinfo.c
@@ -14,6 +14,7 @@
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/slab.h>
+#include <asm/asm-extable.h>
#include <asm/ebcdic.h>
#include <asm/debug.h>
#include <asm/sysinfo.h>
diff --git a/arch/s390/kernel/text_amode31.S b/arch/s390/kernel/text_amode31.S
index 868e4a604110..2c8b14cc5556 100644
--- a/arch/s390/kernel/text_amode31.S
+++ b/arch/s390/kernel/text_amode31.S
@@ -6,6 +6,7 @@
*/
#include <linux/linkage.h>
+#include <asm/asm-extable.h>
#include <asm/errno.h>
#include <asm/sigp.h>
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 58f8291950cb..c6eecd4a5302 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2007, 2011
- * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
*/
#define KMSG_COMPONENT "cpu"
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 2b780786fc68..674c65019434 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -27,6 +27,7 @@
#include <linux/uaccess.h>
#include <linux/cpu.h>
#include <linux/entry-common.h>
+#include <asm/asm-extable.h>
#include <asm/fpu/api.h>
#include <asm/vtime.h>
#include "entry.h"
@@ -53,9 +54,7 @@ void do_report_trap(struct pt_regs *regs, int si_signo, int si_code, char *str)
force_sig_fault(si_signo, si_code, get_trap_ip(regs));
report_user_fault(regs, si_signo, 0);
} else {
- const struct exception_table_entry *fixup;
- fixup = s390_search_extables(regs->psw.addr);
- if (!fixup || !ex_handle(fixup, regs))
+ if (!fixup_exception(regs))
die(regs, str);
}
}
@@ -244,16 +243,12 @@ static void space_switch_exception(struct pt_regs *regs)
static void monitor_event_exception(struct pt_regs *regs)
{
- const struct exception_table_entry *fixup;
-
if (user_mode(regs))
return;
switch (report_bug(regs->psw.addr - (regs->int_code >> 16), regs)) {
case BUG_TRAP_TYPE_NONE:
- fixup = s390_search_extables(regs->psw.addr);
- if (fixup)
- ex_handle(fixup, regs);
+ fixup_exception(regs);
break;
case BUG_TRAP_TYPE_WARN:
break;
@@ -291,7 +286,6 @@ static void __init test_monitor_call(void)
void __init trap_init(void)
{
- sort_extable(__start_amode31_ex_table, __stop_amode31_ex_table);
local_mcck_enable();
test_monitor_call();
}
@@ -303,7 +297,7 @@ void noinstr __do_pgm_check(struct pt_regs *regs)
unsigned int trapnr;
irqentry_state_t state;
- regs->int_code = *(u32 *)&S390_lowcore.pgm_ilc;
+ regs->int_code = S390_lowcore.pgm_int_code;
regs->int_parm_long = S390_lowcore.trans_exc_code;
state = irqentry_enter(regs);
@@ -328,7 +322,7 @@ void noinstr __do_pgm_check(struct pt_regs *regs)
set_thread_flag(TIF_PER_TRAP);
ev->address = S390_lowcore.per_address;
- ev->cause = *(u16 *)&S390_lowcore.per_code;
+ ev->cause = S390_lowcore.per_code_combined;
ev->paid = S390_lowcore.per_access_id;
} else {
/* PER event in kernel is kprobes */
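/*
 * Aside: traps.c now funnels all exception-table handling through
 * fixup_exception() instead of open-coding the search at each call
 * site. A hedged sketch of what such a wrapper does, built only from
 * the helpers visible in the removed lines above; it is not the
 * actual s390 implementation:
 */
#include <asm/extable.h>
#include <asm/ptrace.h>

static bool sketch_fixup_exception(struct pt_regs *regs)
{
	const struct exception_table_entry *ex;

	ex = s390_search_extables(regs->psw.addr);
	if (!ex)
		return false;
	return ex_handle(ex, regs);	/* apply the registered fixup */
}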
diff --git a/arch/s390/kernel/uprobes.c b/arch/s390/kernel/uprobes.c
index bd3ef121c379..b88345ef8bd9 100644
--- a/arch/s390/kernel/uprobes.c
+++ b/arch/s390/kernel/uprobes.c
@@ -177,9 +177,7 @@ static void adjust_psw_addr(psw_t *psw, unsigned long len)
__typeof__(*(ptr)) input; \
int __rc = 0; \
\
- if (!test_facility(34)) \
- __rc = EMU_ILLEGAL_OP; \
- else if ((u64 __force)ptr & mask) \
+ if ((u64 __force)ptr & mask) \
__rc = EMU_SPECIFICATION; \
else if (get_user(input, ptr)) \
__rc = EMU_ADDRESSING; \
@@ -194,9 +192,7 @@ static void adjust_psw_addr(psw_t *psw, unsigned long len)
__typeof__(ptr) __ptr = (ptr); \
int __rc = 0; \
\
- if (!test_facility(34)) \
- __rc = EMU_ILLEGAL_OP; \
- else if ((u64 __force)__ptr & mask) \
+ if ((u64 __force)__ptr & mask) \
__rc = EMU_SPECIFICATION; \
else if (put_user(*(input), __ptr)) \
__rc = EMU_ADDRESSING; \
@@ -213,9 +209,7 @@ static void adjust_psw_addr(psw_t *psw, unsigned long len)
__typeof__(*(ptr)) input; \
int __rc = 0; \
\
- if (!test_facility(34)) \
- __rc = EMU_ILLEGAL_OP; \
- else if ((u64 __force)ptr & mask) \
+ if ((u64 __force)ptr & mask) \
__rc = EMU_SPECIFICATION; \
else if (get_user(input, ptr)) \
__rc = EMU_ADDRESSING; \
@@ -327,10 +321,6 @@ static void handle_insn_ril(struct arch_uprobe *auprobe, struct pt_regs *regs)
break;
case 0xc6:
switch (insn->opc1) {
- case 0x02: /* pfdrl */
- if (!test_facility(34))
- rc = EMU_ILLEGAL_OP;
- break;
case 0x04: /* cghrl */
rc = emu_cmp_ril(regs, (s16 __user *)uptr, &rx->s64);
break;
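/*
 * Aside: the uprobes.c hunks drop the runtime test_facility(34)
 * guards. With the minimum supported machine generation raised to
 * z10, the general-instructions-extension facility (bit 34) is always
 * installed, so relative-long instructions such as pfdrl can no
 * longer be missing. A hedged sketch of the old guard for contrast
 * (function name illustrative):
 */
#include <linux/errno.h>
#include <asm/facility.h>

static int emulate_ril_insn_pre_z10(void)
{
	if (!test_facility(34))		/* pre-z10: insn may not exist */
		return -EOPNOTSUPP;	/* was EMU_ILLEGAL_OP in uprobes.c */
	/* ... emulate the relative-long instruction ... */
	return 0;
}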
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 42c43521878f..2e526f11b91e 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -49,7 +49,6 @@ SECTIONS
SOFTIRQENTRY_TEXT
FTRACE_HOTPATCH_TRAMPOLINES_TEXT
*(.text.*_indirect_*)
- *(.fixup)
*(.gnu.warning)
. = ALIGN(PAGE_SIZE);
_etext = .; /* End of text section */
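/*
 * Aside: *(.fixup) can be dropped from the linker script because the
 * extable rework encodes the fixup action inside the exception table
 * entry itself, instead of branching to anonymous out-of-line code in
 * a .fixup section. A hedged sketch of the general entry shape; the
 * field names are illustrative, not the exact s390 layout:
 */
struct extable_entry_sketch {
	int insn;	/* relative offset of the faulting instruction */
	int fixup;	/* relative offset of the continuation point */
	short type;	/* fixup kind, e.g. return -EFAULT */
	short data;	/* extra payload, e.g. which register to zero */
};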
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index f216a1b2f825..9436f3053b88 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -128,13 +128,12 @@ static int do_account_vtime(struct task_struct *tsk)
timer = S390_lowcore.last_update_timer;
clock = S390_lowcore.last_update_clock;
- /* Use STORE CLOCK by default, STORE CLOCK FAST if available. */
- alternative_io("stpt %0\n .insn s,0xb2050000,%1\n",
- "stpt %0\n .insn s,0xb27c0000,%1\n",
- 25,
- ASM_OUTPUT2("=Q" (S390_lowcore.last_update_timer),
- "=Q" (S390_lowcore.last_update_clock)),
- ASM_NO_INPUT_CLOBBER("cc"));
+ asm volatile(
+ " stpt %0\n" /* Store current cpu timer value */
+ " stckf %1" /* Store current tod clock value */
+ : "=Q" (S390_lowcore.last_update_timer),
+ "=Q" (S390_lowcore.last_update_clock)
+ : : "cc");
clock = S390_lowcore.last_update_clock - clock;
timer -= S390_lowcore.last_update_timer;
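/*
 * Aside: with z10 as the baseline, the store-clock-fast facility
 * (bit 25) is always installed, so the alternative_io() patching
 * removed above collapses into a plain stckf. A hedged kernel-style
 * sketch of reading the TOD clock this way (helper name illustrative):
 */
static inline unsigned long long read_tod_clock_fast(void)
{
	unsigned long long clk;

	asm volatile(" stckf %0" : "=Q" (clk) : : "cc");
	return clk;
}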