-rw-r--r--  Documentation/s390/s390dbf.txt | 10
-rw-r--r--  arch/s390/Kconfig | 62
-rw-r--r--  arch/s390/Makefile | 22
-rw-r--r--  arch/s390/appldata/appldata_base.c | 18
-rw-r--r--  arch/s390/configs/default_defconfig | 655
-rw-r--r--  arch/s390/configs/gcov_defconfig | 618
-rw-r--r--  arch/s390/configs/performance_defconfig | 610
-rw-r--r--  arch/s390/configs/zfcpdump_defconfig | 86
-rw-r--r--  arch/s390/crypto/aes_s390.c | 15
-rw-r--r--  arch/s390/defconfig | 4
-rw-r--r--  arch/s390/include/asm/atomic.h | 190
-rw-r--r--  arch/s390/include/asm/bitops.h | 1008
-rw-r--r--  arch/s390/include/asm/compat.h | 5
-rw-r--r--  arch/s390/include/asm/ctl_reg.h | 112
-rw-r--r--  arch/s390/include/asm/debug.h | 5
-rw-r--r--  arch/s390/include/asm/dis.h | 52
-rw-r--r--  arch/s390/include/asm/fcx.h | 38
-rw-r--r--  arch/s390/include/asm/ipl.h | 10
-rw-r--r--  arch/s390/include/asm/mmu_context.h | 10
-rw-r--r--  arch/s390/include/asm/page.h | 7
-rw-r--r--  arch/s390/include/asm/pci_debug.h | 5
-rw-r--r--  arch/s390/include/asm/pci_insn.h | 15
-rw-r--r--  arch/s390/include/asm/percpu.h | 137
-rw-r--r--  arch/s390/include/asm/processor.h | 18
-rw-r--r--  arch/s390/include/asm/ptrace.h | 7
-rw-r--r--  arch/s390/include/asm/setup.h | 7
-rw-r--r--  arch/s390/include/asm/smp.h | 1
-rw-r--r--  arch/s390/include/asm/switch_to.h | 124
-rw-r--r--  arch/s390/include/asm/timex.h | 6
-rw-r--r--  arch/s390/include/asm/uaccess.h | 18
-rw-r--r--  arch/s390/include/uapi/asm/ptrace.h | 4
-rw-r--r--  arch/s390/include/uapi/asm/sigcontext.h | 1
-rw-r--r--  arch/s390/kernel/Makefile | 2
-rw-r--r--  arch/s390/kernel/bitmap.c | 54
-rw-r--r--  arch/s390/kernel/cache.c | 5
-rw-r--r--  arch/s390/kernel/compat_linux.c | 4
-rw-r--r--  arch/s390/kernel/compat_linux.h | 1
-rw-r--r--  arch/s390/kernel/compat_signal.c | 89
-rw-r--r--  arch/s390/kernel/crash_dump.c | 35
-rw-r--r--  arch/s390/kernel/debug.c | 2
-rw-r--r--  arch/s390/kernel/dis.c | 81
-rw-r--r--  arch/s390/kernel/dumpstack.c | 1
-rw-r--r--  arch/s390/kernel/early.c | 2
-rw-r--r--  arch/s390/kernel/entry.h | 1
-rw-r--r--  arch/s390/kernel/ftrace.c | 9
-rw-r--r--  arch/s390/kernel/head.S | 2
-rw-r--r--  arch/s390/kernel/ipl.c | 4
-rw-r--r--  arch/s390/kernel/kprobes.c | 13
-rw-r--r--  arch/s390/kernel/pgm_check.S | 2
-rw-r--r--  arch/s390/kernel/process.c | 15
-rw-r--r--  arch/s390/kernel/ptrace.c | 70
-rw-r--r--  arch/s390/kernel/runtime_instr.c | 2
-rw-r--r--  arch/s390/kernel/setup.c | 59
-rw-r--r--  arch/s390/kernel/signal.c | 49
-rw-r--r--  arch/s390/kernel/smp.c | 21
-rw-r--r--  arch/s390/kernel/vdso.c | 9
-rw-r--r--  arch/s390/kernel/vtime.c | 4
-rw-r--r--  arch/s390/kvm/kvm-s390.c | 23
-rw-r--r--  arch/s390/kvm/trace.h | 1
-rw-r--r--  arch/s390/lib/Makefile | 2
-rw-r--r--  arch/s390/lib/find.c | 77
-rw-r--r--  arch/s390/lib/uaccess_mvcos.c | 30
-rw-r--r--  arch/s390/lib/uaccess_pt.c | 2
-rw-r--r--  arch/s390/lib/uaccess_std.c | 305
-rw-r--r--  arch/s390/math-emu/math.c | 2
-rw-r--r--  arch/s390/mm/cmm.c | 12
-rw-r--r--  arch/s390/mm/fault.c | 46
-rw-r--r--  arch/s390/mm/gup.c | 83
-rw-r--r--  arch/s390/mm/mmap.c | 12
-rw-r--r--  arch/s390/mm/pageattr.c | 4
-rw-r--r--  arch/s390/mm/pgtable.c | 43
-rw-r--r--  arch/s390/net/bpf_jit_comp.c | 6
-rw-r--r--  arch/s390/pci/pci.c | 108
-rw-r--r--  arch/s390/pci/pci_clp.c | 33
-rw-r--r--  arch/s390/pci/pci_dma.c | 18
-rw-r--r--  arch/s390/pci/pci_event.c | 35
-rw-r--r--  drivers/pci/hotplug/s390_pci_hpc.c | 6
-rw-r--r--  drivers/s390/block/dasd.c | 3
-rw-r--r--  drivers/s390/block/scm_blk.c | 6
-rw-r--r--  drivers/s390/block/scm_blk.h | 2
-rw-r--r--  drivers/s390/char/monwriter.c | 2
-rw-r--r--  drivers/s390/char/raw3270.c | 4
-rw-r--r--  drivers/s390/char/zcore.c | 20
-rw-r--r--  drivers/s390/cio/airq.c | 19
-rw-r--r--  drivers/s390/cio/eadm_sch.c | 29
-rw-r--r--  drivers/s390/cio/eadm_sch.h | 4
-rw-r--r--  drivers/s390/cio/qdio_debug.h | 8
-rw-r--r--  drivers/s390/cio/qdio_main.c | 2
-rw-r--r--  drivers/s390/crypto/zcrypt_debug.h | 12
-rw-r--r--  drivers/s390/net/claw.h | 8
-rw-r--r--  drivers/s390/net/ctcm_dbug.c | 2
-rw-r--r--  drivers/s390/net/lcs.h | 8
-rw-r--r--  drivers/s390/net/netiucv.c | 8
-rw-r--r--  drivers/s390/net/qeth_core_main.c | 2
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.h | 4
-rw-r--r--  drivers/tty/hvc/hvc_iucv.c | 3
96 files changed, 3427 insertions, 1983 deletions
diff --git a/Documentation/s390/s390dbf.txt b/Documentation/s390/s390dbf.txt
index fcaf0b4efba2..3da163383c93 100644
--- a/Documentation/s390/s390dbf.txt
+++ b/Documentation/s390/s390dbf.txt
@@ -158,6 +158,16 @@ Return Value: none
Description: Sets new actual debug level if new_level is valid.
---------------------------------------------------------------------------
+bool debug_level_enabled (debug_info_t * id, int level);
+
+Parameter: id: handle for debug log
+ level: debug level
+
+Return Value: True if level is less than or equal to the current debug level.
+
+Description: Returns true if debug events for the specified level would be
+ logged. Otherwise returns false.
+---------------------------------------------------------------------------
void debug_stop_all(void);
Parameter: none
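
The new debug_level_enabled() helper is meant to guard expensive event construction. A minimal usage sketch, assuming a driver that registers its own s390 debug area (the my_* names are hypothetical and not part of this patch):

    #include <linux/init.h>
    #include <linux/errno.h>
    #include <asm/debug.h>

    static debug_info_t *my_dbf;

    static int __init my_debug_init(void)
    {
            /* 2 pages per area, 1 area, room for a format pointer plus arguments */
            my_dbf = debug_register("my_driver", 2, 1, 8 * sizeof(long));
            if (!my_dbf)
                    return -ENOMEM;
            debug_register_view(my_dbf, &debug_sprintf_view);
            debug_set_level(my_dbf, 2);     /* only events with level <= 2 are stored */
            return 0;
    }

    static void my_do_work(int unit, int cost)
    {
            /* Skip the sprintf formatting entirely if a level-5 event would be dropped. */
            if (debug_level_enabled(my_dbf, 5))
                    debug_sprintf_event(my_dbf, 5, "unit %d cost %d\n", unit, cost);
    }
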
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 7143793859fa..f75d7e517927 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -99,6 +99,7 @@ config S390
select CLONE_BACKWARDS2
select GENERIC_CLOCKEVENTS
select GENERIC_CPU_DEVICES if !SMP
+ select GENERIC_FIND_FIRST_BIT
select GENERIC_SMP_IDLE_THREAD
select GENERIC_TIME_VSYSCALL_OLD
select HAVE_ALIGNED_STRUCT_PAGE if SLUB
@@ -237,6 +238,67 @@ config MARCH_ZEC12
endchoice
+config MARCH_G5_TUNE
+ def_bool TUNE_G5 || MARCH_G5 && TUNE_DEFAULT
+
+config MARCH_Z900_TUNE
+ def_bool TUNE_Z900 || MARCH_Z900 && TUNE_DEFAULT
+
+config MARCH_Z990_TUNE
+ def_bool TUNE_Z990 || MARCH_Z990 && TUNE_DEFAULT
+
+config MARCH_Z9_109_TUNE
+ def_bool TUNE_Z9_109 || MARCH_Z9_109 && TUNE_DEFAULT
+
+config MARCH_Z10_TUNE
+ def_bool TUNE_Z10 || MARCH_Z10 && TUNE_DEFAULT
+
+config MARCH_Z196_TUNE
+ def_bool TUNE_Z196 || MARCH_Z196 && TUNE_DEFAULT
+
+config MARCH_ZEC12_TUNE
+ def_bool TUNE_ZEC12 || MARCH_ZEC12 && TUNE_DEFAULT
+
+choice
+ prompt "Tune code generation"
+ default TUNE_DEFAULT
+ help
+ Cause the compiler to tune (-mtune) the generated code for a machine.
+ This will make the code run faster on the selected machine but
+ somewhat slower on other machines.
+ This option only changes how the compiler emits instructions, not the
+ selection of instructions itself, so the resulting kernel will run on
+ all other machines.
+
+config TUNE_DEFAULT
+ bool "Default"
+ help
+ Tune the generated code for the target processor for which the kernel
+ will be compiled.
+
+config TUNE_G5
+ bool "System/390 model G5 and G6"
+
+config TUNE_Z900
+ bool "IBM zSeries model z800 and z900"
+
+config TUNE_Z990
+ bool "IBM zSeries model z890 and z990"
+
+config TUNE_Z9_109
+ bool "IBM System z9"
+
+config TUNE_Z10
+ bool "IBM System z10"
+
+config TUNE_Z196
+ bool "IBM zEnterprise 114 and 196"
+
+config TUNE_ZEC12
+ bool "IBM zBC12 and zEC12"
+
+endchoice
+
config 64BIT
def_bool y
prompt "64 bit kernel"
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index a7d68a467ce8..874e6d6e9c5f 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -35,13 +35,21 @@ endif
export LD_BFD
-cflags-$(CONFIG_MARCH_G5) += $(call cc-option,-march=g5)
-cflags-$(CONFIG_MARCH_Z900) += $(call cc-option,-march=z900)
-cflags-$(CONFIG_MARCH_Z990) += $(call cc-option,-march=z990)
-cflags-$(CONFIG_MARCH_Z9_109) += $(call cc-option,-march=z9-109)
-cflags-$(CONFIG_MARCH_Z10) += $(call cc-option,-march=z10)
-cflags-$(CONFIG_MARCH_Z196) += $(call cc-option,-march=z196)
-cflags-$(CONFIG_MARCH_ZEC12) += $(call cc-option,-march=zEC12)
+cflags-$(CONFIG_MARCH_G5) += -march=g5
+cflags-$(CONFIG_MARCH_Z900) += -march=z900
+cflags-$(CONFIG_MARCH_Z990) += -march=z990
+cflags-$(CONFIG_MARCH_Z9_109) += -march=z9-109
+cflags-$(CONFIG_MARCH_Z10) += -march=z10
+cflags-$(CONFIG_MARCH_Z196) += -march=z196
+cflags-$(CONFIG_MARCH_ZEC12) += -march=zEC12
+
+cflags-$(CONFIG_MARCH_G5_TUNE) += -mtune=g5
+cflags-$(CONFIG_MARCH_Z900_TUNE) += -mtune=z900
+cflags-$(CONFIG_MARCH_Z990_TUNE) += -mtune=z990
+cflags-$(CONFIG_MARCH_Z9_109_TUNE) += -mtune=z9-109
+cflags-$(CONFIG_MARCH_Z10_TUNE) += -mtune=z10
+cflags-$(CONFIG_MARCH_Z196_TUNE) += -mtune=z196
+cflags-$(CONFIG_MARCH_ZEC12_TUNE) += -mtune=zEC12
#KBUILD_IMAGE is necessary for make rpm
KBUILD_IMAGE :=arch/s390/boot/image
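
Taken together with the Kconfig hunk above, -march (instruction selection) and -mtune (instruction scheduling) are now chosen independently. As a worked example, with CONFIG_MARCH_Z9_109=y and CONFIG_TUNE_ZEC12=y (which makes MARCH_ZEC12_TUNE default to y), the two lists resolve to:

    cflags-y += -march=z9-109    # instruction set: still runs on z9 and newer
    cflags-y += -mtune=zEC12     # scheduling biased toward zEC12 pipelines

The resulting kernel therefore keeps z9-compatible code while the compiler schedules it for the newer machine.
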
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index 87a22092b68f..4c4a1cef5208 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -48,9 +48,9 @@ static struct platform_device *appldata_pdev;
* /proc entries (sysctl)
*/
static const char appldata_proc_name[APPLDATA_PROC_NAME_LENGTH] = "appldata";
-static int appldata_timer_handler(ctl_table *ctl, int write,
+static int appldata_timer_handler(struct ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *ppos);
-static int appldata_interval_handler(ctl_table *ctl, int write,
+static int appldata_interval_handler(struct ctl_table *ctl, int write,
void __user *buffer,
size_t *lenp, loff_t *ppos);
@@ -201,10 +201,10 @@ static void __appldata_vtimer_setup(int cmd)
* Start/Stop timer, show status of timer (0 = not active, 1 = active)
*/
static int
-appldata_timer_handler(ctl_table *ctl, int write,
+appldata_timer_handler(struct ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
- int len;
+ unsigned int len;
char buf[2];
if (!*lenp || *ppos) {
@@ -243,10 +243,11 @@ out:
* current timer interval.
*/
static int
-appldata_interval_handler(ctl_table *ctl, int write,
+appldata_interval_handler(struct ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
- int len, interval;
+ unsigned int len;
+ int interval;
char buf[16];
if (!*lenp || *ppos) {
@@ -286,11 +287,12 @@ out:
* monitoring (0 = not in process, 1 = in process)
*/
static int
-appldata_generic_handler(ctl_table *ctl, int write,
+appldata_generic_handler(struct ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
struct appldata_ops *ops = NULL, *tmp_ops;
- int rc, len, found;
+ unsigned int len;
+ int rc, found;
char buf[2];
struct list_head *lh;
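
The appldata hunks above spell out struct ctl_table in place of the old ctl_table typedef and make the local length counters unsigned. For reference, a minimal sketch of a sysctl table using that same spelled-out handler signature (the my_* names are hypothetical and not taken from appldata):

    #include <linux/sysctl.h>

    static int my_value;

    /* Same proc_handler signature the appldata handlers now use. */
    static int my_handler(struct ctl_table *ctl, int write,
                          void __user *buffer, size_t *lenp, loff_t *ppos)
    {
            /* Hand the actual integer parsing/printing to the generic helper. */
            return proc_dointvec(ctl, write, buffer, lenp, ppos);
    }

    static struct ctl_table my_table[] = {
            {
                    .procname       = "my_value",
                    .data           = &my_value,
                    .maxlen         = sizeof(int),
                    .mode           = 0644,
                    .proc_handler   = my_handler,
            },
            { }
    };

Such a table would typically be registered with register_sysctl_table(), the same interface appldata_base.c uses for its own sysctl tables.
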
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig
new file mode 100644
index 000000000000..e0af2ee58751
--- /dev/null
+++ b/arch/s390/configs/default_defconfig
@@ -0,0 +1,655 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_CGROUP_PERF=y
+CONFIG_CFS_BANDWIDTH=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_BLK_CGROUP=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=m
+CONFIG_KPROBES=y
+CONFIG_JUMP_LABEL=y
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_BLK_DEV_INTEGRITY=y
+CONFIG_BLK_DEV_THROTTLING=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_IBM_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_CFQ_GROUP_IOSCHED=y
+CONFIG_DEFAULT_DEADLINE=y
+CONFIG_MARCH_Z9_109=y
+CONFIG_PREEMPT=y
+CONFIG_HZ_100=y
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_KSM=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_PCI=y
+CONFIG_PCI_DEBUG=y
+CONFIG_HOTPLUG_PCI=y
+CONFIG_HOTPLUG_PCI_S390=y
+CONFIG_CHSC_SCH=y
+CONFIG_CRASH_DUMP=y
+CONFIG_ZFCPDUMP=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=m
+CONFIG_HIBERNATION=y
+CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=m
+CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=m
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=m
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
+CONFIG_NET_IPGRE=m
+CONFIG_NET_IPGRE_BROADCAST=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=m
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_INET_XFRM_MODE_BEET=m
+CONFIG_INET_DIAG=m
+CONFIG_INET_UDP_DIAG=m
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_CONG_HSTCP=m
+CONFIG_TCP_CONG_HYBLA=m
+CONFIG_TCP_CONG_SCALABLE=m
+CONFIG_TCP_CONG_LP=m
+CONFIG_TCP_CONG_VENO=m
+CONFIG_TCP_CONG_YEAH=m
+CONFIG_TCP_CONG_ILLINOIS=m
+CONFIG_IPV6=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_MIP6=m
+CONFIG_INET6_XFRM_MODE_TRANSPORT=m
+CONFIG_INET6_XFRM_MODE_TUNNEL=m
+CONFIG_INET6_XFRM_MODE_BEET=m
+CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
+CONFIG_IPV6_SIT=m
+CONFIG_IPV6_GRE=m
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_TIMEOUT=y
+CONFIG_NF_CONNTRACK_TIMESTAMP=y
+CONFIG_NF_CT_PROTO_DCCP=m
+CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CONNTRACK_AMANDA=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
+CONFIG_NF_CONNTRACK_PPTP=m
+CONFIG_NF_CONNTRACK_SANE=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NF_CT_NETLINK_TIMEOUT=m
+CONFIG_NETFILTER_TPROXY=m
+CONFIG_NETFILTER_XT_SET=m
+CONFIG_NETFILTER_XT_TARGET_AUDIT=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
+CONFIG_NETFILTER_XT_TARGET_CT=m
+CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_TRACE=m
+CONFIG_NETFILTER_XT_TARGET_SECMARK=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
+CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_CPU=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
+CONFIG_NETFILTER_XT_MATCH_DSCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
+CONFIG_NETFILTER_XT_MATCH_IPVS=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
+CONFIG_NETFILTER_XT_MATCH_OWNER=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_TIME=m
+CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
+CONFIG_IP_VS=m
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
+CONFIG_IP_VS_LC=m
+CONFIG_IP_VS_WLC=m
+CONFIG_IP_VS_LBLC=m
+CONFIG_IP_VS_LBLCR=m
+CONFIG_IP_VS_DH=m
+CONFIG_IP_VS_SH=m
+CONFIG_IP_VS_SED=m
+CONFIG_IP_VS_NQ=m
+CONFIG_IP_VS_FTP=m
+CONFIG_IP_VS_PE_SIP=m
+CONFIG_NF_CONNTRACK_IPV4=m
+# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_ULOG=m
+CONFIG_NF_NAT_IPV4=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_NETMAP=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_CLUSTERIP=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_SECURITY=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_TARGET_HL=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_IP6_NF_SECURITY=m
+CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
+CONFIG_NET_SCTPPROBE=m
+CONFIG_RDS=m
+CONFIG_RDS_RDMA=m
+CONFIG_RDS_TCP=m
+CONFIG_RDS_DEBUG=y
+CONFIG_L2TP=m
+CONFIG_L2TP_DEBUGFS=m
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=m
+CONFIG_L2TP_ETH=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_MULTIQ=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFB=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_DRR=m
+CONFIG_NET_SCH_MQPRIO=m
+CONFIG_NET_SCH_CHOKE=m
+CONFIG_NET_SCH_QFQ=m
+CONFIG_NET_SCH_CODEL=m
+CONFIG_NET_SCH_FQ_CODEL=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_SCH_PLUG=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_CLS_U32_PERF=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_FLOW=m
+CONFIG_NET_CLS_CGROUP=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=m
+CONFIG_NET_ACT_GACT=m
+CONFIG_GACT_PROB=y
+CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_IPT=m
+CONFIG_NET_ACT_NAT=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_ACT_SIMP=m
+CONFIG_NET_ACT_SKBEDIT=m
+CONFIG_NET_ACT_CSUM=m
+CONFIG_DNS_RESOLVER=y
+CONFIG_BPF_JIT=y
+CONFIG_NET_PKTGEN=m
+CONFIG_NET_TCPPROBE=m
+CONFIG_DEVTMPFS=y
+CONFIG_CONNECTOR=y
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_OSD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=32768
+CONFIG_BLK_DEV_XIP=y
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_ATA_OVER_ETH=m
+CONFIG_VIRTIO_BLK=y
+CONFIG_ENCLOSURE_SERVICES=m
+CONFIG_RAID_ATTRS=m
+CONFIG_SCSI=y
+CONFIG_SCSI_TGT=m
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=m
+CONFIG_CHR_DEV_OSST=m
+CONFIG_BLK_DEV_SR=m
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=m
+CONFIG_SCSI_ENCLOSURE=m
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SPI_ATTRS=m
+CONFIG_SCSI_SAS_LIBSAS=m
+CONFIG_SCSI_SRP_ATTRS=m
+CONFIG_SCSI_SRP_TGT_ATTRS=y
+CONFIG_ISCSI_TCP=m
+CONFIG_LIBFCOE=m
+CONFIG_SCSI_DEBUG=m
+CONFIG_ZFCP=y
+CONFIG_SCSI_VIRTIO=m
+CONFIG_SCSI_DH=m
+CONFIG_SCSI_DH_RDAC=m
+CONFIG_SCSI_DH_HP_SW=m
+CONFIG_SCSI_DH_EMC=m
+CONFIG_SCSI_DH_ALUA=m
+CONFIG_SCSI_OSD_INITIATOR=m
+CONFIG_SCSI_OSD_ULD=m
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_MD_FAULTY=m
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_RAID=m
+CONFIG_DM_LOG_USERSPACE=m
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_DM_MULTIPATH_QL=m
+CONFIG_DM_MULTIPATH_ST=m
+CONFIG_DM_DELAY=m
+CONFIG_DM_UEVENT=y
+CONFIG_DM_FLAKEY=m
+CONFIG_DM_VERITY=m
+CONFIG_DM_SWITCH=m
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_EQUALIZER=m
+CONFIG_IFB=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_VXLAN=m
+CONFIG_TUN=m
+CONFIG_VETH=m
+CONFIG_VIRTIO_NET=m
+CONFIG_NLMON=m
+CONFIG_VHOST_NET=m
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+CONFIG_MLX4_EN=m
+# CONFIG_NET_VENDOR_NATSEMI is not set
+CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_MPPE=m
+CONFIG_PPPOE=m
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
+CONFIG_LEGACY_PTY_COUNT=0
+CONFIG_HW_RANDOM_VIRTIO=m
+CONFIG_RAW_DRIVER=m
+CONFIG_HANGCHECK_TIMER=m
+CONFIG_TN3270_FS=y
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
+CONFIG_SOFT_WATCHDOG=m
+CONFIG_ZVM_WATCHDOG=m
+# CONFIG_HID is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_INFINIBAND=m
+CONFIG_INFINIBAND_USER_ACCESS=m
+CONFIG_MLX4_INFINIBAND=m
+CONFIG_VIRTIO_BALLOON=m
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT2_FS_XIP=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_JBD_DEBUG=y
+CONFIG_JBD2_DEBUG=y
+CONFIG_JFS_FS=m
+CONFIG_JFS_POSIX_ACL=y
+CONFIG_JFS_SECURITY=y
+CONFIG_JFS_STATISTICS=y
+CONFIG_XFS_FS=m
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_XFS_RT=y
+CONFIG_XFS_DEBUG=y
+CONFIG_GFS2_FS=m
+CONFIG_OCFS2_FS=m
+CONFIG_BTRFS_FS=m
+CONFIG_BTRFS_FS_POSIX_ACL=y
+CONFIG_NILFS2_FS=m
+CONFIG_FANOTIFY=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V1=m
+CONFIG_QFMT_V2=m
+CONFIG_AUTOFS4_FS=m
+CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
+CONFIG_FSCACHE=m
+CONFIG_CACHEFILES=m
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_NTFS_FS=m
+CONFIG_NTFS_RW=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_HUGETLBFS=y
+CONFIG_CONFIGFS_FS=m
+CONFIG_ECRYPT_FS=m
+CONFIG_CRAMFS=m
+CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_XATTR=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_SQUASHFS_XZ=y
+CONFIG_ROMFS_FS=m
+CONFIG_NFS_FS=m
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=m
+CONFIG_NFS_SWAP=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V3_ACL=y
+CONFIG_NFSD_V4=y
+CONFIG_NFSD_V4_SECURITY_LABEL=y
+CONFIG_CIFS=m
+CONFIG_CIFS_STATS=y
+CONFIG_CIFS_STATS2=y
+CONFIG_CIFS_WEAK_PW_HASH=y
+CONFIG_CIFS_UPCALL=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+# CONFIG_CIFS_DEBUG is not set
+CONFIG_CIFS_DFS_UPCALL=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_UTF8=m
+CONFIG_DLM=m
+CONFIG_PRINTK_TIME=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_FRAME_WARN=1024
+CONFIG_READABLE_ASM=y
+CONFIG_UNUSED_SYMBOLS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_SLUB_DEBUG_ON=y
+CONFIG_SLUB_STATS=y
+CONFIG_DEBUG_STACK_USAGE=y
+CONFIG_DEBUG_VM=y
+CONFIG_DEBUG_VM_RB=y
+CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
+CONFIG_DEBUG_PER_CPU_MAPS=y
+CONFIG_TIMER_STATS=y
+CONFIG_DEBUG_RT_MUTEXES=y
+CONFIG_RT_MUTEX_TESTER=y
+CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y
+CONFIG_PROVE_LOCKING=y
+CONFIG_LOCK_STAT=y
+CONFIG_DEBUG_LOCKDEP=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_DEBUG_LOCKING_API_SELFTESTS=y
+CONFIG_DEBUG_WRITECOUNT=y
+CONFIG_DEBUG_LIST=y
+CONFIG_DEBUG_SG=y
+CONFIG_DEBUG_NOTIFIERS=y
+CONFIG_DEBUG_CREDENTIALS=y
+CONFIG_PROVE_RCU=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_RCU_CPU_STALL_TIMEOUT=300
+CONFIG_NOTIFIER_ERROR_INJECTION=m
+CONFIG_CPU_NOTIFIER_ERROR_INJECT=m
+CONFIG_PM_NOTIFIER_ERROR_INJECT=m
+CONFIG_FAULT_INJECTION=y
+CONFIG_FAILSLAB=y
+CONFIG_FAIL_PAGE_ALLOC=y
+CONFIG_FAIL_MAKE_REQUEST=y
+CONFIG_FAIL_IO_TIMEOUT=y
+CONFIG_FAULT_INJECTION_DEBUG_FS=y
+CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
+CONFIG_LATENCYTOP=y
+CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
+CONFIG_BLK_DEV_IO_TRACE=y
+# CONFIG_KPROBE_EVENT is not set
+CONFIG_LKDTM=m
+CONFIG_KPROBES_SANITY_TEST=y
+CONFIG_RBTREE_TEST=m
+CONFIG_INTERVAL_TREE_TEST=m
+CONFIG_ATOMIC64_SELFTEST=y
+CONFIG_DMA_API_DEBUG=y
+# CONFIG_STRICT_DEVMEM is not set
+CONFIG_S390_PTDUMP=y
+CONFIG_ENCRYPTED_KEYS=m
+CONFIG_KEYS_DEBUG_PROC_KEYS=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
+CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_IMA=y
+CONFIG_IMA_APPRAISE=y
+CONFIG_CRYPTO_USER=m
+# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_CCM=m
+CONFIG_CRYPTO_GCM=m
+CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_VMAC=m
+CONFIG_CRYPTO_CRC32=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_RMD128=m
+CONFIG_CRYPTO_RMD160=m
+CONFIG_CRYPTO_RMD256=m
+CONFIG_CRYPTO_RMD320=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SALSA20=m
+CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_ZLIB=y
+CONFIG_CRYPTO_LZO=m
+CONFIG_CRYPTO_LZ4=m
+CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_ZCRYPT=m
+CONFIG_CRYPTO_SHA1_S390=m
+CONFIG_CRYPTO_SHA256_S390=m
+CONFIG_CRYPTO_SHA512_S390=m
+CONFIG_CRYPTO_DES_S390=m
+CONFIG_CRYPTO_AES_S390=m
+CONFIG_CRYPTO_GHASH_S390=m
+CONFIG_ASYMMETRIC_KEY_TYPE=m
+CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
+CONFIG_PUBLIC_KEY_ALGO_RSA=m
+CONFIG_X509_CERTIFICATE_PARSER=m
+CONFIG_CRC7=m
+CONFIG_CRC8=m
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_CORDIC=m
+CONFIG_CMM=m
+CONFIG_APPLDATA_BASE=y
+CONFIG_KVM=m
+CONFIG_KVM_S390_UCONTROL=y
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig
new file mode 100644
index 000000000000..b9f6b4cab927
--- /dev/null
+++ b/arch/s390/configs/gcov_defconfig
@@ -0,0 +1,618 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_CGROUP_PERF=y
+CONFIG_BLK_CGROUP=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=m
+CONFIG_KPROBES=y
+CONFIG_JUMP_LABEL=y
+CONFIG_GCOV_KERNEL=y
+CONFIG_GCOV_PROFILE_ALL=y
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_BLK_DEV_INTEGRITY=y
+CONFIG_BLK_DEV_THROTTLING=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_IBM_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_CFQ_GROUP_IOSCHED=y
+CONFIG_DEFAULT_DEADLINE=y
+CONFIG_MARCH_Z9_109=y
+CONFIG_HZ_100=y
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_KSM=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_PCI=y
+CONFIG_HOTPLUG_PCI=y
+CONFIG_HOTPLUG_PCI_S390=y
+CONFIG_CHSC_SCH=y
+CONFIG_CRASH_DUMP=y
+CONFIG_ZFCPDUMP=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=m
+CONFIG_HIBERNATION=y
+CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=m
+CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=m
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=m
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
+CONFIG_NET_IPGRE=m
+CONFIG_NET_IPGRE_BROADCAST=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=m
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_INET_XFRM_MODE_BEET=m
+CONFIG_INET_DIAG=m
+CONFIG_INET_UDP_DIAG=m
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_CONG_HSTCP=m
+CONFIG_TCP_CONG_HYBLA=m
+CONFIG_TCP_CONG_SCALABLE=m
+CONFIG_TCP_CONG_LP=m
+CONFIG_TCP_CONG_VENO=m
+CONFIG_TCP_CONG_YEAH=m
+CONFIG_TCP_CONG_ILLINOIS=m
+CONFIG_IPV6=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_MIP6=m
+CONFIG_INET6_XFRM_MODE_TRANSPORT=m
+CONFIG_INET6_XFRM_MODE_TUNNEL=m
+CONFIG_INET6_XFRM_MODE_BEET=m
+CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
+CONFIG_IPV6_SIT=m
+CONFIG_IPV6_GRE=m
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_TIMEOUT=y
+CONFIG_NF_CONNTRACK_TIMESTAMP=y
+CONFIG_NF_CT_PROTO_DCCP=m
+CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CONNTRACK_AMANDA=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
+CONFIG_NF_CONNTRACK_PPTP=m
+CONFIG_NF_CONNTRACK_SANE=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NF_CT_NETLINK_TIMEOUT=m
+CONFIG_NETFILTER_TPROXY=m
+CONFIG_NETFILTER_XT_SET=m
+CONFIG_NETFILTER_XT_TARGET_AUDIT=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
+CONFIG_NETFILTER_XT_TARGET_CT=m
+CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_TRACE=m
+CONFIG_NETFILTER_XT_TARGET_SECMARK=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
+CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_CPU=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
+CONFIG_NETFILTER_XT_MATCH_DSCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
+CONFIG_NETFILTER_XT_MATCH_IPVS=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
+CONFIG_NETFILTER_XT_MATCH_OWNER=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_TIME=m
+CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
+CONFIG_IP_VS=m
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
+CONFIG_IP_VS_LC=m
+CONFIG_IP_VS_WLC=m
+CONFIG_IP_VS_LBLC=m
+CONFIG_IP_VS_LBLCR=m
+CONFIG_IP_VS_DH=m
+CONFIG_IP_VS_SH=m
+CONFIG_IP_VS_SED=m
+CONFIG_IP_VS_NQ=m
+CONFIG_IP_VS_FTP=m
+CONFIG_IP_VS_PE_SIP=m
+CONFIG_NF_CONNTRACK_IPV4=m
+# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_ULOG=m
+CONFIG_NF_NAT_IPV4=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_NETMAP=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_CLUSTERIP=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_SECURITY=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_TARGET_HL=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_IP6_NF_SECURITY=m
+CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
+CONFIG_NET_SCTPPROBE=m
+CONFIG_RDS=m
+CONFIG_RDS_RDMA=m
+CONFIG_RDS_TCP=m
+CONFIG_L2TP=m
+CONFIG_L2TP_DEBUGFS=m
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=m
+CONFIG_L2TP_ETH=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_MULTIQ=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFB=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_DRR=m
+CONFIG_NET_SCH_MQPRIO=m
+CONFIG_NET_SCH_CHOKE=m
+CONFIG_NET_SCH_QFQ=m
+CONFIG_NET_SCH_CODEL=m
+CONFIG_NET_SCH_FQ_CODEL=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_SCH_PLUG=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_CLS_U32_PERF=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_FLOW=m
+CONFIG_NET_CLS_CGROUP=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=m
+CONFIG_NET_ACT_GACT=m
+CONFIG_GACT_PROB=y
+CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_IPT=m
+CONFIG_NET_ACT_NAT=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_ACT_SIMP=m
+CONFIG_NET_ACT_SKBEDIT=m
+CONFIG_NET_ACT_CSUM=m
+CONFIG_DNS_RESOLVER=y
+CONFIG_BPF_JIT=y
+CONFIG_NET_PKTGEN=m
+CONFIG_NET_TCPPROBE=m
+CONFIG_DEVTMPFS=y
+CONFIG_CONNECTOR=y
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_OSD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=32768
+CONFIG_BLK_DEV_XIP=y
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_ATA_OVER_ETH=m
+CONFIG_VIRTIO_BLK=y
+CONFIG_ENCLOSURE_SERVICES=m
+CONFIG_RAID_ATTRS=m
+CONFIG_SCSI=y
+CONFIG_SCSI_TGT=m
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=m
+CONFIG_CHR_DEV_OSST=m
+CONFIG_BLK_DEV_SR=m
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=m
+CONFIG_SCSI_ENCLOSURE=m
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SPI_ATTRS=m
+CONFIG_SCSI_SAS_LIBSAS=m
+CONFIG_SCSI_SRP_ATTRS=m
+CONFIG_SCSI_SRP_TGT_ATTRS=y
+CONFIG_ISCSI_TCP=m
+CONFIG_LIBFCOE=m
+CONFIG_SCSI_DEBUG=m
+CONFIG_ZFCP=y
+CONFIG_SCSI_VIRTIO=m
+CONFIG_SCSI_DH=m
+CONFIG_SCSI_DH_RDAC=m
+CONFIG_SCSI_DH_HP_SW=m
+CONFIG_SCSI_DH_EMC=m
+CONFIG_SCSI_DH_ALUA=m
+CONFIG_SCSI_OSD_INITIATOR=m
+CONFIG_SCSI_OSD_ULD=m
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_MD_FAULTY=m
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_RAID=m
+CONFIG_DM_LOG_USERSPACE=m
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_DM_MULTIPATH_QL=m
+CONFIG_DM_MULTIPATH_ST=m
+CONFIG_DM_DELAY=m
+CONFIG_DM_UEVENT=y
+CONFIG_DM_FLAKEY=m
+CONFIG_DM_VERITY=m
+CONFIG_DM_SWITCH=m
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_EQUALIZER=m
+CONFIG_IFB=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_VXLAN=m
+CONFIG_TUN=m
+CONFIG_VETH=m
+CONFIG_VIRTIO_NET=m
+CONFIG_NLMON=m
+CONFIG_VHOST_NET=m
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+CONFIG_MLX4_EN=m
+# CONFIG_NET_VENDOR_NATSEMI is not set
+CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_MPPE=m
+CONFIG_PPPOE=m
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
+CONFIG_LEGACY_PTY_COUNT=0
+CONFIG_HW_RANDOM_VIRTIO=m
+CONFIG_RAW_DRIVER=m
+CONFIG_HANGCHECK_TIMER=m
+CONFIG_TN3270_FS=y
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
+CONFIG_SOFT_WATCHDOG=m
+CONFIG_ZVM_WATCHDOG=m
+# CONFIG_HID is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_INFINIBAND=m
+CONFIG_INFINIBAND_USER_ACCESS=m
+CONFIG_MLX4_INFINIBAND=m
+CONFIG_VIRTIO_BALLOON=m
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT2_FS_XIP=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_JBD_DEBUG=y
+CONFIG_JBD2_DEBUG=y
+CONFIG_JFS_FS=m
+CONFIG_JFS_POSIX_ACL=y
+CONFIG_JFS_SECURITY=y
+CONFIG_JFS_STATISTICS=y
+CONFIG_XFS_FS=m
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_XFS_RT=y
+CONFIG_GFS2_FS=m
+CONFIG_OCFS2_FS=m
+CONFIG_BTRFS_FS=m
+CONFIG_BTRFS_FS_POSIX_ACL=y
+CONFIG_NILFS2_FS=m
+CONFIG_FANOTIFY=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V1=m
+CONFIG_QFMT_V2=m
+CONFIG_AUTOFS4_FS=m
+CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
+CONFIG_FSCACHE=m
+CONFIG_CACHEFILES=m
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_NTFS_FS=m
+CONFIG_NTFS_RW=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_HUGETLBFS=y
+CONFIG_CONFIGFS_FS=m
+CONFIG_ECRYPT_FS=m
+CONFIG_CRAMFS=m
+CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_XATTR=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_SQUASHFS_XZ=y
+CONFIG_ROMFS_FS=m
+CONFIG_NFS_FS=m
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=m
+CONFIG_NFS_SWAP=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V3_ACL=y
+CONFIG_NFSD_V4=y
+CONFIG_NFSD_V4_SECURITY_LABEL=y
+CONFIG_CIFS=m
+CONFIG_CIFS_STATS=y
+CONFIG_CIFS_STATS2=y
+CONFIG_CIFS_WEAK_PW_HASH=y
+CONFIG_CIFS_UPCALL=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+# CONFIG_CIFS_DEBUG is not set
+CONFIG_CIFS_DFS_UPCALL=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_UTF8=m
+CONFIG_DLM=m
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_FRAME_WARN=1024
+CONFIG_UNUSED_SYMBOLS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
+CONFIG_TIMER_STATS=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+CONFIG_NOTIFIER_ERROR_INJECTION=m
+CONFIG_CPU_NOTIFIER_ERROR_INJECT=m
+CONFIG_PM_NOTIFIER_ERROR_INJECT=m
+CONFIG_LATENCYTOP=y
+CONFIG_BLK_DEV_IO_TRACE=y
+# CONFIG_KPROBE_EVENT is not set
+CONFIG_LKDTM=m
+CONFIG_RBTREE_TEST=m
+CONFIG_INTERVAL_TREE_TEST=m
+CONFIG_ATOMIC64_SELFTEST=y
+# CONFIG_STRICT_DEVMEM is not set
+CONFIG_S390_PTDUMP=y
+CONFIG_ENCRYPTED_KEYS=m
+CONFIG_KEYS_DEBUG_PROC_KEYS=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
+CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_IMA=y
+CONFIG_IMA_APPRAISE=y
+CONFIG_CRYPTO_USER=m
+# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_CCM=m
+CONFIG_CRYPTO_GCM=m
+CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_VMAC=m
+CONFIG_CRYPTO_CRC32=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_RMD128=m
+CONFIG_CRYPTO_RMD160=m
+CONFIG_CRYPTO_RMD256=m
+CONFIG_CRYPTO_RMD320=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SALSA20=m
+CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_ZLIB=y
+CONFIG_CRYPTO_LZO=m
+CONFIG_CRYPTO_LZ4=m
+CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_ZCRYPT=m
+CONFIG_CRYPTO_SHA1_S390=m
+CONFIG_CRYPTO_SHA256_S390=m
+CONFIG_CRYPTO_SHA512_S390=m
+CONFIG_CRYPTO_DES_S390=m
+CONFIG_CRYPTO_AES_S390=m
+CONFIG_CRYPTO_GHASH_S390=m
+CONFIG_ASYMMETRIC_KEY_TYPE=m
+CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
+CONFIG_PUBLIC_KEY_ALGO_RSA=m
+CONFIG_X509_CERTIFICATE_PARSER=m
+CONFIG_CRC7=m
+CONFIG_CRC8=m
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_CORDIC=m
+CONFIG_CMM=m
+CONFIG_APPLDATA_BASE=y
+CONFIG_KVM=m
+CONFIG_KVM_S390_UCONTROL=y
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
new file mode 100644
index 000000000000..91087b43e8fa
--- /dev/null
+++ b/arch/s390/configs/performance_defconfig
@@ -0,0 +1,610 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_CGROUP_PERF=y
+CONFIG_BLK_CGROUP=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=m
+CONFIG_KPROBES=y
+CONFIG_JUMP_LABEL=y
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_BLK_DEV_INTEGRITY=y
+CONFIG_BLK_DEV_THROTTLING=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_IBM_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_CFQ_GROUP_IOSCHED=y
+CONFIG_DEFAULT_DEADLINE=y
+CONFIG_MARCH_Z9_109=y
+CONFIG_HZ_100=y
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_KSM=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_PCI=y
+CONFIG_HOTPLUG_PCI=y
+CONFIG_HOTPLUG_PCI_S390=y
+CONFIG_CHSC_SCH=y
+CONFIG_CRASH_DUMP=y
+CONFIG_ZFCPDUMP=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=m
+CONFIG_HIBERNATION=y
+CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=m
+CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=m
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=m
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
+CONFIG_NET_IPGRE=m
+CONFIG_NET_IPGRE_BROADCAST=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=m
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_INET_XFRM_MODE_BEET=m
+CONFIG_INET_DIAG=m
+CONFIG_INET_UDP_DIAG=m
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_CONG_HSTCP=m
+CONFIG_TCP_CONG_HYBLA=m
+CONFIG_TCP_CONG_SCALABLE=m
+CONFIG_TCP_CONG_LP=m
+CONFIG_TCP_CONG_VENO=m
+CONFIG_TCP_CONG_YEAH=m
+CONFIG_TCP_CONG_ILLINOIS=m
+CONFIG_IPV6=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_MIP6=m
+CONFIG_INET6_XFRM_MODE_TRANSPORT=m
+CONFIG_INET6_XFRM_MODE_TUNNEL=m
+CONFIG_INET6_XFRM_MODE_BEET=m
+CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
+CONFIG_IPV6_SIT=m
+CONFIG_IPV6_GRE=m
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_TIMEOUT=y
+CONFIG_NF_CONNTRACK_TIMESTAMP=y
+CONFIG_NF_CT_PROTO_DCCP=m
+CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CONNTRACK_AMANDA=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
+CONFIG_NF_CONNTRACK_PPTP=m
+CONFIG_NF_CONNTRACK_SANE=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NF_CT_NETLINK_TIMEOUT=m
+CONFIG_NETFILTER_TPROXY=m
+CONFIG_NETFILTER_XT_SET=m
+CONFIG_NETFILTER_XT_TARGET_AUDIT=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
+CONFIG_NETFILTER_XT_TARGET_CT=m
+CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_TRACE=m
+CONFIG_NETFILTER_XT_TARGET_SECMARK=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
+CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_CPU=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
+CONFIG_NETFILTER_XT_MATCH_DSCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
+CONFIG_NETFILTER_XT_MATCH_IPVS=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
+CONFIG_NETFILTER_XT_MATCH_OWNER=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_TIME=m
+CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
+CONFIG_IP_VS=m
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
+CONFIG_IP_VS_LC=m
+CONFIG_IP_VS_WLC=m
+CONFIG_IP_VS_LBLC=m
+CONFIG_IP_VS_LBLCR=m
+CONFIG_IP_VS_DH=m
+CONFIG_IP_VS_SH=m
+CONFIG_IP_VS_SED=m
+CONFIG_IP_VS_NQ=m
+CONFIG_IP_VS_FTP=m
+CONFIG_IP_VS_PE_SIP=m
+CONFIG_NF_CONNTRACK_IPV4=m
+# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_ULOG=m
+CONFIG_NF_NAT_IPV4=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_NETMAP=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_CLUSTERIP=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_SECURITY=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_TARGET_HL=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_IP6_NF_SECURITY=m
+CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
+CONFIG_NET_SCTPPROBE=m
+CONFIG_RDS=m
+CONFIG_RDS_RDMA=m
+CONFIG_RDS_TCP=m
+CONFIG_L2TP=m
+CONFIG_L2TP_DEBUGFS=m
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=m
+CONFIG_L2TP_ETH=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_MULTIQ=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFB=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_DRR=m
+CONFIG_NET_SCH_MQPRIO=m
+CONFIG_NET_SCH_CHOKE=m
+CONFIG_NET_SCH_QFQ=m
+CONFIG_NET_SCH_CODEL=m
+CONFIG_NET_SCH_FQ_CODEL=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_SCH_PLUG=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_CLS_U32_PERF=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_FLOW=m
+CONFIG_NET_CLS_CGROUP=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=m
+CONFIG_NET_ACT_GACT=m
+CONFIG_GACT_PROB=y
+CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_IPT=m
+CONFIG_NET_ACT_NAT=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_ACT_SIMP=m
+CONFIG_NET_ACT_SKBEDIT=m
+CONFIG_NET_ACT_CSUM=m
+CONFIG_DNS_RESOLVER=y
+CONFIG_BPF_JIT=y
+CONFIG_NET_PKTGEN=m
+CONFIG_NET_TCPPROBE=m
+CONFIG_DEVTMPFS=y
+CONFIG_CONNECTOR=y
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_OSD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=32768
+CONFIG_BLK_DEV_XIP=y
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_ATA_OVER_ETH=m
+CONFIG_VIRTIO_BLK=y
+CONFIG_ENCLOSURE_SERVICES=m
+CONFIG_RAID_ATTRS=m
+CONFIG_SCSI=y
+CONFIG_SCSI_TGT=m
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=m
+CONFIG_CHR_DEV_OSST=m
+CONFIG_BLK_DEV_SR=m
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=m
+CONFIG_SCSI_ENCLOSURE=m
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SPI_ATTRS=m
+CONFIG_SCSI_SAS_LIBSAS=m
+CONFIG_SCSI_SRP_ATTRS=m
+CONFIG_SCSI_SRP_TGT_ATTRS=y
+CONFIG_ISCSI_TCP=m
+CONFIG_LIBFCOE=m
+CONFIG_SCSI_DEBUG=m
+CONFIG_ZFCP=y
+CONFIG_SCSI_VIRTIO=m
+CONFIG_SCSI_DH=m
+CONFIG_SCSI_DH_RDAC=m
+CONFIG_SCSI_DH_HP_SW=m
+CONFIG_SCSI_DH_EMC=m
+CONFIG_SCSI_DH_ALUA=m
+CONFIG_SCSI_OSD_INITIATOR=m
+CONFIG_SCSI_OSD_ULD=m
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_MD_FAULTY=m
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_RAID=m
+CONFIG_DM_LOG_USERSPACE=m
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_DM_MULTIPATH_QL=m
+CONFIG_DM_MULTIPATH_ST=m
+CONFIG_DM_DELAY=m
+CONFIG_DM_UEVENT=y
+CONFIG_DM_FLAKEY=m
+CONFIG_DM_VERITY=m
+CONFIG_DM_SWITCH=m
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_EQUALIZER=m
+CONFIG_IFB=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_VXLAN=m
+CONFIG_TUN=m
+CONFIG_VETH=m
+CONFIG_VIRTIO_NET=m
+CONFIG_NLMON=m
+CONFIG_VHOST_NET=m
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+CONFIG_MLX4_EN=m
+# CONFIG_NET_VENDOR_NATSEMI is not set
+CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_MPPE=m
+CONFIG_PPPOE=m
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
+CONFIG_LEGACY_PTY_COUNT=0
+CONFIG_HW_RANDOM_VIRTIO=m
+CONFIG_RAW_DRIVER=m
+CONFIG_HANGCHECK_TIMER=m
+CONFIG_TN3270_FS=y
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
+CONFIG_SOFT_WATCHDOG=m
+CONFIG_ZVM_WATCHDOG=m
+# CONFIG_HID is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_INFINIBAND=m
+CONFIG_INFINIBAND_USER_ACCESS=m
+CONFIG_MLX4_INFINIBAND=m
+CONFIG_VIRTIO_BALLOON=m
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT2_FS_XIP=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_JBD_DEBUG=y
+CONFIG_JBD2_DEBUG=y
+CONFIG_JFS_FS=m
+CONFIG_JFS_POSIX_ACL=y
+CONFIG_JFS_SECURITY=y
+CONFIG_JFS_STATISTICS=y
+CONFIG_XFS_FS=m
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_XFS_RT=y
+CONFIG_GFS2_FS=m
+CONFIG_OCFS2_FS=m
+CONFIG_BTRFS_FS=m
+CONFIG_BTRFS_FS_POSIX_ACL=y
+CONFIG_NILFS2_FS=m
+CONFIG_FANOTIFY=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V1=m
+CONFIG_QFMT_V2=m
+CONFIG_AUTOFS4_FS=m
+CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
+CONFIG_FSCACHE=m
+CONFIG_CACHEFILES=m
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_NTFS_FS=m
+CONFIG_NTFS_RW=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_HUGETLBFS=y
+CONFIG_CONFIGFS_FS=m
+CONFIG_ECRYPT_FS=m
+CONFIG_CRAMFS=m
+CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_XATTR=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_SQUASHFS_XZ=y
+CONFIG_ROMFS_FS=m
+CONFIG_NFS_FS=m
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=m
+CONFIG_NFS_SWAP=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V3_ACL=y
+CONFIG_NFSD_V4=y
+CONFIG_NFSD_V4_SECURITY_LABEL=y
+CONFIG_CIFS=m
+CONFIG_CIFS_STATS=y
+CONFIG_CIFS_STATS2=y
+CONFIG_CIFS_WEAK_PW_HASH=y
+CONFIG_CIFS_UPCALL=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+# CONFIG_CIFS_DEBUG is not set
+CONFIG_CIFS_DFS_UPCALL=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_UTF8=m
+CONFIG_DLM=m
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_FRAME_WARN=1024
+CONFIG_UNUSED_SYMBOLS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_TIMER_STATS=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+CONFIG_LATENCYTOP=y
+CONFIG_BLK_DEV_IO_TRACE=y
+# CONFIG_KPROBE_EVENT is not set
+CONFIG_LKDTM=m
+CONFIG_ATOMIC64_SELFTEST=y
+# CONFIG_STRICT_DEVMEM is not set
+CONFIG_S390_PTDUMP=y
+CONFIG_ENCRYPTED_KEYS=m
+CONFIG_KEYS_DEBUG_PROC_KEYS=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
+CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_IMA=y
+CONFIG_IMA_APPRAISE=y
+CONFIG_CRYPTO_USER=m
+# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_CCM=m
+CONFIG_CRYPTO_GCM=m
+CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_VMAC=m
+CONFIG_CRYPTO_CRC32=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_RMD128=m
+CONFIG_CRYPTO_RMD160=m
+CONFIG_CRYPTO_RMD256=m
+CONFIG_CRYPTO_RMD320=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SALSA20=m
+CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_ZLIB=y
+CONFIG_CRYPTO_LZO=m
+CONFIG_CRYPTO_LZ4=m
+CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_ZCRYPT=m
+CONFIG_CRYPTO_SHA1_S390=m
+CONFIG_CRYPTO_SHA256_S390=m
+CONFIG_CRYPTO_SHA512_S390=m
+CONFIG_CRYPTO_DES_S390=m
+CONFIG_CRYPTO_AES_S390=m
+CONFIG_CRYPTO_GHASH_S390=m
+CONFIG_ASYMMETRIC_KEY_TYPE=m
+CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
+CONFIG_PUBLIC_KEY_ALGO_RSA=m
+CONFIG_X509_CERTIFICATE_PARSER=m
+CONFIG_CRC7=m
+CONFIG_CRC8=m
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_CORDIC=m
+CONFIG_CMM=m
+CONFIG_APPLDATA_BASE=y
+CONFIG_KVM=m
+CONFIG_KVM_S390_UCONTROL=y
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
new file mode 100644
index 000000000000..d725c4d956e4
--- /dev/null
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -0,0 +1,86 @@
+# CONFIG_SWAP is not set
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_IBM_PARTITION=y
+CONFIG_DEFAULT_DEADLINE=y
+CONFIG_MARCH_Z9_109=y
+# CONFIG_COMPAT is not set
+CONFIG_NR_CPUS=2
+# CONFIG_HOTPLUG_CPU is not set
+CONFIG_HZ_100=y
+# CONFIG_COMPACTION is not set
+# CONFIG_MIGRATION is not set
+# CONFIG_CHECK_STACK is not set
+# CONFIG_CHSC_SCH is not set
+# CONFIG_SCM_BUS is not set
+CONFIG_CRASH_DUMP=y
+CONFIG_ZFCPDUMP=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_SECCOMP is not set
+# CONFIG_IUCV is not set
+CONFIG_ATM=y
+CONFIG_ATM_LANE=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+# CONFIG_BLK_DEV_XPRAM is not set
+# CONFIG_DCSSBLK is not set
+# CONFIG_DASD is not set
+CONFIG_ENCLOSURE_SERVICES=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_SCSI_ENCLOSURE=y
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SRP_ATTRS=y
+CONFIG_ZFCP=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+# CONFIG_HVC_IUCV is not set
+CONFIG_RAW_DRIVER=y
+# CONFIG_SCLP_ASYNC is not set
+# CONFIG_HMC_DRV is not set
+# CONFIG_S390_TAPE is not set
+# CONFIG_VMCP is not set
+# CONFIG_MONWRITER is not set
+# CONFIG_S390_VMUR is not set
+# CONFIG_HID is not set
+CONFIG_MEMSTICK=y
+CONFIG_MEMSTICK_DEBUG=y
+CONFIG_MEMSTICK_UNSAFE_RESUME=y
+CONFIG_MSPRO_BLOCK=y
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+# CONFIG_INOTIFY_USER is not set
+CONFIG_CONFIGFS_FS=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+# CONFIG_FTRACE is not set
+# CONFIG_STRICT_DEVMEM is not set
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+# CONFIG_PFAULT is not set
+# CONFIG_S390_HYPFS_FS is not set
+# CONFIG_VIRTUALIZATION is not set
+# CONFIG_S390_GUEST is not set
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index b4dbade8ca24..46cae138ece2 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -725,6 +725,8 @@ static struct crypto_alg xts_aes_alg = {
}
};
+static int xts_aes_alg_reg;
+
static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
@@ -846,6 +848,8 @@ static struct crypto_alg ctr_aes_alg = {
}
};
+static int ctr_aes_alg_reg;
+
static int __init aes_s390_init(void)
{
int ret;
@@ -884,6 +888,7 @@ static int __init aes_s390_init(void)
ret = crypto_register_alg(&xts_aes_alg);
if (ret)
goto xts_aes_err;
+ xts_aes_alg_reg = 1;
}
if (crypt_s390_func_available(KMCTR_AES_128_ENCRYPT,
@@ -902,6 +907,7 @@ static int __init aes_s390_init(void)
free_page((unsigned long) ctrblk);
goto ctr_aes_err;
}
+ ctr_aes_alg_reg = 1;
}
out:
@@ -921,9 +927,12 @@ aes_err:
static void __exit aes_s390_fini(void)
{
- crypto_unregister_alg(&ctr_aes_alg);
- free_page((unsigned long) ctrblk);
- crypto_unregister_alg(&xts_aes_alg);
+ if (ctr_aes_alg_reg) {
+ crypto_unregister_alg(&ctr_aes_alg);
+ free_page((unsigned long) ctrblk);
+ }
+ if (xts_aes_alg_reg)
+ crypto_unregister_alg(&xts_aes_alg);
crypto_unregister_alg(&cbc_aes_alg);
crypto_unregister_alg(&ecb_aes_alg);
crypto_unregister_alg(&aes_alg);
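The aes_s390.c hunk above records whether the optional XTS and CTR algorithms were actually registered, so module exit only tears down what init set up. Below is a minimal user-space sketch of that register-once/unregister-conditionally pattern; the names are illustrative and the crypto API calls are only indicated in comments, this is not the kernel code itself.

#include <stdbool.h>
#include <stdio.h>

static bool optional_alg_registered;

static int register_optional_alg(bool hw_supported)
{
        if (!hw_supported)
                return 0;               /* nothing registered, nothing to undo later */
        /* a real driver would call crypto_register_alg() here */
        optional_alg_registered = true;
        return 0;
}

static void unregister_optional_alg(void)
{
        if (!optional_alg_registered)
                return;                 /* init never registered it: skip teardown */
        /* a real driver would call crypto_unregister_alg() here */
        optional_alg_registered = false;
}

int main(void)
{
        register_optional_alg(false);   /* e.g. the CPACF function is missing */
        unregister_optional_alg();      /* safe: teardown is skipped */
        printf("registered flag: %d\n", optional_alg_registered);
        return 0;
}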
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index d204c65bf722..33f57514f424 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -38,13 +38,14 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_IBM_PARTITION=y
-# CONFIG_EFI_PARTITION is not set
CONFIG_DEFAULT_DEADLINE=y
+CONFIG_MARCH_Z196=y
CONFIG_HZ_100=y
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_CMA=y
CONFIG_CRASH_DUMP=y
CONFIG_BINFMT_MISC=m
CONFIG_HIBERNATION=y
@@ -152,6 +153,7 @@ CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
+CONFIG_CRYPTO_CRCT10DIF=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD128=m
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index c797832daa5f..fa9aaf7144b7 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -19,21 +19,50 @@
#define ATOMIC_INIT(i) { (i) }
-#define __CS_LOOP(ptr, op_val, op_string) ({ \
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+#define __ATOMIC_OR "lao"
+#define __ATOMIC_AND "lan"
+#define __ATOMIC_ADD "laa"
+
+#define __ATOMIC_LOOP(ptr, op_val, op_string) \
+({ \
+ int old_val; \
+ \
+ typecheck(atomic_t *, ptr); \
+ asm volatile( \
+ op_string " %0,%2,%1\n" \
+ : "=d" (old_val), "+Q" ((ptr)->counter) \
+ : "d" (op_val) \
+ : "cc", "memory"); \
+ old_val; \
+})
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+#define __ATOMIC_OR "or"
+#define __ATOMIC_AND "nr"
+#define __ATOMIC_ADD "ar"
+
+#define __ATOMIC_LOOP(ptr, op_val, op_string) \
+({ \
int old_val, new_val; \
+ \
+ typecheck(atomic_t *, ptr); \
asm volatile( \
" l %0,%2\n" \
"0: lr %1,%0\n" \
op_string " %1,%3\n" \
" cs %0,%1,%2\n" \
" jl 0b" \
- : "=&d" (old_val), "=&d" (new_val), \
- "=Q" (((atomic_t *)(ptr))->counter) \
- : "d" (op_val), "Q" (((atomic_t *)(ptr))->counter) \
+ : "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
+ : "d" (op_val) \
: "cc", "memory"); \
- new_val; \
+ old_val; \
})
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
static inline int atomic_read(const atomic_t *v)
{
int c;
@@ -53,32 +82,45 @@ static inline void atomic_set(atomic_t *v, int i)
static inline int atomic_add_return(int i, atomic_t *v)
{
- return __CS_LOOP(v, i, "ar");
+ return __ATOMIC_LOOP(v, i, __ATOMIC_ADD) + i;
}
-#define atomic_add(_i, _v) atomic_add_return(_i, _v)
-#define atomic_add_negative(_i, _v) (atomic_add_return(_i, _v) < 0)
-#define atomic_inc(_v) atomic_add_return(1, _v)
-#define atomic_inc_return(_v) atomic_add_return(1, _v)
-#define atomic_inc_and_test(_v) (atomic_add_return(1, _v) == 0)
-static inline int atomic_sub_return(int i, atomic_t *v)
+static inline void atomic_add(int i, atomic_t *v)
{
- return __CS_LOOP(v, i, "sr");
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+ if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
+ asm volatile(
+ "asi %0,%1\n"
+ : "+Q" (v->counter)
+ : "i" (i)
+ : "cc", "memory");
+ } else {
+ atomic_add_return(i, v);
+ }
+#else
+ atomic_add_return(i, v);
+#endif
}
-#define atomic_sub(_i, _v) atomic_sub_return(_i, _v)
+
+#define atomic_add_negative(_i, _v) (atomic_add_return(_i, _v) < 0)
+#define atomic_inc(_v) atomic_add(1, _v)
+#define atomic_inc_return(_v) atomic_add_return(1, _v)
+#define atomic_inc_and_test(_v) (atomic_add_return(1, _v) == 0)
+#define atomic_sub(_i, _v) atomic_add(-(int)(_i), _v)
+#define atomic_sub_return(_i, _v) atomic_add_return(-(int)(_i), _v)
#define atomic_sub_and_test(_i, _v) (atomic_sub_return(_i, _v) == 0)
-#define atomic_dec(_v) atomic_sub_return(1, _v)
+#define atomic_dec(_v) atomic_sub(1, _v)
#define atomic_dec_return(_v) atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v) (atomic_sub_return(1, _v) == 0)
-static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
- __CS_LOOP(v, ~mask, "nr");
+ __ATOMIC_LOOP(v, ~mask, __ATOMIC_AND);
}
-static inline void atomic_set_mask(unsigned long mask, atomic_t *v)
+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
- __CS_LOOP(v, mask, "or");
+ __ATOMIC_LOOP(v, mask, __ATOMIC_OR);
}
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
@@ -87,8 +129,8 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
asm volatile(
" cs %0,%2,%1"
- : "+d" (old), "=Q" (v->counter)
- : "d" (new), "Q" (v->counter)
+ : "+d" (old), "+Q" (v->counter)
+ : "d" (new)
: "cc", "memory");
return old;
}
@@ -109,27 +151,56 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
}
-#undef __CS_LOOP
+#undef __ATOMIC_LOOP
#define ATOMIC64_INIT(i) { (i) }
#ifdef CONFIG_64BIT
-#define __CSG_LOOP(ptr, op_val, op_string) ({ \
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+#define __ATOMIC64_OR "laog"
+#define __ATOMIC64_AND "lang"
+#define __ATOMIC64_ADD "laag"
+
+#define __ATOMIC64_LOOP(ptr, op_val, op_string) \
+({ \
+ long long old_val; \
+ \
+ typecheck(atomic64_t *, ptr); \
+ asm volatile( \
+ op_string " %0,%2,%1\n" \
+ : "=d" (old_val), "+Q" ((ptr)->counter) \
+ : "d" (op_val) \
+ : "cc", "memory"); \
+ old_val; \
+})
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+#define __ATOMIC64_OR "ogr"
+#define __ATOMIC64_AND "ngr"
+#define __ATOMIC64_ADD "agr"
+
+#define __ATOMIC64_LOOP(ptr, op_val, op_string) \
+({ \
long long old_val, new_val; \
+ \
+ typecheck(atomic64_t *, ptr); \
asm volatile( \
" lg %0,%2\n" \
"0: lgr %1,%0\n" \
op_string " %1,%3\n" \
" csg %0,%1,%2\n" \
" jl 0b" \
- : "=&d" (old_val), "=&d" (new_val), \
- "=Q" (((atomic_t *)(ptr))->counter) \
- : "d" (op_val), "Q" (((atomic_t *)(ptr))->counter) \
+ : "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
+ : "d" (op_val) \
: "cc", "memory"); \
- new_val; \
+ old_val; \
})
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
static inline long long atomic64_read(const atomic64_t *v)
{
long long c;
@@ -149,22 +220,17 @@ static inline void atomic64_set(atomic64_t *v, long long i)
static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
- return __CSG_LOOP(v, i, "agr");
-}
-
-static inline long long atomic64_sub_return(long long i, atomic64_t *v)
-{
- return __CSG_LOOP(v, i, "sgr");
+ return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD) + i;
}
static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
{
- __CSG_LOOP(v, ~mask, "ngr");
+ __ATOMIC64_LOOP(v, ~mask, __ATOMIC64_AND);
}
static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
{
- __CSG_LOOP(v, mask, "ogr");
+ __ATOMIC64_LOOP(v, mask, __ATOMIC64_OR);
}
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
@@ -174,13 +240,13 @@ static inline long long atomic64_cmpxchg(atomic64_t *v,
{
asm volatile(
" csg %0,%2,%1"
- : "+d" (old), "=Q" (v->counter)
- : "d" (new), "Q" (v->counter)
+ : "+d" (old), "+Q" (v->counter)
+ : "d" (new)
: "cc", "memory");
return old;
}
-#undef __CSG_LOOP
+#undef __ATOMIC64_LOOP
#else /* CONFIG_64BIT */
@@ -216,8 +282,8 @@ static inline long long atomic64_xchg(atomic64_t *v, long long new)
" lm %0,%N0,%1\n"
"0: cds %0,%2,%1\n"
" jl 0b\n"
- : "=&d" (rp_old), "=Q" (v->counter)
- : "d" (rp_new), "Q" (v->counter)
+ : "=&d" (rp_old), "+Q" (v->counter)
+ : "d" (rp_new)
: "cc");
return rp_old.pair;
}
@@ -230,8 +296,8 @@ static inline long long atomic64_cmpxchg(atomic64_t *v,
asm volatile(
" cds %0,%2,%1"
- : "+&d" (rp_old), "=Q" (v->counter)
- : "d" (rp_new), "Q" (v->counter)
+ : "+&d" (rp_old), "+Q" (v->counter)
+ : "d" (rp_new)
: "cc");
return rp_old.pair;
}
@@ -248,17 +314,6 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
return new;
}
-static inline long long atomic64_sub_return(long long i, atomic64_t *v)
-{
- long long old, new;
-
- do {
- old = atomic64_read(v);
- new = old - i;
- } while (atomic64_cmpxchg(v, old, new) != old);
- return new;
-}
-
static inline void atomic64_set_mask(unsigned long long mask, atomic64_t *v)
{
long long old, new;
@@ -281,7 +336,24 @@ static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v)
#endif /* CONFIG_64BIT */
-static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+static inline void atomic64_add(long long i, atomic64_t *v)
+{
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+ if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
+ asm volatile(
+ "agsi %0,%1\n"
+ : "+Q" (v->counter)
+ : "i" (i)
+ : "cc", "memory");
+ } else {
+ atomic64_add_return(i, v);
+ }
+#else
+ atomic64_add_return(i, v);
+#endif
+}
+
+static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
{
long long c, old;
@@ -289,7 +361,7 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
for (;;) {
if (unlikely(c == u))
break;
- old = atomic64_cmpxchg(v, c, c + a);
+ old = atomic64_cmpxchg(v, c, c + i);
if (likely(old == c))
break;
c = old;
@@ -314,14 +386,14 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
return dec;
}
-#define atomic64_add(_i, _v) atomic64_add_return(_i, _v)
#define atomic64_add_negative(_i, _v) (atomic64_add_return(_i, _v) < 0)
-#define atomic64_inc(_v) atomic64_add_return(1, _v)
+#define atomic64_inc(_v) atomic64_add(1, _v)
#define atomic64_inc_return(_v) atomic64_add_return(1, _v)
#define atomic64_inc_and_test(_v) (atomic64_add_return(1, _v) == 0)
-#define atomic64_sub(_i, _v) atomic64_sub_return(_i, _v)
+#define atomic64_sub_return(_i, _v) atomic64_add_return(-(long long)(_i), _v)
+#define atomic64_sub(_i, _v) atomic64_add(-(long long)(_i), _v)
#define atomic64_sub_and_test(_i, _v) (atomic64_sub_return(_i, _v) == 0)
-#define atomic64_dec(_v) atomic64_sub_return(1, _v)
+#define atomic64_dec(_v) atomic64_sub(1, _v)
#define atomic64_dec_return(_v) atomic64_sub_return(1, _v)
#define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
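The atomic.h rework makes __ATOMIC_LOOP/__ATOMIC64_LOOP return the old counter value (directly from the z196 load-and-add instructions, or from the compare-and-swap loop), so atomic_add_return() computes old + i and the sub/dec helpers become negated adds. The following user-space model shows the same semantics with the GCC __atomic builtins instead of s390 assembly; it is a sketch, not the kernel implementation.

#include <stdio.h>

static int counter;

static int fetch_add(int *ptr, int val)
{
        return __atomic_fetch_add(ptr, val, __ATOMIC_SEQ_CST); /* returns old value */
}

static int add_return(int *ptr, int val)
{
        return fetch_add(ptr, val) + val;                       /* returns new value */
}

int main(void)
{
        counter = 40;
        printf("old=%d\n", fetch_add(&counter, 1));     /* 40 */
        printf("new=%d\n", add_return(&counter, 1));    /* 42 */
        printf("sub=%d\n", add_return(&counter, -2));   /* 40: sub is a negated add */
        return 0;
}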
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 10135a38673c..6e6ad0680829 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -1,10 +1,40 @@
/*
- * S390 version
- * Copyright IBM Corp. 1999
- * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ * Copyright IBM Corp. 1999,2013
*
- * Derived from "include/asm-i386/bitops.h"
- * Copyright (C) 1992, Linus Torvalds
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
+ *
+ * The description below was taken in large parts from the powerpc
+ * bitops header file:
+ * Within a word, bits are numbered LSB first. Lots of places make
+ * this assumption by directly testing bits with (val & (1<<nr)).
+ * This can cause confusion for large (> 1 word) bitmaps on a
+ * big-endian system because, unlike little endian, the number of each
+ * bit depends on the word size.
+ *
+ * The bitop functions are defined to work on unsigned longs, so for an
+ * s390x system the bits end up numbered:
+ * |63..............0|127............64|191...........128|255...........192|
+ * and on s390:
+ * |31.....0|63....32|95....64|127...96|159..128|191..160|223..192|255..224|
+ *
+ * There are a few little-endian macros used mostly for filesystem
+ * bitmaps, these work on similar bit array layouts, but
+ * byte-oriented:
+ * |7...0|15...8|23...16|31...24|39...32|47...40|55...48|63...56|
+ *
+ * The main difference is that bit 3-5 (64b) or 3-4 (32b) in the bit
+ * number field needs to be reversed compared to the big-endian bit
+ * fields. This can be achieved by XOR with 0x38 (64b) or 0x18 (32b).
+ *
+ * We also have special functions which work with an MSB0 encoding:
+ * on an s390x system the bits are numbered:
+ * |0..............63|64............127|128...........191|192...........255|
+ * and on s390:
+ * |0.....31|32....63|64....95|96...127|128..159|160..191|192..223|224..255|
+ *
+ * The main difference is that bit 0-5 (64b) or 0-4 (32b) in the bit
+ * number field needs to be reversed compared to the LSB0 encoded bit
+ * fields. This can be achieved by XOR with 0x3f (64b) or 0x1f (32b).
*
*/
@@ -15,556 +45,348 @@
#error only <linux/bitops.h> can be included directly
#endif
+#include <linux/typecheck.h>
#include <linux/compiler.h>
-/*
- * 32 bit bitops format:
- * bit 0 is the LSB of *addr; bit 31 is the MSB of *addr;
- * bit 32 is the LSB of *(addr+4). That combined with the
- * big endian byte order on S390 give the following bit
- * order in memory:
- * 1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10 \
- * 0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
- * after that follows the next long with bit numbers
- * 3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
- * 2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
- * The reason for this bit ordering is the fact that
- * in the architecture independent code bits operations
- * of the form "flags |= (1 << bitnr)" are used INTERMIXED
- * with operation of the form "set_bit(bitnr, flags)".
- *
- * 64 bit bitops format:
- * bit 0 is the LSB of *addr; bit 63 is the MSB of *addr;
- * bit 64 is the LSB of *(addr+8). That combined with the
- * big endian byte order on S390 give the following bit
- * order in memory:
- * 3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
- * 2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
- * 1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10
- * 0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
- * after that follows the next long with bit numbers
- * 7f 7e 7d 7c 7b 7a 79 78 77 76 75 74 73 72 71 70
- * 6f 6e 6d 6c 6b 6a 69 68 67 66 65 64 63 62 61 60
- * 5f 5e 5d 5c 5b 5a 59 58 57 56 55 54 53 52 51 50
- * 4f 4e 4d 4c 4b 4a 49 48 47 46 45 44 43 42 41 40
- * The reason for this bit ordering is the fact that
- * in the architecture independent code bits operations
- * of the form "flags |= (1 << bitnr)" are used INTERMIXED
- * with operation of the form "set_bit(bitnr, flags)".
- */
-
-/* bitmap tables from arch/s390/kernel/bitmap.c */
-extern const char _oi_bitmap[];
-extern const char _ni_bitmap[];
-extern const char _zb_findmap[];
-extern const char _sb_findmap[];
-
#ifndef CONFIG_64BIT
#define __BITOPS_OR "or"
#define __BITOPS_AND "nr"
#define __BITOPS_XOR "xr"
-#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \
+#define __BITOPS_LOOP(__addr, __val, __op_string) \
+({ \
+ unsigned long __old, __new; \
+ \
+ typecheck(unsigned long *, (__addr)); \
asm volatile( \
" l %0,%2\n" \
"0: lr %1,%0\n" \
__op_string " %1,%3\n" \
" cs %0,%1,%2\n" \
" jl 0b" \
- : "=&d" (__old), "=&d" (__new), \
- "=Q" (*(unsigned long *) __addr) \
- : "d" (__val), "Q" (*(unsigned long *) __addr) \
- : "cc");
+ : "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\
+ : "d" (__val) \
+ : "cc"); \
+ __old; \
+})
#else /* CONFIG_64BIT */
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+#define __BITOPS_OR "laog"
+#define __BITOPS_AND "lang"
+#define __BITOPS_XOR "laxg"
+
+#define __BITOPS_LOOP(__addr, __val, __op_string) \
+({ \
+ unsigned long __old; \
+ \
+ typecheck(unsigned long *, (__addr)); \
+ asm volatile( \
+ __op_string " %0,%2,%1\n" \
+ : "=d" (__old), "+Q" (*(__addr)) \
+ : "d" (__val) \
+ : "cc"); \
+ __old; \
+})
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
#define __BITOPS_OR "ogr"
#define __BITOPS_AND "ngr"
#define __BITOPS_XOR "xgr"
-#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \
+#define __BITOPS_LOOP(__addr, __val, __op_string) \
+({ \
+ unsigned long __old, __new; \
+ \
+ typecheck(unsigned long *, (__addr)); \
asm volatile( \
" lg %0,%2\n" \
"0: lgr %1,%0\n" \
__op_string " %1,%3\n" \
" csg %0,%1,%2\n" \
" jl 0b" \
- : "=&d" (__old), "=&d" (__new), \
- "=Q" (*(unsigned long *) __addr) \
- : "d" (__val), "Q" (*(unsigned long *) __addr) \
- : "cc");
+ : "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\
+ : "d" (__val) \
+ : "cc"); \
+ __old; \
+})
+
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
#endif /* CONFIG_64BIT */
#define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)
-#ifdef CONFIG_SMP
-/*
- * SMP safe set_bit routine based on compare and swap (CS)
- */
-static inline void set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
+static inline unsigned long *
+__bitops_word(unsigned long nr, volatile unsigned long *ptr)
+{
+ unsigned long addr;
+
+ addr = (unsigned long)ptr + ((nr ^ (nr & (BITS_PER_LONG - 1))) >> 3);
+ return (unsigned long *)addr;
+}
+
+static inline unsigned char *
+__bitops_byte(unsigned long nr, volatile unsigned long *ptr)
{
- unsigned long addr, old, new, mask;
+ return ((unsigned char *)ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
+}
+
+static inline void set_bit(unsigned long nr, volatile unsigned long *ptr)
+{
+ unsigned long *addr = __bitops_word(nr, ptr);
+ unsigned long mask;
- addr = (unsigned long) ptr;
- /* calculate address for CS */
- addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
- /* make OR mask */
+#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
+ if (__builtin_constant_p(nr)) {
+ unsigned char *caddr = __bitops_byte(nr, ptr);
+
+ asm volatile(
+ "oi %0,%b1\n"
+ : "+Q" (*caddr)
+ : "i" (1 << (nr & 7))
+ : "cc");
+ return;
+ }
+#endif
mask = 1UL << (nr & (BITS_PER_LONG - 1));
- /* Do the atomic update. */
- __BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
+ __BITOPS_LOOP(addr, mask, __BITOPS_OR);
}
-/*
- * SMP safe clear_bit routine based on compare and swap (CS)
- */
-static inline void clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
+static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
- unsigned long addr, old, new, mask;
+ unsigned long *addr = __bitops_word(nr, ptr);
+ unsigned long mask;
+
+#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
+ if (__builtin_constant_p(nr)) {
+ unsigned char *caddr = __bitops_byte(nr, ptr);
- addr = (unsigned long) ptr;
- /* calculate address for CS */
- addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
- /* make AND mask */
+ asm volatile(
+ "ni %0,%b1\n"
+ : "+Q" (*caddr)
+ : "i" (~(1 << (nr & 7)))
+ : "cc");
+ return;
+ }
+#endif
mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
- /* Do the atomic update. */
- __BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
+ __BITOPS_LOOP(addr, mask, __BITOPS_AND);
}
-/*
- * SMP safe change_bit routine based on compare and swap (CS)
- */
-static inline void change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
+static inline void change_bit(unsigned long nr, volatile unsigned long *ptr)
{
- unsigned long addr, old, new, mask;
+ unsigned long *addr = __bitops_word(nr, ptr);
+ unsigned long mask;
+
+#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
+ if (__builtin_constant_p(nr)) {
+ unsigned char *caddr = __bitops_byte(nr, ptr);
- addr = (unsigned long) ptr;
- /* calculate address for CS */
- addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
- /* make XOR mask */
+ asm volatile(
+ "xi %0,%b1\n"
+ : "+Q" (*caddr)
+ : "i" (1 << (nr & 7))
+ : "cc");
+ return;
+ }
+#endif
mask = 1UL << (nr & (BITS_PER_LONG - 1));
- /* Do the atomic update. */
- __BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
+ __BITOPS_LOOP(addr, mask, __BITOPS_XOR);
}
-/*
- * SMP safe test_and_set_bit routine based on compare and swap (CS)
- */
static inline int
-test_and_set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
+test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
{
- unsigned long addr, old, new, mask;
+ unsigned long *addr = __bitops_word(nr, ptr);
+ unsigned long old, mask;
- addr = (unsigned long) ptr;
- /* calculate address for CS */
- addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
- /* make OR/test mask */
mask = 1UL << (nr & (BITS_PER_LONG - 1));
- /* Do the atomic update. */
- __BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
+ old = __BITOPS_LOOP(addr, mask, __BITOPS_OR);
barrier();
return (old & mask) != 0;
}
-/*
- * SMP safe test_and_clear_bit routine based on compare and swap (CS)
- */
static inline int
-test_and_clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
+test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
- unsigned long addr, old, new, mask;
+ unsigned long *addr = __bitops_word(nr, ptr);
+ unsigned long old, mask;
- addr = (unsigned long) ptr;
- /* calculate address for CS */
- addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
- /* make AND/test mask */
mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
- /* Do the atomic update. */
- __BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
+ old = __BITOPS_LOOP(addr, mask, __BITOPS_AND);
barrier();
- return (old ^ new) != 0;
+ return (old & ~mask) != 0;
}
-/*
- * SMP safe test_and_change_bit routine based on compare and swap (CS)
- */
static inline int
-test_and_change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
+test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
{
- unsigned long addr, old, new, mask;
+ unsigned long *addr = __bitops_word(nr, ptr);
+ unsigned long old, mask;
- addr = (unsigned long) ptr;
- /* calculate address for CS */
- addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
- /* make XOR/test mask */
mask = 1UL << (nr & (BITS_PER_LONG - 1));
- /* Do the atomic update. */
- __BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
+ old = __BITOPS_LOOP(addr, mask, __BITOPS_XOR);
barrier();
return (old & mask) != 0;
}
-#endif /* CONFIG_SMP */
-/*
- * fast, non-SMP set_bit routine
- */
static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr)
{
- unsigned long addr;
-
- addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
- asm volatile(
- " oc %O0(1,%R0),%1"
- : "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc");
-}
-
-static inline void
-__constant_set_bit(const unsigned long nr, volatile unsigned long *ptr)
-{
- unsigned long addr;
+ unsigned char *addr = __bitops_byte(nr, ptr);
- addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
- *(unsigned char *) addr |= 1 << (nr & 7);
+ *addr |= 1 << (nr & 7);
}
-#define set_bit_simple(nr,addr) \
-(__builtin_constant_p((nr)) ? \
- __constant_set_bit((nr),(addr)) : \
- __set_bit((nr),(addr)) )
-
-/*
- * fast, non-SMP clear_bit routine
- */
static inline void
__clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
- unsigned long addr;
-
- addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
- asm volatile(
- " nc %O0(1,%R0),%1"
- : "+Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) : "cc");
-}
-
-static inline void
-__constant_clear_bit(const unsigned long nr, volatile unsigned long *ptr)
-{
- unsigned long addr;
+ unsigned char *addr = __bitops_byte(nr, ptr);
- addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
- *(unsigned char *) addr &= ~(1 << (nr & 7));
+ *addr &= ~(1 << (nr & 7));
}
-#define clear_bit_simple(nr,addr) \
-(__builtin_constant_p((nr)) ? \
- __constant_clear_bit((nr),(addr)) : \
- __clear_bit((nr),(addr)) )
-
-/*
- * fast, non-SMP change_bit routine
- */
static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr)
{
- unsigned long addr;
-
- addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
- asm volatile(
- " xc %O0(1,%R0),%1"
- : "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc");
-}
-
-static inline void
-__constant_change_bit(const unsigned long nr, volatile unsigned long *ptr)
-{
- unsigned long addr;
+ unsigned char *addr = __bitops_byte(nr, ptr);
- addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
- *(unsigned char *) addr ^= 1 << (nr & 7);
+ *addr ^= 1 << (nr & 7);
}
-#define change_bit_simple(nr,addr) \
-(__builtin_constant_p((nr)) ? \
- __constant_change_bit((nr),(addr)) : \
- __change_bit((nr),(addr)) )
-
-/*
- * fast, non-SMP test_and_set_bit routine
- */
static inline int
-test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr)
+__test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
{
- unsigned long addr;
+ unsigned char *addr = __bitops_byte(nr, ptr);
unsigned char ch;
- addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
- ch = *(unsigned char *) addr;
- asm volatile(
- " oc %O0(1,%R0),%1"
- : "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7])
- : "cc", "memory");
+ ch = *addr;
+ *addr |= 1 << (nr & 7);
return (ch >> (nr & 7)) & 1;
}
-#define __test_and_set_bit(X,Y) test_and_set_bit_simple(X,Y)
-/*
- * fast, non-SMP test_and_clear_bit routine
- */
static inline int
-test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr)
+__test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
- unsigned long addr;
+ unsigned char *addr = __bitops_byte(nr, ptr);
unsigned char ch;
- addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
- ch = *(unsigned char *) addr;
- asm volatile(
- " nc %O0(1,%R0),%1"
- : "+Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7])
- : "cc", "memory");
+ ch = *addr;
+ *addr &= ~(1 << (nr & 7));
return (ch >> (nr & 7)) & 1;
}
-#define __test_and_clear_bit(X,Y) test_and_clear_bit_simple(X,Y)
-/*
- * fast, non-SMP test_and_change_bit routine
- */
static inline int
-test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr)
+__test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
{
- unsigned long addr;
+ unsigned char *addr = __bitops_byte(nr, ptr);
unsigned char ch;
- addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
- ch = *(unsigned char *) addr;
- asm volatile(
- " xc %O0(1,%R0),%1"
- : "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7])
- : "cc", "memory");
+ ch = *addr;
+ *addr ^= 1 << (nr & 7);
return (ch >> (nr & 7)) & 1;
}
-#define __test_and_change_bit(X,Y) test_and_change_bit_simple(X,Y)
-
-#ifdef CONFIG_SMP
-#define set_bit set_bit_cs
-#define clear_bit clear_bit_cs
-#define change_bit change_bit_cs
-#define test_and_set_bit test_and_set_bit_cs
-#define test_and_clear_bit test_and_clear_bit_cs
-#define test_and_change_bit test_and_change_bit_cs
-#else
-#define set_bit set_bit_simple
-#define clear_bit clear_bit_simple
-#define change_bit change_bit_simple
-#define test_and_set_bit test_and_set_bit_simple
-#define test_and_clear_bit test_and_clear_bit_simple
-#define test_and_change_bit test_and_change_bit_simple
-#endif
-
-
-/*
- * This routine doesn't need to be atomic.
- */
-static inline int __test_bit(unsigned long nr, const volatile unsigned long *ptr)
+static inline int test_bit(unsigned long nr, const volatile unsigned long *ptr)
{
- unsigned long addr;
- unsigned char ch;
-
- addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
- ch = *(volatile unsigned char *) addr;
- return (ch >> (nr & 7)) & 1;
-}
+ const volatile unsigned char *addr;
-static inline int
-__constant_test_bit(unsigned long nr, const volatile unsigned long *addr) {
- return (((volatile char *) addr)
- [(nr^(BITS_PER_LONG-8))>>3] & (1<<(nr&7))) != 0;
+ addr = ((const volatile unsigned char *)ptr);
+ addr += (nr ^ (BITS_PER_LONG - 8)) >> 3;
+ return (*addr >> (nr & 7)) & 1;
}
-#define test_bit(nr,addr) \
-(__builtin_constant_p((nr)) ? \
- __constant_test_bit((nr),(addr)) : \
- __test_bit((nr),(addr)) )
-
/*
- * Optimized find bit helper functions.
- */
-
-/**
- * __ffz_word_loop - find byte offset of first long != -1UL
- * @addr: pointer to array of unsigned long
- * @size: size of the array in bits
+ * Functions which use MSB0 bit numbering.
+ * On an s390x system the bits are numbered:
+ * |0..............63|64............127|128...........191|192...........255|
+ * and on s390:
+ * |0.....31|32....63|64....95|96...127|128..159|160..191|192..223|224..255|
*/
-static inline unsigned long __ffz_word_loop(const unsigned long *addr,
- unsigned long size)
-{
- typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
- unsigned long bytes = 0;
-
- asm volatile(
-#ifndef CONFIG_64BIT
- " ahi %1,-1\n"
- " sra %1,5\n"
- " jz 1f\n"
- "0: c %2,0(%0,%3)\n"
- " jne 1f\n"
- " la %0,4(%0)\n"
- " brct %1,0b\n"
- "1:\n"
-#else
- " aghi %1,-1\n"
- " srag %1,%1,6\n"
- " jz 1f\n"
- "0: cg %2,0(%0,%3)\n"
- " jne 1f\n"
- " la %0,8(%0)\n"
- " brct %1,0b\n"
- "1:\n"
-#endif
- : "+&a" (bytes), "+&d" (size)
- : "d" (-1UL), "a" (addr), "m" (*(addrtype *) addr)
- : "cc" );
- return bytes;
-}
+unsigned long find_first_bit_inv(const unsigned long *addr, unsigned long size);
+unsigned long find_next_bit_inv(const unsigned long *addr, unsigned long size,
+ unsigned long offset);
-/**
- * __ffs_word_loop - find byte offset of first long != 0UL
- * @addr: pointer to array of unsigned long
- * @size: size of the array in bits
- */
-static inline unsigned long __ffs_word_loop(const unsigned long *addr,
- unsigned long size)
+static inline void set_bit_inv(unsigned long nr, volatile unsigned long *ptr)
{
- typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
- unsigned long bytes = 0;
-
- asm volatile(
-#ifndef CONFIG_64BIT
- " ahi %1,-1\n"
- " sra %1,5\n"
- " jz 1f\n"
- "0: c %2,0(%0,%3)\n"
- " jne 1f\n"
- " la %0,4(%0)\n"
- " brct %1,0b\n"
- "1:\n"
-#else
- " aghi %1,-1\n"
- " srag %1,%1,6\n"
- " jz 1f\n"
- "0: cg %2,0(%0,%3)\n"
- " jne 1f\n"
- " la %0,8(%0)\n"
- " brct %1,0b\n"
- "1:\n"
-#endif
- : "+&a" (bytes), "+&a" (size)
- : "d" (0UL), "a" (addr), "m" (*(addrtype *) addr)
- : "cc" );
- return bytes;
+ return set_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}
-/**
- * __ffz_word - add number of the first unset bit
- * @nr: base value the bit number is added to
- * @word: the word that is searched for unset bits
- */
-static inline unsigned long __ffz_word(unsigned long nr, unsigned long word)
+static inline void clear_bit_inv(unsigned long nr, volatile unsigned long *ptr)
{
-#ifdef CONFIG_64BIT
- if ((word & 0xffffffff) == 0xffffffff) {
- word >>= 32;
- nr += 32;
- }
-#endif
- if ((word & 0xffff) == 0xffff) {
- word >>= 16;
- nr += 16;
- }
- if ((word & 0xff) == 0xff) {
- word >>= 8;
- nr += 8;
- }
- return nr + _zb_findmap[(unsigned char) word];
+ return clear_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}
-/**
- * __ffs_word - add number of the first set bit
- * @nr: base value the bit number is added to
- * @word: the word that is searched for set bits
- */
-static inline unsigned long __ffs_word(unsigned long nr, unsigned long word)
+static inline void __set_bit_inv(unsigned long nr, volatile unsigned long *ptr)
{
-#ifdef CONFIG_64BIT
- if ((word & 0xffffffff) == 0) {
- word >>= 32;
- nr += 32;
- }
-#endif
- if ((word & 0xffff) == 0) {
- word >>= 16;
- nr += 16;
- }
- if ((word & 0xff) == 0) {
- word >>= 8;
- nr += 8;
- }
- return nr + _sb_findmap[(unsigned char) word];
+ return __set_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}
-
-/**
- * __load_ulong_be - load big endian unsigned long
- * @p: pointer to array of unsigned long
- * @offset: byte offset of source value in the array
- */
-static inline unsigned long __load_ulong_be(const unsigned long *p,
- unsigned long offset)
+static inline void __clear_bit_inv(unsigned long nr, volatile unsigned long *ptr)
{
- p = (unsigned long *)((unsigned long) p + offset);
- return *p;
+ return __clear_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}
-/**
- * __load_ulong_le - load little endian unsigned long
- * @p: pointer to array of unsigned long
- * @offset: byte offset of source value in the array
- */
-static inline unsigned long __load_ulong_le(const unsigned long *p,
- unsigned long offset)
+static inline int test_bit_inv(unsigned long nr,
+ const volatile unsigned long *ptr)
{
- unsigned long word;
-
- p = (unsigned long *)((unsigned long) p + offset);
-#ifndef CONFIG_64BIT
- asm volatile(
- " ic %0,%O1(%R1)\n"
- " icm %0,2,%O1+1(%R1)\n"
- " icm %0,4,%O1+2(%R1)\n"
- " icm %0,8,%O1+3(%R1)"
- : "=&d" (word) : "Q" (*p) : "cc");
-#else
- asm volatile(
- " lrvg %0,%1"
- : "=d" (word) : "m" (*p) );
-#endif
- return word;
+ return test_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}
-/*
- * The various find bit functions.
- */
+#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
-/*
- * ffz - find first zero in word.
- * @word: The word to search
+/**
+ * __flogr - find leftmost one
+ * @word: the word to search
*
- * Undefined if no zero exists, so code should check against ~0UL first.
- */
-static inline unsigned long ffz(unsigned long word)
-{
- return __ffz_word(0, word);
+ * Returns the bit number of the most significant bit set,
+ * where the most significant bit has bit number 0.
+ * If no bit is set this function returns 64.
+ */
+static inline unsigned char __flogr(unsigned long word)
+{
+ if (__builtin_constant_p(word)) {
+ unsigned long bit = 0;
+
+ if (!word)
+ return 64;
+ if (!(word & 0xffffffff00000000UL)) {
+ word <<= 32;
+ bit += 32;
+ }
+ if (!(word & 0xffff000000000000UL)) {
+ word <<= 16;
+ bit += 16;
+ }
+ if (!(word & 0xff00000000000000UL)) {
+ word <<= 8;
+ bit += 8;
+ }
+ if (!(word & 0xf000000000000000UL)) {
+ word <<= 4;
+ bit += 4;
+ }
+ if (!(word & 0xc000000000000000UL)) {
+ word <<= 2;
+ bit += 2;
+ }
+ if (!(word & 0x8000000000000000UL)) {
+ word <<= 1;
+ bit += 1;
+ }
+ return bit;
+ } else {
+ register unsigned long bit asm("4") = word;
+ register unsigned long out asm("5");
+
+ asm volatile(
+ " flogr %[bit],%[bit]\n"
+ : [bit] "+d" (bit), [out] "=d" (out) : : "cc");
+ return bit;
+ }
}
/**
@@ -573,337 +395,83 @@ static inline unsigned long ffz(unsigned long word)
*
* Undefined if no bit exists, so code should check against 0 first.
*/
-static inline unsigned long __ffs (unsigned long word)
+static inline unsigned long __ffs(unsigned long word)
{
- return __ffs_word(0, word);
+ return __flogr(-word & word) ^ (BITS_PER_LONG - 1);
}
/**
* ffs - find first bit set
- * @x: the word to search
+ * @word: the word to search
*
- * This is defined the same way as
- * the libc and compiler builtin ffs routines, therefore
- * differs in spirit from the above ffz (man ffs).
+ * This is defined the same way as the libc and
+ * compiler builtin ffs routines (man ffs).
*/
-static inline int ffs(int x)
+static inline int ffs(int word)
{
- if (!x)
- return 0;
- return __ffs_word(1, x);
+ unsigned long mask = 2 * BITS_PER_LONG - 1;
+ unsigned int val = (unsigned int)word;
+
+ return (1 + (__flogr(-val & val) ^ (BITS_PER_LONG - 1))) & mask;
}
/**
- * find_first_zero_bit - find the first zero bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum size to search
+ * __fls - find last (most-significant) set bit in a long word
+ * @word: the word to search
*
- * Returns the bit-number of the first zero bit, not the number of the byte
- * containing a bit.
+ * Undefined if no set bit exists, so code should check against 0 first.
*/
-static inline unsigned long find_first_zero_bit(const unsigned long *addr,
- unsigned long size)
+static inline unsigned long __fls(unsigned long word)
{
- unsigned long bytes, bits;
-
- if (!size)
- return 0;
- bytes = __ffz_word_loop(addr, size);
- bits = __ffz_word(bytes*8, __load_ulong_be(addr, bytes));
- return (bits < size) ? bits : size;
+ return __flogr(word) ^ (BITS_PER_LONG - 1);
}
-#define find_first_zero_bit find_first_zero_bit
/**
- * find_first_bit - find the first set bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum size to search
+ * fls64 - find last set bit in a 64-bit word
+ * @word: the word to search
*
- * Returns the bit-number of the first set bit, not the number of the byte
- * containing a bit.
- */
-static inline unsigned long find_first_bit(const unsigned long * addr,
- unsigned long size)
-{
- unsigned long bytes, bits;
-
- if (!size)
- return 0;
- bytes = __ffs_word_loop(addr, size);
- bits = __ffs_word(bytes*8, __load_ulong_be(addr, bytes));
- return (bits < size) ? bits : size;
-}
-#define find_first_bit find_first_bit
-
-/*
- * Big endian variant whichs starts bit counting from left using
- * the flogr (find leftmost one) instruction.
- */
-static inline unsigned long __flo_word(unsigned long nr, unsigned long val)
-{
- register unsigned long bit asm("2") = val;
- register unsigned long out asm("3");
-
- asm volatile (
- " .insn rre,0xb9830000,%[bit],%[bit]\n"
- : [bit] "+d" (bit), [out] "=d" (out) : : "cc");
- return nr + bit;
-}
-
-/*
- * 64 bit special left bitops format:
- * order in memory:
- * 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f
- * 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f
- * 20 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f
- * 30 31 32 33 34 35 36 37 38 39 3a 3b 3c 3d 3e 3f
- * after that follows the next long with bit numbers
- * 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f
- * 50 51 52 53 54 55 56 57 58 59 5a 5b 5c 5d 5e 5f
- * 60 61 62 63 64 65 66 67 68 69 6a 6b 6c 6d 6e 6f
- * 70 71 72 73 74 75 76 77 78 79 7a 7b 7c 7d 7e 7f
- * The reason for this bit ordering is the fact that
- * the hardware sets bits in a bitmap starting at bit 0
- * and we don't want to scan the bitmap from the 'wrong
- * end'.
+ * This is defined in a similar way as the libc and compiler builtin
+ * ffsll, but returns the position of the most significant set bit.
+ *
+ * fls64(value) returns 0 if value is 0 or the position of the last
+ * set bit if value is nonzero. The last (most significant) bit is
+ * at position 64.
*/
-static inline unsigned long find_first_bit_left(const unsigned long *addr,
- unsigned long size)
-{
- unsigned long bytes, bits;
-
- if (!size)
- return 0;
- bytes = __ffs_word_loop(addr, size);
- bits = __flo_word(bytes * 8, __load_ulong_be(addr, bytes));
- return (bits < size) ? bits : size;
-}
-
-static inline int find_next_bit_left(const unsigned long *addr,
- unsigned long size,
- unsigned long offset)
+static inline int fls64(unsigned long word)
{
- const unsigned long *p;
- unsigned long bit, set;
-
- if (offset >= size)
- return size;
- bit = offset & (BITS_PER_LONG - 1);
- offset -= bit;
- size -= offset;
- p = addr + offset / BITS_PER_LONG;
- if (bit) {
- set = __flo_word(0, *p & (~0UL >> bit));
- if (set >= size)
- return size + offset;
- if (set < BITS_PER_LONG)
- return set + offset;
- offset += BITS_PER_LONG;
- size -= BITS_PER_LONG;
- p++;
- }
- return offset + find_first_bit_left(p, size);
-}
-
-#define for_each_set_bit_left(bit, addr, size) \
- for ((bit) = find_first_bit_left((addr), (size)); \
- (bit) < (size); \
- (bit) = find_next_bit_left((addr), (size), (bit) + 1))
-
-/* same as for_each_set_bit() but use bit as value to start with */
-#define for_each_set_bit_left_cont(bit, addr, size) \
- for ((bit) = find_next_bit_left((addr), (size), (bit)); \
- (bit) < (size); \
- (bit) = find_next_bit_left((addr), (size), (bit) + 1))
+ unsigned long mask = 2 * BITS_PER_LONG - 1;
-/**
- * find_next_zero_bit - find the first zero bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
- */
-static inline int find_next_zero_bit (const unsigned long * addr,
- unsigned long size,
- unsigned long offset)
-{
- const unsigned long *p;
- unsigned long bit, set;
-
- if (offset >= size)
- return size;
- bit = offset & (BITS_PER_LONG - 1);
- offset -= bit;
- size -= offset;
- p = addr + offset / BITS_PER_LONG;
- if (bit) {
- /*
- * __ffz_word returns BITS_PER_LONG
- * if no zero bit is present in the word.
- */
- set = __ffz_word(bit, *p >> bit);
- if (set >= size)
- return size + offset;
- if (set < BITS_PER_LONG)
- return set + offset;
- offset += BITS_PER_LONG;
- size -= BITS_PER_LONG;
- p++;
- }
- return offset + find_first_zero_bit(p, size);
+ return (1 + (__flogr(word) ^ (BITS_PER_LONG - 1))) & mask;
}
-#define find_next_zero_bit find_next_zero_bit
/**
- * find_next_bit - find the first set bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
+ * fls - find last (most-significant) bit set
+ * @word: the word to search
+ *
+ * This is defined the same way as ffs.
+ * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
*/
-static inline int find_next_bit (const unsigned long * addr,
- unsigned long size,
- unsigned long offset)
+static inline int fls(int word)
{
- const unsigned long *p;
- unsigned long bit, set;
-
- if (offset >= size)
- return size;
- bit = offset & (BITS_PER_LONG - 1);
- offset -= bit;
- size -= offset;
- p = addr + offset / BITS_PER_LONG;
- if (bit) {
- /*
- * __ffs_word returns BITS_PER_LONG
- * if no one bit is present in the word.
- */
- set = __ffs_word(0, *p & (~0UL << bit));
- if (set >= size)
- return size + offset;
- if (set < BITS_PER_LONG)
- return set + offset;
- offset += BITS_PER_LONG;
- size -= BITS_PER_LONG;
- p++;
- }
- return offset + find_first_bit(p, size);
+ return fls64((unsigned int)word);
}
-#define find_next_bit find_next_bit
-/*
- * Every architecture must define this function. It's the fastest
- * way of searching a 140-bit bitmap where the first 100 bits are
- * unlikely to be set. It's guaranteed that at least one of the 140
- * bits is cleared.
- */
-static inline int sched_find_first_bit(unsigned long *b)
-{
- return find_first_bit(b, 140);
-}
+#else /* CONFIG_HAVE_MARCH_Z9_109_FEATURES */
-#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/__ffs.h>
+#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/__fls.h>
+#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/fls64.h>
+#endif /* CONFIG_HAVE_MARCH_Z9_109_FEATURES */
+
+#include <asm-generic/bitops/ffz.h>
+#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>
-
-/*
- * ATTENTION: intel byte ordering convention for ext2 and minix !!
- * bit 0 is the LSB of addr; bit 31 is the MSB of addr;
- * bit 32 is the LSB of (addr+4).
- * That combined with the little endian byte order of Intel gives the
- * following bit order in memory:
- * 07 06 05 04 03 02 01 00 15 14 13 12 11 10 09 08 \
- * 23 22 21 20 19 18 17 16 31 30 29 28 27 26 25 24
- */
-
-static inline int find_first_zero_bit_le(void *vaddr, unsigned int size)
-{
- unsigned long bytes, bits;
-
- if (!size)
- return 0;
- bytes = __ffz_word_loop(vaddr, size);
- bits = __ffz_word(bytes*8, __load_ulong_le(vaddr, bytes));
- return (bits < size) ? bits : size;
-}
-#define find_first_zero_bit_le find_first_zero_bit_le
-
-static inline int find_next_zero_bit_le(void *vaddr, unsigned long size,
- unsigned long offset)
-{
- unsigned long *addr = vaddr, *p;
- unsigned long bit, set;
-
- if (offset >= size)
- return size;
- bit = offset & (BITS_PER_LONG - 1);
- offset -= bit;
- size -= offset;
- p = addr + offset / BITS_PER_LONG;
- if (bit) {
- /*
- * s390 version of ffz returns BITS_PER_LONG
- * if no zero bit is present in the word.
- */
- set = __ffz_word(bit, __load_ulong_le(p, 0) >> bit);
- if (set >= size)
- return size + offset;
- if (set < BITS_PER_LONG)
- return set + offset;
- offset += BITS_PER_LONG;
- size -= BITS_PER_LONG;
- p++;
- }
- return offset + find_first_zero_bit_le(p, size);
-}
-#define find_next_zero_bit_le find_next_zero_bit_le
-
-static inline unsigned long find_first_bit_le(void *vaddr, unsigned long size)
-{
- unsigned long bytes, bits;
-
- if (!size)
- return 0;
- bytes = __ffs_word_loop(vaddr, size);
- bits = __ffs_word(bytes*8, __load_ulong_le(vaddr, bytes));
- return (bits < size) ? bits : size;
-}
-#define find_first_bit_le find_first_bit_le
-
-static inline int find_next_bit_le(void *vaddr, unsigned long size,
- unsigned long offset)
-{
- unsigned long *addr = vaddr, *p;
- unsigned long bit, set;
-
- if (offset >= size)
- return size;
- bit = offset & (BITS_PER_LONG - 1);
- offset -= bit;
- size -= offset;
- p = addr + offset / BITS_PER_LONG;
- if (bit) {
- /*
- * s390 version of ffz returns BITS_PER_LONG
- * if no zero bit is present in the word.
- */
- set = __ffs_word(0, __load_ulong_le(p, 0) & (~0UL << bit));
- if (set >= size)
- return size + offset;
- if (set < BITS_PER_LONG)
- return set + offset;
- offset += BITS_PER_LONG;
- size -= BITS_PER_LONG;
- p++;
- }
- return offset + find_first_bit_le(p, size);
-}
-#define find_next_bit_le find_next_bit_le
-
+#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/le.h>
-
#include <asm-generic/bitops/ext2-atomic-setbit.h>
#endif /* _S390_BITOPS_H */
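The new *_inv bit operations implement the MSB0 numbering described in the header comment by mirroring the bit number inside its word with an XOR of BITS_PER_LONG - 1 and then reusing the ordinary LSB0 primitives. A small user-space illustration of that index mapping, assuming 64-bit longs (this is not the kernel code, only the arithmetic):

#include <stdio.h>

#define BITS_PER_LONG 64

static unsigned long map_inv(unsigned long nr)
{
        return nr ^ (BITS_PER_LONG - 1);        /* mirror nr inside its word */
}

int main(void)
{
        /* MSB0 bit 0 is the most significant bit of the first word ... */
        printf("inv 0   -> %lu\n", map_inv(0));         /* 63 */
        /* ... and MSB0 bit 64 is the most significant bit of the second word. */
        printf("inv 64  -> %lu\n", map_inv(64));        /* 127 */
        printf("inv 127 -> %lu\n", map_inv(127));       /* 64 */
        return 0;
}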
diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h
index c1e7c646727c..4bf9da03591e 100644
--- a/arch/s390/include/asm/compat.h
+++ b/arch/s390/include/asm/compat.h
@@ -22,6 +22,7 @@
#define PSW32_MASK_ASC 0x0000C000UL
#define PSW32_MASK_CC 0x00003000UL
#define PSW32_MASK_PM 0x00000f00UL
+#define PSW32_MASK_RI 0x00000080UL
#define PSW32_MASK_USER 0x0000FF00UL
@@ -35,7 +36,9 @@
#define PSW32_ASC_SECONDARY 0x00008000UL
#define PSW32_ASC_HOME 0x0000C000UL
-extern u32 psw32_user_bits;
+#define PSW32_USER_BITS (PSW32_MASK_DAT | PSW32_MASK_IO | PSW32_MASK_EXT | \
+ PSW32_DEFAULT_KEY | PSW32_MASK_BASE | \
+ PSW32_MASK_MCHECK | PSW32_MASK_PSTATE | PSW32_ASC_HOME)
#define COMPAT_USER_HZ 100
#define COMPAT_UTS_MACHINE "s390\0\0\0\0"
diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h
index debfda33d1f8..9b69c0befdca 100644
--- a/arch/s390/include/asm/ctl_reg.h
+++ b/arch/s390/include/asm/ctl_reg.h
@@ -8,69 +8,59 @@
#define __ASM_CTL_REG_H
#ifdef CONFIG_64BIT
-
-#define __ctl_load(array, low, high) ({ \
- typedef struct { char _[sizeof(array)]; } addrtype; \
- asm volatile( \
- " lctlg %1,%2,%0\n" \
- : : "Q" (*(addrtype *)(&array)), \
- "i" (low), "i" (high)); \
- })
-
-#define __ctl_store(array, low, high) ({ \
- typedef struct { char _[sizeof(array)]; } addrtype; \
- asm volatile( \
- " stctg %1,%2,%0\n" \
- : "=Q" (*(addrtype *)(&array)) \
- : "i" (low), "i" (high)); \
- })
-
-#else /* CONFIG_64BIT */
-
-#define __ctl_load(array, low, high) ({ \
- typedef struct { char _[sizeof(array)]; } addrtype; \
- asm volatile( \
- " lctl %1,%2,%0\n" \
- : : "Q" (*(addrtype *)(&array)), \
- "i" (low), "i" (high)); \
-})
-
-#define __ctl_store(array, low, high) ({ \
- typedef struct { char _[sizeof(array)]; } addrtype; \
- asm volatile( \
- " stctl %1,%2,%0\n" \
- : "=Q" (*(addrtype *)(&array)) \
- : "i" (low), "i" (high)); \
- })
-
-#endif /* CONFIG_64BIT */
-
-#define __ctl_set_bit(cr, bit) ({ \
- unsigned long __dummy; \
- __ctl_store(__dummy, cr, cr); \
- __dummy |= 1UL << (bit); \
- __ctl_load(__dummy, cr, cr); \
-})
-
-#define __ctl_clear_bit(cr, bit) ({ \
- unsigned long __dummy; \
- __ctl_store(__dummy, cr, cr); \
- __dummy &= ~(1UL << (bit)); \
- __ctl_load(__dummy, cr, cr); \
-})
+# define __CTL_LOAD "lctlg"
+# define __CTL_STORE "stctg"
+#else
+# define __CTL_LOAD "lctl"
+# define __CTL_STORE "stctl"
+#endif
+
+#define __ctl_load(array, low, high) { \
+ typedef struct { char _[sizeof(array)]; } addrtype; \
+ \
+ BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
+ asm volatile( \
+ __CTL_LOAD " %1,%2,%0\n" \
+ : : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high));\
+}
+
+#define __ctl_store(array, low, high) { \
+ typedef struct { char _[sizeof(array)]; } addrtype; \
+ \
+ BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
+ asm volatile( \
+ __CTL_STORE " %1,%2,%0\n" \
+ : "=Q" (*(addrtype *)(&array)) \
+ : "i" (low), "i" (high)); \
+}
+
+static inline void __ctl_set_bit(unsigned int cr, unsigned int bit)
+{
+ unsigned long reg;
+
+ __ctl_store(reg, cr, cr);
+ reg |= 1UL << bit;
+ __ctl_load(reg, cr, cr);
+}
+
+static inline void __ctl_clear_bit(unsigned int cr, unsigned int bit)
+{
+ unsigned long reg;
+
+ __ctl_store(reg, cr, cr);
+ reg &= ~(1UL << bit);
+ __ctl_load(reg, cr, cr);
+}
+
+void smp_ctl_set_bit(int cr, int bit);
+void smp_ctl_clear_bit(int cr, int bit);
#ifdef CONFIG_SMP
-
-extern void smp_ctl_set_bit(int cr, int bit);
-extern void smp_ctl_clear_bit(int cr, int bit);
-#define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
-#define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)
-
+# define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
+# define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)
#else
-
-#define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit)
-#define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit)
-
-#endif /* CONFIG_SMP */
+# define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit)
+# define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit)
+#endif
#endif /* __ASM_CTL_REG_H */
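__ctl_load()/__ctl_store() now carry a BUILD_BUG_ON that ties the size of the passed array to the control-register range, so a mismatched call fails at compile time instead of silently touching the wrong registers. A user-space model of that size rule using C11 static_assert; the macro name is a stand-in, not the kernel one.

#include <assert.h>

#define CTL_RANGE_CHECK(array, low, high) \
        static_assert(sizeof(array) == ((high) - (low) + 1) * sizeof(long), \
                      "control register range does not match array size")

int main(void)
{
        unsigned long cr_pair[2];

        CTL_RANGE_CHECK(cr_pair, 10, 11);       /* ok: two registers, two longs */
        /* CTL_RANGE_CHECK(cr_pair, 0, 2); would not compile: three registers */
        (void)cr_pair;
        return 0;
}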
diff --git a/arch/s390/include/asm/debug.h b/arch/s390/include/asm/debug.h
index 188c5052a20a..530c15eb01e9 100644
--- a/arch/s390/include/asm/debug.h
+++ b/arch/s390/include/asm/debug.h
@@ -107,6 +107,11 @@ void debug_set_level(debug_info_t* id, int new_level);
void debug_set_critical(void);
void debug_stop_all(void);
+static inline bool debug_level_enabled(debug_info_t* id, int level)
+{
+ return level <= id->level;
+}
+
static inline debug_entry_t*
debug_event(debug_info_t* id, int level, void* data, int length)
{
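The debug.h hunk adds debug_level_enabled(), which lets callers test an s390 debug area's current level before building an event. A user-space model of its semantics follows; the structure and names are simplified stand-ins, not the kernel types.

#include <stdio.h>

struct debug_area { int level; };

static int level_enabled(const struct debug_area *id, int level)
{
        return level <= id->level;      /* same comparison as debug_level_enabled() */
}

int main(void)
{
        struct debug_area pci_msg = { .level = 3 };

        if (level_enabled(&pci_msg, 2))
                printf("level 2 event is recorded\n");
        if (!level_enabled(&pci_msg, 5))
                printf("level 5 event is filtered out\n");
        return 0;
}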
diff --git a/arch/s390/include/asm/dis.h b/arch/s390/include/asm/dis.h
new file mode 100644
index 000000000000..04a83f5773cd
--- /dev/null
+++ b/arch/s390/include/asm/dis.h
@@ -0,0 +1,52 @@
+/*
+ * Disassemble s390 instructions.
+ *
+ * Copyright IBM Corp. 2007
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ */
+
+#ifndef __ASM_S390_DIS_H__
+#define __ASM_S390_DIS_H__
+
+/* Type of operand */
+#define OPERAND_GPR 0x1 /* Operand printed as %rx */
+#define OPERAND_FPR 0x2 /* Operand printed as %fx */
+#define OPERAND_AR 0x4 /* Operand printed as %ax */
+#define OPERAND_CR 0x8 /* Operand printed as %cx */
+#define OPERAND_DISP 0x10 /* Operand printed as displacement */
+#define OPERAND_BASE 0x20 /* Operand printed as base register */
+#define OPERAND_INDEX 0x40 /* Operand printed as index register */
+#define OPERAND_PCREL 0x80 /* Operand printed as pc-relative symbol */
+#define OPERAND_SIGNED 0x100 /* Operand printed as signed value */
+#define OPERAND_LENGTH 0x200 /* Operand printed as length (+1) */
+
+
+struct s390_operand {
+ int bits; /* The number of bits in the operand. */
+ int shift; /* The number of bits to shift. */
+ int flags; /* One bit syntax flags. */
+};
+
+struct s390_insn {
+ const char name[5];
+ unsigned char opfrag;
+ unsigned char format;
+};
+
+
+static inline int insn_length(unsigned char code)
+{
+ return ((((int) code + 64) >> 7) + 1) << 1;
+}
+
+void show_code(struct pt_regs *regs);
+void print_fn_code(unsigned char *code, unsigned long len);
+int insn_to_mnemonic(unsigned char *instruction, char *buf, unsigned int len);
+struct s390_insn *find_insn(unsigned char *code);
+
+static inline int is_known_insn(unsigned char *code)
+{
+ return !!find_insn(code);
+}
+
+#endif /* __ASM_S390_DIS_H__ */
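insn_length() derives the instruction size from the two most significant bits of the first opcode byte: 00 maps to 2 bytes, 01 and 10 to 4 bytes, and 11 to 6 bytes. A worked example with the same arithmetic (the opcode values are picked for illustration):

#include <stdio.h>

static int insn_length(unsigned char code)
{
        return ((((int) code + 64) >> 7) + 1) << 1;
}

int main(void)
{
        printf("0x07 (00xxxxxx) -> %d\n", insn_length(0x07));   /* 2 bytes, e.g. BCR  */
        printf("0x47 (01xxxxxx) -> %d\n", insn_length(0x47));   /* 4 bytes, e.g. BC   */
        printf("0xa7 (10xxxxxx) -> %d\n", insn_length(0xa7));   /* 4 bytes            */
        printf("0xc0 (11xxxxxx) -> %d\n", insn_length(0xc0));   /* 6 bytes, e.g. LARL */
        return 0;
}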
diff --git a/arch/s390/include/asm/fcx.h b/arch/s390/include/asm/fcx.h
index ef6170995076..7ecb92b469b6 100644
--- a/arch/s390/include/asm/fcx.h
+++ b/arch/s390/include/asm/fcx.h
@@ -12,9 +12,9 @@
#define TCW_FORMAT_DEFAULT 0
#define TCW_TIDAW_FORMAT_DEFAULT 0
-#define TCW_FLAGS_INPUT_TIDA 1 << (23 - 5)
-#define TCW_FLAGS_TCCB_TIDA 1 << (23 - 6)
-#define TCW_FLAGS_OUTPUT_TIDA 1 << (23 - 7)
+#define TCW_FLAGS_INPUT_TIDA (1 << (23 - 5))
+#define TCW_FLAGS_TCCB_TIDA (1 << (23 - 6))
+#define TCW_FLAGS_OUTPUT_TIDA (1 << (23 - 7))
#define TCW_FLAGS_TIDAW_FORMAT(x) ((x) & 3) << (23 - 9)
#define TCW_FLAGS_GET_TIDAW_FORMAT(x) (((x) >> (23 - 9)) & 3)
@@ -54,11 +54,11 @@ struct tcw {
u32 intrg;
} __attribute__ ((packed, aligned(64)));
-#define TIDAW_FLAGS_LAST 1 << (7 - 0)
-#define TIDAW_FLAGS_SKIP 1 << (7 - 1)
-#define TIDAW_FLAGS_DATA_INT 1 << (7 - 2)
-#define TIDAW_FLAGS_TTIC 1 << (7 - 3)
-#define TIDAW_FLAGS_INSERT_CBC 1 << (7 - 4)
+#define TIDAW_FLAGS_LAST (1 << (7 - 0))
+#define TIDAW_FLAGS_SKIP (1 << (7 - 1))
+#define TIDAW_FLAGS_DATA_INT (1 << (7 - 2))
+#define TIDAW_FLAGS_TTIC (1 << (7 - 3))
+#define TIDAW_FLAGS_INSERT_CBC (1 << (7 - 4))
/**
* struct tidaw - Transport-Indirect-Addressing Word (TIDAW)
@@ -106,9 +106,9 @@ struct tsa_ddpc {
u8 sense[32];
} __attribute__ ((packed));
-#define TSA_INTRG_FLAGS_CU_STATE_VALID 1 << (7 - 0)
-#define TSA_INTRG_FLAGS_DEV_STATE_VALID 1 << (7 - 1)
-#define TSA_INTRG_FLAGS_OP_STATE_VALID 1 << (7 - 2)
+#define TSA_INTRG_FLAGS_CU_STATE_VALID (1 << (7 - 0))
+#define TSA_INTRG_FLAGS_DEV_STATE_VALID (1 << (7 - 1))
+#define TSA_INTRG_FLAGS_OP_STATE_VALID (1 << (7 - 2))
/**
* struct tsa_intrg - Interrogate Transport-Status Area (Intrg. TSA)
@@ -140,10 +140,10 @@ struct tsa_intrg {
#define TSB_FORMAT_DDPC 2
#define TSB_FORMAT_INTRG 3
-#define TSB_FLAGS_DCW_OFFSET_VALID 1 << (7 - 0)
-#define TSB_FLAGS_COUNT_VALID 1 << (7 - 1)
-#define TSB_FLAGS_CACHE_MISS 1 << (7 - 2)
-#define TSB_FLAGS_TIME_VALID 1 << (7 - 3)
+#define TSB_FLAGS_DCW_OFFSET_VALID (1 << (7 - 0))
+#define TSB_FLAGS_COUNT_VALID (1 << (7 - 1))
+#define TSB_FLAGS_CACHE_MISS (1 << (7 - 2))
+#define TSB_FLAGS_TIME_VALID (1 << (7 - 3))
#define TSB_FLAGS_FORMAT(x) ((x) & 7)
#define TSB_FORMAT(t) ((t)->flags & 7)
@@ -179,9 +179,9 @@ struct tsb {
#define DCW_INTRG_RCQ_PRIMARY 1
#define DCW_INTRG_RCQ_SECONDARY 2
-#define DCW_INTRG_FLAGS_MPM 1 < (7 - 0)
-#define DCW_INTRG_FLAGS_PPR 1 < (7 - 1)
-#define DCW_INTRG_FLAGS_CRIT 1 < (7 - 2)
+#define DCW_INTRG_FLAGS_MPM (1 << (7 - 0))
+#define DCW_INTRG_FLAGS_PPR (1 << (7 - 1))
+#define DCW_INTRG_FLAGS_CRIT (1 << (7 - 2))
/**
* struct dcw_intrg_data - Interrogate DCW data
@@ -216,7 +216,7 @@ struct dcw_intrg_data {
u8 prog_data[0];
} __attribute__ ((packed));
-#define DCW_FLAGS_CC 1 << (7 - 1)
+#define DCW_FLAGS_CC (1 << (7 - 1))
#define DCW_CMD_WRITE 0x01
#define DCW_CMD_READ 0x02
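
Two separate problems are addressed in this file: the flag macros gain parentheses so that operators at the use site cannot bind into the shift expression, and the DCW_INTRG_FLAGS_* definitions, which used '<' (a comparison) instead of '<<' and therefore all evaluated to 1, become proper bit masks. A small sketch of the precedence hazard the parentheses guard against, using hypothetical macro names:

	/* Illustration only (hypothetical names). */
	#define FLAG_BAD	1 << (7 - 2)	/* unparenthesized		*/
	#define FLAG_GOOD	(1 << (7 - 2))	/* as written in this patch	*/

	/* FLAG_BAD + 1   expands to  1 << ((7 - 2) + 1)  == 0x40	*/
	/* FLAG_GOOD + 1  expands to  (1 << (7 - 2)) + 1  == 0x21	*/
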
diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h
index 2bd6cb897b90..2fcccc0c997c 100644
--- a/arch/s390/include/asm/ipl.h
+++ b/arch/s390/include/asm/ipl.h
@@ -7,6 +7,7 @@
#ifndef _ASM_S390_IPL_H
#define _ASM_S390_IPL_H
+#include <asm/lowcore.h>
#include <asm/types.h>
#include <asm/cio.h>
#include <asm/setup.h>
@@ -86,7 +87,14 @@ struct ipl_parameter_block {
*/
extern u32 ipl_flags;
extern u32 dump_prefix_page;
-extern unsigned int zfcpdump_prefix_array[];
+
+struct dump_save_areas {
+ struct save_area **areas;
+ int count;
+};
+
+extern struct dump_save_areas dump_save_areas;
+struct save_area *dump_save_area_create(int cpu);
extern void do_reipl(void);
extern void do_halt(void);
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 9f973d8de90e..5d1f950704dc 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -40,14 +40,8 @@ static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
pgd_t *pgd = mm->pgd;
S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
- if (s390_user_mode != HOME_SPACE_MODE) {
- /* Load primary space page table origin. */
- asm volatile(LCTL_OPCODE" 1,1,%0\n"
- : : "m" (S390_lowcore.user_asce) );
- } else
- /* Load home space page table origin. */
- asm volatile(LCTL_OPCODE" 13,13,%0"
- : : "m" (S390_lowcore.user_asce) );
+ /* Load primary space page table origin. */
+ asm volatile(LCTL_OPCODE" 1,1,%0\n" : : "m" (S390_lowcore.user_asce));
set_fs(current->thread.mm_segment);
}
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 1e51f2915b2e..316c8503a3b4 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -30,7 +30,12 @@
#include <asm/setup.h>
#ifndef __ASSEMBLY__
-void storage_key_init_range(unsigned long start, unsigned long end);
+static inline void storage_key_init_range(unsigned long start, unsigned long end)
+{
+#if PAGE_DEFAULT_KEY
+ __storage_key_init_range(start, end);
+#endif
+}
static inline void clear_page(void *page)
{
diff --git a/arch/s390/include/asm/pci_debug.h b/arch/s390/include/asm/pci_debug.h
index 1ca5d1047c71..ac24b26fc065 100644
--- a/arch/s390/include/asm/pci_debug.h
+++ b/arch/s390/include/asm/pci_debug.h
@@ -6,14 +6,9 @@
extern debug_info_t *pci_debug_msg_id;
extern debug_info_t *pci_debug_err_id;
-#ifdef CONFIG_PCI_DEBUG
#define zpci_dbg(imp, fmt, args...) \
debug_sprintf_event(pci_debug_msg_id, imp, fmt, ##args)
-#else /* !CONFIG_PCI_DEBUG */
-#define zpci_dbg(imp, fmt, args...) do { } while (0)
-#endif
-
#define zpci_err(text...) \
do { \
char debug_buffer[16]; \
diff --git a/arch/s390/include/asm/pci_insn.h b/arch/s390/include/asm/pci_insn.h
index df6eac9f0cb4..649eb62c52b3 100644
--- a/arch/s390/include/asm/pci_insn.h
+++ b/arch/s390/include/asm/pci_insn.h
@@ -54,11 +54,9 @@
struct zpci_fib {
u32 fmt : 8; /* format */
u32 : 24;
- u32 reserved1;
+ u32 : 32;
u8 fc; /* function controls */
- u8 reserved2;
- u16 reserved3;
- u32 reserved4;
+ u64 : 56;
u64 pba; /* PCI base address */
u64 pal; /* PCI address limit */
u64 iota; /* I/O Translation Anchor */
@@ -70,14 +68,13 @@ struct zpci_fib {
u32 sum : 1; /* Adapter int summary bit enabled */
u32 : 1;
u32 aisbo : 6; /* Adapter int summary bit offset */
- u32 reserved5;
+ u32 : 32;
u64 aibv; /* Adapter int bit vector address */
u64 aisb; /* Adapter int summary bit address */
u64 fmb_addr; /* Function measurement block address and key */
- u64 reserved6;
- u64 reserved7;
-} __packed;
-
+ u32 : 32;
+ u32 gd;
+} __packed __aligned(8);
int zpci_mod_fc(u64 req, struct zpci_fib *fib);
int zpci_refresh_trans(u64 fn, u64 addr, u64 range);
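
The zpci_fib rework above replaces the named reservedN members with unnamed bit-fields, which keep the architected layout without exposing fields that nobody should touch, and adds an explicit 8-byte alignment. A minimal sketch of the idiom; the structure below is hypothetical and only assumes the kernel's u32/u64 typedefs and the __packed/__aligned attributes:

	/* Sketch only: unnamed bit-fields hold reserved, architected space. */
	struct example_hw_block {
		u32 fmt :  8;	/* format				*/
		u32	: 24;	/* reserved bits, cannot be named	*/
		u32	: 32;	/* a whole reserved word		*/
		u8  fc;		/* hypothetical control byte		*/
		u64	: 56;	/* rest of the doubleword, reserved	*/
		u64 addr;	/* next architected field		*/
	} __packed __aligned(8);
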
diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
index 86fe0ee2cee5..fa91e0097458 100644
--- a/arch/s390/include/asm/percpu.h
+++ b/arch/s390/include/asm/percpu.h
@@ -10,16 +10,22 @@
*/
#define __my_cpu_offset S390_lowcore.percpu_offset
+#ifdef CONFIG_64BIT
+
/*
* For 64 bit module code, the module may be more than 4G above the
 * per cpu area; use weak definitions to force the compiler to
* generate external references.
*/
-#if defined(CONFIG_SMP) && defined(CONFIG_64BIT) && defined(MODULE)
+#if defined(CONFIG_SMP) && defined(MODULE)
#define ARCH_NEEDS_WEAK_PER_CPU
#endif
-#define arch_this_cpu_to_op(pcp, val, op) \
+/*
+ * We use a compare-and-swap loop since that uses fewer cpu cycles than
+ * disabling and enabling interrupts like the generic variant would do.
+ */
+#define arch_this_cpu_to_op_simple(pcp, val, op) \
({ \
typedef typeof(pcp) pcp_op_T__; \
pcp_op_T__ old__, new__, prev__; \
@@ -30,42 +36,101 @@
do { \
old__ = prev__; \
new__ = old__ op (val); \
- switch (sizeof(*ptr__)) { \
- case 8: \
- prev__ = cmpxchg64(ptr__, old__, new__); \
- break; \
- default: \
- prev__ = cmpxchg(ptr__, old__, new__); \
- } \
+ prev__ = cmpxchg(ptr__, old__, new__); \
} while (prev__ != old__); \
preempt_enable(); \
new__; \
})
-#define this_cpu_add_1(pcp, val) arch_this_cpu_to_op(pcp, val, +)
-#define this_cpu_add_2(pcp, val) arch_this_cpu_to_op(pcp, val, +)
-#define this_cpu_add_4(pcp, val) arch_this_cpu_to_op(pcp, val, +)
-#define this_cpu_add_8(pcp, val) arch_this_cpu_to_op(pcp, val, +)
+#define this_cpu_add_1(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
+#define this_cpu_add_2(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
+#define this_cpu_add_return_1(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
+#define this_cpu_add_return_2(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
+#define this_cpu_and_1(pcp, val) arch_this_cpu_to_op_simple(pcp, val, &)
+#define this_cpu_and_2(pcp, val) arch_this_cpu_to_op_simple(pcp, val, &)
+#define this_cpu_or_1(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |)
+#define this_cpu_or_2(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |)
+
+#ifndef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+#define this_cpu_add_4(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
+#define this_cpu_add_8(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
+#define this_cpu_add_return_4(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
+#define this_cpu_add_return_8(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
+#define this_cpu_and_4(pcp, val) arch_this_cpu_to_op_simple(pcp, val, &)
+#define this_cpu_and_8(pcp, val) arch_this_cpu_to_op_simple(pcp, val, &)
+#define this_cpu_or_4(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |)
+#define this_cpu_or_8(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |)
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+#define arch_this_cpu_add(pcp, val, op1, op2, szcast) \
+{ \
+ typedef typeof(pcp) pcp_op_T__; \
+ pcp_op_T__ val__ = (val); \
+ pcp_op_T__ old__, *ptr__; \
+ preempt_disable(); \
+ ptr__ = __this_cpu_ptr(&(pcp)); \
+ if (__builtin_constant_p(val__) && \
+ ((szcast)val__ > -129) && ((szcast)val__ < 128)) { \
+ asm volatile( \
+ op2 " %[ptr__],%[val__]\n" \
+ : [ptr__] "+Q" (*ptr__) \
+ : [val__] "i" ((szcast)val__) \
+ : "cc"); \
+ } else { \
+ asm volatile( \
+ op1 " %[old__],%[val__],%[ptr__]\n" \
+ : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \
+ : [val__] "d" (val__) \
+ : "cc"); \
+ } \
+ preempt_enable(); \
+}
-#define this_cpu_add_return_1(pcp, val) arch_this_cpu_to_op(pcp, val, +)
-#define this_cpu_add_return_2(pcp, val) arch_this_cpu_to_op(pcp, val, +)
-#define this_cpu_add_return_4(pcp, val) arch_this_cpu_to_op(pcp, val, +)
-#define this_cpu_add_return_8(pcp, val) arch_this_cpu_to_op(pcp, val, +)
+#define this_cpu_add_4(pcp, val) arch_this_cpu_add(pcp, val, "laa", "asi", int)
+#define this_cpu_add_8(pcp, val) arch_this_cpu_add(pcp, val, "laag", "agsi", long)
-#define this_cpu_and_1(pcp, val) arch_this_cpu_to_op(pcp, val, &)
-#define this_cpu_and_2(pcp, val) arch_this_cpu_to_op(pcp, val, &)
-#define this_cpu_and_4(pcp, val) arch_this_cpu_to_op(pcp, val, &)
-#define this_cpu_and_8(pcp, val) arch_this_cpu_to_op(pcp, val, &)
+#define arch_this_cpu_add_return(pcp, val, op) \
+({ \
+ typedef typeof(pcp) pcp_op_T__; \
+ pcp_op_T__ val__ = (val); \
+ pcp_op_T__ old__, *ptr__; \
+ preempt_disable(); \
+ ptr__ = __this_cpu_ptr(&(pcp)); \
+ asm volatile( \
+ op " %[old__],%[val__],%[ptr__]\n" \
+ : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \
+ : [val__] "d" (val__) \
+ : "cc"); \
+ preempt_enable(); \
+ old__ + val__; \
+})
-#define this_cpu_or_1(pcp, val) arch_this_cpu_to_op(pcp, val, |)
-#define this_cpu_or_2(pcp, val) arch_this_cpu_to_op(pcp, val, |)
-#define this_cpu_or_4(pcp, val) arch_this_cpu_to_op(pcp, val, |)
-#define this_cpu_or_8(pcp, val) arch_this_cpu_to_op(pcp, val, |)
+#define this_cpu_add_return_4(pcp, val) arch_this_cpu_add_return(pcp, val, "laa")
+#define this_cpu_add_return_8(pcp, val) arch_this_cpu_add_return(pcp, val, "laag")
-#define this_cpu_xor_1(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
-#define this_cpu_xor_2(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
-#define this_cpu_xor_4(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
-#define this_cpu_xor_8(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
+#define arch_this_cpu_to_op(pcp, val, op) \
+{ \
+ typedef typeof(pcp) pcp_op_T__; \
+ pcp_op_T__ val__ = (val); \
+ pcp_op_T__ old__, *ptr__; \
+ preempt_disable(); \
+ ptr__ = __this_cpu_ptr(&(pcp)); \
+ asm volatile( \
+ op " %[old__],%[val__],%[ptr__]\n" \
+ : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \
+ : [val__] "d" (val__) \
+ : "cc"); \
+ preempt_enable(); \
+}
+
+#define this_cpu_and_4(pcp, val) arch_this_cpu_to_op(pcp, val, "lan")
+#define this_cpu_and_8(pcp, val) arch_this_cpu_to_op(pcp, val, "lang")
+#define this_cpu_or_4(pcp, val) arch_this_cpu_to_op(pcp, val, "lao")
+#define this_cpu_or_8(pcp, val) arch_this_cpu_to_op(pcp, val, "laog")
+
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
#define arch_this_cpu_cmpxchg(pcp, oval, nval) \
({ \
@@ -74,13 +139,7 @@
pcp_op_T__ *ptr__; \
preempt_disable(); \
ptr__ = __this_cpu_ptr(&(pcp)); \
- switch (sizeof(*ptr__)) { \
- case 8: \
- ret__ = cmpxchg64(ptr__, oval, nval); \
- break; \
- default: \
- ret__ = cmpxchg(ptr__, oval, nval); \
- } \
+ ret__ = cmpxchg(ptr__, oval, nval); \
preempt_enable(); \
ret__; \
})
@@ -104,9 +163,7 @@
#define this_cpu_xchg_1(pcp, nval) arch_this_cpu_xchg(pcp, nval)
#define this_cpu_xchg_2(pcp, nval) arch_this_cpu_xchg(pcp, nval)
#define this_cpu_xchg_4(pcp, nval) arch_this_cpu_xchg(pcp, nval)
-#ifdef CONFIG_64BIT
#define this_cpu_xchg_8(pcp, nval) arch_this_cpu_xchg(pcp, nval)
-#endif
#define arch_this_cpu_cmpxchg_double(pcp1, pcp2, o1, o2, n1, n2) \
({ \
@@ -124,9 +181,9 @@
})
#define this_cpu_cmpxchg_double_4 arch_this_cpu_cmpxchg_double
-#ifdef CONFIG_64BIT
#define this_cpu_cmpxchg_double_8 arch_this_cpu_cmpxchg_double
-#endif
+
+#endif /* CONFIG_64BIT */
#include <asm-generic/percpu.h>
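
On machines without the z196 interlocked-access facility the 4- and 8-byte per-cpu operations still go through arch_this_cpu_to_op_simple(), a compare-and-swap retry loop, which is cheaper than the generic fallback of disabling and re-enabling interrupts; on z196 and newer they map directly onto laa/laag, lan/lang, lao/laog and asi/agsi. A standalone sketch of the retry pattern, using compiler __atomic builtins instead of the kernel's cmpxchg() and ignoring preemption, purely for illustration:

	/* Sketch of the cmpxchg-style retry loop; not the kernel macro. */
	static inline int percpu_add_return_sketch(int *ptr, int val)
	{
		int old, new;

		old = __atomic_load_n(ptr, __ATOMIC_RELAXED);
		do {
			new = old + val;
			/* on failure, 'old' is refreshed from *ptr */
		} while (!__atomic_compare_exchange_n(ptr, &old, new, false,
						      __ATOMIC_RELAXED,
						      __ATOMIC_RELAXED));
		return new;
	}
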
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index ca7821f07260..0a876bc543d3 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -134,19 +134,17 @@ struct stack_frame {
* Do necessary setup to start up a new thread.
*/
#define start_thread(regs, new_psw, new_stackp) do { \
- regs->psw.mask = psw_user_bits | PSW_MASK_EA | PSW_MASK_BA; \
+ regs->psw.mask = PSW_USER_BITS | PSW_MASK_EA | PSW_MASK_BA; \
regs->psw.addr = new_psw | PSW_ADDR_AMODE; \
regs->gprs[15] = new_stackp; \
execve_tail(); \
} while (0)
#define start_thread31(regs, new_psw, new_stackp) do { \
- regs->psw.mask = psw_user_bits | PSW_MASK_BA; \
+ regs->psw.mask = PSW_USER_BITS | PSW_MASK_BA; \
regs->psw.addr = new_psw | PSW_ADDR_AMODE; \
regs->gprs[15] = new_stackp; \
- __tlb_flush_mm(current->mm); \
crst_table_downgrade(current->mm, 1UL << 31); \
- update_mm(current->mm, current); \
execve_tail(); \
} while (0)
@@ -169,17 +167,15 @@ extern void release_thread(struct task_struct *);
*/
extern unsigned long thread_saved_pc(struct task_struct *t);
-extern void show_code(struct pt_regs *regs);
-extern void print_fn_code(unsigned char *code, unsigned long len);
-extern int insn_to_mnemonic(unsigned char *instruction, char *buf,
- unsigned int len);
-
unsigned long get_wchan(struct task_struct *p);
#define task_pt_regs(tsk) ((struct pt_regs *) \
(task_stack_page(tsk) + THREAD_SIZE) - 1)
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->psw.addr)
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->gprs[15])
+/* Does the task have runtime instrumentation enabled? */
+#define is_ri_task(tsk) (!!(tsk)->thread.ri_cb)
+
static inline unsigned short stap(void)
{
unsigned short cpu_address;
@@ -348,9 +344,9 @@ __set_psw_mask(unsigned long mask)
}
#define local_mcck_enable() \
- __set_psw_mask(psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK)
+ __set_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT | PSW_MASK_MCHECK)
#define local_mcck_disable() \
- __set_psw_mask(psw_kernel_bits | PSW_MASK_DAT)
+ __set_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT)
/*
* Basic Machine Check/Program Check Handler.
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index 52b56533c57c..9c82cebddabd 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -10,8 +10,11 @@
#ifndef __ASSEMBLY__
-extern long psw_kernel_bits;
-extern long psw_user_bits;
+#define PSW_KERNEL_BITS (PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_ASC_HOME | \
+ PSW_MASK_EA | PSW_MASK_BA)
+#define PSW_USER_BITS (PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | \
+ PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_MCHECK | \
+ PSW_MASK_PSTATE | PSW_ASC_PRIMARY)
/*
* The pt_regs struct defines the way the registers are stored on
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 59880dbaf360..df802ee14af6 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -48,13 +48,6 @@ void detect_memory_layout(struct mem_chunk chunk[], unsigned long maxsize);
void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
unsigned long size);
-#define PRIMARY_SPACE_MODE 0
-#define ACCESS_REGISTER_MODE 1
-#define SECONDARY_SPACE_MODE 2
-#define HOME_SPACE_MODE 3
-
-extern unsigned int s390_user_mode;
-
/*
* Machine features detected in head.S
*/
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index b64f15c3b4cc..ac9bed8e103f 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -14,7 +14,6 @@
#define raw_smp_processor_id() (S390_lowcore.cpu_nr)
extern struct mutex smp_cpu_state_mutex;
-extern struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
extern int __cpu_up(unsigned int cpu, struct task_struct *tidle);
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
index 6dbd559763c9..29c81f82705e 100644
--- a/arch/s390/include/asm/switch_to.h
+++ b/arch/s390/include/asm/switch_to.h
@@ -13,58 +13,94 @@
extern struct task_struct *__switch_to(void *, void *);
extern void update_cr_regs(struct task_struct *task);
-static inline void save_fp_regs(s390_fp_regs *fpregs)
+static inline int test_fp_ctl(u32 fpc)
{
+ u32 orig_fpc;
+ int rc;
+
+ if (!MACHINE_HAS_IEEE)
+ return 0;
+
asm volatile(
- " std 0,%O0+8(%R0)\n"
- " std 2,%O0+24(%R0)\n"
- " std 4,%O0+40(%R0)\n"
- " std 6,%O0+56(%R0)"
- : "=Q" (*fpregs) : "Q" (*fpregs));
+ " efpc %1\n"
+ " sfpc %2\n"
+ "0: sfpc %1\n"
+ " la %0,0\n"
+ "1:\n"
+ EX_TABLE(0b,1b)
+ : "=d" (rc), "=d" (orig_fpc)
+ : "d" (fpc), "0" (-EINVAL));
+ return rc;
+}
+
+static inline void save_fp_ctl(u32 *fpc)
+{
if (!MACHINE_HAS_IEEE)
return;
+
asm volatile(
- " stfpc %0\n"
- " std 1,%O0+16(%R0)\n"
- " std 3,%O0+32(%R0)\n"
- " std 5,%O0+48(%R0)\n"
- " std 7,%O0+64(%R0)\n"
- " std 8,%O0+72(%R0)\n"
- " std 9,%O0+80(%R0)\n"
- " std 10,%O0+88(%R0)\n"
- " std 11,%O0+96(%R0)\n"
- " std 12,%O0+104(%R0)\n"
- " std 13,%O0+112(%R0)\n"
- " std 14,%O0+120(%R0)\n"
- " std 15,%O0+128(%R0)\n"
- : "=Q" (*fpregs) : "Q" (*fpregs));
+ " stfpc %0\n"
+ : "+Q" (*fpc));
}
-static inline void restore_fp_regs(s390_fp_regs *fpregs)
+static inline int restore_fp_ctl(u32 *fpc)
{
+ int rc;
+
+ if (!MACHINE_HAS_IEEE)
+ return 0;
+
asm volatile(
- " ld 0,%O0+8(%R0)\n"
- " ld 2,%O0+24(%R0)\n"
- " ld 4,%O0+40(%R0)\n"
- " ld 6,%O0+56(%R0)"
- : : "Q" (*fpregs));
+ "0: lfpc %1\n"
+ " la %0,0\n"
+ "1:\n"
+ EX_TABLE(0b,1b)
+ : "=d" (rc) : "Q" (*fpc), "0" (-EINVAL));
+ return rc;
+}
+
+static inline void save_fp_regs(freg_t *fprs)
+{
+ asm volatile("std 0,%0" : "=Q" (fprs[0]));
+ asm volatile("std 2,%0" : "=Q" (fprs[2]));
+ asm volatile("std 4,%0" : "=Q" (fprs[4]));
+ asm volatile("std 6,%0" : "=Q" (fprs[6]));
if (!MACHINE_HAS_IEEE)
return;
- asm volatile(
- " lfpc %0\n"
- " ld 1,%O0+16(%R0)\n"
- " ld 3,%O0+32(%R0)\n"
- " ld 5,%O0+48(%R0)\n"
- " ld 7,%O0+64(%R0)\n"
- " ld 8,%O0+72(%R0)\n"
- " ld 9,%O0+80(%R0)\n"
- " ld 10,%O0+88(%R0)\n"
- " ld 11,%O0+96(%R0)\n"
- " ld 12,%O0+104(%R0)\n"
- " ld 13,%O0+112(%R0)\n"
- " ld 14,%O0+120(%R0)\n"
- " ld 15,%O0+128(%R0)\n"
- : : "Q" (*fpregs));
+ asm volatile("std 1,%0" : "=Q" (fprs[1]));
+ asm volatile("std 3,%0" : "=Q" (fprs[3]));
+ asm volatile("std 5,%0" : "=Q" (fprs[5]));
+ asm volatile("std 7,%0" : "=Q" (fprs[7]));
+ asm volatile("std 8,%0" : "=Q" (fprs[8]));
+ asm volatile("std 9,%0" : "=Q" (fprs[9]));
+ asm volatile("std 10,%0" : "=Q" (fprs[10]));
+ asm volatile("std 11,%0" : "=Q" (fprs[11]));
+ asm volatile("std 12,%0" : "=Q" (fprs[12]));
+ asm volatile("std 13,%0" : "=Q" (fprs[13]));
+ asm volatile("std 14,%0" : "=Q" (fprs[14]));
+ asm volatile("std 15,%0" : "=Q" (fprs[15]));
+}
+
+static inline void restore_fp_regs(freg_t *fprs)
+{
+ asm volatile("ld 0,%0" : : "Q" (fprs[0]));
+ asm volatile("ld 2,%0" : : "Q" (fprs[2]));
+ asm volatile("ld 4,%0" : : "Q" (fprs[4]));
+ asm volatile("ld 6,%0" : : "Q" (fprs[6]));
+ if (!MACHINE_HAS_IEEE)
+ return;
+ asm volatile("ld 1,%0" : : "Q" (fprs[1]));
+ asm volatile("ld 3,%0" : : "Q" (fprs[3]));
+ asm volatile("ld 5,%0" : : "Q" (fprs[5]));
+ asm volatile("ld 7,%0" : : "Q" (fprs[7]));
+ asm volatile("ld 8,%0" : : "Q" (fprs[8]));
+ asm volatile("ld 9,%0" : : "Q" (fprs[9]));
+ asm volatile("ld 10,%0" : : "Q" (fprs[10]));
+ asm volatile("ld 11,%0" : : "Q" (fprs[11]));
+ asm volatile("ld 12,%0" : : "Q" (fprs[12]));
+ asm volatile("ld 13,%0" : : "Q" (fprs[13]));
+ asm volatile("ld 14,%0" : : "Q" (fprs[14]));
+ asm volatile("ld 15,%0" : : "Q" (fprs[15]));
}
static inline void save_access_regs(unsigned int *acrs)
@@ -83,12 +119,14 @@ static inline void restore_access_regs(unsigned int *acrs)
#define switch_to(prev,next,last) do { \
if (prev->mm) { \
- save_fp_regs(&prev->thread.fp_regs); \
+ save_fp_ctl(&prev->thread.fp_regs.fpc); \
+ save_fp_regs(prev->thread.fp_regs.fprs); \
save_access_regs(&prev->thread.acrs[0]); \
save_ri_cb(prev->thread.ri_cb); \
} \
if (next->mm) { \
- restore_fp_regs(&next->thread.fp_regs); \
+ restore_fp_ctl(&next->thread.fp_regs.fpc); \
+ restore_fp_regs(next->thread.fp_regs.fprs); \
restore_access_regs(&next->thread.acrs[0]); \
restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \
update_cr_regs(next); \
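
The floating-point state handling is split because only the control word can fail to load: lfpc raises a specification exception for an invalid fpc, so restore_fp_ctl() goes through an EX_TABLE fixup and reports -EINVAL, and test_fp_ctl() lets callers validate a user-supplied value up front. The data registers themselves always load cleanly, which is why callers such as restore_sigregs32() deal with the fpc first. A hedged sketch of that calling convention (the helper below is illustrative, not part of this patch):

	/* Sketch: restore user FP state, fpc first since only it can fail. */
	static int restore_user_fp_sketch(struct thread_struct *thread)
	{
		if (restore_fp_ctl(&thread->fp_regs.fpc))
			return -EINVAL;		/* invalid fpc from user space */
		restore_fp_regs(thread->fp_regs.fprs);	/* cannot fail */
		return 0;
	}
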
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index 819b94d22720..8beee1cceba4 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -71,9 +71,11 @@ static inline void local_tick_enable(unsigned long long comp)
typedef unsigned long long cycles_t;
-static inline void get_tod_clock_ext(char *clk)
+static inline void get_tod_clock_ext(char clk[16])
{
- asm volatile("stcke %0" : "=Q" (*clk) : : "cc");
+ typedef struct { char _[sizeof(clk)]; } addrtype;
+
+ asm volatile("stcke %0" : "=Q" (*(addrtype *) clk) : : "cc");
}
static inline unsigned long long get_tod_clock(void)
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 9c33ed4e666f..79330af9a5f8 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -94,9 +94,7 @@ static inline unsigned long extable_fixup(const struct exception_table_entry *x)
struct uaccess_ops {
size_t (*copy_from_user)(size_t, const void __user *, void *);
- size_t (*copy_from_user_small)(size_t, const void __user *, void *);
size_t (*copy_to_user)(size_t, void __user *, const void *);
- size_t (*copy_to_user_small)(size_t, void __user *, const void *);
size_t (*copy_in_user)(size_t, void __user *, const void __user *);
size_t (*clear_user)(size_t, void __user *);
size_t (*strnlen_user)(size_t, const char __user *);
@@ -106,22 +104,20 @@ struct uaccess_ops {
};
extern struct uaccess_ops uaccess;
-extern struct uaccess_ops uaccess_std;
extern struct uaccess_ops uaccess_mvcos;
-extern struct uaccess_ops uaccess_mvcos_switch;
extern struct uaccess_ops uaccess_pt;
extern int __handle_fault(unsigned long, unsigned long, int);
static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
{
- size = uaccess.copy_to_user_small(size, ptr, x);
+ size = uaccess.copy_to_user(size, ptr, x);
return size ? -EFAULT : size;
}
static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
{
- size = uaccess.copy_from_user_small(size, ptr, x);
+ size = uaccess.copy_from_user(size, ptr, x);
return size ? -EFAULT : size;
}
@@ -226,10 +222,7 @@ extern int __get_user_bad(void) __attribute__((noreturn));
static inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
- if (__builtin_constant_p(n) && (n <= 256))
- return uaccess.copy_to_user_small(n, to, from);
- else
- return uaccess.copy_to_user(n, to, from);
+ return uaccess.copy_to_user(n, to, from);
}
#define __copy_to_user_inatomic __copy_to_user
@@ -275,10 +268,7 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
static inline unsigned long __must_check
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
- if (__builtin_constant_p(n) && (n <= 256))
- return uaccess.copy_from_user_small(n, from, to);
- else
- return uaccess.copy_from_user(n, from, to);
+ return uaccess.copy_from_user(n, from, to);
}
extern void copy_from_user_overflow(void)
diff --git a/arch/s390/include/uapi/asm/ptrace.h b/arch/s390/include/uapi/asm/ptrace.h
index 7a84619e315e..7e0b498a2c2b 100644
--- a/arch/s390/include/uapi/asm/ptrace.h
+++ b/arch/s390/include/uapi/asm/ptrace.h
@@ -199,6 +199,7 @@ typedef union
typedef struct
{
__u32 fpc;
+ __u32 pad;
freg_t fprs[NUM_FPRS];
} s390_fp_regs;
@@ -206,7 +207,6 @@ typedef struct
#define FPC_FLAGS_MASK 0x00F80000
#define FPC_DXC_MASK 0x0000FF00
#define FPC_RM_MASK 0x00000003
-#define FPC_VALID_MASK 0xF8F8FF03
/* this typedef defines how a Program Status Word looks like */
typedef struct
@@ -263,7 +263,7 @@ typedef struct
#define PSW_MASK_EA 0x0000000100000000UL
#define PSW_MASK_BA 0x0000000080000000UL
-#define PSW_MASK_USER 0x0000FF8180000000UL
+#define PSW_MASK_USER 0x0000FF0180000000UL
#define PSW_ADDR_AMODE 0x0000000000000000UL
#define PSW_ADDR_INSN 0xFFFFFFFFFFFFFFFFUL
diff --git a/arch/s390/include/uapi/asm/sigcontext.h b/arch/s390/include/uapi/asm/sigcontext.h
index 584787f6ce44..b30de9c01bbe 100644
--- a/arch/s390/include/uapi/asm/sigcontext.h
+++ b/arch/s390/include/uapi/asm/sigcontext.h
@@ -49,6 +49,7 @@ typedef struct
typedef struct
{
unsigned int fpc;
+ unsigned int pad;
double fprs[__NUM_FPRS];
} _s390_fp_regs;
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 4bb2a4656163..2403303cfed7 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -28,7 +28,7 @@ CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w
-obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o vtime.o
+obj-y := traps.o time.o process.o base.o early.o setup.o vtime.o
obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
obj-y += debug.o irq.o ipl.o dis.o diag.o sclp.o vdso.o
obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
diff --git a/arch/s390/kernel/bitmap.c b/arch/s390/kernel/bitmap.c
deleted file mode 100644
index 102da5e23037..000000000000
--- a/arch/s390/kernel/bitmap.c
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Bitmaps for set_bit, clear_bit, test_and_set_bit, ...
- * See include/asm/{bitops.h|posix_types.h} for details
- *
- * Copyright IBM Corp. 1999, 2009
- * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
- */
-
-#include <linux/bitops.h>
-#include <linux/module.h>
-
-const char _oi_bitmap[] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80 };
-EXPORT_SYMBOL(_oi_bitmap);
-
-const char _ni_bitmap[] = { 0xfe, 0xfd, 0xfb, 0xf7, 0xef, 0xdf, 0xbf, 0x7f };
-EXPORT_SYMBOL(_ni_bitmap);
-
-const char _zb_findmap[] = {
- 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
- 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,
- 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
- 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,6,
- 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
- 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,
- 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
- 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,7,
- 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
- 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,
- 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
- 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,6,
- 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
- 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,
- 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
- 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,8 };
-EXPORT_SYMBOL(_zb_findmap);
-
-const char _sb_findmap[] = {
- 8,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
- 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
- 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
- 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
- 6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
- 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
- 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
- 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
- 7,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
- 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
- 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
- 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
- 6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
- 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
- 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
- 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 };
-EXPORT_SYMBOL(_sb_findmap);
diff --git a/arch/s390/kernel/cache.c b/arch/s390/kernel/cache.c
index dd62071624be..3a414c0f93ed 100644
--- a/arch/s390/kernel/cache.c
+++ b/arch/s390/kernel/cache.c
@@ -146,15 +146,14 @@ static void __init cache_build_info(void)
ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
for (level = 0; level < CACHE_MAX_LEVEL; level++) {
switch (ct.ci[level].scope) {
- case CACHE_SCOPE_NOTEXISTS:
- case CACHE_SCOPE_RESERVED:
- return;
case CACHE_SCOPE_SHARED:
private = 0;
break;
case CACHE_SCOPE_PRIVATE:
private = 1;
break;
+ default:
+ return;
}
if (ct.ci[level].type == CACHE_TYPE_SEPARATE) {
rc = cache_add(level, private, CACHE_TYPE_DATA);
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index 1f1b8c70ab97..e030d2bdec1b 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -58,10 +58,6 @@
#include "compat_linux.h"
-u32 psw32_user_bits = PSW32_MASK_DAT | PSW32_MASK_IO | PSW32_MASK_EXT |
- PSW32_DEFAULT_KEY | PSW32_MASK_BASE | PSW32_MASK_MCHECK |
- PSW32_MASK_PSTATE | PSW32_ASC_HOME;
-
/* For this source file, we want overflow handling. */
#undef high2lowuid
diff --git a/arch/s390/kernel/compat_linux.h b/arch/s390/kernel/compat_linux.h
index 976518c0592a..1bfda3eca379 100644
--- a/arch/s390/kernel/compat_linux.h
+++ b/arch/s390/kernel/compat_linux.h
@@ -27,6 +27,7 @@ typedef union
typedef struct
{
unsigned int fpc;
+ unsigned int pad;
freg_t32 fprs[__NUM_FPRS];
} _s390_fp_regs32;
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index adaa9e9478d8..5a3ab5c191fd 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -153,57 +153,66 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs)
{
- _s390_regs_common32 regs32;
- int err, i;
+ _sigregs32 user_sregs;
+ int i;
- regs32.psw.mask = psw32_user_bits |
- ((__u32)(regs->psw.mask >> 32) & PSW32_MASK_USER);
- regs32.psw.addr = (__u32) regs->psw.addr |
+ user_sregs.regs.psw.mask = (__u32)(regs->psw.mask >> 32);
+ user_sregs.regs.psw.mask &= PSW32_MASK_USER | PSW32_MASK_RI;
+ user_sregs.regs.psw.mask |= PSW32_USER_BITS;
+ user_sregs.regs.psw.addr = (__u32) regs->psw.addr |
(__u32)(regs->psw.mask & PSW_MASK_BA);
for (i = 0; i < NUM_GPRS; i++)
- regs32.gprs[i] = (__u32) regs->gprs[i];
+ user_sregs.regs.gprs[i] = (__u32) regs->gprs[i];
save_access_regs(current->thread.acrs);
- memcpy(regs32.acrs, current->thread.acrs, sizeof(regs32.acrs));
- err = __copy_to_user(&sregs->regs, &regs32, sizeof(regs32));
- if (err)
- return err;
- save_fp_regs(&current->thread.fp_regs);
- /* s390_fp_regs and _s390_fp_regs32 are the same ! */
- return __copy_to_user(&sregs->fpregs, &current->thread.fp_regs,
- sizeof(_s390_fp_regs32));
+ memcpy(&user_sregs.regs.acrs, current->thread.acrs,
+ sizeof(user_sregs.regs.acrs));
+ save_fp_ctl(&current->thread.fp_regs.fpc);
+ save_fp_regs(current->thread.fp_regs.fprs);
+ memcpy(&user_sregs.fpregs, &current->thread.fp_regs,
+ sizeof(user_sregs.fpregs));
+ if (__copy_to_user(sregs, &user_sregs, sizeof(_sigregs32)))
+ return -EFAULT;
+ return 0;
}
static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs)
{
- _s390_regs_common32 regs32;
- int err, i;
+ _sigregs32 user_sregs;
+ int i;
/* Always make any pending restarted system call return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
- err = __copy_from_user(&regs32, &sregs->regs, sizeof(regs32));
- if (err)
- return err;
+ if (__copy_from_user(&user_sregs, &sregs->regs, sizeof(user_sregs)))
+ return -EFAULT;
+
+ if (!is_ri_task(current) && (user_sregs.regs.psw.mask & PSW32_MASK_RI))
+ return -EINVAL;
+
+ /* Loading the floating-point-control word can fail. Do that first. */
+ if (restore_fp_ctl(&user_sregs.fpregs.fpc))
+ return -EINVAL;
+
+ /* Use regs->psw.mask instead of PSW_USER_BITS to preserve PER bit. */
regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
- (__u64)(regs32.psw.mask & PSW32_MASK_USER) << 32 |
- (__u64)(regs32.psw.addr & PSW32_ADDR_AMODE);
+ (__u64)(user_sregs.regs.psw.mask & PSW32_MASK_USER) << 32 |
+ (__u64)(user_sregs.regs.psw.mask & PSW32_MASK_RI) << 32 |
+ (__u64)(user_sregs.regs.psw.addr & PSW32_ADDR_AMODE);
/* Check for invalid user address space control. */
- if ((regs->psw.mask & PSW_MASK_ASC) >= (psw_kernel_bits & PSW_MASK_ASC))
- regs->psw.mask = (psw_user_bits & PSW_MASK_ASC) |
+ if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_HOME)
+ regs->psw.mask = PSW_ASC_PRIMARY |
(regs->psw.mask & ~PSW_MASK_ASC);
- regs->psw.addr = (__u64)(regs32.psw.addr & PSW32_ADDR_INSN);
+ regs->psw.addr = (__u64)(user_sregs.regs.psw.addr & PSW32_ADDR_INSN);
for (i = 0; i < NUM_GPRS; i++)
- regs->gprs[i] = (__u64) regs32.gprs[i];
- memcpy(current->thread.acrs, regs32.acrs, sizeof(current->thread.acrs));
+ regs->gprs[i] = (__u64) user_sregs.regs.gprs[i];
+ memcpy(&current->thread.acrs, &user_sregs.regs.acrs,
+ sizeof(current->thread.acrs));
restore_access_regs(current->thread.acrs);
- err = __copy_from_user(&current->thread.fp_regs, &sregs->fpregs,
- sizeof(_s390_fp_regs32));
- current->thread.fp_regs.fpc &= FPC_VALID_MASK;
- if (err)
- return err;
+ memcpy(&current->thread.fp_regs, &user_sregs.fpregs,
+ sizeof(current->thread.fp_regs));
- restore_fp_regs(&current->thread.fp_regs);
+ restore_fp_regs(current->thread.fp_regs.fprs);
clear_thread_flag(TIF_SYSCALL); /* No longer in a system call */
return 0;
}
@@ -215,18 +224,18 @@ static int save_sigregs_gprs_high(struct pt_regs *regs, __u32 __user *uregs)
for (i = 0; i < NUM_GPRS; i++)
gprs_high[i] = regs->gprs[i] >> 32;
-
- return __copy_to_user(uregs, &gprs_high, sizeof(gprs_high));
+ if (__copy_to_user(uregs, &gprs_high, sizeof(gprs_high)))
+ return -EFAULT;
+ return 0;
}
static int restore_sigregs_gprs_high(struct pt_regs *regs, __u32 __user *uregs)
{
__u32 gprs_high[NUM_GPRS];
- int err, i;
+ int i;
- err = __copy_from_user(&gprs_high, uregs, sizeof(gprs_high));
- if (err)
- return err;
+ if (__copy_from_user(&gprs_high, uregs, sizeof(gprs_high)))
+ return -EFAULT;
for (i = 0; i < NUM_GPRS; i++)
*(__u32 *)&regs->gprs[i] = gprs_high[i];
return 0;
@@ -348,7 +357,7 @@ static int setup_frame32(int sig, struct k_sigaction *ka,
regs->gprs[15] = (__force __u64) frame;
/* Force 31 bit amode and default user address space control. */
regs->psw.mask = PSW_MASK_BA |
- (psw_user_bits & PSW_MASK_ASC) |
+ (PSW_USER_BITS & PSW_MASK_ASC) |
(regs->psw.mask & ~PSW_MASK_ASC);
regs->psw.addr = (__force __u64) ka->sa.sa_handler;
@@ -415,7 +424,7 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info,
regs->gprs[15] = (__force __u64) frame;
/* Force 31 bit amode and default user address space control. */
regs->psw.mask = PSW_MASK_BA |
- (psw_user_bits & PSW_MASK_ASC) |
+ (PSW_USER_BITS & PSW_MASK_ASC) |
(regs->psw.mask & ~PSW_MASK_ASC);
regs->psw.addr = (__u64 __force) ka->sa.sa_handler;
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index 7dd21720e5b0..f45b2ab0cb81 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -22,6 +22,32 @@
#define PTR_SUB(x, y) (((char *) (x)) - ((unsigned long) (y)))
#define PTR_DIFF(x, y) ((unsigned long)(((char *) (x)) - ((unsigned long) (y))))
+struct dump_save_areas dump_save_areas;
+
+/*
+ * Allocate and add a save area for a CPU
+ */
+struct save_area *dump_save_area_create(int cpu)
+{
+ struct save_area **save_areas, *save_area;
+
+ save_area = kmalloc(sizeof(*save_area), GFP_KERNEL);
+ if (!save_area)
+ return NULL;
+ if (cpu + 1 > dump_save_areas.count) {
+ dump_save_areas.count = cpu + 1;
+ save_areas = krealloc(dump_save_areas.areas,
+ dump_save_areas.count * sizeof(void *),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!save_areas) {
+ kfree(save_area);
+ return NULL;
+ }
+ dump_save_areas.areas = save_areas;
+ }
+ dump_save_areas.areas[cpu] = save_area;
+ return save_area;
+}
/*
* Return physical address for virtual address
@@ -45,7 +71,6 @@ static inline void *load_real_addr(void *addr)
static int copy_from_realmem(void *dest, void *src, size_t count)
{
unsigned long size;
- int rc;
if (!count)
return 0;
@@ -451,8 +476,8 @@ static int get_cpu_cnt(void)
{
int i, cpus = 0;
- for (i = 0; zfcpdump_save_areas[i]; i++) {
- if (zfcpdump_save_areas[i]->pref_reg == 0)
+ for (i = 0; i < dump_save_areas.count; i++) {
+ if (dump_save_areas.areas[i]->pref_reg == 0)
continue;
cpus++;
}
@@ -523,8 +548,8 @@ static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset)
ptr = nt_prpsinfo(ptr);
- for (i = 0; zfcpdump_save_areas[i]; i++) {
- sa = zfcpdump_save_areas[i];
+ for (i = 0; i < dump_save_areas.count; i++) {
+ sa = dump_save_areas.areas[i];
if (sa->pref_reg == 0)
continue;
ptr = fill_cpu_elf_notes(ptr, sa);
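
The dump_save_area_create() helper added at the top of this file grows the dump_save_areas.areas pointer array on demand with krealloc() and tracks the highest CPU index in dump_save_areas.count, so the loops above can simply walk count entries instead of scanning the old NULL-terminated zfcpdump_save_areas[] array. A hedged usage sketch; the real caller appears to live in the SMP/zfcpdump setup code, and the function below is illustrative only:

	/* Illustration only: registering a save area for one CPU at dump time. */
	static struct save_area *collect_cpu_state_sketch(int cpu)
	{
		struct save_area *sa;

		sa = dump_save_area_create(cpu);
		if (!sa)
			return NULL;		/* allocation failed */
		/* ...copy the stored registers of 'cpu' into *sa... */
		return sa;
	}
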
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index 17d62fe5d7b7..ee8390da6ea7 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -889,7 +889,7 @@ static int debug_active=1;
* if debug_active is already off
*/
static int
-s390dbf_procactive(ctl_table *table, int write,
+s390dbf_procactive(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
if (!write || debug_stoppable || !debug_active)
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index be87d3e05a5b..993efe6a887c 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -23,6 +23,7 @@
#include <linux/kdebug.h>
#include <asm/uaccess.h>
+#include <asm/dis.h>
#include <asm/io.h>
#include <linux/atomic.h>
#include <asm/mathemu.h>
@@ -37,17 +38,6 @@
#define ONELONG "%016lx: "
#endif /* CONFIG_64BIT */
-#define OPERAND_GPR 0x1 /* Operand printed as %rx */
-#define OPERAND_FPR 0x2 /* Operand printed as %fx */
-#define OPERAND_AR 0x4 /* Operand printed as %ax */
-#define OPERAND_CR 0x8 /* Operand printed as %cx */
-#define OPERAND_DISP 0x10 /* Operand printed as displacement */
-#define OPERAND_BASE 0x20 /* Operand printed as base register */
-#define OPERAND_INDEX 0x40 /* Operand printed as index register */
-#define OPERAND_PCREL 0x80 /* Operand printed as pc-relative symbol */
-#define OPERAND_SIGNED 0x100 /* Operand printed as signed value */
-#define OPERAND_LENGTH 0x200 /* Operand printed as length (+1) */
-
enum {
UNUSED, /* Indicates the end of the operand list */
R_8, /* GPR starting at position 8 */
@@ -155,19 +145,7 @@ enum {
INSTR_S_00, INSTR_S_RD,
};
-struct operand {
- int bits; /* The number of bits in the operand. */
- int shift; /* The number of bits to shift. */
- int flags; /* One bit syntax flags. */
-};
-
-struct insn {
- const char name[5];
- unsigned char opfrag;
- unsigned char format;
-};
-
-static const struct operand operands[] =
+static const struct s390_operand operands[] =
{
[UNUSED] = { 0, 0, 0 },
[R_8] = { 4, 8, OPERAND_GPR },
@@ -479,7 +457,7 @@ static char *long_insn_name[] = {
[LONG_INSN_PCISTB] = "pcistb",
};
-static struct insn opcode[] = {
+static struct s390_insn opcode[] = {
#ifdef CONFIG_64BIT
{ "bprp", 0xc5, INSTR_MII_UPI },
{ "bpp", 0xc7, INSTR_SMI_U0RDP },
@@ -668,7 +646,7 @@ static struct insn opcode[] = {
{ "", 0, INSTR_INVALID }
};
-static struct insn opcode_01[] = {
+static struct s390_insn opcode_01[] = {
#ifdef CONFIG_64BIT
{ "ptff", 0x04, INSTR_E },
{ "pfpo", 0x0a, INSTR_E },
@@ -684,7 +662,7 @@ static struct insn opcode_01[] = {
{ "", 0, INSTR_INVALID }
};
-static struct insn opcode_a5[] = {
+static struct s390_insn opcode_a5[] = {
#ifdef CONFIG_64BIT
{ "iihh", 0x00, INSTR_RI_RU },
{ "iihl", 0x01, INSTR_RI_RU },
@@ -706,7 +684,7 @@ static struct insn opcode_a5[] = {
{ "", 0, INSTR_INVALID }
};
-static struct insn opcode_a7[] = {
+static struct s390_insn opcode_a7[] = {
#ifdef CONFIG_64BIT
{ "tmhh", 0x02, INSTR_RI_RU },
{ "tmhl", 0x03, INSTR_RI_RU },
@@ -728,7 +706,7 @@ static struct insn opcode_a7[] = {
{ "", 0, INSTR_INVALID }
};
-static struct insn opcode_aa[] = {
+static struct s390_insn opcode_aa[] = {
#ifdef CONFIG_64BIT
{ { 0, LONG_INSN_RINEXT }, 0x00, INSTR_RI_RI },
{ "rion", 0x01, INSTR_RI_RI },
@@ -739,7 +717,7 @@ static struct insn opcode_aa[] = {
{ "", 0, INSTR_INVALID }
};
-static struct insn opcode_b2[] = {
+static struct s390_insn opcode_b2[] = {
#ifdef CONFIG_64BIT
{ "stckf", 0x7c, INSTR_S_RD },
{ "lpp", 0x80, INSTR_S_RD },
@@ -851,7 +829,7 @@ static struct insn opcode_b2[] = {
{ "", 0, INSTR_INVALID }
};
-static struct insn opcode_b3[] = {
+static struct s390_insn opcode_b3[] = {
#ifdef CONFIG_64BIT
{ "maylr", 0x38, INSTR_RRF_F0FF },
{ "mylr", 0x39, INSTR_RRF_F0FF },
@@ -1034,7 +1012,7 @@ static struct insn opcode_b3[] = {
{ "", 0, INSTR_INVALID }
};
-static struct insn opcode_b9[] = {
+static struct s390_insn opcode_b9[] = {
#ifdef CONFIG_64BIT
{ "lpgr", 0x00, INSTR_RRE_RR },
{ "lngr", 0x01, INSTR_RRE_RR },
@@ -1167,7 +1145,7 @@ static struct insn opcode_b9[] = {
{ "", 0, INSTR_INVALID }
};
-static struct insn opcode_c0[] = {
+static struct s390_insn opcode_c0[] = {
#ifdef CONFIG_64BIT
{ "lgfi", 0x01, INSTR_RIL_RI },
{ "xihf", 0x06, INSTR_RIL_RU },
@@ -1187,7 +1165,7 @@ static struct insn opcode_c0[] = {
{ "", 0, INSTR_INVALID }
};
-static struct insn opcode_c2[] = {
+static struct s390_insn opcode_c2[] = {
#ifdef CONFIG_64BIT
{ "msgfi", 0x00, INSTR_RIL_RI },
{ "msfi", 0x01, INSTR_RIL_RI },
@@ -1205,7 +1183,7 @@ static struct insn opcode_c2[] = {
{ "", 0, INSTR_INVALID }
};
-static struct insn opcode_c4[] = {
+static struct s390_insn opcode_c4[] = {
#ifdef CONFIG_64BIT
{ "llhrl", 0x02, INSTR_RIL_RP },
{ "lghrl", 0x04, INSTR_RIL_RP },
@@ -1222,7 +1200,7 @@ static struct insn opcode_c4[] = {
{ "", 0, INSTR_INVALID }
};
-static struct insn opcode_c6[] = {
+static struct s390_insn opcode_c6[] = {
#ifdef CONFIG_64BIT
{ "exrl", 0x00, INSTR_RIL_RP },
{ "pfdrl", 0x02, INSTR_RIL_UP },
@@ -1240,7 +1218,7 @@ static struct insn opcode_c6[] = {
{ "", 0, INSTR_INVALID }
};
-static struct insn opcode_c8[] = {
+static struct s390_insn opcode_c8[] = {
#ifdef CONFIG_64BIT
{ "mvcos", 0x00, INSTR_SSF_RRDRD },
{ "ectg", 0x01, INSTR_SSF_RRDRD },
@@ -1251,7 +1229,7 @@ static struct insn opcode_c8[] = {
{ "", 0, INSTR_INVALID }
};
-static struct insn opcode_cc[] = {
+static struct s390_insn opcode_cc[] = {
#ifdef CONFIG_64BIT
{ "brcth", 0x06, INSTR_RIL_RP },
{ "aih", 0x08, INSTR_RIL_RI },
@@ -1263,7 +1241,7 @@ static struct insn opcode_cc[] = {
{ "", 0, INSTR_INVALID }
};
-static struct insn opcode_e3[] = {
+static struct s390_insn opcode_e3[] = {
#ifdef CONFIG_64BIT
{ "ltg", 0x02, INSTR_RXY_RRRD },
{ "lrag", 0x03, INSTR_RXY_RRRD },
@@ -1369,7 +1347,7 @@ static struct insn opcode_e3[] = {
{ "", 0, INSTR_INVALID }
};
-static struct insn opcode_e5[] = {
+static struct s390_insn opcode_e5[] = {
#ifdef CONFIG_64BIT
{ "strag", 0x02, INSTR_SSE_RDRD },
{ "mvhhi", 0x44, INSTR_SIL_RDI },
@@ -1391,7 +1369,7 @@ static struct insn opcode_e5[] = {
{ "", 0, INSTR_INVALID }
};
-static struct insn opcode_eb[] = {
+static struct s390_insn opcode_eb[] = {
#ifdef CONFIG_64BIT
{ "lmg", 0x04, INSTR_RSY_RRRD },
{ "srag", 0x0a, INSTR_RSY_RRRD },
@@ -1465,7 +1443,7 @@ static struct insn opcode_eb[] = {
{ "", 0, INSTR_INVALID }
};
-static struct insn opcode_ec[] = {
+static struct s390_insn opcode_ec[] = {
#ifdef CONFIG_64BIT
{ "brxhg", 0x44, INSTR_RIE_RRP },
{ "brxlg", 0x45, INSTR_RIE_RRP },
@@ -1504,7 +1482,7 @@ static struct insn opcode_ec[] = {
{ "", 0, INSTR_INVALID }
};
-static struct insn opcode_ed[] = {
+static struct s390_insn opcode_ed[] = {
#ifdef CONFIG_64BIT
{ "mayl", 0x38, INSTR_RXF_FRRDF },
{ "myl", 0x39, INSTR_RXF_FRRDF },
@@ -1572,7 +1550,7 @@ static struct insn opcode_ed[] = {
/* Extracts an operand value from an instruction. */
static unsigned int extract_operand(unsigned char *code,
- const struct operand *operand)
+ const struct s390_operand *operand)
{
unsigned int val;
int bits;
@@ -1608,16 +1586,11 @@ static unsigned int extract_operand(unsigned char *code,
return val;
}
-static inline int insn_length(unsigned char code)
-{
- return ((((int) code + 64) >> 7) + 1) << 1;
-}
-
-static struct insn *find_insn(unsigned char *code)
+struct s390_insn *find_insn(unsigned char *code)
{
unsigned char opfrag = code[1];
unsigned char opmask;
- struct insn *table;
+ struct s390_insn *table;
switch (code[0]) {
case 0x01:
@@ -1706,7 +1679,7 @@ static struct insn *find_insn(unsigned char *code)
*/
int insn_to_mnemonic(unsigned char *instruction, char *buf, unsigned int len)
{
- struct insn *insn;
+ struct s390_insn *insn;
insn = find_insn(instruction);
if (!insn)
@@ -1722,9 +1695,9 @@ EXPORT_SYMBOL_GPL(insn_to_mnemonic);
static int print_insn(char *buffer, unsigned char *code, unsigned long addr)
{
- struct insn *insn;
+ struct s390_insn *insn;
const unsigned char *ops;
- const struct operand *operand;
+ const struct s390_operand *operand;
unsigned int value;
char separator;
char *ptr;
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
index 99e7f6035895..e6af9406987c 100644
--- a/arch/s390/kernel/dumpstack.c
+++ b/arch/s390/kernel/dumpstack.c
@@ -15,6 +15,7 @@
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/debug.h>
+#include <asm/dis.h>
#include <asm/ipl.h>
#ifndef CONFIG_64BIT
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index dc8770d7173c..96543ac400a7 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -206,6 +206,7 @@ static noinline __init void clear_bss_section(void)
*/
static noinline __init void init_kernel_storage_key(void)
{
+#if PAGE_DEFAULT_KEY
unsigned long end_pfn, init_pfn;
end_pfn = PFN_UP(__pa(&_end));
@@ -213,6 +214,7 @@ static noinline __init void init_kernel_storage_key(void)
for (init_pfn = 0 ; init_pfn < end_pfn; init_pfn++)
page_set_storage_key(init_pfn << PAGE_SHIFT,
PAGE_DEFAULT_KEY, 0);
+#endif
}
static __initdata char sysinfo_page[PAGE_SIZE] __aligned(PAGE_SIZE);
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index e9b04c33d383..cb533f78c09e 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -23,7 +23,6 @@ asmlinkage void do_syscall_trace_exit(struct pt_regs *regs);
void do_protection_exception(struct pt_regs *regs);
void do_dat_exception(struct pt_regs *regs);
-void do_asce_exception(struct pt_regs *regs);
void addressing_exception(struct pt_regs *regs);
void data_exception(struct pt_regs *regs);
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 1014ad5f7693..224db03e9518 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -151,14 +151,13 @@ unsigned long __kprobes prepare_ftrace_return(unsigned long parent,
if (unlikely(atomic_read(&current->tracing_graph_pause)))
goto out;
ip = (ip & PSW_ADDR_INSN) - MCOUNT_INSN_SIZE;
- if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
- goto out;
trace.func = ip;
+ trace.depth = current->curr_ret_stack + 1;
/* Only trace if the calling function expects to. */
- if (!ftrace_graph_entry(&trace)) {
- current->curr_ret_stack--;
+ if (!ftrace_graph_entry(&trace))
+ goto out;
+ if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
goto out;
- }
parent = (unsigned long) return_to_handler;
out:
return parent;
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index fd8db63dfc94..429afcc480cb 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -437,7 +437,7 @@ ENTRY(startup_kdump)
#if defined(CONFIG_64BIT)
#if defined(CONFIG_MARCH_ZEC12)
- .long 3, 0xc100efe3, 0xf46ce000, 0x00400000
+ .long 3, 0xc100efe3, 0xf46ce800, 0x00400000
#elif defined(CONFIG_MARCH_Z196)
.long 2, 0xc100efe3, 0xf46c0000
#elif defined(CONFIG_MARCH_Z10)
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index feb719d3c851..633ca7504536 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -2051,12 +2051,12 @@ void s390_reset_system(void (*func)(void *), void *data)
__ctl_clear_bit(0,28);
/* Set new machine check handler */
- S390_lowcore.mcck_new_psw.mask = psw_kernel_bits | PSW_MASK_DAT;
+ S390_lowcore.mcck_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT;
S390_lowcore.mcck_new_psw.addr =
PSW_ADDR_AMODE | (unsigned long) s390_base_mcck_handler;
/* Set new program check handler */
- S390_lowcore.program_new_psw.mask = psw_kernel_bits | PSW_MASK_DAT;
+ S390_lowcore.program_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT;
S390_lowcore.program_new_psw.addr =
PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler;
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index d86e64eddb42..59a9c35c4598 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -26,11 +26,12 @@
#include <linux/stop_machine.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
-#include <asm/cacheflush.h>
-#include <asm/sections.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
+#include <asm/cacheflush.h>
+#include <asm/sections.h>
+#include <asm/dis.h>
DEFINE_PER_CPU(struct kprobe *, current_kprobe);
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
@@ -59,6 +60,8 @@ struct kprobe_insn_cache kprobe_dmainsn_slots = {
static int __kprobes is_prohibited_opcode(kprobe_opcode_t *insn)
{
+ if (!is_known_insn((unsigned char *)insn))
+ return -EINVAL;
switch (insn[0] >> 8) {
case 0x0c: /* bassm */
case 0x0b: /* bsm */
@@ -208,7 +211,7 @@ static void __kprobes copy_instruction(struct kprobe *p)
s64 disp, new_disp;
u64 addr, new_addr;
- memcpy(p->ainsn.insn, p->addr, ((p->opcode >> 14) + 3) & -2);
+ memcpy(p->ainsn.insn, p->addr, insn_length(p->opcode >> 8));
if (!is_insn_relative_long(p->ainsn.insn))
return;
/*
@@ -252,7 +255,7 @@ static int __kprobes s390_get_insn_slot(struct kprobe *p)
p->ainsn.insn = NULL;
if (is_kernel_addr(p->addr))
p->ainsn.insn = get_dmainsn_slot();
- if (is_module_addr(p->addr))
+ else if (is_module_addr(p->addr))
p->ainsn.insn = get_insn_slot();
return p->ainsn.insn ? 0 : -ENOMEM;
}
@@ -608,7 +611,7 @@ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn;
if (fixup & FIXUP_BRANCH_NOT_TAKEN) {
- int ilen = ((p->ainsn.insn[0] >> 14) + 3) & -2;
+ int ilen = insn_length(p->ainsn.insn[0] >> 8);
if (ip - (unsigned long) p->ainsn.insn == ilen)
ip = (unsigned long) p->addr + ilen;
}
diff --git a/arch/s390/kernel/pgm_check.S b/arch/s390/kernel/pgm_check.S
index 14bdecb61923..4a460c44e17e 100644
--- a/arch/s390/kernel/pgm_check.S
+++ b/arch/s390/kernel/pgm_check.S
@@ -78,7 +78,7 @@ PGM_CHECK_DEFAULT /* 34 */
PGM_CHECK_DEFAULT /* 35 */
PGM_CHECK_DEFAULT /* 36 */
PGM_CHECK_DEFAULT /* 37 */
-PGM_CHECK_64BIT(do_asce_exception) /* 38 */
+PGM_CHECK_DEFAULT /* 38 */
PGM_CHECK_64BIT(do_dat_exception) /* 39 */
PGM_CHECK_64BIT(do_dat_exception) /* 3a */
PGM_CHECK_64BIT(do_dat_exception) /* 3b */
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index c5dbb335716d..7ed0d4e2a435 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -139,7 +139,7 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
if (unlikely(p->flags & PF_KTHREAD)) {
/* kernel thread */
memset(&frame->childregs, 0, sizeof(struct pt_regs));
- frame->childregs.psw.mask = psw_kernel_bits | PSW_MASK_DAT |
+ frame->childregs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT |
PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
frame->childregs.psw.addr = PSW_ADDR_AMODE |
(unsigned long) kernel_thread_starter;
@@ -165,7 +165,8 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
* save fprs to current->thread.fp_regs to merge them with
* the emulated registers and then copy the result to the child.
*/
- save_fp_regs(&current->thread.fp_regs);
+ save_fp_ctl(&current->thread.fp_regs.fpc);
+ save_fp_regs(current->thread.fp_regs.fprs);
memcpy(&p->thread.fp_regs, &current->thread.fp_regs,
sizeof(s390_fp_regs));
/* Set a new TLS ? */
@@ -173,7 +174,9 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
p->thread.acrs[0] = frame->childregs.gprs[6];
#else /* CONFIG_64BIT */
/* Save the fpu registers to new thread structure. */
- save_fp_regs(&p->thread.fp_regs);
+ save_fp_ctl(&p->thread.fp_regs.fpc);
+ save_fp_regs(p->thread.fp_regs.fprs);
+ p->thread.fp_regs.pad = 0;
/* Set a new TLS ? */
if (clone_flags & CLONE_SETTLS) {
unsigned long tls = frame->childregs.gprs[6];
@@ -205,10 +208,12 @@ int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs)
* save fprs to current->thread.fp_regs to merge them with
* the emulated registers and then copy the result to the dump.
*/
- save_fp_regs(&current->thread.fp_regs);
+ save_fp_ctl(&current->thread.fp_regs.fpc);
+ save_fp_regs(current->thread.fp_regs.fprs);
memcpy(fpregs, &current->thread.fp_regs, sizeof(s390_fp_regs));
#else /* CONFIG_64BIT */
- save_fp_regs(fpregs);
+ save_fp_ctl(&fpregs->fpc);
+ save_fp_regs(fpregs->fprs);
#endif /* CONFIG_64BIT */
return 1;
}
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 9556905bd3ce..e65c91c591e8 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -198,9 +198,11 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr)
* psw and gprs are stored on the stack
*/
tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr);
- if (addr == (addr_t) &dummy->regs.psw.mask)
+ if (addr == (addr_t) &dummy->regs.psw.mask) {
/* Return a clean psw mask. */
- tmp = psw_user_bits | (tmp & PSW_MASK_USER);
+ tmp &= PSW_MASK_USER | PSW_MASK_RI;
+ tmp |= PSW_USER_BITS;
+ }
} else if (addr < (addr_t) &dummy->regs.orig_gpr2) {
/*
@@ -239,8 +241,7 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr)
offset = addr - (addr_t) &dummy->regs.fp_regs;
tmp = *(addr_t *)((addr_t) &child->thread.fp_regs + offset);
if (addr == (addr_t) &dummy->regs.fp_regs.fpc)
- tmp &= (unsigned long) FPC_VALID_MASK
- << (BITS_PER_LONG - 32);
+ tmp <<= BITS_PER_LONG - 32;
} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
/*
@@ -321,11 +322,15 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
/*
* psw and gprs are stored on the stack
*/
- if (addr == (addr_t) &dummy->regs.psw.mask &&
- ((data & ~PSW_MASK_USER) != psw_user_bits ||
- ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))))
- /* Invalid psw mask. */
- return -EINVAL;
+ if (addr == (addr_t) &dummy->regs.psw.mask) {
+ unsigned long mask = PSW_MASK_USER;
+
+ mask |= is_ri_task(child) ? PSW_MASK_RI : 0;
+ if ((data & ~mask) != PSW_USER_BITS)
+ return -EINVAL;
+ if ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))
+ return -EINVAL;
+ }
*(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;
} else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
@@ -363,10 +368,10 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
/*
* floating point regs. are stored in the thread structure
*/
- if (addr == (addr_t) &dummy->regs.fp_regs.fpc &&
- (data & ~((unsigned long) FPC_VALID_MASK
- << (BITS_PER_LONG - 32))) != 0)
- return -EINVAL;
+ if (addr == (addr_t) &dummy->regs.fp_regs.fpc)
+ if ((unsigned int) data != 0 ||
+ test_fp_ctl(data >> (BITS_PER_LONG - 32)))
+ return -EINVAL;
offset = addr - (addr_t) &dummy->regs.fp_regs;
*(addr_t *)((addr_t) &child->thread.fp_regs + offset) = data;
@@ -557,7 +562,8 @@ static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
if (addr == (addr_t) &dummy32->regs.psw.mask) {
/* Fake a 31 bit psw mask. */
tmp = (__u32)(regs->psw.mask >> 32);
- tmp = psw32_user_bits | (tmp & PSW32_MASK_USER);
+ tmp &= PSW32_MASK_USER | PSW32_MASK_RI;
+ tmp |= PSW32_USER_BITS;
} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
/* Fake a 31 bit psw address. */
tmp = (__u32) regs->psw.addr |
@@ -654,13 +660,16 @@ static int __poke_user_compat(struct task_struct *child,
* psw, gprs, acrs and orig_gpr2 are stored on the stack
*/
if (addr == (addr_t) &dummy32->regs.psw.mask) {
+ __u32 mask = PSW32_MASK_USER;
+
+ mask |= is_ri_task(child) ? PSW32_MASK_RI : 0;
/* Build a 64 bit psw mask from 31 bit mask. */
- if ((tmp & ~PSW32_MASK_USER) != psw32_user_bits)
+ if ((tmp & ~mask) != PSW32_USER_BITS)
/* Invalid psw mask. */
return -EINVAL;
regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
(regs->psw.mask & PSW_MASK_BA) |
- (__u64)(tmp & PSW32_MASK_USER) << 32;
+ (__u64)(tmp & mask) << 32;
} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
/* Build a 64 bit psw address from 31 bit address. */
regs->psw.addr = (__u64) tmp & PSW32_ADDR_INSN;
@@ -696,8 +705,7 @@ static int __poke_user_compat(struct task_struct *child,
* floating point regs. are stored in the thread structure
*/
if (addr == (addr_t) &dummy32->regs.fp_regs.fpc &&
- (tmp & ~FPC_VALID_MASK) != 0)
- /* Invalid floating point control. */
+ test_fp_ctl(tmp))
return -EINVAL;
offset = addr - (addr_t) &dummy32->regs.fp_regs;
*(__u32 *)((addr_t) &child->thread.fp_regs + offset) = tmp;
@@ -895,8 +903,10 @@ static int s390_fpregs_get(struct task_struct *target,
const struct user_regset *regset, unsigned int pos,
unsigned int count, void *kbuf, void __user *ubuf)
{
- if (target == current)
- save_fp_regs(&target->thread.fp_regs);
+ if (target == current) {
+ save_fp_ctl(&target->thread.fp_regs.fpc);
+ save_fp_regs(target->thread.fp_regs.fprs);
+ }
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.fp_regs, 0, -1);
@@ -909,19 +919,21 @@ static int s390_fpregs_set(struct task_struct *target,
{
int rc = 0;
- if (target == current)
- save_fp_regs(&target->thread.fp_regs);
+ if (target == current) {
+ save_fp_ctl(&target->thread.fp_regs.fpc);
+ save_fp_regs(target->thread.fp_regs.fprs);
+ }
/* If setting FPC, must validate it first. */
if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
- u32 fpc[2] = { target->thread.fp_regs.fpc, 0 };
- rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpc,
+ u32 ufpc[2] = { target->thread.fp_regs.fpc, 0 };
+ rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ufpc,
0, offsetof(s390_fp_regs, fprs));
if (rc)
return rc;
- if ((fpc[0] & ~FPC_VALID_MASK) != 0 || fpc[1] != 0)
+ if (ufpc[1] != 0 || test_fp_ctl(ufpc[0]))
return -EINVAL;
- target->thread.fp_regs.fpc = fpc[0];
+ target->thread.fp_regs.fpc = ufpc[0];
}
if (rc == 0 && count > 0)
@@ -929,8 +941,10 @@ static int s390_fpregs_set(struct task_struct *target,
target->thread.fp_regs.fprs,
offsetof(s390_fp_regs, fprs), -1);
- if (rc == 0 && target == current)
- restore_fp_regs(&target->thread.fp_regs);
+ if (rc == 0 && target == current) {
+ restore_fp_ctl(&target->thread.fp_regs.fpc);
+ restore_fp_regs(target->thread.fp_regs.fprs);
+ }
return rc;
}
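
As an illustration only (not part of the patch): the regset code above replaces the single save_fp_regs(&fp_regs)/restore_fp_regs(&fp_regs) call pair with separate control-word and register helpers, so the floating-point control word can be validated before any state is committed. A minimal sketch of the resulting round trip, assuming the helpers behave as the hunks above suggest (test_fp_ctl() returning non-zero for an invalid FPC):

    /* Hedged sketch: save, validate, then restore a task's FP state. */
    static int fp_state_roundtrip(struct task_struct *tsk, u32 new_fpc)
    {
            save_fp_ctl(&tsk->thread.fp_regs.fpc);   /* store the FPC word */
            save_fp_regs(tsk->thread.fp_regs.fprs);  /* store f0..f15 */

            if (test_fp_ctl(new_fpc))                /* non-zero: invalid FPC */
                    return -EINVAL;
            tsk->thread.fp_regs.fpc = new_fpc;

            restore_fp_ctl(&tsk->thread.fp_regs.fpc);
            restore_fp_regs(tsk->thread.fp_regs.fprs);
            return 0;
    }
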
diff --git a/arch/s390/kernel/runtime_instr.c b/arch/s390/kernel/runtime_instr.c
index e1c9d1c292fa..d817cce7e72d 100644
--- a/arch/s390/kernel/runtime_instr.c
+++ b/arch/s390/kernel/runtime_instr.c
@@ -40,8 +40,6 @@ static void disable_runtime_instr(void)
static void init_runtime_instr_cb(struct runtime_instr_cb *cb)
{
cb->buf_limit = 0xfff;
- if (s390_user_mode == HOME_SPACE_MODE)
- cb->home_space = 1;
cb->int_requested = 1;
cb->pstate = 1;
cb->pstate_set_buf = 1;
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index aeed8a61fa0d..ffe1c53264a7 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -64,12 +64,6 @@
#include <asm/sclp.h>
#include "entry.h"
-long psw_kernel_bits = PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_ASC_PRIMARY |
- PSW_MASK_EA | PSW_MASK_BA;
-long psw_user_bits = PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT |
- PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_MCHECK |
- PSW_MASK_PSTATE | PSW_ASC_HOME;
-
/*
* User copy operations.
*/
@@ -300,43 +294,14 @@ static int __init parse_vmalloc(char *arg)
}
early_param("vmalloc", parse_vmalloc);
-unsigned int s390_user_mode = PRIMARY_SPACE_MODE;
-EXPORT_SYMBOL_GPL(s390_user_mode);
-
-static void __init set_user_mode_primary(void)
-{
- psw_kernel_bits = (psw_kernel_bits & ~PSW_MASK_ASC) | PSW_ASC_HOME;
- psw_user_bits = (psw_user_bits & ~PSW_MASK_ASC) | PSW_ASC_PRIMARY;
-#ifdef CONFIG_COMPAT
- psw32_user_bits =
- (psw32_user_bits & ~PSW32_MASK_ASC) | PSW32_ASC_PRIMARY;
-#endif
- uaccess = MACHINE_HAS_MVCOS ? uaccess_mvcos_switch : uaccess_pt;
-}
-
static int __init early_parse_user_mode(char *p)
{
- if (p && strcmp(p, "primary") == 0)
- s390_user_mode = PRIMARY_SPACE_MODE;
- else if (!p || strcmp(p, "home") == 0)
- s390_user_mode = HOME_SPACE_MODE;
- else
- return 1;
- return 0;
+ if (!p || strcmp(p, "primary") == 0)
+ return 0;
+ return 1;
}
early_param("user_mode", early_parse_user_mode);
-static void __init setup_addressing_mode(void)
-{
- if (s390_user_mode != PRIMARY_SPACE_MODE)
- return;
- set_user_mode_primary();
- if (MACHINE_HAS_MVCOS)
- pr_info("Address spaces switched, mvcos available\n");
- else
- pr_info("Address spaces switched, mvcos not available\n");
-}
-
void *restart_stack __attribute__((__section__(".data")));
static void __init setup_lowcore(void)
@@ -348,24 +313,24 @@ static void __init setup_lowcore(void)
*/
BUILD_BUG_ON(sizeof(struct _lowcore) != LC_PAGES * 4096);
lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0);
- lc->restart_psw.mask = psw_kernel_bits;
+ lc->restart_psw.mask = PSW_KERNEL_BITS;
lc->restart_psw.addr =
PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
- lc->external_new_psw.mask = psw_kernel_bits |
+ lc->external_new_psw.mask = PSW_KERNEL_BITS |
PSW_MASK_DAT | PSW_MASK_MCHECK;
lc->external_new_psw.addr =
PSW_ADDR_AMODE | (unsigned long) ext_int_handler;
- lc->svc_new_psw.mask = psw_kernel_bits |
+ lc->svc_new_psw.mask = PSW_KERNEL_BITS |
PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call;
- lc->program_new_psw.mask = psw_kernel_bits |
+ lc->program_new_psw.mask = PSW_KERNEL_BITS |
PSW_MASK_DAT | PSW_MASK_MCHECK;
lc->program_new_psw.addr =
PSW_ADDR_AMODE | (unsigned long) pgm_check_handler;
- lc->mcck_new_psw.mask = psw_kernel_bits;
+ lc->mcck_new_psw.mask = PSW_KERNEL_BITS;
lc->mcck_new_psw.addr =
PSW_ADDR_AMODE | (unsigned long) mcck_int_handler;
- lc->io_new_psw.mask = psw_kernel_bits |
+ lc->io_new_psw.mask = PSW_KERNEL_BITS |
PSW_MASK_DAT | PSW_MASK_MCHECK;
lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
lc->clock_comparator = -1ULL;
@@ -1043,10 +1008,7 @@ void __init setup_arch(char **cmdline_p)
init_mm.end_data = (unsigned long) &_edata;
init_mm.brk = (unsigned long) &_end;
- if (MACHINE_HAS_MVCOS)
- memcpy(&uaccess, &uaccess_mvcos, sizeof(uaccess));
- else
- memcpy(&uaccess, &uaccess_std, sizeof(uaccess));
+ uaccess = MACHINE_HAS_MVCOS ? uaccess_mvcos : uaccess_pt;
parse_early_param();
detect_memory_layout(memory_chunk, memory_end);
@@ -1054,7 +1016,6 @@ void __init setup_arch(char **cmdline_p)
setup_ipl();
reserve_oldmem();
setup_memory_end();
- setup_addressing_mode();
reserve_crashkernel();
setup_memory();
setup_resources();
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index c45becf82e01..fb535874a246 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -57,40 +57,48 @@ static int save_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
/* Copy a 'clean' PSW mask to the user to avoid leaking
information about whether PER is currently on. */
- user_sregs.regs.psw.mask = psw_user_bits |
- (regs->psw.mask & PSW_MASK_USER);
+ user_sregs.regs.psw.mask = PSW_USER_BITS |
+ (regs->psw.mask & (PSW_MASK_USER | PSW_MASK_RI));
user_sregs.regs.psw.addr = regs->psw.addr;
memcpy(&user_sregs.regs.gprs, &regs->gprs, sizeof(sregs->regs.gprs));
memcpy(&user_sregs.regs.acrs, current->thread.acrs,
- sizeof(sregs->regs.acrs));
+ sizeof(user_sregs.regs.acrs));
/*
* We have to store the fp registers to current->thread.fp_regs
* to merge them with the emulated registers.
*/
- save_fp_regs(&current->thread.fp_regs);
+ save_fp_ctl(&current->thread.fp_regs.fpc);
+ save_fp_regs(current->thread.fp_regs.fprs);
memcpy(&user_sregs.fpregs, &current->thread.fp_regs,
- sizeof(s390_fp_regs));
- return __copy_to_user(sregs, &user_sregs, sizeof(_sigregs));
+ sizeof(user_sregs.fpregs));
+ if (__copy_to_user(sregs, &user_sregs, sizeof(_sigregs)))
+ return -EFAULT;
+ return 0;
}
-/* Returns positive number on error */
static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
{
- int err;
_sigregs user_sregs;
/* Always make any pending restarted system call return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
- err = __copy_from_user(&user_sregs, sregs, sizeof(_sigregs));
- if (err)
- return err;
- /* Use regs->psw.mask instead of psw_user_bits to preserve PER bit. */
+ if (__copy_from_user(&user_sregs, sregs, sizeof(user_sregs)))
+ return -EFAULT;
+
+ if (!is_ri_task(current) && (user_sregs.regs.psw.mask & PSW_MASK_RI))
+ return -EINVAL;
+
+ /* Loading the floating-point-control word can fail. Do that first. */
+ if (restore_fp_ctl(&user_sregs.fpregs.fpc))
+ return -EINVAL;
+
+ /* Use regs->psw.mask instead of PSW_USER_BITS to preserve PER bit. */
regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
- (user_sregs.regs.psw.mask & PSW_MASK_USER);
+ (user_sregs.regs.psw.mask & (PSW_MASK_USER | PSW_MASK_RI));
/* Check for invalid user address space control. */
- if ((regs->psw.mask & PSW_MASK_ASC) >= (psw_kernel_bits & PSW_MASK_ASC))
- regs->psw.mask = (psw_user_bits & PSW_MASK_ASC) |
+ if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_HOME)
+ regs->psw.mask = PSW_ASC_PRIMARY |
(regs->psw.mask & ~PSW_MASK_ASC);
/* Check for invalid amode */
if (regs->psw.mask & PSW_MASK_EA)
@@ -98,14 +106,13 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
regs->psw.addr = user_sregs.regs.psw.addr;
memcpy(&regs->gprs, &user_sregs.regs.gprs, sizeof(sregs->regs.gprs));
memcpy(&current->thread.acrs, &user_sregs.regs.acrs,
- sizeof(sregs->regs.acrs));
+ sizeof(current->thread.acrs));
restore_access_regs(current->thread.acrs);
memcpy(&current->thread.fp_regs, &user_sregs.fpregs,
- sizeof(s390_fp_regs));
- current->thread.fp_regs.fpc &= FPC_VALID_MASK;
+ sizeof(current->thread.fp_regs));
- restore_fp_regs(&current->thread.fp_regs);
+ restore_fp_regs(current->thread.fp_regs.fprs);
clear_thread_flag(TIF_SYSCALL); /* No longer in a system call */
return 0;
}
@@ -224,7 +231,7 @@ static int setup_frame(int sig, struct k_sigaction *ka,
regs->gprs[15] = (unsigned long) frame;
/* Force default amode and default user address space control. */
regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA |
- (psw_user_bits & PSW_MASK_ASC) |
+ (PSW_USER_BITS & PSW_MASK_ASC) |
(regs->psw.mask & ~PSW_MASK_ASC);
regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE;
@@ -295,7 +302,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
regs->gprs[15] = (unsigned long) frame;
/* Force default amode and default user address space control. */
regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA |
- (psw_user_bits & PSW_MASK_ASC) |
+ (PSW_USER_BITS & PSW_MASK_ASC) |
(regs->psw.mask & ~PSW_MASK_ASC);
regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE;
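
Illustrative condensation (not part of the patch): restore_sigregs() now follows a strict validate-then-commit order: everything that can be rejected (a stray RI bit, the floating-point control word) is checked before any live register state is touched, and plain -EFAULT/-EINVAL is returned instead of the old __copy_from_user() remainder. Using only the names from the hunks above:

    /* Hedged condensation of the new ordering in restore_sigregs(). */
    if (__copy_from_user(&user_sregs, sregs, sizeof(user_sregs)))
            return -EFAULT;                      /* 1. fetch the whole frame  */
    if (!is_ri_task(current) && (user_sregs.regs.psw.mask & PSW_MASK_RI))
            return -EINVAL;                      /* 2. reject a stray RI bit  */
    if (restore_fp_ctl(&user_sregs.fpregs.fpc))
            return -EINVAL;                      /* 3. FPC load can fail      */
    /* 4. only now commit PSW, gprs, acrs and the FP registers */
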
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 1a4313a1b60f..dc4a53465060 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -283,7 +283,7 @@ static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
struct _lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
unsigned long source_cpu = stap();
- __load_psw_mask(psw_kernel_bits);
+ __load_psw_mask(PSW_KERNEL_BITS);
if (pcpu->address == source_cpu)
func(data); /* should not return */
/* Stop target cpu (if func returns this stops the current cpu). */
@@ -395,7 +395,7 @@ void smp_send_stop(void)
int cpu;
/* Disable all interrupts/machine checks */
- __load_psw_mask(psw_kernel_bits | PSW_MASK_DAT);
+ __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
trace_hardirqs_off();
debug_set_critical();
@@ -533,9 +533,6 @@ EXPORT_SYMBOL(smp_ctl_clear_bit);
#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_CRASH_DUMP)
-struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
-EXPORT_SYMBOL_GPL(zfcpdump_save_areas);
-
static void __init smp_get_save_area(int cpu, u16 address)
{
void *lc = pcpu_devices[0].lowcore;
@@ -546,15 +543,9 @@ static void __init smp_get_save_area(int cpu, u16 address)
if (!OLDMEM_BASE && (address == boot_cpu_address ||
ipl_info.type != IPL_TYPE_FCP_DUMP))
return;
- if (cpu >= NR_CPUS) {
- pr_warning("CPU %i exceeds the maximum %i and is excluded "
- "from the dump\n", cpu, NR_CPUS - 1);
- return;
- }
- save_area = kmalloc(sizeof(struct save_area), GFP_KERNEL);
+ save_area = dump_save_area_create(cpu);
if (!save_area)
panic("could not allocate memory for save area\n");
- zfcpdump_save_areas[cpu] = save_area;
#ifdef CONFIG_CRASH_DUMP
if (address == boot_cpu_address) {
/* Copy the registers of the boot cpu. */
@@ -693,7 +684,7 @@ static void smp_start_secondary(void *cpuvoid)
S390_lowcore.restart_source = -1UL;
restore_access_regs(S390_lowcore.access_regs_save_area);
__ctl_load(S390_lowcore.cregs_save_area, 0, 15);
- __load_psw_mask(psw_kernel_bits | PSW_MASK_DAT);
+ __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
cpu_init();
preempt_disable();
init_cpu_timer();
@@ -929,7 +920,7 @@ static ssize_t show_idle_count(struct device *dev,
idle_count = ACCESS_ONCE(idle->idle_count);
if (ACCESS_ONCE(idle->clock_idle_enter))
idle_count++;
- } while ((sequence & 1) || (idle->sequence != sequence));
+ } while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
return sprintf(buf, "%llu\n", idle_count);
}
static DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);
@@ -947,7 +938,7 @@ static ssize_t show_idle_time(struct device *dev,
idle_time = ACCESS_ONCE(idle->idle_time);
idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
- } while ((sequence & 1) || (idle->sequence != sequence));
+ } while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
return sprintf(buf, "%llu\n", idle_time >> 12);
}
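
Illustrative sketch (not part of the patch): the two attribute-show hunks above harden a lockless, seqcount-style read. Without ACCESS_ONCE() on the re-read of idle->sequence the compiler may reuse the first load, so the retry condition could never fire. Reduced to its core, assuming the s390_idle_data fields used above:

    /* Hedged sketch of the retry-read loop fixed above. */
    static unsigned long long snapshot_idle_count(struct s390_idle_data *idle)
    {
            unsigned long long count;
            unsigned int seq;

            do {
                    seq = ACCESS_ONCE(idle->sequence);   /* odd while a writer runs */
                    count = ACCESS_ONCE(idle->idle_count);
            } while ((seq & 1) || (ACCESS_ONCE(idle->sequence) != seq));
            /* the trailing ACCESS_ONCE forces a fresh load - that is the fix */
            return count;
    }
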
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 05d75c413137..a84476f2a9bb 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -84,8 +84,7 @@ struct vdso_data *vdso_data = &vdso_data_store.data;
*/
static void vdso_init_data(struct vdso_data *vd)
{
- vd->ectg_available =
- s390_user_mode != HOME_SPACE_MODE && test_facility(31);
+ vd->ectg_available = test_facility(31);
}
#ifdef CONFIG_64BIT
@@ -102,7 +101,7 @@ int vdso_alloc_per_cpu(struct _lowcore *lowcore)
lowcore->vdso_per_cpu_data = __LC_PASTE;
- if (s390_user_mode == HOME_SPACE_MODE || !vdso_enabled)
+ if (!vdso_enabled)
return 0;
segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER);
@@ -147,7 +146,7 @@ void vdso_free_per_cpu(struct _lowcore *lowcore)
unsigned long segment_table, page_table, page_frame;
u32 *psal, *aste;
- if (s390_user_mode == HOME_SPACE_MODE || !vdso_enabled)
+ if (!vdso_enabled)
return;
psal = (u32 *)(addr_t) lowcore->paste[4];
@@ -165,7 +164,7 @@ static void vdso_init_cr5(void)
{
unsigned long cr5;
- if (s390_user_mode == HOME_SPACE_MODE || !vdso_enabled)
+ if (!vdso_enabled)
return;
cr5 = offsetof(struct _lowcore, paste);
__ctl_load(cr5, 5, 5);
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index abcfab55f99b..8c34363d6f1e 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -161,7 +161,7 @@ void __kprobes vtime_stop_cpu(void)
trace_hardirqs_on();
/* Wait for external, I/O or machine check interrupt. */
- psw_mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_DAT |
+ psw_mask = PSW_KERNEL_BITS | PSW_MASK_WAIT | PSW_MASK_DAT |
PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
idle->nohz_delay = 0;
@@ -191,7 +191,7 @@ cputime64_t s390_get_idle_time(int cpu)
sequence = ACCESS_ONCE(idle->sequence);
idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
- } while ((sequence & 1) || (idle->sequence != sequence));
+ } while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
return idle_enter ? ((idle_exit ?: now) - idle_enter) : 0;
}
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 776dafe918db..ed8064cb5c49 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -343,10 +343,11 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
- save_fp_regs(&vcpu->arch.host_fpregs);
+ save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
+ save_fp_regs(vcpu->arch.host_fpregs.fprs);
save_access_regs(vcpu->arch.host_acrs);
- vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
- restore_fp_regs(&vcpu->arch.guest_fpregs);
+ restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
+ restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
restore_access_regs(vcpu->run->s.regs.acrs);
gmap_enable(vcpu->arch.gmap);
atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
@@ -356,9 +357,11 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
gmap_disable(vcpu->arch.gmap);
- save_fp_regs(&vcpu->arch.guest_fpregs);
+ save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
+ save_fp_regs(vcpu->arch.guest_fpregs.fprs);
save_access_regs(vcpu->run->s.regs.acrs);
- restore_fp_regs(&vcpu->arch.host_fpregs);
+ restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
+ restore_fp_regs(vcpu->arch.host_fpregs.fprs);
restore_access_regs(vcpu->arch.host_acrs);
}
@@ -618,9 +621,12 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
+ if (test_fp_ctl(fpu->fpc))
+ return -EINVAL;
memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
- vcpu->arch.guest_fpregs.fpc = fpu->fpc & FPC_VALID_MASK;
- restore_fp_regs(&vcpu->arch.guest_fpregs);
+ vcpu->arch.guest_fpregs.fpc = fpu->fpc;
+ restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
+ restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
return 0;
}
@@ -876,7 +882,8 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
* copying in vcpu load/put. Lets update our copies before we save
* it into the save area
*/
- save_fp_regs(&vcpu->arch.guest_fpregs);
+ save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
+ save_fp_regs(vcpu->arch.guest_fpregs.fprs);
save_access_regs(vcpu->run->s.regs.acrs);
if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
diff --git a/arch/s390/kvm/trace.h b/arch/s390/kvm/trace.h
index c2f582bb1cb2..0c991c6748ab 100644
--- a/arch/s390/kvm/trace.h
+++ b/arch/s390/kvm/trace.h
@@ -4,6 +4,7 @@
#include <linux/tracepoint.h>
#include <asm/sigp.h>
#include <asm/debug.h>
+#include <asm/dis.h>
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
index 20b0e97a7df2..b068729e50ac 100644
--- a/arch/s390/lib/Makefile
+++ b/arch/s390/lib/Makefile
@@ -2,7 +2,7 @@
# Makefile for s390-specific library files..
#
-lib-y += delay.o string.o uaccess_std.o uaccess_pt.o
+lib-y += delay.o string.o uaccess_pt.o find.o
obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o mem32.o
obj-$(CONFIG_64BIT) += mem64.o
lib-$(CONFIG_64BIT) += uaccess_mvcos.o
diff --git a/arch/s390/lib/find.c b/arch/s390/lib/find.c
new file mode 100644
index 000000000000..620d34d6487e
--- /dev/null
+++ b/arch/s390/lib/find.c
@@ -0,0 +1,77 @@
+/*
+ * MSB0 numbered special bitops handling.
+ *
+ * On s390x the bits are numbered:
+ * |0..............63|64............127|128...........191|192...........255|
+ * and on s390:
+ * |0.....31|32....63|64....95|96...127|128..159|160..191|192..223|224..255|
+ *
+ * The reason for this bit numbering is the fact that the hardware sets bits
+ * in a bitmap starting at bit 0 (MSB) and we don't want to scan the bitmap
+ * from the 'wrong end'.
+ */
+
+#include <linux/compiler.h>
+#include <linux/bitops.h>
+#include <linux/export.h>
+
+unsigned long find_first_bit_inv(const unsigned long *addr, unsigned long size)
+{
+ const unsigned long *p = addr;
+ unsigned long result = 0;
+ unsigned long tmp;
+
+ while (size & ~(BITS_PER_LONG - 1)) {
+ if ((tmp = *(p++)))
+ goto found;
+ result += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
+ }
+ if (!size)
+ return result;
+ tmp = (*p) & (~0UL << (BITS_PER_LONG - size));
+ if (!tmp) /* Are any bits set? */
+ return result + size; /* Nope. */
+found:
+ return result + (__fls(tmp) ^ (BITS_PER_LONG - 1));
+}
+EXPORT_SYMBOL(find_first_bit_inv);
+
+unsigned long find_next_bit_inv(const unsigned long *addr, unsigned long size,
+ unsigned long offset)
+{
+ const unsigned long *p = addr + (offset / BITS_PER_LONG);
+ unsigned long result = offset & ~(BITS_PER_LONG - 1);
+ unsigned long tmp;
+
+ if (offset >= size)
+ return size;
+ size -= result;
+ offset %= BITS_PER_LONG;
+ if (offset) {
+ tmp = *(p++);
+ tmp &= (~0UL >> offset);
+ if (size < BITS_PER_LONG)
+ goto found_first;
+ if (tmp)
+ goto found_middle;
+ size -= BITS_PER_LONG;
+ result += BITS_PER_LONG;
+ }
+ while (size & ~(BITS_PER_LONG-1)) {
+ if ((tmp = *(p++)))
+ goto found_middle;
+ result += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
+ }
+ if (!size)
+ return result;
+ tmp = *p;
+found_first:
+ tmp &= (~0UL << (BITS_PER_LONG - size));
+ if (!tmp) /* Are any bits set? */
+ return result + size; /* Nope. */
+found_middle:
+ return result + (__fls(tmp) ^ (BITS_PER_LONG - 1));
+}
+EXPORT_SYMBOL(find_next_bit_inv);
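
Illustrative usage (not part of the patch): with MSB0 numbering, bit 0 is the most significant bit of the first word, which is why the result is computed as __fls(tmp) ^ (BITS_PER_LONG - 1). Assuming only the two helpers exported above, walking every set bit of such a bitmap could look like:

    /* Hedged usage sketch for find_first_bit_inv()/find_next_bit_inv(). */
    static void walk_inv_bitmap(const unsigned long *map, unsigned long bits)
    {
            unsigned long bit;

            for (bit = find_first_bit_inv(map, bits);
                 bit < bits;
                 bit = find_next_bit_inv(map, bits, bit + 1)) {
                    /* bit 0 is the MSB of map[0]; on 64 bit, bit 63 is its LSB */
            }
    }
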
diff --git a/arch/s390/lib/uaccess_mvcos.c b/arch/s390/lib/uaccess_mvcos.c
index 1829742bf479..4b7993bf69b9 100644
--- a/arch/s390/lib/uaccess_mvcos.c
+++ b/arch/s390/lib/uaccess_mvcos.c
@@ -65,13 +65,6 @@ static size_t copy_from_user_mvcos(size_t size, const void __user *ptr, void *x)
return size;
}
-static size_t copy_from_user_mvcos_check(size_t size, const void __user *ptr, void *x)
-{
- if (size <= 256)
- return copy_from_user_std(size, ptr, x);
- return copy_from_user_mvcos(size, ptr, x);
-}
-
static size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x)
{
register unsigned long reg0 asm("0") = 0x810000UL;
@@ -101,14 +94,6 @@ static size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x)
return size;
}
-static size_t copy_to_user_mvcos_check(size_t size, void __user *ptr,
- const void *x)
-{
- if (size <= 256)
- return copy_to_user_std(size, ptr, x);
- return copy_to_user_mvcos(size, ptr, x);
-}
-
static size_t copy_in_user_mvcos(size_t size, void __user *to,
const void __user *from)
{
@@ -201,23 +186,8 @@ static size_t strncpy_from_user_mvcos(size_t count, const char __user *src,
}
struct uaccess_ops uaccess_mvcos = {
- .copy_from_user = copy_from_user_mvcos_check,
- .copy_from_user_small = copy_from_user_std,
- .copy_to_user = copy_to_user_mvcos_check,
- .copy_to_user_small = copy_to_user_std,
- .copy_in_user = copy_in_user_mvcos,
- .clear_user = clear_user_mvcos,
- .strnlen_user = strnlen_user_std,
- .strncpy_from_user = strncpy_from_user_std,
- .futex_atomic_op = futex_atomic_op_std,
- .futex_atomic_cmpxchg = futex_atomic_cmpxchg_std,
-};
-
-struct uaccess_ops uaccess_mvcos_switch = {
.copy_from_user = copy_from_user_mvcos,
- .copy_from_user_small = copy_from_user_mvcos,
.copy_to_user = copy_to_user_mvcos,
- .copy_to_user_small = copy_to_user_mvcos,
.copy_in_user = copy_in_user_mvcos,
.clear_user = clear_user_mvcos,
.strnlen_user = strnlen_user_mvcos,
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index 1694d738b175..97e03caf7825 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -461,9 +461,7 @@ int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
struct uaccess_ops uaccess_pt = {
.copy_from_user = copy_from_user_pt,
- .copy_from_user_small = copy_from_user_pt,
.copy_to_user = copy_to_user_pt,
- .copy_to_user_small = copy_to_user_pt,
.copy_in_user = copy_in_user_pt,
.clear_user = clear_user_pt,
.strnlen_user = strnlen_user_pt,
diff --git a/arch/s390/lib/uaccess_std.c b/arch/s390/lib/uaccess_std.c
deleted file mode 100644
index 4a75d475b06a..000000000000
--- a/arch/s390/lib/uaccess_std.c
+++ /dev/null
@@ -1,305 +0,0 @@
-/*
- * Standard user space access functions based on mvcp/mvcs and doing
- * interesting things in the secondary space mode.
- *
- * Copyright IBM Corp. 2006
- * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
- * Gerald Schaefer (gerald.schaefer@de.ibm.com)
- */
-
-#include <linux/errno.h>
-#include <linux/mm.h>
-#include <linux/uaccess.h>
-#include <asm/futex.h>
-#include "uaccess.h"
-
-#ifndef CONFIG_64BIT
-#define AHI "ahi"
-#define ALR "alr"
-#define CLR "clr"
-#define LHI "lhi"
-#define SLR "slr"
-#else
-#define AHI "aghi"
-#define ALR "algr"
-#define CLR "clgr"
-#define LHI "lghi"
-#define SLR "slgr"
-#endif
-
-size_t copy_from_user_std(size_t size, const void __user *ptr, void *x)
-{
- unsigned long tmp1, tmp2;
-
- tmp1 = -256UL;
- asm volatile(
- "0: mvcp 0(%0,%2),0(%1),%3\n"
- "10:jz 8f\n"
- "1:"ALR" %0,%3\n"
- " la %1,256(%1)\n"
- " la %2,256(%2)\n"
- "2: mvcp 0(%0,%2),0(%1),%3\n"
- "11:jnz 1b\n"
- " j 8f\n"
- "3: la %4,255(%1)\n" /* %4 = ptr + 255 */
- " "LHI" %3,-4096\n"
- " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
- " "SLR" %4,%1\n"
- " "CLR" %0,%4\n" /* copy crosses next page boundary? */
- " jnh 5f\n"
- "4: mvcp 0(%4,%2),0(%1),%3\n"
- "12:"SLR" %0,%4\n"
- " "ALR" %2,%4\n"
- "5:"LHI" %4,-1\n"
- " "ALR" %4,%0\n" /* copy remaining size, subtract 1 */
- " bras %3,7f\n" /* memset loop */
- " xc 0(1,%2),0(%2)\n"
- "6: xc 0(256,%2),0(%2)\n"
- " la %2,256(%2)\n"
- "7:"AHI" %4,-256\n"
- " jnm 6b\n"
- " ex %4,0(%3)\n"
- " j 9f\n"
- "8:"SLR" %0,%0\n"
- "9: \n"
- EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,5b)
- EX_TABLE(10b,3b) EX_TABLE(11b,3b) EX_TABLE(12b,5b)
- : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
- : : "cc", "memory");
- return size;
-}
-
-static size_t copy_from_user_std_check(size_t size, const void __user *ptr,
- void *x)
-{
- if (size <= 1024)
- return copy_from_user_std(size, ptr, x);
- return copy_from_user_pt(size, ptr, x);
-}
-
-size_t copy_to_user_std(size_t size, void __user *ptr, const void *x)
-{
- unsigned long tmp1, tmp2;
-
- tmp1 = -256UL;
- asm volatile(
- "0: mvcs 0(%0,%1),0(%2),%3\n"
- "7: jz 5f\n"
- "1:"ALR" %0,%3\n"
- " la %1,256(%1)\n"
- " la %2,256(%2)\n"
- "2: mvcs 0(%0,%1),0(%2),%3\n"
- "8: jnz 1b\n"
- " j 5f\n"
- "3: la %4,255(%1)\n" /* %4 = ptr + 255 */
- " "LHI" %3,-4096\n"
- " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
- " "SLR" %4,%1\n"
- " "CLR" %0,%4\n" /* copy crosses next page boundary? */
- " jnh 6f\n"
- "4: mvcs 0(%4,%1),0(%2),%3\n"
- "9:"SLR" %0,%4\n"
- " j 6f\n"
- "5:"SLR" %0,%0\n"
- "6: \n"
- EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
- EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
- : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
- : : "cc", "memory");
- return size;
-}
-
-static size_t copy_to_user_std_check(size_t size, void __user *ptr,
- const void *x)
-{
- if (size <= 1024)
- return copy_to_user_std(size, ptr, x);
- return copy_to_user_pt(size, ptr, x);
-}
-
-static size_t copy_in_user_std(size_t size, void __user *to,
- const void __user *from)
-{
- unsigned long tmp1;
-
- asm volatile(
- " sacf 256\n"
- " "AHI" %0,-1\n"
- " jo 5f\n"
- " bras %3,3f\n"
- "0:"AHI" %0,257\n"
- "1: mvc 0(1,%1),0(%2)\n"
- " la %1,1(%1)\n"
- " la %2,1(%2)\n"
- " "AHI" %0,-1\n"
- " jnz 1b\n"
- " j 5f\n"
- "2: mvc 0(256,%1),0(%2)\n"
- " la %1,256(%1)\n"
- " la %2,256(%2)\n"
- "3:"AHI" %0,-256\n"
- " jnm 2b\n"
- "4: ex %0,1b-0b(%3)\n"
- "5: "SLR" %0,%0\n"
- "6: sacf 0\n"
- EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
- : "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1)
- : : "cc", "memory");
- return size;
-}
-
-static size_t clear_user_std(size_t size, void __user *to)
-{
- unsigned long tmp1, tmp2;
-
- asm volatile(
- " sacf 256\n"
- " "AHI" %0,-1\n"
- " jo 5f\n"
- " bras %3,3f\n"
- " xc 0(1,%1),0(%1)\n"
- "0:"AHI" %0,257\n"
- " la %2,255(%1)\n" /* %2 = ptr + 255 */
- " srl %2,12\n"
- " sll %2,12\n" /* %2 = (ptr + 255) & -4096 */
- " "SLR" %2,%1\n"
- " "CLR" %0,%2\n" /* clear crosses next page boundary? */
- " jnh 5f\n"
- " "AHI" %2,-1\n"
- "1: ex %2,0(%3)\n"
- " "AHI" %2,1\n"
- " "SLR" %0,%2\n"
- " j 5f\n"
- "2: xc 0(256,%1),0(%1)\n"
- " la %1,256(%1)\n"
- "3:"AHI" %0,-256\n"
- " jnm 2b\n"
- "4: ex %0,0(%3)\n"
- "5: "SLR" %0,%0\n"
- "6: sacf 0\n"
- EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
- : "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2)
- : : "cc", "memory");
- return size;
-}
-
-size_t strnlen_user_std(size_t size, const char __user *src)
-{
- register unsigned long reg0 asm("0") = 0UL;
- unsigned long tmp1, tmp2;
-
- if (unlikely(!size))
- return 0;
- asm volatile(
- " la %2,0(%1)\n"
- " la %3,0(%0,%1)\n"
- " "SLR" %0,%0\n"
- " sacf 256\n"
- "0: srst %3,%2\n"
- " jo 0b\n"
- " la %0,1(%3)\n" /* strnlen_user results includes \0 */
- " "SLR" %0,%1\n"
- "1: sacf 0\n"
- EX_TABLE(0b,1b)
- : "+a" (size), "+a" (src), "=a" (tmp1), "=a" (tmp2)
- : "d" (reg0) : "cc", "memory");
- return size;
-}
-
-size_t strncpy_from_user_std(size_t count, const char __user *src, char *dst)
-{
- size_t done, len, offset, len_str;
-
- if (unlikely(!count))
- return 0;
- done = 0;
- do {
- offset = (size_t)src & ~PAGE_MASK;
- len = min(count - done, PAGE_SIZE - offset);
- if (copy_from_user_std(len, src, dst))
- return -EFAULT;
- len_str = strnlen(dst, len);
- done += len_str;
- src += len_str;
- dst += len_str;
- } while ((len_str == len) && (done < count));
- return done;
-}
-
-#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg) \
- asm volatile( \
- " sacf 256\n" \
- "0: l %1,0(%6)\n" \
- "1:"insn \
- "2: cs %1,%2,0(%6)\n" \
- "3: jl 1b\n" \
- " lhi %0,0\n" \
- "4: sacf 0\n" \
- EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b) \
- : "=d" (ret), "=&d" (oldval), "=&d" (newval), \
- "=m" (*uaddr) \
- : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \
- "m" (*uaddr) : "cc");
-
-int futex_atomic_op_std(int op, u32 __user *uaddr, int oparg, int *old)
-{
- int oldval = 0, newval, ret;
-
- switch (op) {
- case FUTEX_OP_SET:
- __futex_atomic_op("lr %2,%5\n",
- ret, oldval, newval, uaddr, oparg);
- break;
- case FUTEX_OP_ADD:
- __futex_atomic_op("lr %2,%1\nar %2,%5\n",
- ret, oldval, newval, uaddr, oparg);
- break;
- case FUTEX_OP_OR:
- __futex_atomic_op("lr %2,%1\nor %2,%5\n",
- ret, oldval, newval, uaddr, oparg);
- break;
- case FUTEX_OP_ANDN:
- __futex_atomic_op("lr %2,%1\nnr %2,%5\n",
- ret, oldval, newval, uaddr, oparg);
- break;
- case FUTEX_OP_XOR:
- __futex_atomic_op("lr %2,%1\nxr %2,%5\n",
- ret, oldval, newval, uaddr, oparg);
- break;
- default:
- ret = -ENOSYS;
- }
- *old = oldval;
- return ret;
-}
-
-int futex_atomic_cmpxchg_std(u32 *uval, u32 __user *uaddr,
- u32 oldval, u32 newval)
-{
- int ret;
-
- asm volatile(
- " sacf 256\n"
- "0: cs %1,%4,0(%5)\n"
- "1: la %0,0\n"
- "2: sacf 0\n"
- EX_TABLE(0b,2b) EX_TABLE(1b,2b)
- : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
- : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
- : "cc", "memory" );
- *uval = oldval;
- return ret;
-}
-
-struct uaccess_ops uaccess_std = {
- .copy_from_user = copy_from_user_std_check,
- .copy_from_user_small = copy_from_user_std,
- .copy_to_user = copy_to_user_std_check,
- .copy_to_user_small = copy_to_user_std,
- .copy_in_user = copy_in_user_std,
- .clear_user = clear_user_std,
- .strnlen_user = strnlen_user_std,
- .strncpy_from_user = strncpy_from_user_std,
- .futex_atomic_op = futex_atomic_op_std,
- .futex_atomic_cmpxchg = futex_atomic_cmpxchg_std,
-};
diff --git a/arch/s390/math-emu/math.c b/arch/s390/math-emu/math.c
index 58bff541fde9..a6ba0d724335 100644
--- a/arch/s390/math-emu/math.c
+++ b/arch/s390/math-emu/math.c
@@ -19,6 +19,8 @@
#include <math-emu/double.h>
#include <math-emu/quad.h>
+#define FPC_VALID_MASK 0xF8F8FF03
+
/*
* I miss a macro to round a floating point number to the
* nearest integer in the same floating point format.
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index 9d84a1feefef..79ddd580d605 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -253,12 +253,12 @@ static int cmm_skip_blanks(char *cp, char **endp)
static struct ctl_table cmm_table[];
-static int cmm_pages_handler(ctl_table *ctl, int write, void __user *buffer,
- size_t *lenp, loff_t *ppos)
+static int cmm_pages_handler(struct ctl_table *ctl, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
char buf[16], *p;
+ unsigned int len;
long nr;
- int len;
if (!*lenp || (*ppos && !write)) {
*lenp = 0;
@@ -293,12 +293,12 @@ static int cmm_pages_handler(ctl_table *ctl, int write, void __user *buffer,
return 0;
}
-static int cmm_timeout_handler(ctl_table *ctl, int write, void __user *buffer,
- size_t *lenp, loff_t *ppos)
+static int cmm_timeout_handler(struct ctl_table *ctl, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
char buf[64], *p;
long nr, seconds;
- int len;
+ unsigned int len;
if (!*lenp || (*ppos && !write)) {
*lenp = 0;
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index fc6679210d83..d95265b2719f 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -115,13 +115,8 @@ static inline int user_space_fault(unsigned long trans_exc_code)
if (trans_exc_code == 2)
/* Access via secondary space, set_fs setting decides */
return current->thread.mm_segment.ar4;
- if (s390_user_mode == HOME_SPACE_MODE)
- /* User space if the access has been done via home space. */
- return trans_exc_code == 3;
/*
- * If the user space is not the home space the kernel runs in home
- * space. Access via secondary space has already been covered,
- * access via primary space or access register is from user space
+ * Access via primary space or access register is from user space
* and access via home space is from the kernel.
*/
return trans_exc_code != 3;
@@ -428,50 +423,13 @@ void __kprobes do_dat_exception(struct pt_regs *regs)
do_fault_error(regs, fault);
}
-#ifdef CONFIG_64BIT
-void __kprobes do_asce_exception(struct pt_regs *regs)
-{
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
- unsigned long trans_exc_code;
-
- /*
- * The instruction that caused the program check has
- * been nullified. Don't signal single step via SIGTRAP.
- */
- clear_tsk_thread_flag(current, TIF_PER_TRAP);
-
- trans_exc_code = regs->int_parm_long;
- if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
- goto no_context;
-
- down_read(&mm->mmap_sem);
- vma = find_vma(mm, trans_exc_code & __FAIL_ADDR_MASK);
- up_read(&mm->mmap_sem);
-
- if (vma) {
- update_mm(mm, current);
- return;
- }
-
- /* User mode accesses just cause a SIGSEGV */
- if (user_mode(regs)) {
- do_sigsegv(regs, SEGV_MAPERR);
- return;
- }
-
-no_context:
- do_no_context(regs);
-}
-#endif
-
int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write)
{
struct pt_regs regs;
int access, fault;
/* Emulate a uaccess fault from kernel mode. */
- regs.psw.mask = psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK;
+ regs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT | PSW_MASK_MCHECK;
if (!irqs_disabled())
regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT;
regs.psw.addr = (unsigned long) __builtin_return_address(0);
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index 5d758db27bdc..639fce464008 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -180,9 +180,15 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
addr = start;
len = (unsigned long) nr_pages << PAGE_SHIFT;
end = start + len;
- if ((end < start) || (end > TASK_SIZE))
+ if ((end <= start) || (end > TASK_SIZE))
return 0;
-
+ /*
+ * local_irq_save() doesn't prevent pagetable teardown, but does
+ * prevent the pagetables from being freed on s390.
+ *
+ * So long as we atomically load page table pointers versus teardown,
+ * we can follow the address down to the page and take a ref on it.
+ */
local_irq_save(flags);
pgdp = pgd_offset(mm, addr);
do {
@@ -219,63 +225,22 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct page **pages)
{
struct mm_struct *mm = current->mm;
- unsigned long addr, len, end;
- unsigned long next;
- pgd_t *pgdp, pgd;
- int nr = 0;
+ int nr, ret;
start &= PAGE_MASK;
- addr = start;
- len = (unsigned long) nr_pages << PAGE_SHIFT;
- end = start + len;
- if ((end < start) || (end > TASK_SIZE))
- goto slow_irqon;
-
- /*
- * local_irq_disable() doesn't prevent pagetable teardown, but does
- * prevent the pagetables from being freed on s390.
- *
- * So long as we atomically load page table pointers versus teardown,
- * we can follow the address down to the the page and take a ref on it.
- */
- local_irq_disable();
- pgdp = pgd_offset(mm, addr);
- do {
- pgd = *pgdp;
- barrier();
- next = pgd_addr_end(addr, end);
- if (pgd_none(pgd))
- goto slow;
- if (!gup_pud_range(pgdp, pgd, addr, next, write, pages, &nr))
- goto slow;
- } while (pgdp++, addr = next, addr != end);
- local_irq_enable();
-
- VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
- return nr;
-
- {
- int ret;
-slow:
- local_irq_enable();
-slow_irqon:
- /* Try to get the remaining pages with get_user_pages */
- start += nr << PAGE_SHIFT;
- pages += nr;
-
- down_read(&mm->mmap_sem);
- ret = get_user_pages(current, mm, start,
- (end - start) >> PAGE_SHIFT, write, 0, pages, NULL);
- up_read(&mm->mmap_sem);
-
- /* Have to be a bit careful with return values */
- if (nr > 0) {
- if (ret < 0)
- ret = nr;
- else
- ret += nr;
- }
-
- return ret;
- }
+ nr = __get_user_pages_fast(start, nr_pages, write, pages);
+ if (nr == nr_pages)
+ return nr;
+
+ /* Try to get the remaining pages with get_user_pages */
+ start += nr << PAGE_SHIFT;
+ pages += nr;
+ down_read(&mm->mmap_sem);
+ ret = get_user_pages(current, mm, start,
+ nr_pages - nr, write, 0, pages, NULL);
+ up_read(&mm->mmap_sem);
+ /* Have to be a bit careful with return values */
+ if (nr > 0)
+ ret = (ret < 0) ? nr : ret + nr;
+ return ret;
}
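
Worked example (not part of the patch) of the "careful with return values" rule above: pages already pinned by the fast path must not be discarded when the slow path fails. If the fast path pinned nr = 3 pages and get_user_pages() then returned -EFAULT:

    /* Hedged worked example of the final return handling. */
    int nr = 3, ret = -EFAULT;

    if (nr > 0)
            ret = (ret < 0) ? nr : ret + nr;    /* ret becomes 3, not -EFAULT */
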
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 40023290ee5b..6bcb045d2bd2 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -101,18 +101,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
{
- int rc;
-
if (is_compat_task() || (TASK_SIZE >= (1UL << 53)))
return 0;
if (!(flags & MAP_FIXED))
addr = 0;
- if ((addr + len) >= TASK_SIZE) {
- rc = crst_table_upgrade(current->mm, 1UL << 53);
- if (rc)
- return rc;
- update_mm(current->mm, current);
- }
+ if ((addr + len) >= TASK_SIZE)
+ return crst_table_upgrade(current->mm, 1UL << 53);
return 0;
}
@@ -132,7 +126,6 @@ s390_get_unmapped_area(struct file *filp, unsigned long addr,
rc = crst_table_upgrade(mm, 1UL << 53);
if (rc)
return (unsigned long) rc;
- update_mm(mm, current);
area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
}
return area;
@@ -155,7 +148,6 @@ s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
rc = crst_table_upgrade(mm, 1UL << 53);
if (rc)
return (unsigned long) rc;
- update_mm(mm, current);
area = arch_get_unmapped_area_topdown(filp, addr, len,
pgoff, flags);
}
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index 990397420e6b..8400f494623f 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -9,6 +9,7 @@
#include <asm/pgtable.h>
#include <asm/page.h>
+#if PAGE_DEFAULT_KEY
static inline unsigned long sske_frame(unsigned long addr, unsigned char skey)
{
asm volatile(".insn rrf,0xb22b0000,%[skey],%[addr],9,0"
@@ -16,7 +17,7 @@ static inline unsigned long sske_frame(unsigned long addr, unsigned char skey)
return addr;
}
-void storage_key_init_range(unsigned long start, unsigned long end)
+void __storage_key_init_range(unsigned long start, unsigned long end)
{
unsigned long boundary, size;
@@ -36,6 +37,7 @@ void storage_key_init_range(unsigned long start, unsigned long end)
start += PAGE_SIZE;
}
}
+#endif
static pte_t *walk_page_table(unsigned long addr)
{
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index de8cbc30dcd1..0a2e5e086749 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -48,12 +48,23 @@ void crst_table_free(struct mm_struct *mm, unsigned long *table)
}
#ifdef CONFIG_64BIT
+static void __crst_table_upgrade(void *arg)
+{
+ struct mm_struct *mm = arg;
+
+ if (current->active_mm == mm)
+ update_mm(mm, current);
+ __tlb_flush_local();
+}
+
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
{
unsigned long *table, *pgd;
unsigned long entry;
+ int flush;
BUG_ON(limit > (1UL << 53));
+ flush = 0;
repeat:
table = crst_table_alloc(mm);
if (!table)
@@ -79,12 +90,15 @@ repeat:
mm->pgd = (pgd_t *) table;
mm->task_size = mm->context.asce_limit;
table = NULL;
+ flush = 1;
}
spin_unlock_bh(&mm->page_table_lock);
if (table)
crst_table_free(mm, table);
if (mm->context.asce_limit < limit)
goto repeat;
+ if (flush)
+ on_each_cpu(__crst_table_upgrade, mm, 0);
return 0;
}
@@ -92,6 +106,8 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{
pgd_t *pgd;
+ if (current->active_mm == mm)
+ __tlb_flush_mm(mm);
while (mm->context.asce_limit > limit) {
pgd = mm->pgd;
switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
@@ -114,6 +130,8 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
mm->task_size = mm->context.asce_limit;
crst_table_free(mm, (unsigned long *) pgd);
}
+ if (current->active_mm == mm)
+ update_mm(mm, current);
}
#endif
@@ -1087,10 +1105,9 @@ again:
continue;
/* Allocate new page table with pgstes */
new = page_table_alloc_pgste(mm, addr);
- if (!new) {
- mm->context.has_pgste = 0;
- continue;
- }
+ if (!new)
+ return -ENOMEM;
+
spin_lock(&mm->page_table_lock);
if (likely((unsigned long *) pmd_deref(*pmd) == table)) {
/* Nuke pmd entry pointing to the "short" page table */
@@ -1128,13 +1145,15 @@ static unsigned long page_table_realloc_pud(struct mmu_gather *tlb,
if (pud_none_or_clear_bad(pud))
continue;
next = page_table_realloc_pmd(tlb, mm, pud, addr, next);
+ if (unlikely(IS_ERR_VALUE(next)))
+ return next;
} while (pud++, addr = next, addr != end);
return addr;
}
-static void page_table_realloc(struct mmu_gather *tlb, struct mm_struct *mm,
- unsigned long addr, unsigned long end)
+static unsigned long page_table_realloc(struct mmu_gather *tlb, struct mm_struct *mm,
+ unsigned long addr, unsigned long end)
{
unsigned long next;
pgd_t *pgd;
@@ -1145,7 +1164,11 @@ static void page_table_realloc(struct mmu_gather *tlb, struct mm_struct *mm,
if (pgd_none_or_clear_bad(pgd))
continue;
next = page_table_realloc_pud(tlb, mm, pgd, addr, next);
+ if (unlikely(IS_ERR_VALUE(next)))
+ return next;
} while (pgd++, addr = next, addr != end);
+
+ return 0;
}
/*
@@ -1157,10 +1180,6 @@ int s390_enable_sie(void)
struct mm_struct *mm = tsk->mm;
struct mmu_gather tlb;
- /* Do we have switched amode? If no, we cannot do sie */
- if (s390_user_mode == HOME_SPACE_MODE)
- return -EINVAL;
-
/* Do we have pgstes? if yes, we are done */
if (mm_has_pgste(tsk->mm))
return 0;
@@ -1169,9 +1188,9 @@ int s390_enable_sie(void)
/* split thp mappings and disable thp for future mappings */
thp_split_mm(mm);
/* Reallocate the page tables with pgstes */
- mm->context.has_pgste = 1;
tlb_gather_mmu(&tlb, mm, 0, TASK_SIZE);
- page_table_realloc(&tlb, mm, 0, TASK_SIZE);
+ if (!page_table_realloc(&tlb, mm, 0, TASK_SIZE))
+ mm->context.has_pgste = 1;
tlb_finish_mmu(&tlb, 0, TASK_SIZE);
up_write(&mm->mmap_sem);
return mm->context.has_pgste ? 0 : -ENOMEM;
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index a5df511e27a2..16871da37371 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -12,8 +12,8 @@
#include <linux/random.h>
#include <linux/init.h>
#include <asm/cacheflush.h>
-#include <asm/processor.h>
#include <asm/facility.h>
+#include <asm/dis.h>
/*
* Conventions:
@@ -156,8 +156,8 @@ static void bpf_jit_prologue(struct bpf_jit *jit)
EMIT6(0xeb8ff058, 0x0024);
/* lgr %r14,%r15 */
EMIT4(0xb90400ef);
- /* ahi %r15,<offset> */
- EMIT4_IMM(0xa7fa0000, (jit->seen & SEEN_MEM) ? -112 : -80);
+ /* aghi %r15,<offset> */
+ EMIT4_IMM(0xa7fb0000, (jit->seen & SEEN_MEM) ? -112 : -80);
/* stg %r14,152(%r15) */
EMIT6(0xe3e0f098, 0x0024);
} else if ((jit->seen & SEEN_XREG) && (jit->seen & SEEN_LITERAL))
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index f17a8343e360..0c9a17780e4b 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -120,26 +120,17 @@ EXPORT_SYMBOL_GPL(pci_proc_domain);
static int zpci_set_airq(struct zpci_dev *zdev)
{
u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT);
- struct zpci_fib *fib;
- int rc;
-
- fib = (void *) get_zeroed_page(GFP_KERNEL);
- if (!fib)
- return -ENOMEM;
+ struct zpci_fib fib = {0};
- fib->isc = PCI_ISC;
- fib->sum = 1; /* enable summary notifications */
- fib->noi = airq_iv_end(zdev->aibv);
- fib->aibv = (unsigned long) zdev->aibv->vector;
- fib->aibvo = 0; /* each zdev has its own interrupt vector */
- fib->aisb = (unsigned long) zpci_aisb_iv->vector + (zdev->aisb/64)*8;
- fib->aisbo = zdev->aisb & 63;
+ fib.isc = PCI_ISC;
+ fib.sum = 1; /* enable summary notifications */
+ fib.noi = airq_iv_end(zdev->aibv);
+ fib.aibv = (unsigned long) zdev->aibv->vector;
+ fib.aibvo = 0; /* each zdev has its own interrupt vector */
+ fib.aisb = (unsigned long) zpci_aisb_iv->vector + (zdev->aisb/64)*8;
+ fib.aisbo = zdev->aisb & 63;
- rc = zpci_mod_fc(req, fib);
- pr_debug("%s mpcifc returned noi: %d\n", __func__, fib->noi);
-
- free_page((unsigned long) fib);
- return rc;
+ return zpci_mod_fc(req, &fib);
}
struct mod_pci_args {
@@ -152,22 +143,14 @@ struct mod_pci_args {
static int mod_pci(struct zpci_dev *zdev, int fn, u8 dmaas, struct mod_pci_args *args)
{
u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, fn);
- struct zpci_fib *fib;
- int rc;
-
- /* The FIB must be available even if it's not used */
- fib = (void *) get_zeroed_page(GFP_KERNEL);
- if (!fib)
- return -ENOMEM;
+ struct zpci_fib fib = {0};
- fib->pba = args->base;
- fib->pal = args->limit;
- fib->iota = args->iota;
- fib->fmb_addr = args->fmb_addr;
+ fib.pba = args->base;
+ fib.pal = args->limit;
+ fib.iota = args->iota;
+ fib.fmb_addr = args->fmb_addr;
- rc = zpci_mod_fc(req, fib);
- free_page((unsigned long) fib);
- return rc;
+ return zpci_mod_fc(req, &fib);
}
/* Modify PCI: Register I/O address translation parameters */
@@ -424,7 +407,6 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
struct msi_msg msg;
int rc;
- pr_debug("%s: requesting %d MSI-X interrupts...", __func__, nvec);
if (type != PCI_CAP_ID_MSIX && type != PCI_CAP_ID_MSI)
return -EINVAL;
msi_vecs = min(nvec, ZPCI_MSI_VEC_MAX);
@@ -489,7 +471,6 @@ out_msi:
out_si:
airq_iv_free_bit(zpci_aisb_iv, aisb);
out:
- dev_err(&pdev->dev, "register MSI failed with: %d\n", rc);
return rc;
}
@@ -499,14 +480,10 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev)
struct msi_desc *msi;
int rc;
- pr_info("%s: on pdev: %p\n", __func__, pdev);
-
/* Disable adapter interrupts */
rc = zpci_clear_airq(zdev);
- if (rc) {
- dev_err(&pdev->dev, "deregister MSI failed with: %d\n", rc);
+ if (rc)
return;
- }
/* Release MSI interrupts */
list_for_each_entry(msi, &pdev->msi_list, list) {
@@ -625,8 +602,11 @@ static struct resource *zpci_alloc_bus_resource(unsigned long start, unsigned lo
r->name = name;
rc = request_resource(&iomem_resource, r);
- if (rc)
- pr_debug("request resource %pR failed\n", r);
+ if (rc) {
+ kfree(r->name);
+ kfree(r);
+ return ERR_PTR(-ENOMEM);
+ }
return r;
}
@@ -708,6 +688,47 @@ void pcibios_disable_device(struct pci_dev *pdev)
zdev->pdev = NULL;
}
+#ifdef CONFIG_HIBERNATE_CALLBACKS
+static int zpci_restore(struct device *dev)
+{
+ struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
+ int ret = 0;
+
+ if (zdev->state != ZPCI_FN_STATE_ONLINE)
+ goto out;
+
+ ret = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES);
+ if (ret)
+ goto out;
+
+ zpci_map_resources(zdev);
+ zpci_register_ioat(zdev, 0, zdev->start_dma + PAGE_OFFSET,
+ zdev->start_dma + zdev->iommu_size - 1,
+ (u64) zdev->dma_table);
+
+out:
+ return ret;
+}
+
+static int zpci_freeze(struct device *dev)
+{
+ struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
+
+ if (zdev->state != ZPCI_FN_STATE_ONLINE)
+ return 0;
+
+ zpci_unregister_ioat(zdev, 0);
+ return clp_disable_fh(zdev);
+}
+
+struct dev_pm_ops pcibios_pm_ops = {
+ .thaw_noirq = zpci_restore,
+ .freeze_noirq = zpci_freeze,
+ .restore_noirq = zpci_restore,
+ .poweroff_noirq = zpci_freeze,
+};
+#endif /* CONFIG_HIBERNATE_CALLBACKS */
+
static int zpci_scan_bus(struct zpci_dev *zdev)
{
struct resource *res;
@@ -781,7 +802,6 @@ int zpci_enable_device(struct zpci_dev *zdev)
rc = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES);
if (rc)
goto out;
- pr_info("Enabled fh: 0x%x fid: 0x%x\n", zdev->fh, zdev->fid);
rc = zpci_dma_init_device(zdev);
if (rc)
@@ -901,10 +921,6 @@ static int __init pci_base_init(void)
|| !test_facility(71) || !test_facility(72))
return 0;
- pr_info("Probing PCI hardware: PCI:%d SID:%d AEN:%d\n",
- test_facility(69), test_facility(70),
- test_facility(71));
-
rc = zpci_debug_init();
if (rc)
goto out;
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
index 475563c3d1e4..84147984224a 100644
--- a/arch/s390/pci/pci_clp.c
+++ b/arch/s390/pci/pci_clp.c
@@ -16,6 +16,16 @@
#include <asm/pci_debug.h>
#include <asm/pci_clp.h>
+static inline void zpci_err_clp(unsigned int rsp, int rc)
+{
+ struct {
+ unsigned int rsp;
+ int rc;
+ } __packed data = {rsp, rc};
+
+ zpci_err_hex(&data, sizeof(data));
+}
+
/*
* Call Logical Processor
* Retry logic is handled by the caller.
@@ -54,7 +64,6 @@ static void clp_store_query_pci_fngrp(struct zpci_dev *zdev,
zdev->msi_addr = response->msia;
zdev->fmb_update = response->mui;
- pr_debug("Supported number of MSI vectors: %u\n", response->noi);
switch (response->version) {
case 1:
zdev->max_bus_speed = PCIE_SPEED_5_0GT;
@@ -84,8 +93,8 @@ static int clp_query_pci_fngrp(struct zpci_dev *zdev, u8 pfgid)
if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)
clp_store_query_pci_fngrp(zdev, &rrb->response);
else {
- pr_err("Query PCI FNGRP failed with response: %x cc: %d\n",
- rrb->response.hdr.rsp, rc);
+ zpci_err("Q PCI FGRP:\n");
+ zpci_err_clp(rrb->response.hdr.rsp, rc);
rc = -EIO;
}
clp_free_block(rrb);
@@ -131,8 +140,8 @@ static int clp_query_pci_fn(struct zpci_dev *zdev, u32 fh)
if (rrb->response.pfgid)
rc = clp_query_pci_fngrp(zdev, rrb->response.pfgid);
} else {
- pr_err("Query PCI failed with response: %x cc: %d\n",
- rrb->response.hdr.rsp, rc);
+ zpci_err("Q PCI FN:\n");
+ zpci_err_clp(rrb->response.hdr.rsp, rc);
rc = -EIO;
}
out:
@@ -206,8 +215,8 @@ static int clp_set_pci_fn(u32 *fh, u8 nr_dma_as, u8 command)
if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)
*fh = rrb->response.fh;
else {
- zpci_dbg(0, "SPF fh:%x, cc:%d, resp:%x\n", *fh, rc,
- rrb->response.hdr.rsp);
+ zpci_err("Set PCI FN:\n");
+ zpci_err_clp(rrb->response.hdr.rsp, rc);
rc = -EIO;
}
clp_free_block(rrb);
@@ -262,8 +271,8 @@ static int clp_list_pci(struct clp_req_rsp_list_pci *rrb,
/* Get PCI function handle list */
rc = clp_instr(rrb);
if (rc || rrb->response.hdr.rsp != CLP_RC_OK) {
- pr_err("List PCI failed with response: 0x%x cc: %d\n",
- rrb->response.hdr.rsp, rc);
+ zpci_err("List PCI FN:\n");
+ zpci_err_clp(rrb->response.hdr.rsp, rc);
rc = -EIO;
goto out;
}
@@ -273,17 +282,11 @@ static int clp_list_pci(struct clp_req_rsp_list_pci *rrb,
entries = (rrb->response.hdr.len - LIST_PCI_HDR_LEN) /
rrb->response.entry_size;
- pr_info("Detected number of PCI functions: %u\n", entries);
- /* Store the returned resume token as input for the next call */
resume_token = rrb->response.resume_token;
-
for (i = 0; i < entries; i++)
cb(&rrb->response.fh_list[i]);
} while (resume_token);
-
- pr_debug("Maximum number of supported PCI functions: %u\n",
- rrb->response.max_fn);
out:
return rc;
}
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index 7e5573acb063..9b83d080902d 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -145,10 +145,8 @@ static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
return -EINVAL;
spin_lock_irqsave(&zdev->dma_table_lock, irq_flags);
- if (!zdev->dma_table) {
- dev_err(&zdev->pdev->dev, "Missing DMA table\n");
+ if (!zdev->dma_table)
goto no_refresh;
- }
for (i = 0; i < nr_pages; i++) {
dma_update_cpu_trans(zdev, page_addr, dma_addr, flags);
@@ -280,11 +278,8 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
size = nr_pages * PAGE_SIZE;
dma_addr = zdev->start_dma + iommu_page_index * PAGE_SIZE;
- if (dma_addr + size > zdev->end_dma) {
- dev_err(dev, "(dma_addr: 0x%16.16LX + size: 0x%16.16lx) > end_dma: 0x%16.16Lx\n",
- dma_addr, size, zdev->end_dma);
+ if (dma_addr + size > zdev->end_dma)
goto out_free;
- }
if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
flags |= ZPCI_TABLE_PROTECTED;
@@ -297,7 +292,8 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
out_free:
dma_free_iommu(zdev, iommu_page_index, nr_pages);
out_err:
- dev_err(dev, "Failed to map addr: %lx\n", pa);
+ zpci_err("map error:\n");
+ zpci_err_hex(&pa, sizeof(pa));
return DMA_ERROR_CODE;
}
@@ -312,8 +308,10 @@ static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
dma_addr = dma_addr & PAGE_MASK;
if (dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
- ZPCI_TABLE_PROTECTED | ZPCI_PTE_INVALID))
- dev_err(dev, "Failed to unmap addr: %Lx\n", dma_addr);
+ ZPCI_TABLE_PROTECTED | ZPCI_PTE_INVALID)) {
+ zpci_err("unmap error:\n");
+ zpci_err_hex(&dma_addr, sizeof(dma_addr));
+ }
atomic64_add(npages, (atomic64_t *) &zdev->fmb->unmapped_pages);
iommu_page_index = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;
diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
index 0aecaf954845..278e671ec9ac 100644
--- a/arch/s390/pci/pci_event.c
+++ b/arch/s390/pci/pci_event.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/pci.h>
+#include <asm/pci_debug.h>
/* Content Code Description for PCI Function Error */
struct zpci_ccdf_err {
@@ -41,25 +42,15 @@ struct zpci_ccdf_avail {
u16 pec; /* PCI event code */
} __packed;
-static void zpci_event_log_err(struct zpci_ccdf_err *ccdf)
-{
- struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
-
- zpci_err("SEI error CCD:\n");
- zpci_err_hex(ccdf, sizeof(*ccdf));
- dev_err(&zdev->pdev->dev, "event code: 0x%x\n", ccdf->pec);
-}
-
static void zpci_event_log_avail(struct zpci_ccdf_avail *ccdf)
{
struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
+ struct pci_dev *pdev = zdev ? zdev->pdev : NULL;
- pr_err("%s%s: availability event: fh: 0x%x fid: 0x%x event code: 0x%x reason:",
- (zdev) ? dev_driver_string(&zdev->pdev->dev) : "?",
- (zdev) ? dev_name(&zdev->pdev->dev) : "?",
- ccdf->fh, ccdf->fid, ccdf->pec);
- print_hex_dump(KERN_CONT, "ccdf", DUMP_PREFIX_OFFSET,
- 16, 1, ccdf, sizeof(*ccdf), false);
+ pr_info("%s: Event 0x%x reconfigured PCI function 0x%x\n",
+ pdev ? pci_name(pdev) : "n/a", ccdf->pec, ccdf->fid);
+ zpci_err("avail CCDF:\n");
+ zpci_err_hex(ccdf, sizeof(*ccdf));
switch (ccdf->pec) {
case 0x0301:
@@ -79,14 +70,16 @@ static void zpci_event_log_avail(struct zpci_ccdf_avail *ccdf)
void zpci_event_error(void *data)
{
struct zpci_ccdf_err *ccdf = data;
- struct zpci_dev *zdev;
+ struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
+
+ zpci_err("error CCDF:\n");
+ zpci_err_hex(ccdf, sizeof(*ccdf));
- zpci_event_log_err(ccdf);
- zdev = get_zdev_by_fid(ccdf->fid);
- if (!zdev) {
- pr_err("Error event for unknown fid: %x", ccdf->fid);
+ if (!zdev)
return;
- }
+
+ pr_err("%s: Event 0x%x reports an error for PCI function 0x%x\n",
+ pci_name(zdev->pdev), ccdf->pec, ccdf->fid);
}
void zpci_event_availability(void *data)
diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c
index 66e505ca24ef..3c7eb5dd91c6 100644
--- a/drivers/pci/hotplug/s390_pci_hpc.c
+++ b/drivers/pci/hotplug/s390_pci_hpc.c
@@ -133,7 +133,6 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
{
struct slot *slot = hotplug_slot->private;
- pr_debug("%s - physical_slot = %s\n", __func__, hotplug_slot_name(hotplug_slot));
kfree(slot->hotplug_slot->info);
kfree(slot->hotplug_slot);
kfree(slot);
@@ -183,10 +182,9 @@ int zpci_init_slot(struct zpci_dev *zdev)
snprintf(name, SLOT_NAME_SIZE, "%08x", zdev->fid);
rc = pci_hp_register(slot->hotplug_slot, zdev->bus,
ZPCI_DEVFN, name);
- if (rc) {
- pr_err("pci_hp_register failed with error %d\n", rc);
+ if (rc)
goto error_reg;
- }
+
list_add(&slot->slot_list, &s390_hotplug_slot_list);
return 0;
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 451bf99582ff..244f77f844f0 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -698,10 +698,11 @@ static void dasd_profile_start(struct dasd_block *block,
}
spin_lock(&block->profile.lock);
- if (block->profile.data)
+ if (block->profile.data) {
block->profile.data->dasd_io_nr_req[counter]++;
if (rq_data_dir(req) == READ)
block->profile.data->dasd_read_nr_req[counter]++;
+ }
spin_unlock(&block->profile.lock);
/*
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index 5d73e6e49af6..548209a9c43c 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -223,8 +223,12 @@ static void scm_blk_request(struct request_queue *rq)
int ret;
while ((req = blk_peek_request(rq))) {
- if (req->cmd_type != REQ_TYPE_FS)
+ if (req->cmd_type != REQ_TYPE_FS) {
+ blk_start_request(req);
+ blk_dump_rq_flags(req, KMSG_COMPONENT " bad request");
+ blk_end_request_all(req, -EIO);
continue;
+ }
if (!scm_permit_request(bdev, req)) {
scm_ensure_queue_restart(bdev);
diff --git a/drivers/s390/block/scm_blk.h b/drivers/s390/block/scm_blk.h
index 8b387b32fd62..e59331e6c2e5 100644
--- a/drivers/s390/block/scm_blk.h
+++ b/drivers/s390/block/scm_blk.h
@@ -107,7 +107,7 @@ extern debug_info_t *scm_debug;
static inline void SCM_LOG_HEX(int level, void *data, int length)
{
- if (level > scm_debug->level)
+ if (!debug_level_enabled(scm_debug, level))
return;
while (length > 0) {
debug_event(scm_debug, level, data, length);
diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c
index 4600aa10a1c6..668b32b0dc1d 100644
--- a/drivers/s390/char/monwriter.c
+++ b/drivers/s390/char/monwriter.c
@@ -60,7 +60,7 @@ static int monwrite_diag(struct monwrite_hdr *myhdr, char *buffer, int fcn)
struct appldata_product_id id;
int rc;
- strcpy(id.prod_nr, "LNXAPPL");
+ strncpy(id.prod_nr, "LNXAPPL", 7);
id.prod_fn = myhdr->applid;
id.record_nr = myhdr->record_num;
id.version_nr = myhdr->version;
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 24a08e8f19e1..2cdec21e8924 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -615,10 +615,10 @@ raw3270_reset_device_cb(struct raw3270_request *rq, void *data)
if (rp->state != RAW3270_STATE_RESET)
return;
- if (rq && rq->rc) {
+ if (rq->rc) {
/* Reset command failed. */
rp->state = RAW3270_STATE_INIT;
- } else if (0 && MACHINE_IS_VM) {
+ } else if (MACHINE_IS_VM) {
raw3270_size_device_vm(rp);
raw3270_size_device_done(rp);
} else
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 794820a123d0..ffb1fcf0bf5b 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -151,7 +151,7 @@ static int __init init_cpu_info(enum arch_id arch)
/* get info for boot cpu from lowcore, stored in the HSA */
- sa = kmalloc(sizeof(*sa), GFP_KERNEL);
+ sa = dump_save_area_create(0);
if (!sa)
return -ENOMEM;
if (memcpy_hsa_kernel(sa, sys_info.sa_base, sys_info.sa_size) < 0) {
@@ -159,7 +159,6 @@ static int __init init_cpu_info(enum arch_id arch)
kfree(sa);
return -EIO;
}
- zfcpdump_save_areas[0] = sa;
return 0;
}
@@ -246,24 +245,25 @@ static int copy_lc(void __user *buf, void *sa, int sa_off, int len)
static int zcore_add_lc(char __user *buf, unsigned long start, size_t count)
{
unsigned long end;
- int i = 0;
+ int i;
if (count == 0)
return 0;
end = start + count;
- while (zfcpdump_save_areas[i]) {
+ for (i = 0; i < dump_save_areas.count; i++) {
unsigned long cp_start, cp_end; /* copy range */
unsigned long sa_start, sa_end; /* save area range */
unsigned long prefix;
unsigned long sa_off, len, buf_off;
+ struct save_area *save_area = dump_save_areas.areas[i];
- prefix = zfcpdump_save_areas[i]->pref_reg;
+ prefix = save_area->pref_reg;
sa_start = prefix + sys_info.sa_base;
sa_end = prefix + sys_info.sa_base + sys_info.sa_size;
if ((end < sa_start) || (start > sa_end))
- goto next;
+ continue;
cp_start = max(start, sa_start);
cp_end = min(end, sa_end);
@@ -272,10 +272,8 @@ static int zcore_add_lc(char __user *buf, unsigned long start, size_t count)
len = cp_end - cp_start;
TRACE("copy_lc for: %lx\n", start);
- if (copy_lc(buf + buf_off, zfcpdump_save_areas[i], sa_off, len))
+ if (copy_lc(buf + buf_off, save_area, sa_off, len))
return -EFAULT;
-next:
- i++;
}
return 0;
}
@@ -637,8 +635,8 @@ static void __init zcore_header_init(int arch, struct zcore_header *hdr,
hdr->num_pages = mem_size / PAGE_SIZE;
hdr->tod = get_tod_clock();
get_cpu_id(&hdr->cpu_id);
- for (i = 0; zfcpdump_save_areas[i]; i++) {
- prefix = zfcpdump_save_areas[i]->pref_reg;
+ for (i = 0; i < dump_save_areas.count; i++) {
+ prefix = dump_save_areas.areas[i]->pref_reg;
hdr->real_cpu_cnt++;
if (!prefix)
continue;
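
The zcore.c hunks track a rework of the dump infrastructure: the NULL-terminated zfcpdump_save_areas[] array gives way to a counted dump_save_areas container plus a dump_save_area_create() allocator. Only the accesses are visible in this diff, so the following layout is an assumption inferred from dump_save_areas.count and dump_save_areas.areas[i], not the definition from the patch:

	/* Hypothetical shape of the container, inferred from its uses above. */
	struct dump_save_areas {
		struct save_area **areas;   /* one entry per saved CPU state */
		int count;                  /* number of valid entries in areas[] */
	};
	extern struct dump_save_areas dump_save_areas;
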
diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c
index d028fd800c9c..f055df0b167f 100644
--- a/drivers/s390/cio/airq.c
+++ b/drivers/s390/cio/airq.c
@@ -194,15 +194,14 @@ EXPORT_SYMBOL(airq_iv_release);
*/
unsigned long airq_iv_alloc_bit(struct airq_iv *iv)
{
- const unsigned long be_to_le = BITS_PER_LONG - 1;
unsigned long bit;
if (!iv->avail)
return -1UL;
spin_lock(&iv->lock);
- bit = find_first_bit_left(iv->avail, iv->bits);
+ bit = find_first_bit_inv(iv->avail, iv->bits);
if (bit < iv->bits) {
- clear_bit(bit ^ be_to_le, iv->avail);
+ clear_bit_inv(bit, iv->avail);
if (bit >= iv->end)
iv->end = bit + 1;
} else
@@ -220,19 +219,17 @@ EXPORT_SYMBOL(airq_iv_alloc_bit);
*/
void airq_iv_free_bit(struct airq_iv *iv, unsigned long bit)
{
- const unsigned long be_to_le = BITS_PER_LONG - 1;
-
if (!iv->avail)
return;
spin_lock(&iv->lock);
/* Clear (possibly left over) interrupt bit */
- clear_bit(bit ^ be_to_le, iv->vector);
+ clear_bit_inv(bit, iv->vector);
/* Make the bit position available again */
- set_bit(bit ^ be_to_le, iv->avail);
+ set_bit_inv(bit, iv->avail);
if (bit == iv->end - 1) {
/* Find new end of bit-field */
while (--iv->end > 0)
- if (!test_bit((iv->end - 1) ^ be_to_le, iv->avail))
+ if (!test_bit_inv(iv->end - 1, iv->avail))
break;
}
spin_unlock(&iv->lock);
@@ -251,15 +248,13 @@ EXPORT_SYMBOL(airq_iv_free_bit);
unsigned long airq_iv_scan(struct airq_iv *iv, unsigned long start,
unsigned long end)
{
- const unsigned long be_to_le = BITS_PER_LONG - 1;
unsigned long bit;
/* Find non-zero bit starting from 'ivs->next'. */
- bit = find_next_bit_left(iv->vector, end, start);
+ bit = find_next_bit_inv(iv->vector, end, start);
if (bit >= end)
return -1UL;
- /* Clear interrupt bit (find left uses big-endian bit numbers) */
- clear_bit(bit ^ be_to_le, iv->vector);
+ clear_bit_inv(bit, iv->vector);
return bit;
}
EXPORT_SYMBOL(airq_iv_scan);
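
The airq.c conversion removes the manual big-endian bit addressing: the old code XOR-ed every bit number with BITS_PER_LONG - 1 before calling the ordinary little-endian bitops, while the new find_*_bit_inv() and {set,clear,test}_bit_inv() helpers take MSB-first bit numbers directly. The removed lines pin down the intended semantics, sketched here with clear_bit() (the real helpers may be implemented differently, but this is the equivalence the old code encoded):

	/* Semantics implied by the removed "bit ^ be_to_le" code: an _inv
	 * operation on bit n is the plain operation on the mirrored bit
	 * number within each long.
	 */
	static inline void clear_bit_inv_sketch(unsigned long nr, volatile unsigned long *ptr)
	{
		clear_bit(nr ^ (BITS_PER_LONG - 1), ptr);
	}
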
diff --git a/drivers/s390/cio/eadm_sch.c b/drivers/s390/cio/eadm_sch.c
index d9eddcba7e88..aca7bfc113aa 100644
--- a/drivers/s390/cio/eadm_sch.c
+++ b/drivers/s390/cio/eadm_sch.c
@@ -6,6 +6,7 @@
*/
#include <linux/kernel_stat.h>
+#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/device.h>
@@ -42,7 +43,7 @@ static debug_info_t *eadm_debug;
static void EADM_LOG_HEX(int level, void *data, int length)
{
- if (level > eadm_debug->level)
+ if (!debug_level_enabled(eadm_debug, level))
return;
while (length > 0) {
debug_event(eadm_debug, level, data, length);
@@ -159,6 +160,9 @@ static void eadm_subchannel_irq(struct subchannel *sch)
}
scm_irq_handler((struct aob *)(unsigned long)scsw->aob, error);
private->state = EADM_IDLE;
+
+ if (private->completion)
+ complete(private->completion);
}
static struct subchannel *eadm_get_idle_sch(void)
@@ -255,13 +259,32 @@ out:
static void eadm_quiesce(struct subchannel *sch)
{
+ struct eadm_private *private = get_eadm_private(sch);
+ DECLARE_COMPLETION_ONSTACK(completion);
int ret;
+ spin_lock_irq(sch->lock);
+ if (private->state != EADM_BUSY)
+ goto disable;
+
+ if (eadm_subchannel_clear(sch))
+ goto disable;
+
+ private->completion = &completion;
+ spin_unlock_irq(sch->lock);
+
+ wait_for_completion_io(&completion);
+
+ spin_lock_irq(sch->lock);
+ private->completion = NULL;
+
+disable:
+ eadm_subchannel_set_timeout(sch, 0);
do {
- spin_lock_irq(sch->lock);
ret = cio_disable_subchannel(sch);
- spin_unlock_irq(sch->lock);
} while (ret == -EBUSY);
+
+ spin_unlock_irq(sch->lock);
}
static int eadm_subchannel_remove(struct subchannel *sch)
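
The eadm_quiesce() rework stops looping on cio_disable_subchannel() while a request may still be in flight: if the subchannel is busy it issues a clear, then sleeps on an on-stack completion that the interrupt handler (see the eadm_subchannel_irq() hunk above) signals once the state returns to idle. Stripped of the error handling, the waiting side of that pattern looks like this sketch, reusing the names from the hunk:

	DECLARE_COMPLETION_ONSTACK(done);

	spin_lock_irq(sch->lock);
	private->completion = &done;        /* published under the lock ... */
	spin_unlock_irq(sch->lock);

	wait_for_completion_io(&done);      /* ... and signaled from eadm_subchannel_irq() */

	spin_lock_irq(sch->lock);
	private->completion = NULL;
	spin_unlock_irq(sch->lock);
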
diff --git a/drivers/s390/cio/eadm_sch.h b/drivers/s390/cio/eadm_sch.h
index 2779be093982..9664e4653f98 100644
--- a/drivers/s390/cio/eadm_sch.h
+++ b/drivers/s390/cio/eadm_sch.h
@@ -1,6 +1,7 @@
#ifndef EADM_SCH_H
#define EADM_SCH_H
+#include <linux/completion.h>
#include <linux/device.h>
#include <linux/timer.h>
#include <linux/list.h>
@@ -9,9 +10,10 @@
struct eadm_private {
union orb orb;
enum {EADM_IDLE, EADM_BUSY, EADM_NOT_OPER} state;
+ struct completion *completion;
+ struct subchannel *sch;
struct timer_list timer;
struct list_head head;
- struct subchannel *sch;
} __aligned(8);
#define get_eadm_private(n) ((struct eadm_private *)dev_get_drvdata(&n->dev))
diff --git a/drivers/s390/cio/qdio_debug.h b/drivers/s390/cio/qdio_debug.h
index 647b422bb22a..dfac9bfefea3 100644
--- a/drivers/s390/cio/qdio_debug.h
+++ b/drivers/s390/cio/qdio_debug.h
@@ -16,12 +16,6 @@
extern debug_info_t *qdio_dbf_setup;
extern debug_info_t *qdio_dbf_error;
-/* sort out low debug levels early to avoid wasted sprints */
-static inline int qdio_dbf_passes(debug_info_t *dbf_grp, int level)
-{
- return (level <= dbf_grp->level);
-}
-
#define DBF_ERR 3 /* error conditions */
#define DBF_WARN 4 /* warning conditions */
#define DBF_INFO 6 /* informational */
@@ -65,7 +59,7 @@ static inline void DBF_ERROR_HEX(void *addr, int len)
#define DBF_DEV_EVENT(level, device, text...) \
do { \
char debug_buffer[QDIO_DBF_LEN]; \
- if (qdio_dbf_passes(device->debug_area, level)) { \
+ if (debug_level_enabled(device->debug_area, level)) { \
snprintf(debug_buffer, QDIO_DBF_LEN, text); \
debug_text_event(device->debug_area, level, debug_buffer); \
} \
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index bbd3e511c771..3e602e8affa7 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -528,7 +528,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
case SLSB_P_INPUT_PRIMED:
inbound_primed(q, count);
q->first_to_check = add_buf(q->first_to_check, count);
- if (atomic_sub(count, &q->nr_buf_used) == 0)
+ if (atomic_sub_return(count, &q->nr_buf_used) == 0)
qperf_inc(q, inbound_queue_full);
if (q->irq_ptr->perf_stat_enabled)
account_sbals(q, count);
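
The one-line qdio_main.c change hinges on the kernel's atomic API convention: atomic_sub() returns void, and only the _return variant hands back the new counter value, so the result of the subtraction can only be tested against zero via atomic_sub_return() (the atomic.h rework in this series presumably brings the s390 helpers in line with that convention). A minimal illustration:

	/* Generic kernel atomic API convention the fix relies on:
	 *   atomic_sub(i, v)        - subtracts, no return value
	 *   atomic_sub_return(i, v) - subtracts and returns the new value
	 */
	atomic_t used = ATOMIC_INIT(3);

	atomic_sub(1, &used);                     /* no usable result */
	if (atomic_sub_return(2, &used) == 0)     /* new value comes back */
		pr_info("counter dropped to zero\n");
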
diff --git a/drivers/s390/crypto/zcrypt_debug.h b/drivers/s390/crypto/zcrypt_debug.h
index 841ea72e4a4e..28d9349de1ad 100644
--- a/drivers/s390/crypto/zcrypt_debug.h
+++ b/drivers/s390/crypto/zcrypt_debug.h
@@ -11,12 +11,6 @@
/* that gives us 15 characters in the text event views */
#define ZCRYPT_DBF_LEN 16
-/* sort out low debug levels early to avoid wasted sprints */
-static inline int zcrypt_dbf_passes(debug_info_t *dbf_grp, int level)
-{
- return (level <= dbf_grp->level);
-}
-
#define DBF_ERR 3 /* error conditions */
#define DBF_WARN 4 /* warning conditions */
#define DBF_INFO 6 /* informational */
@@ -25,7 +19,7 @@ static inline int zcrypt_dbf_passes(debug_info_t *dbf_grp, int level)
#define ZCRYPT_DBF_COMMON(level, text...) \
do { \
- if (zcrypt_dbf_passes(zcrypt_dbf_common, level)) { \
+ if (debug_level_enabled(zcrypt_dbf_common, level)) { \
char debug_buffer[ZCRYPT_DBF_LEN]; \
snprintf(debug_buffer, ZCRYPT_DBF_LEN, text); \
debug_text_event(zcrypt_dbf_common, level, \
@@ -35,7 +29,7 @@ static inline int zcrypt_dbf_passes(debug_info_t *dbf_grp, int level)
#define ZCRYPT_DBF_DEVICES(level, text...) \
do { \
- if (zcrypt_dbf_passes(zcrypt_dbf_devices, level)) { \
+ if (debug_level_enabled(zcrypt_dbf_devices, level)) { \
char debug_buffer[ZCRYPT_DBF_LEN]; \
snprintf(debug_buffer, ZCRYPT_DBF_LEN, text); \
debug_text_event(zcrypt_dbf_devices, level, \
@@ -45,7 +39,7 @@ static inline int zcrypt_dbf_passes(debug_info_t *dbf_grp, int level)
#define ZCRYPT_DBF_DEV(level, device, text...) \
do { \
- if (zcrypt_dbf_passes(device->dbf_area, level)) { \
+ if (debug_level_enabled(device->dbf_area, level)) { \
char debug_buffer[ZCRYPT_DBF_LEN]; \
snprintf(debug_buffer, ZCRYPT_DBF_LEN, text); \
debug_text_event(device->dbf_area, level, \
diff --git a/drivers/s390/net/claw.h b/drivers/s390/net/claw.h
index 1bc5904df19f..3339b9b607b3 100644
--- a/drivers/s390/net/claw.h
+++ b/drivers/s390/net/claw.h
@@ -114,15 +114,9 @@ do { \
debug_event(claw_dbf_##name,level,(void*)(addr),len); \
} while (0)
-/* Allow to sort out low debug levels early to avoid wasted sprints */
-static inline int claw_dbf_passes(debug_info_t *dbf_grp, int level)
-{
- return (level <= dbf_grp->level);
-}
-
#define CLAW_DBF_TEXT_(level,name,text...) \
do { \
- if (claw_dbf_passes(claw_dbf_##name, level)) { \
+ if (debug_level_enabled(claw_dbf_##name, level)) { \
sprintf(debug_buffer, text); \
debug_text_event(claw_dbf_##name, level, \
debug_buffer); \
diff --git a/drivers/s390/net/ctcm_dbug.c b/drivers/s390/net/ctcm_dbug.c
index 6514e1cb3f1c..8363f1c966ef 100644
--- a/drivers/s390/net/ctcm_dbug.c
+++ b/drivers/s390/net/ctcm_dbug.c
@@ -66,7 +66,7 @@ void ctcm_dbf_longtext(enum ctcm_dbf_names dbf_nix, int level, char *fmt, ...)
char dbf_txt_buf[64];
va_list args;
- if (level > (ctcm_dbf[dbf_nix].id)->level)
+ if (!debug_level_enabled(ctcm_dbf[dbf_nix].id, level))
return;
va_start(args, fmt);
vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
diff --git a/drivers/s390/net/lcs.h b/drivers/s390/net/lcs.h
index 8c03392ac833..150fcb4cebc3 100644
--- a/drivers/s390/net/lcs.h
+++ b/drivers/s390/net/lcs.h
@@ -16,15 +16,9 @@ do { \
debug_event(lcs_dbf_##name,level,(void*)(addr),len); \
} while (0)
-/* Allow to sort out low debug levels early to avoid wasted sprints */
-static inline int lcs_dbf_passes(debug_info_t *dbf_grp, int level)
-{
- return (level <= dbf_grp->level);
-}
-
#define LCS_DBF_TEXT_(level,name,text...) \
do { \
- if (lcs_dbf_passes(lcs_dbf_##name, level)) { \
+ if (debug_level_enabled(lcs_dbf_##name, level)) { \
sprintf(debug_buffer, text); \
debug_text_event(lcs_dbf_##name, level, debug_buffer); \
} \
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 279ad504ec3c..9b333fcf1a4c 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -105,15 +105,9 @@ MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);
-/* Allow to sort out low debug levels early to avoid wasted sprints */
-static inline int iucv_dbf_passes(debug_info_t *dbf_grp, int level)
-{
- return (level <= dbf_grp->level);
-}
-
#define IUCV_DBF_TEXT_(name, level, text...) \
do { \
- if (iucv_dbf_passes(iucv_dbf_##name, level)) { \
+ if (debug_level_enabled(iucv_dbf_##name, level)) { \
char* __buf = get_cpu_var(iucv_dbf_txt_buf); \
sprintf(__buf, text); \
debug_text_event(iucv_dbf_##name, level, __buf); \
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 0a328d0d11be..d7b66a28fe75 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -5096,7 +5096,7 @@ void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...)
char dbf_txt_buf[32];
va_list args;
- if (level > id->level)
+ if (!debug_level_enabled(id, level))
return;
va_start(args, fmt);
vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index 3ac7a4b30dd9..0be3d48681ae 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -278,7 +278,7 @@ struct zfcp_dbf {
static inline
void zfcp_dbf_hba_fsf_resp(char *tag, int level, struct zfcp_fsf_req *req)
{
- if (level <= req->adapter->dbf->hba->level)
+ if (debug_level_enabled(req->adapter->dbf->hba, level))
zfcp_dbf_hba_fsf_res(tag, req);
}
@@ -317,7 +317,7 @@ void _zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *scmd,
struct zfcp_adapter *adapter = (struct zfcp_adapter *)
scmd->device->host->hostdata[0];
- if (level <= adapter->dbf->scsi->level)
+ if (debug_level_enabled(adapter->dbf->scsi, level))
zfcp_dbf_scsi(tag, scmd, req);
}
diff --git a/drivers/tty/hvc/hvc_iucv.c b/drivers/tty/hvc/hvc_iucv.c
index fd17a9b804b8..db19a38c8c69 100644
--- a/drivers/tty/hvc/hvc_iucv.c
+++ b/drivers/tty/hvc/hvc_iucv.c
@@ -1354,8 +1354,7 @@ out_error_memory:
mempool_destroy(hvc_iucv_mempool);
kmem_cache_destroy(hvc_iucv_buffer_cache);
out_error:
- if (hvc_iucv_filter)
- kfree(hvc_iucv_filter);
+ kfree(hvc_iucv_filter);
hvc_iucv_devices = 0; /* ensure that we do not provide any device */
return rc;
}