author     Dan Williams <dan.j.williams@intel.com>   2015-11-09 13:29:39 -0500
committer  Dan Williams <dan.j.williams@intel.com>   2015-11-09 13:29:39 -0500
commit     85ce230051c37dfb979385eb0244bf3655625ba6 (patch)
tree       90abeab25f8c1ea4836b38279790b90f0c780ae3 /arch
parent     538ea4aa44737127ce2b5c8511c7349d2abdcf9c (diff)
parent     209851649dc4f7900a6bfe1de5e2640ab2c7d931 (diff)
Merge branch 'for-4.4/hotplug' into libnvdimm-for-next
Diffstat (limited to 'arch')
-rw-r--r--  arch/alpha/include/asm/word-at-a-time.h | 2
-rw-r--r--  arch/arm/boot/dts/Makefile | 2
-rw-r--r--  arch/arm/boot/dts/exynos4412.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/exynos5250-smdk5250.dts | 1
-rw-r--r--  arch/arm/boot/dts/exynos5420.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/imx53-qsrb.dts | 2
-rw-r--r--  arch/arm/boot/dts/imx53.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/imx6qdl-rex.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/r8a7790.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/r8a7791.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/sun7i-a20.dtsi | 2
-rw-r--r--  arch/arm/mach-exynos/mcpm-exynos.c | 27
-rw-r--r--  arch/arm/mach-exynos/regs-pmu.h | 6
-rw-r--r--  arch/arm64/Makefile | 2
-rw-r--r--  arch/arm64/include/asm/unistd.h | 2
-rw-r--r--  arch/arm64/include/asm/unistd32.h | 9
-rw-r--r--  arch/arm64/include/uapi/asm/signal.h | 3
-rw-r--r--  arch/arm64/kernel/debug-monitors.c | 23
-rw-r--r--  arch/arm64/kernel/insn.c | 6
-rw-r--r--  arch/arm64/kernel/setup.c | 2
-rw-r--r--  arch/arm64/mm/fault.c | 1
-rw-r--r--  arch/h8300/include/asm/Kbuild | 1
-rw-r--r--  arch/mips/include/asm/io.h | 1
-rw-r--r--  arch/mips/include/uapi/asm/swab.h | 19
-rw-r--r--  arch/powerpc/configs/ppc64_defconfig | 2
-rw-r--r--  arch/powerpc/configs/pseries_defconfig | 2
-rw-r--r--  arch/powerpc/include/asm/Kbuild | 1
-rw-r--r--  arch/powerpc/include/asm/machdep.h | 9
-rw-r--r--  arch/powerpc/include/asm/word-at-a-time.h | 5
-rw-r--r--  arch/powerpc/mm/hash_native_64.c | 23
-rw-r--r--  arch/powerpc/platforms/powernv/opal.c | 7
-rw-r--r--  arch/powerpc/platforms/ps3/os-area.c | 5
-rw-r--r--  arch/s390/boot/compressed/Makefile | 2
-rw-r--r--  arch/s390/configs/default_defconfig | 2
-rw-r--r--  arch/s390/configs/gcov_defconfig | 2
-rw-r--r--  arch/s390/configs/performance_defconfig | 2
-rw-r--r--  arch/s390/include/asm/numa.h | 2
-rw-r--r--  arch/s390/include/asm/topology.h | 2
-rw-r--r--  arch/s390/kernel/asm-offsets.c | 1
-rw-r--r--  arch/s390/kernel/entry.S | 30
-rw-r--r--  arch/s390/kernel/vtime.c | 66
-rw-r--r--  arch/s390/numa/mode_emu.c | 4
-rw-r--r--  arch/s390/numa/numa.c | 4
-rw-r--r--  arch/sh/include/asm/page.h | 1
-rw-r--r--  arch/sparc/crypto/aes_glue.c | 2
-rw-r--r--  arch/sparc/crypto/camellia_glue.c | 1
-rw-r--r--  arch/sparc/crypto/des_glue.c | 2
-rw-r--r--  arch/tile/include/asm/Kbuild | 1
-rw-r--r--  arch/tile/include/asm/word-at-a-time.h | 8
-rw-r--r--  arch/x86/Kconfig | 1
-rw-r--r--  arch/x86/crypto/camellia_aesni_avx_glue.c | 5
-rw-r--r--  arch/x86/include/asm/kvm_host.h | 6
-rw-r--r--  arch/x86/include/asm/xen/hypercall.h | 4
-rw-r--r--  arch/x86/kvm/emulate.c | 10
-rw-r--r--  arch/x86/kvm/vmx.c | 26
-rw-r--r--  arch/x86/kvm/x86.c | 135
-rw-r--r--  arch/x86/xen/enlighten.c | 24
-rw-r--r--  arch/x86/xen/p2m.c | 19
-rw-r--r--  arch/x86/xen/setup.c | 4
60 files changed, 346 insertions, 194 deletions
diff --git a/arch/alpha/include/asm/word-at-a-time.h b/arch/alpha/include/asm/word-at-a-time.h
index 6b340d0f1521..902e6ab00a06 100644
--- a/arch/alpha/include/asm/word-at-a-time.h
+++ b/arch/alpha/include/asm/word-at-a-time.h
@@ -52,4 +52,6 @@ static inline unsigned long find_zero(unsigned long bits)
#endif
}
+#define zero_bytemask(mask) ((2ul << (find_zero(mask) * 8)) - 1)
+
#endif /* _ASM_WORD_AT_A_TIME_H */
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index 233159d2eaab..bb8fa023d574 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -578,7 +578,7 @@ dtb-$(CONFIG_MACH_SUN4I) += \
sun4i-a10-hackberry.dtb \
sun4i-a10-hyundai-a7hd.dtb \
sun4i-a10-inet97fv2.dtb \
- sun4i-a10-itead-iteaduino-plus.dts \
+ sun4i-a10-itead-iteaduino-plus.dtb \
sun4i-a10-jesurun-q5.dtb \
sun4i-a10-marsboard.dtb \
sun4i-a10-mini-xplus.dtb \
diff --git a/arch/arm/boot/dts/exynos4412.dtsi b/arch/arm/boot/dts/exynos4412.dtsi
index ca0e3c15977f..294cfe40388d 100644
--- a/arch/arm/boot/dts/exynos4412.dtsi
+++ b/arch/arm/boot/dts/exynos4412.dtsi
@@ -98,6 +98,7 @@
opp-hz = /bits/ 64 <800000000>;
opp-microvolt = <1000000>;
clock-latency-ns = <200000>;
+ opp-suspend;
};
opp07 {
opp-hz = /bits/ 64 <900000000>;
diff --git a/arch/arm/boot/dts/exynos5250-smdk5250.dts b/arch/arm/boot/dts/exynos5250-smdk5250.dts
index 15aea760c1da..c625e71217aa 100644
--- a/arch/arm/boot/dts/exynos5250-smdk5250.dts
+++ b/arch/arm/boot/dts/exynos5250-smdk5250.dts
@@ -197,6 +197,7 @@
regulator-name = "P1.8V_LDO_OUT10";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
+ regulator-always-on;
};
ldo11_reg: LDO11 {
diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi
index df9aee92ecf4..1b3d6c769a3c 100644
--- a/arch/arm/boot/dts/exynos5420.dtsi
+++ b/arch/arm/boot/dts/exynos5420.dtsi
@@ -1117,7 +1117,7 @@
interrupt-parent = <&combiner>;
interrupts = <3 0>;
clock-names = "sysmmu", "master";
- clocks = <&clock CLK_SMMU_FIMD1M0>, <&clock CLK_FIMD1>;
+ clocks = <&clock CLK_SMMU_FIMD1M1>, <&clock CLK_FIMD1>;
power-domains = <&disp_pd>;
#iommu-cells = <0>;
};
diff --git a/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi b/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
index 79ffdfe712aa..3b43e57845ae 100644
--- a/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
+++ b/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
@@ -472,7 +472,6 @@
*/
pinctrl-0 = <&pwm0_out &pwm1_out &pwm2_out &pwm3_out>;
pinctrl-names = "default";
- samsung,pwm-outputs = <0>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx53-qsrb.dts b/arch/arm/boot/dts/imx53-qsrb.dts
index 66e47de5e826..96d7eede412e 100644
--- a/arch/arm/boot/dts/imx53-qsrb.dts
+++ b/arch/arm/boot/dts/imx53-qsrb.dts
@@ -36,7 +36,7 @@
pinctrl-0 = <&pinctrl_pmic>;
reg = <0x08>;
interrupt-parent = <&gpio5>;
- interrupts = <23 0x8>;
+ interrupts = <23 IRQ_TYPE_LEVEL_HIGH>;
regulators {
sw1_reg: sw1a {
regulator-name = "SW1";
diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi
index c3e3ca9362fb..cd170376eaca 100644
--- a/arch/arm/boot/dts/imx53.dtsi
+++ b/arch/arm/boot/dts/imx53.dtsi
@@ -15,6 +15,7 @@
#include <dt-bindings/clock/imx5-clock.h>
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
+#include <dt-bindings/interrupt-controller/irq.h>
/ {
aliases {
diff --git a/arch/arm/boot/dts/imx6qdl-rex.dtsi b/arch/arm/boot/dts/imx6qdl-rex.dtsi
index 3373fd958e95..a50356243888 100644
--- a/arch/arm/boot/dts/imx6qdl-rex.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-rex.dtsi
@@ -35,7 +35,6 @@
compatible = "regulator-fixed";
reg = <1>;
pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_usbh1>;
regulator-name = "usbh1_vbus";
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
@@ -47,7 +46,6 @@
compatible = "regulator-fixed";
reg = <2>;
pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_usbotg>;
regulator-name = "usb_otg_vbus";
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi
index a0b2a79cbfbd..4624d0f2a754 100644
--- a/arch/arm/boot/dts/r8a7790.dtsi
+++ b/arch/arm/boot/dts/r8a7790.dtsi
@@ -1627,6 +1627,7 @@
"mix.0", "mix.1",
"dvc.0", "dvc.1",
"clk_a", "clk_b", "clk_c", "clk_i";
+ power-domains = <&cpg_clocks>;
status = "disabled";
diff --git a/arch/arm/boot/dts/r8a7791.dtsi b/arch/arm/boot/dts/r8a7791.dtsi
index 831525dd39a6..1666c8a6b143 100644
--- a/arch/arm/boot/dts/r8a7791.dtsi
+++ b/arch/arm/boot/dts/r8a7791.dtsi
@@ -1677,6 +1677,7 @@
"mix.0", "mix.1",
"dvc.0", "dvc.1",
"clk_a", "clk_b", "clk_c", "clk_i";
+ power-domains = <&cpg_clocks>;
status = "disabled";
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index 2bebaa286f9a..391230c3dc93 100644
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
@@ -107,7 +107,7 @@
720000 1200000
528000 1100000
312000 1000000
- 144000 900000
+ 144000 1000000
>;
#cooling-cells = <2>;
cooling-min-level = <0>;
diff --git a/arch/arm/mach-exynos/mcpm-exynos.c b/arch/arm/mach-exynos/mcpm-exynos.c
index 9bdf54795f05..56978199c479 100644
--- a/arch/arm/mach-exynos/mcpm-exynos.c
+++ b/arch/arm/mach-exynos/mcpm-exynos.c
@@ -20,6 +20,7 @@
#include <asm/cputype.h>
#include <asm/cp15.h>
#include <asm/mcpm.h>
+#include <asm/smp_plat.h>
#include "regs-pmu.h"
#include "common.h"
@@ -70,7 +71,31 @@ static int exynos_cpu_powerup(unsigned int cpu, unsigned int cluster)
cluster >= EXYNOS5420_NR_CLUSTERS)
return -EINVAL;
- exynos_cpu_power_up(cpunr);
+ if (!exynos_cpu_power_state(cpunr)) {
+ exynos_cpu_power_up(cpunr);
+
+ /*
+ * This assumes the cluster number of the big cores(Cortex A15)
+ * is 0 and the Little cores(Cortex A7) is 1.
+ * When the system was booted from the Little core,
+ * they should be reset during power up cpu.
+ */
+ if (cluster &&
+ cluster == MPIDR_AFFINITY_LEVEL(cpu_logical_map(0), 1)) {
+ /*
+ * Before we reset the Little cores, we should wait
+ * the SPARE2 register is set to 1 because the init
+ * codes of the iROM will set the register after
+ * initialization.
+ */
+ while (!pmu_raw_readl(S5P_PMU_SPARE2))
+ udelay(10);
+
+ pmu_raw_writel(EXYNOS5420_KFC_CORE_RESET(cpu),
+ EXYNOS_SWRESET);
+ }
+ }
+
return 0;
}
diff --git a/arch/arm/mach-exynos/regs-pmu.h b/arch/arm/mach-exynos/regs-pmu.h
index b7614333d296..fba9068ed260 100644
--- a/arch/arm/mach-exynos/regs-pmu.h
+++ b/arch/arm/mach-exynos/regs-pmu.h
@@ -513,6 +513,12 @@ static inline unsigned int exynos_pmu_cpunr(unsigned int mpidr)
#define SPREAD_ENABLE 0xF
#define SPREAD_USE_STANDWFI 0xF
+#define EXYNOS5420_KFC_CORE_RESET0 BIT(8)
+#define EXYNOS5420_KFC_ETM_RESET0 BIT(20)
+
+#define EXYNOS5420_KFC_CORE_RESET(_nr) \
+ ((EXYNOS5420_KFC_CORE_RESET0 | EXYNOS5420_KFC_ETM_RESET0) << (_nr))
+
#define EXYNOS5420_BB_CON1 0x0784
#define EXYNOS5420_BB_SEL_EN BIT(31)
#define EXYNOS5420_BB_PMOS_EN BIT(7)
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index f9914d7c1bb0..d10b5d483022 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -42,7 +42,7 @@ endif
CHECKFLAGS += -D__aarch64__
ifeq ($(CONFIG_ARM64_ERRATUM_843419), y)
-CFLAGS_MODULE += -mcmodel=large
+KBUILD_CFLAGS_MODULE += -mcmodel=large
endif
# Default value
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index 3bc498c250dc..41e58fe3c041 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -44,7 +44,7 @@
#define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2)
#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5)
-#define __NR_compat_syscalls 388
+#define __NR_compat_syscalls 390
#endif
#define __ARCH_WANT_SYS_CLONE
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index cef934a90f17..5b925b761a2a 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -797,3 +797,12 @@ __SYSCALL(__NR_memfd_create, sys_memfd_create)
__SYSCALL(__NR_bpf, sys_bpf)
#define __NR_execveat 387
__SYSCALL(__NR_execveat, compat_sys_execveat)
+#define __NR_userfaultfd 388
+__SYSCALL(__NR_userfaultfd, sys_userfaultfd)
+#define __NR_membarrier 389
+__SYSCALL(__NR_membarrier, sys_membarrier)
+
+/*
+ * Please add new compat syscalls above this comment and update
+ * __NR_compat_syscalls in asm/unistd.h.
+ */
diff --git a/arch/arm64/include/uapi/asm/signal.h b/arch/arm64/include/uapi/asm/signal.h
index 8d1e7236431b..991bf5db2ca1 100644
--- a/arch/arm64/include/uapi/asm/signal.h
+++ b/arch/arm64/include/uapi/asm/signal.h
@@ -19,6 +19,9 @@
/* Required for AArch32 compatibility. */
#define SA_RESTORER 0x04000000
+#define MINSIGSTKSZ 5120
+#define SIGSTKSZ 16384
+
#include <asm-generic/signal.h>
#endif
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index cebf78661a55..253021ef2769 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -201,7 +201,7 @@ void unregister_step_hook(struct step_hook *hook)
}
/*
- * Call registered single step handers
+ * Call registered single step handlers
* There is no Syndrome info to check for determining the handler.
* So we call all the registered handlers, until the right handler is
* found which returns zero.
@@ -271,20 +271,21 @@ static int single_step_handler(unsigned long addr, unsigned int esr,
* Use reader/writer locks instead of plain spinlock.
*/
static LIST_HEAD(break_hook);
-static DEFINE_RWLOCK(break_hook_lock);
+static DEFINE_SPINLOCK(break_hook_lock);
void register_break_hook(struct break_hook *hook)
{
- write_lock(&break_hook_lock);
- list_add(&hook->node, &break_hook);
- write_unlock(&break_hook_lock);
+ spin_lock(&break_hook_lock);
+ list_add_rcu(&hook->node, &break_hook);
+ spin_unlock(&break_hook_lock);
}
void unregister_break_hook(struct break_hook *hook)
{
- write_lock(&break_hook_lock);
- list_del(&hook->node);
- write_unlock(&break_hook_lock);
+ spin_lock(&break_hook_lock);
+ list_del_rcu(&hook->node);
+ spin_unlock(&break_hook_lock);
+ synchronize_rcu();
}
static int call_break_hook(struct pt_regs *regs, unsigned int esr)
@@ -292,11 +293,11 @@ static int call_break_hook(struct pt_regs *regs, unsigned int esr)
struct break_hook *hook;
int (*fn)(struct pt_regs *regs, unsigned int esr) = NULL;
- read_lock(&break_hook_lock);
- list_for_each_entry(hook, &break_hook, node)
+ rcu_read_lock();
+ list_for_each_entry_rcu(hook, &break_hook, node)
if ((esr & hook->esr_mask) == hook->esr_val)
fn = hook->fn;
- read_unlock(&break_hook_lock);
+ rcu_read_unlock();
return fn ? fn(regs, esr) : DBG_HOOK_ERROR;
}
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index f341866aa810..c08b9ad6f429 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -85,7 +85,7 @@ bool aarch64_insn_is_branch_imm(u32 insn)
aarch64_insn_is_bcond(insn));
}
-static DEFINE_SPINLOCK(patch_lock);
+static DEFINE_RAW_SPINLOCK(patch_lock);
static void __kprobes *patch_map(void *addr, int fixmap)
{
@@ -131,13 +131,13 @@ static int __kprobes __aarch64_insn_write(void *addr, u32 insn)
unsigned long flags = 0;
int ret;
- spin_lock_irqsave(&patch_lock, flags);
+ raw_spin_lock_irqsave(&patch_lock, flags);
waddr = patch_map(addr, FIX_TEXT_POKE0);
ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE);
patch_unmap(FIX_TEXT_POKE0);
- spin_unlock_irqrestore(&patch_lock, flags);
+ raw_spin_unlock_irqrestore(&patch_lock, flags);
return ret;
}
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 6bab21f84a9f..232247945b1c 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -364,6 +364,8 @@ static void __init relocate_initrd(void)
to_free = ram_end - orig_start;
size = orig_end - orig_start;
+ if (!size)
+ return;
/* initrd needs to be relocated completely inside linear mapping */
new_start = memblock_find_in_range(0, PFN_PHYS(max_pfn),
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index aba9ead1384c..9fadf6d7039b 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -287,6 +287,7 @@ retry:
* starvation.
*/
mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
+ mm_flags |= FAULT_FLAG_TRIED;
goto retry;
}
}
diff --git a/arch/h8300/include/asm/Kbuild b/arch/h8300/include/asm/Kbuild
index 70e6ae1e7006..373cb23301e3 100644
--- a/arch/h8300/include/asm/Kbuild
+++ b/arch/h8300/include/asm/Kbuild
@@ -73,4 +73,5 @@ generic-y += uaccess.h
generic-y += ucontext.h
generic-y += unaligned.h
generic-y += vga.h
+generic-y += word-at-a-time.h
generic-y += xor.h
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index 9e777cd42b67..d10fd80dbb7e 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -256,6 +256,7 @@ static inline void __iomem * __ioremap_mode(phys_addr_t offset, unsigned long si
*/
#define ioremap_nocache(offset, size) \
__ioremap_mode((offset), (size), _CACHE_UNCACHED)
+#define ioremap_uc ioremap_nocache
/*
* ioremap_cachable - map bus memory into CPU space
diff --git a/arch/mips/include/uapi/asm/swab.h b/arch/mips/include/uapi/asm/swab.h
index c4ddc4f0d2dc..23cd9b118c9e 100644
--- a/arch/mips/include/uapi/asm/swab.h
+++ b/arch/mips/include/uapi/asm/swab.h
@@ -13,16 +13,15 @@
#define __SWAB_64_THRU_32__
-#if (defined(__mips_isa_rev) && (__mips_isa_rev >= 2)) || \
- defined(_MIPS_ARCH_LOONGSON3A)
+#if !defined(__mips16) && \
+ ((defined(__mips_isa_rev) && (__mips_isa_rev >= 2)) || \
+ defined(_MIPS_ARCH_LOONGSON3A))
-static inline __attribute__((nomips16)) __attribute_const__
- __u16 __arch_swab16(__u16 x)
+static inline __attribute_const__ __u16 __arch_swab16(__u16 x)
{
__asm__(
" .set push \n"
" .set arch=mips32r2 \n"
- " .set nomips16 \n"
" wsbh %0, %1 \n"
" .set pop \n"
: "=r" (x)
@@ -32,13 +31,11 @@ static inline __attribute__((nomips16)) __attribute_const__
}
#define __arch_swab16 __arch_swab16
-static inline __attribute__((nomips16)) __attribute_const__
- __u32 __arch_swab32(__u32 x)
+static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
{
__asm__(
" .set push \n"
" .set arch=mips32r2 \n"
- " .set nomips16 \n"
" wsbh %0, %1 \n"
" rotr %0, %0, 16 \n"
" .set pop \n"
@@ -54,13 +51,11 @@ static inline __attribute__((nomips16)) __attribute_const__
* 64-bit kernel on r2 CPUs.
*/
#ifdef __mips64
-static inline __attribute__((nomips16)) __attribute_const__
- __u64 __arch_swab64(__u64 x)
+static inline __attribute_const__ __u64 __arch_swab64(__u64 x)
{
__asm__(
" .set push \n"
" .set arch=mips64r2 \n"
- " .set nomips16 \n"
" dsbh %0, %1 \n"
" dshd %0, %0 \n"
" .set pop \n"
@@ -71,5 +66,5 @@ static inline __attribute__((nomips16)) __attribute_const__
}
#define __arch_swab64 __arch_swab64
#endif /* __mips64 */
-#endif /* MIPS R2 or newer or Loongson 3A */
+#endif /* (not __mips16) and (MIPS R2 or newer or Loongson 3A) */
#endif /* _ASM_SWAB_H */
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index 6bc0ee4b1070..2c041b535a64 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -111,7 +111,7 @@ CONFIG_SCSI_QLA_FC=m
CONFIG_SCSI_QLA_ISCSI=m
CONFIG_SCSI_LPFC=m
CONFIG_SCSI_VIRTIO=m
-CONFIG_SCSI_DH=m
+CONFIG_SCSI_DH=y
CONFIG_SCSI_DH_RDAC=m
CONFIG_SCSI_DH_ALUA=m
CONFIG_ATA=y
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index 7991f37e5fe2..36871a4bfa54 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -114,7 +114,7 @@ CONFIG_SCSI_QLA_FC=m
CONFIG_SCSI_QLA_ISCSI=m
CONFIG_SCSI_LPFC=m
CONFIG_SCSI_VIRTIO=m
-CONFIG_SCSI_DH=m
+CONFIG_SCSI_DH=y
CONFIG_SCSI_DH_RDAC=m
CONFIG_SCSI_DH_ALUA=m
CONFIG_ATA=y
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
index ac1662956e0c..ab9f4e0ed4cf 100644
--- a/arch/powerpc/include/asm/Kbuild
+++ b/arch/powerpc/include/asm/Kbuild
@@ -7,4 +7,3 @@ generic-y += mcs_spinlock.h
generic-y += preempt.h
generic-y += rwsem.h
generic-y += vtime.h
-generic-y += word-at-a-time.h
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index cab6753f1be5..3f191f573d4f 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -61,8 +61,13 @@ struct machdep_calls {
unsigned long addr,
unsigned char *hpte_slot_array,
int psize, int ssize, int local);
- /* special for kexec, to be called in real mode, linear mapping is
- * destroyed as well */
+ /*
+ * Special for kexec.
+ * To be called in real mode with interrupts disabled. No locks are
+ * taken as such, concurrent access on pre POWER5 hardware could result
+ * in a deadlock.
+ * The linear mapping is destroyed as well.
+ */
void (*hpte_clear_all)(void);
void __iomem * (*ioremap)(phys_addr_t addr, unsigned long size,
diff --git a/arch/powerpc/include/asm/word-at-a-time.h b/arch/powerpc/include/asm/word-at-a-time.h
index 5b3a903adae6..e4396a7d0f7c 100644
--- a/arch/powerpc/include/asm/word-at-a-time.h
+++ b/arch/powerpc/include/asm/word-at-a-time.h
@@ -40,6 +40,11 @@ static inline bool has_zero(unsigned long val, unsigned long *data, const struct
return (val + c->high_bits) & ~rhs;
}
+static inline unsigned long zero_bytemask(unsigned long mask)
+{
+ return ~1ul << __fls(mask);
+}
+
#else
#ifdef CONFIG_64BIT
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 13befa35d8a8..c8822af10a58 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -582,13 +582,21 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
* be when they isi), and we are the only one left. We rely on our kernel
* mapping being 0xC0's and the hardware ignoring those two real bits.
*
+ * This must be called with interrupts disabled.
+ *
+ * Taking the native_tlbie_lock is unsafe here due to the possibility of
+ * lockdep being on. On pre POWER5 hardware, not taking the lock could
+ * cause deadlock. POWER5 and newer not taking the lock is fine. This only
+ * gets called during boot before secondary CPUs have come up and during
+ * crashdump and all bets are off anyway.
+ *
* TODO: add batching support when enabled. remember, no dynamic memory here,
* athough there is the control page available...
*/
static void native_hpte_clear(void)
{
unsigned long vpn = 0;
- unsigned long slot, slots, flags;
+ unsigned long slot, slots;
struct hash_pte *hptep = htab_address;
unsigned long hpte_v;
unsigned long pteg_count;
@@ -596,13 +604,6 @@ static void native_hpte_clear(void)
pteg_count = htab_hash_mask + 1;
- local_irq_save(flags);
-
- /* we take the tlbie lock and hold it. Some hardware will
- * deadlock if we try to tlbie from two processors at once.
- */
- raw_spin_lock(&native_tlbie_lock);
-
slots = pteg_count * HPTES_PER_GROUP;
for (slot = 0; slot < slots; slot++, hptep++) {
@@ -614,8 +615,8 @@ static void native_hpte_clear(void)
hpte_v = be64_to_cpu(hptep->v);
/*
- * Call __tlbie() here rather than tlbie() since we
- * already hold the native_tlbie_lock.
+ * Call __tlbie() here rather than tlbie() since we can't take the
+ * native_tlbie_lock.
*/
if (hpte_v & HPTE_V_VALID) {
hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
@@ -625,8 +626,6 @@ static void native_hpte_clear(void)
}
asm volatile("eieio; tlbsync; ptesync":::"memory");
- raw_spin_unlock(&native_tlbie_lock);
- local_irq_restore(flags);
}
/*
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 230f3a7cdea4..4296d55e88f3 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -487,9 +487,12 @@ int opal_machine_check(struct pt_regs *regs)
* PRD component would have already got notified about this
* error through other channels.
*
- * In any case, let us just fall through. We anyway heading
- * down to panic path.
+ * If hardware marked this as an unrecoverable MCE, we are
+ * going to panic anyway. Even if it didn't, it's not safe to
+ * continue at this point, so we should explicitly panic.
*/
+
+ panic("PowerNV Unrecovered Machine Check");
return 0;
}
diff --git a/arch/powerpc/platforms/ps3/os-area.c b/arch/powerpc/platforms/ps3/os-area.c
index 09787139834d..3db53e8aff92 100644
--- a/arch/powerpc/platforms/ps3/os-area.c
+++ b/arch/powerpc/platforms/ps3/os-area.c
@@ -194,11 +194,6 @@ static const struct os_area_db_id os_area_db_id_rtc_diff = {
.key = OS_AREA_DB_KEY_RTC_DIFF
};
-static const struct os_area_db_id os_area_db_id_video_mode = {
- .owner = OS_AREA_DB_OWNER_LINUX,
- .key = OS_AREA_DB_KEY_VIDEO_MODE
-};
-
#define SECONDS_FROM_1970_TO_2000 946684800LL
/**
diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
index d4788111c161..fac6ac9790fa 100644
--- a/arch/s390/boot/compressed/Makefile
+++ b/arch/s390/boot/compressed/Makefile
@@ -10,7 +10,7 @@ targets += misc.o piggy.o sizes.h head.o
KBUILD_CFLAGS := -m64 -D__KERNEL__ $(LINUX_INCLUDE) -O2
KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
-KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks
+KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks -msoft-float
KBUILD_CFLAGS += $(call cc-option,-mpacked-stack)
KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig
index 0c98f1508542..ed7da281df66 100644
--- a/arch/s390/configs/default_defconfig
+++ b/arch/s390/configs/default_defconfig
@@ -381,7 +381,7 @@ CONFIG_ISCSI_TCP=m
CONFIG_SCSI_DEBUG=m
CONFIG_ZFCP=y
CONFIG_SCSI_VIRTIO=m
-CONFIG_SCSI_DH=m
+CONFIG_SCSI_DH=y
CONFIG_SCSI_DH_RDAC=m
CONFIG_SCSI_DH_HP_SW=m
CONFIG_SCSI_DH_EMC=m
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig
index 82083e1fbdc4..9858b14cde1e 100644
--- a/arch/s390/configs/gcov_defconfig
+++ b/arch/s390/configs/gcov_defconfig
@@ -377,7 +377,7 @@ CONFIG_ISCSI_TCP=m
CONFIG_SCSI_DEBUG=m
CONFIG_ZFCP=y
CONFIG_SCSI_VIRTIO=m
-CONFIG_SCSI_DH=m
+CONFIG_SCSI_DH=y
CONFIG_SCSI_DH_RDAC=m
CONFIG_SCSI_DH_HP_SW=m
CONFIG_SCSI_DH_EMC=m
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index c05c9e0821e3..7f14f80717d4 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -377,7 +377,7 @@ CONFIG_ISCSI_TCP=m
CONFIG_SCSI_DEBUG=m
CONFIG_ZFCP=y
CONFIG_SCSI_VIRTIO=m
-CONFIG_SCSI_DH=m
+CONFIG_SCSI_DH=y
CONFIG_SCSI_DH_RDAC=m
CONFIG_SCSI_DH_HP_SW=m
CONFIG_SCSI_DH_EMC=m
diff --git a/arch/s390/include/asm/numa.h b/arch/s390/include/asm/numa.h
index 2a0efc63b9e5..dc19ee0c92aa 100644
--- a/arch/s390/include/asm/numa.h
+++ b/arch/s390/include/asm/numa.h
@@ -19,7 +19,7 @@ int numa_pfn_to_nid(unsigned long pfn);
int __node_distance(int a, int b);
void numa_update_cpu_topology(void);
-extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
+extern cpumask_t node_to_cpumask_map[MAX_NUMNODES];
extern int numa_debug_enabled;
#else
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index 27ebde643933..94fc55fc72ce 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -68,7 +68,7 @@ static inline int cpu_to_node(int cpu)
#define cpumask_of_node cpumask_of_node
static inline const struct cpumask *cpumask_of_node(int node)
{
- return node_to_cpumask_map[node];
+ return &node_to_cpumask_map[node];
}
/*
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 48c9af7a7683..3aeeb1b562c0 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -176,6 +176,7 @@ int main(void)
DEFINE(__LC_PASTE, offsetof(struct _lowcore, paste));
DEFINE(__LC_FP_CREG_SAVE_AREA, offsetof(struct _lowcore, fpt_creg_save_area));
DEFINE(__LC_LAST_BREAK, offsetof(struct _lowcore, breaking_event_addr));
+ DEFINE(__LC_PERCPU_OFFSET, offsetof(struct _lowcore, percpu_offset));
DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data));
DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap));
DEFINE(__LC_PGM_TDB, offsetof(struct _lowcore, pgm_tdb));
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 09b039d7983d..582fe44ab07c 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -733,6 +733,14 @@ ENTRY(psw_idle)
stg %r3,__SF_EMPTY(%r15)
larl %r1,.Lpsw_idle_lpsw+4
stg %r1,__SF_EMPTY+8(%r15)
+#ifdef CONFIG_SMP
+ larl %r1,smp_cpu_mtid
+ llgf %r1,0(%r1)
+ ltgr %r1,%r1
+ jz .Lpsw_idle_stcctm
+ .insn rsy,0xeb0000000017,%r1,5,__SF_EMPTY+16(%r15)
+.Lpsw_idle_stcctm:
+#endif
STCK __CLOCK_IDLE_ENTER(%r2)
stpt __TIMER_IDLE_ENTER(%r2)
.Lpsw_idle_lpsw:
@@ -1159,7 +1167,27 @@ cleanup_critical:
jhe 1f
mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
-1: # account system time going idle
+1: # calculate idle cycles
+#ifdef CONFIG_SMP
+ clg %r9,BASED(.Lcleanup_idle_insn)
+ jl 3f
+ larl %r1,smp_cpu_mtid
+ llgf %r1,0(%r1)
+ ltgr %r1,%r1
+ jz 3f
+ .insn rsy,0xeb0000000017,%r1,5,__SF_EMPTY+80(%r15)
+ larl %r3,mt_cycles
+ ag %r3,__LC_PERCPU_OFFSET
+ la %r4,__SF_EMPTY+16(%r15)
+2: lg %r0,0(%r3)
+ slg %r0,0(%r4)
+ alg %r0,64(%r4)
+ stg %r0,0(%r3)
+ la %r3,8(%r3)
+ la %r4,8(%r4)
+ brct %r1,2b
+#endif
+3: # account system time going idle
lg %r9,__LC_STEAL_TIMER
alg %r9,__CLOCK_IDLE_ENTER(%r2)
slg %r9,__LC_LAST_UPDATE_CLOCK
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index c8653435c70d..dafc44f519c3 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -25,7 +25,7 @@ static DEFINE_SPINLOCK(virt_timer_lock);
static atomic64_t virt_timer_current;
static atomic64_t virt_timer_elapsed;
-static DEFINE_PER_CPU(u64, mt_cycles[32]);
+DEFINE_PER_CPU(u64, mt_cycles[8]);
static DEFINE_PER_CPU(u64, mt_scaling_mult) = { 1 };
static DEFINE_PER_CPU(u64, mt_scaling_div) = { 1 };
static DEFINE_PER_CPU(u64, mt_scaling_jiffies);
@@ -60,6 +60,34 @@ static inline int virt_timer_forward(u64 elapsed)
return elapsed >= atomic64_read(&virt_timer_current);
}
+static void update_mt_scaling(void)
+{
+ u64 cycles_new[8], *cycles_old;
+ u64 delta, fac, mult, div;
+ int i;
+
+ stcctm5(smp_cpu_mtid + 1, cycles_new);
+ cycles_old = this_cpu_ptr(mt_cycles);
+ fac = 1;
+ mult = div = 0;
+ for (i = 0; i <= smp_cpu_mtid; i++) {
+ delta = cycles_new[i] - cycles_old[i];
+ div += delta;
+ mult *= i + 1;
+ mult += delta * fac;
+ fac *= i + 1;
+ }
+ div *= fac;
+ if (div > 0) {
+ /* Update scaling factor */
+ __this_cpu_write(mt_scaling_mult, mult);
+ __this_cpu_write(mt_scaling_div, div);
+ memcpy(cycles_old, cycles_new,
+ sizeof(u64) * (smp_cpu_mtid + 1));
+ }
+ __this_cpu_write(mt_scaling_jiffies, jiffies_64);
+}
+
/*
* Update process times based on virtual cpu times stored by entry.S
* to the lowcore fields user_timer, system_timer & steal_clock.
@@ -69,7 +97,6 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
struct thread_info *ti = task_thread_info(tsk);
u64 timer, clock, user, system, steal;
u64 user_scaled, system_scaled;
- int i;
timer = S390_lowcore.last_update_timer;
clock = S390_lowcore.last_update_clock;
@@ -85,34 +112,10 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock;
- /* Do MT utilization calculation */
+ /* Update MT utilization calculation */
if (smp_cpu_mtid &&
- time_after64(jiffies_64, __this_cpu_read(mt_scaling_jiffies))) {
- u64 cycles_new[32], *cycles_old;
- u64 delta, fac, mult, div;
-
- cycles_old = this_cpu_ptr(mt_cycles);
- if (stcctm5(smp_cpu_mtid + 1, cycles_new) < 2) {
- fac = 1;
- mult = div = 0;
- for (i = 0; i <= smp_cpu_mtid; i++) {
- delta = cycles_new[i] - cycles_old[i];
- div += delta;
- mult *= i + 1;
- mult += delta * fac;
- fac *= i + 1;
- }
- div *= fac;
- if (div > 0) {
- /* Update scaling factor */
- __this_cpu_write(mt_scaling_mult, mult);
- __this_cpu_write(mt_scaling_div, div);
- memcpy(cycles_old, cycles_new,
- sizeof(u64) * (smp_cpu_mtid + 1));
- }
- }
- __this_cpu_write(mt_scaling_jiffies, jiffies_64);
- }
+ time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies)))
+ update_mt_scaling();
user = S390_lowcore.user_timer - ti->user_timer;
S390_lowcore.steal_timer -= user;
@@ -181,6 +184,11 @@ void vtime_account_irq_enter(struct task_struct *tsk)
S390_lowcore.last_update_timer = get_vtimer();
S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
+ /* Update MT utilization calculation */
+ if (smp_cpu_mtid &&
+ time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies)))
+ update_mt_scaling();
+
system = S390_lowcore.system_timer - ti->system_timer;
S390_lowcore.steal_timer -= system;
ti->system_timer = S390_lowcore.system_timer;
diff --git a/arch/s390/numa/mode_emu.c b/arch/s390/numa/mode_emu.c
index 7de4e2f780d7..30b2698a28e2 100644
--- a/arch/s390/numa/mode_emu.c
+++ b/arch/s390/numa/mode_emu.c
@@ -368,7 +368,7 @@ static void topology_add_core(struct toptree *core)
cpumask_copy(&top->thread_mask, &core->mask);
cpumask_copy(&top->core_mask, &core_mc(core)->mask);
cpumask_copy(&top->book_mask, &core_book(core)->mask);
- cpumask_set_cpu(cpu, node_to_cpumask_map[core_node(core)->id]);
+ cpumask_set_cpu(cpu, &node_to_cpumask_map[core_node(core)->id]);
top->node_id = core_node(core)->id;
}
}
@@ -383,7 +383,7 @@ static void toptree_to_topology(struct toptree *numa)
/* Clear all node masks */
for (i = 0; i < MAX_NUMNODES; i++)
- cpumask_clear(node_to_cpumask_map[i]);
+ cpumask_clear(&node_to_cpumask_map[i]);
/* Rebuild all masks */
toptree_for_each(core, numa, CORE)
diff --git a/arch/s390/numa/numa.c b/arch/s390/numa/numa.c
index 09b1d2355bd9..43f32ce60aa3 100644
--- a/arch/s390/numa/numa.c
+++ b/arch/s390/numa/numa.c
@@ -23,7 +23,7 @@
pg_data_t *node_data[MAX_NUMNODES];
EXPORT_SYMBOL(node_data);
-cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
+cpumask_t node_to_cpumask_map[MAX_NUMNODES];
EXPORT_SYMBOL(node_to_cpumask_map);
const struct numa_mode numa_mode_plain = {
@@ -144,7 +144,7 @@ void __init numa_setup(void)
static int __init numa_init_early(void)
{
/* Attach all possible CPUs to node 0 for now. */
- cpumask_copy(node_to_cpumask_map[0], cpu_possible_mask);
+ cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask);
return 0;
}
early_initcall(numa_init_early);
diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h
index fe20d14ae051..ceb5201a30ed 100644
--- a/arch/sh/include/asm/page.h
+++ b/arch/sh/include/asm/page.h
@@ -59,6 +59,7 @@ pages_do_alias(unsigned long addr1, unsigned long addr2)
#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
extern void copy_page(void *to, void *from);
+#define copy_user_page(to, from, vaddr, pg) __copy_user(to, from, PAGE_SIZE)
struct page;
struct vm_area_struct;
diff --git a/arch/sparc/crypto/aes_glue.c b/arch/sparc/crypto/aes_glue.c
index 2e48eb8813ff..c90930de76ba 100644
--- a/arch/sparc/crypto/aes_glue.c
+++ b/arch/sparc/crypto/aes_glue.c
@@ -433,6 +433,7 @@ static struct crypto_alg algs[] = { {
.blkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
.setkey = aes_set_key,
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,
@@ -452,6 +453,7 @@ static struct crypto_alg algs[] = { {
.blkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
.setkey = aes_set_key,
.encrypt = ctr_crypt,
.decrypt = ctr_crypt,
diff --git a/arch/sparc/crypto/camellia_glue.c b/arch/sparc/crypto/camellia_glue.c
index 6bf2479a12fb..561a84d93cf6 100644
--- a/arch/sparc/crypto/camellia_glue.c
+++ b/arch/sparc/crypto/camellia_glue.c
@@ -274,6 +274,7 @@ static struct crypto_alg algs[] = { {
.blkcipher = {
.min_keysize = CAMELLIA_MIN_KEY_SIZE,
.max_keysize = CAMELLIA_MAX_KEY_SIZE,
+ .ivsize = CAMELLIA_BLOCK_SIZE,
.setkey = camellia_set_key,
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,
diff --git a/arch/sparc/crypto/des_glue.c b/arch/sparc/crypto/des_glue.c
index dd6a34fa6e19..61af794aa2d3 100644
--- a/arch/sparc/crypto/des_glue.c
+++ b/arch/sparc/crypto/des_glue.c
@@ -429,6 +429,7 @@ static struct crypto_alg algs[] = { {
.blkcipher = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
.setkey = des_set_key,
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,
@@ -485,6 +486,7 @@ static struct crypto_alg algs[] = { {
.blkcipher = {
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
.setkey = des3_ede_set_key,
.encrypt = cbc3_encrypt,
.decrypt = cbc3_decrypt,
diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild
index 0b6cacaad933..ba35c41c71ff 100644
--- a/arch/tile/include/asm/Kbuild
+++ b/arch/tile/include/asm/Kbuild
@@ -40,5 +40,4 @@ generic-y += termbits.h
generic-y += termios.h
generic-y += trace_clock.h
generic-y += types.h
-generic-y += word-at-a-time.h
generic-y += xor.h
diff --git a/arch/tile/include/asm/word-at-a-time.h b/arch/tile/include/asm/word-at-a-time.h
index 9e5ce0d7b292..b66a693c2c34 100644
--- a/arch/tile/include/asm/word-at-a-time.h
+++ b/arch/tile/include/asm/word-at-a-time.h
@@ -6,7 +6,7 @@
struct word_at_a_time { /* unused */ };
#define WORD_AT_A_TIME_CONSTANTS {}
-/* Generate 0x01 byte values for non-zero bytes using a SIMD instruction. */
+/* Generate 0x01 byte values for zero bytes using a SIMD instruction. */
static inline unsigned long has_zero(unsigned long val, unsigned long *data,
const struct word_at_a_time *c)
{
@@ -33,4 +33,10 @@ static inline long find_zero(unsigned long mask)
#endif
}
+#ifdef __BIG_ENDIAN
+#define zero_bytemask(mask) (~1ul << (63 - __builtin_clzl(mask)))
+#else
+#define zero_bytemask(mask) ((2ul << __builtin_ctzl(mask)) - 1)
+#endif
+
#endif /* _ASM_WORD_AT_A_TIME_H */
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 328c8352480c..96d058a87100 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1308,6 +1308,7 @@ config HIGHMEM
config X86_PAE
bool "PAE (Physical Address Extension) Support"
depends on X86_32 && !HIGHMEM4G
+ select SWIOTLB
---help---
PAE is required for NX support, and furthermore enables
larger swapspace support for non-overcommit purposes. It
diff --git a/arch/x86/crypto/camellia_aesni_avx_glue.c b/arch/x86/crypto/camellia_aesni_avx_glue.c
index 80a0e4389c9a..bacaa13acac5 100644
--- a/arch/x86/crypto/camellia_aesni_avx_glue.c
+++ b/arch/x86/crypto/camellia_aesni_avx_glue.c
@@ -554,6 +554,11 @@ static int __init camellia_aesni_init(void)
{
const char *feature_name;
+ if (!cpu_has_avx || !cpu_has_aes || !cpu_has_osxsave) {
+ pr_info("AVX or AES-NI instructions are not detected.\n");
+ return -ENODEV;
+ }
+
if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, &feature_name)) {
pr_info("CPU feature '%s' is not supported.\n", feature_name);
return -ENODEV;
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 2beee0382088..3a36ee704c30 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1226,10 +1226,8 @@ void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
int kvm_is_in_guest(void);
-int __x86_set_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem);
-int x86_set_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem);
+int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
+int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu);
bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index 83aea8055119..4c20dd333412 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -336,10 +336,10 @@ HYPERVISOR_update_descriptor(u64 ma, u64 desc)
return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32);
}
-static inline int
+static inline long
HYPERVISOR_memory_op(unsigned int cmd, void *arg)
{
- return _hypercall2(int, memory_op, cmd, arg);
+ return _hypercall2(long, memory_op, cmd, arg);
}
static inline int
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index b372a7557c16..9da95b9daf8d 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2418,7 +2418,7 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
u64 val, cr0, cr4;
u32 base3;
u16 selector;
- int i;
+ int i, r;
for (i = 0; i < 16; i++)
*reg_write(ctxt, i) = GET_SMSTATE(u64, smbase, 0x7ff8 - i * 8);
@@ -2460,13 +2460,17 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
dt.address = GET_SMSTATE(u64, smbase, 0x7e68);
ctxt->ops->set_gdt(ctxt, &dt);
+ r = rsm_enter_protected_mode(ctxt, cr0, cr4);
+ if (r != X86EMUL_CONTINUE)
+ return r;
+
for (i = 0; i < 6; i++) {
- int r = rsm_load_seg_64(ctxt, smbase, i);
+ r = rsm_load_seg_64(ctxt, smbase, i);
if (r != X86EMUL_CONTINUE)
return r;
}
- return rsm_enter_protected_mode(ctxt, cr0, cr4);
+ return X86EMUL_CONTINUE;
}
static int em_rsm(struct x86_emulate_ctxt *ctxt)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 06ef4908ba61..6a8bc64566ab 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -4105,17 +4105,13 @@ static void seg_setup(int seg)
static int alloc_apic_access_page(struct kvm *kvm)
{
struct page *page;
- struct kvm_userspace_memory_region kvm_userspace_mem;
int r = 0;
mutex_lock(&kvm->slots_lock);
if (kvm->arch.apic_access_page_done)
goto out;
- kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
- kvm_userspace_mem.flags = 0;
- kvm_userspace_mem.guest_phys_addr = APIC_DEFAULT_PHYS_BASE;
- kvm_userspace_mem.memory_size = PAGE_SIZE;
- r = __x86_set_memory_region(kvm, &kvm_userspace_mem);
+ r = __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
+ APIC_DEFAULT_PHYS_BASE, PAGE_SIZE);
if (r)
goto out;
@@ -4140,17 +4136,12 @@ static int alloc_identity_pagetable(struct kvm *kvm)
{
/* Called with kvm->slots_lock held. */
- struct kvm_userspace_memory_region kvm_userspace_mem;
int r = 0;
BUG_ON(kvm->arch.ept_identity_pagetable_done);
- kvm_userspace_mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
- kvm_userspace_mem.flags = 0;
- kvm_userspace_mem.guest_phys_addr =
- kvm->arch.ept_identity_map_addr;
- kvm_userspace_mem.memory_size = PAGE_SIZE;
- r = __x86_set_memory_region(kvm, &kvm_userspace_mem);
+ r = __x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT,
+ kvm->arch.ept_identity_map_addr, PAGE_SIZE);
return r;
}
@@ -4949,14 +4940,9 @@ static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
{
int ret;
- struct kvm_userspace_memory_region tss_mem = {
- .slot = TSS_PRIVATE_MEMSLOT,
- .guest_phys_addr = addr,
- .memory_size = PAGE_SIZE * 3,
- .flags = 0,
- };
- ret = x86_set_memory_region(kvm, &tss_mem);
+ ret = x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr,
+ PAGE_SIZE * 3);
if (ret)
return ret;
kvm->arch.tss_addr = addr;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 92511d4b7236..9a9a19830321 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6453,6 +6453,12 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
return 1;
}
+static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
+{
+ return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
+ !vcpu->arch.apf.halted);
+}
+
static int vcpu_run(struct kvm_vcpu *vcpu)
{
int r;
@@ -6461,8 +6467,7 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
for (;;) {
- if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
- !vcpu->arch.apf.halted)
+ if (kvm_vcpu_running(vcpu))
r = vcpu_enter_guest(vcpu);
else
r = vcpu_block(kvm, vcpu);
@@ -7474,34 +7479,66 @@ void kvm_arch_sync_events(struct kvm *kvm)
kvm_free_pit(kvm);
}
-int __x86_set_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem)
+int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
{
int i, r;
+ unsigned long hva;
+ struct kvm_memslots *slots = kvm_memslots(kvm);
+ struct kvm_memory_slot *slot, old;
/* Called with kvm->slots_lock held. */
- BUG_ON(mem->slot >= KVM_MEM_SLOTS_NUM);
+ if (WARN_ON(id >= KVM_MEM_SLOTS_NUM))
+ return -EINVAL;
+
+ slot = id_to_memslot(slots, id);
+ if (size) {
+ if (WARN_ON(slot->npages))
+ return -EEXIST;
+
+ /*
+ * MAP_SHARED to prevent internal slot pages from being moved
+ * by fork()/COW.
+ */
+ hva = vm_mmap(NULL, 0, size, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_ANONYMOUS, 0);
+ if (IS_ERR((void *)hva))
+ return PTR_ERR((void *)hva);
+ } else {
+ if (!slot->npages)
+ return 0;
+ hva = 0;
+ }
+
+ old = *slot;
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
- struct kvm_userspace_memory_region m = *mem;
+ struct kvm_userspace_memory_region m;
- m.slot |= i << 16;
+ m.slot = id | (i << 16);
+ m.flags = 0;
+ m.guest_phys_addr = gpa;
+ m.userspace_addr = hva;
+ m.memory_size = size;
r = __kvm_set_memory_region(kvm, &m);
if (r < 0)
return r;
}
+ if (!size) {
+ r = vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE);
+ WARN_ON(r < 0);
+ }
+
return 0;
}
EXPORT_SYMBOL_GPL(__x86_set_memory_region);
-int x86_set_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem)
+int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
{
int r;
mutex_lock(&kvm->slots_lock);
- r = __x86_set_memory_region(kvm, mem);
+ r = __x86_set_memory_region(kvm, id, gpa, size);
mutex_unlock(&kvm->slots_lock);
return r;
@@ -7516,16 +7553,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
* unless the the memory map has changed due to process exit
* or fd copying.
*/
- struct kvm_userspace_memory_region mem;
- memset(&mem, 0, sizeof(mem));
- mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
- x86_set_memory_region(kvm, &mem);
-
- mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
- x86_set_memory_region(kvm, &mem);
-
- mem.slot = TSS_PRIVATE_MEMSLOT;
- x86_set_memory_region(kvm, &mem);
+ x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, 0, 0);
+ x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT, 0, 0);
+ x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0);
}
kvm_iommu_unmap_guest(kvm);
kfree(kvm->arch.vpic);
@@ -7628,27 +7658,6 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
const struct kvm_userspace_memory_region *mem,
enum kvm_mr_change change)
{
- /*
- * Only private memory slots need to be mapped here since
- * KVM_SET_MEMORY_REGION ioctl is no longer supported.
- */
- if ((memslot->id >= KVM_USER_MEM_SLOTS) && (change == KVM_MR_CREATE)) {
- unsigned long userspace_addr;
-
- /*
- * MAP_SHARED to prevent internal slot pages from being moved
- * by fork()/COW.
- */
- userspace_addr = vm_mmap(NULL, 0, memslot->npages * PAGE_SIZE,
- PROT_READ | PROT_WRITE,
- MAP_SHARED | MAP_ANONYMOUS, 0);
-
- if (IS_ERR((void *)userspace_addr))
- return PTR_ERR((void *)userspace_addr);
-
- memslot->userspace_addr = userspace_addr;
- }
-
return 0;
}
@@ -7710,17 +7719,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
{
int nr_mmu_pages = 0;
- if (change == KVM_MR_DELETE && old->id >= KVM_USER_MEM_SLOTS) {
- int ret;
-
- ret = vm_munmap(old->userspace_addr,
- old->npages * PAGE_SIZE);
- if (ret < 0)
- printk(KERN_WARNING
- "kvm_vm_ioctl_set_memory_region: "
- "failed to munmap memory\n");
- }
-
if (!kvm->arch.n_requested_mmu_pages)
nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
@@ -7769,19 +7767,36 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
kvm_mmu_invalidate_zap_all_pages(kvm);
}
+static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
+{
+ if (!list_empty_careful(&vcpu->async_pf.done))
+ return true;
+
+ if (kvm_apic_has_events(vcpu))
+ return true;
+
+ if (vcpu->arch.pv.pv_unhalted)
+ return true;
+
+ if (atomic_read(&vcpu->arch.nmi_queued))
+ return true;
+
+ if (test_bit(KVM_REQ_SMI, &vcpu->requests))
+ return true;
+
+ if (kvm_arch_interrupt_allowed(vcpu) &&
+ kvm_cpu_has_interrupt(vcpu))
+ return true;
+
+ return false;
+}
+
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events)
kvm_x86_ops->check_nested_events(vcpu, false);
- return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
- !vcpu->arch.apf.halted)
- || !list_empty_careful(&vcpu->async_pf.done)
- || kvm_apic_has_events(vcpu)
- || vcpu->arch.pv.pv_unhalted
- || atomic_read(&vcpu->arch.nmi_queued) ||
- (kvm_arch_interrupt_allowed(vcpu) &&
- kvm_cpu_has_interrupt(vcpu));
+ return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
}
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 30d12afe52ed..993b7a71386d 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -33,6 +33,10 @@
#include <linux/memblock.h>
#include <linux/edd.h>
+#ifdef CONFIG_KEXEC_CORE
+#include <linux/kexec.h>
+#endif
+
#include <xen/xen.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
@@ -1077,6 +1081,7 @@ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
/* Fast syscall setup is all done in hypercalls, so
these are all ignored. Stub them out here to stop
Xen console noise. */
+ break;
default:
if (!pmu_msr_write(msr, low, high, &ret))
@@ -1807,6 +1812,21 @@ static struct notifier_block xen_hvm_cpu_notifier = {
.notifier_call = xen_hvm_cpu_notify,
};
+#ifdef CONFIG_KEXEC_CORE
+static void xen_hvm_shutdown(void)
+{
+ native_machine_shutdown();
+ if (kexec_in_progress)
+ xen_reboot(SHUTDOWN_soft_reset);
+}
+
+static void xen_hvm_crash_shutdown(struct pt_regs *regs)
+{
+ native_machine_crash_shutdown(regs);
+ xen_reboot(SHUTDOWN_soft_reset);
+}
+#endif
+
static void __init xen_hvm_guest_init(void)
{
if (xen_pv_domain())
@@ -1826,6 +1846,10 @@ static void __init xen_hvm_guest_init(void)
x86_init.irqs.intr_init = xen_init_IRQ;
xen_hvm_init_time_ops();
xen_hvm_init_mmu_ops();
+#ifdef CONFIG_KEXEC_CORE
+ machine_ops.shutdown = xen_hvm_shutdown;
+ machine_ops.crash_shutdown = xen_hvm_crash_shutdown;
+#endif
}
#endif
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index bfc08b13044b..660b3cfef234 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -112,6 +112,15 @@ static unsigned long *p2m_identity;
static pte_t *p2m_missing_pte;
static pte_t *p2m_identity_pte;
+/*
+ * Hint at last populated PFN.
+ *
+ * Used to set HYPERVISOR_shared_info->arch.max_pfn so the toolstack
+ * can avoid scanning the whole P2M (which may be sized to account for
+ * hotplugged memory).
+ */
+static unsigned long xen_p2m_last_pfn;
+
static inline unsigned p2m_top_index(unsigned long pfn)
{
BUG_ON(pfn >= MAX_P2M_PFN);
@@ -270,7 +279,7 @@ void xen_setup_mfn_list_list(void)
else
HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
virt_to_mfn(p2m_top_mfn);
- HYPERVISOR_shared_info->arch.max_pfn = xen_max_p2m_pfn;
+ HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn;
HYPERVISOR_shared_info->arch.p2m_generation = 0;
HYPERVISOR_shared_info->arch.p2m_vaddr = (unsigned long)xen_p2m_addr;
HYPERVISOR_shared_info->arch.p2m_cr3 =
@@ -406,6 +415,8 @@ void __init xen_vmalloc_p2m_tree(void)
static struct vm_struct vm;
unsigned long p2m_limit;
+ xen_p2m_last_pfn = xen_max_p2m_pfn;
+
p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE;
vm.flags = VM_ALLOC;
vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit),
@@ -608,6 +619,12 @@ static bool alloc_p2m(unsigned long pfn)
free_p2m_page(p2m);
}
+ /* Expanded the p2m? */
+ if (pfn > xen_p2m_last_pfn) {
+ xen_p2m_last_pfn = pfn;
+ HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn;
+ }
+
return true;
}
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index f5ef6746d47a..1c30e4ab1022 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -548,7 +548,7 @@ static unsigned long __init xen_get_max_pages(void)
{
unsigned long max_pages, limit;
domid_t domid = DOMID_SELF;
- int ret;
+ long ret;
limit = xen_get_pages_limit();
max_pages = limit;
@@ -798,7 +798,7 @@ char * __init xen_memory_setup(void)
xen_ignore_unusable();
/* Make sure the Xen-supplied memory map is well-ordered. */
- sanitize_e820_map(xen_e820_map, xen_e820_map_entries,
+ sanitize_e820_map(xen_e820_map, ARRAY_SIZE(xen_e820_map),
&xen_e820_map_entries);
max_pages = xen_get_max_pages();