author     David S. Miller <davem@davemloft.net>    2017-11-10 10:00:18 +0900
committer  David S. Miller <davem@davemloft.net>    2017-11-10 10:00:18 +0900
commit     4dc6758d7824a6d25717ccceefc488cafdb07210 (patch)
tree       992e5d5996910af35a5c12fe94da14d0bb167452 /arch
parent     19aeeb9f46cb4b9474ebeb50cb01b9a1adba73b8 (diff)
parent     3fefc31843cfe2b5f072efe11ed9ccaf6a7a5092 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Simple cases of overlapping changes in the packet scheduler. Much easier to
resolve this time, which probably means that I screwed it up somehow.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/Makefile                                     |   2
-rw-r--r--  arch/arm/boot/compressed/vmlinux.lds.S                |   9
-rw-r--r--  arch/arm/boot/dts/armada-375.dtsi                     |   4
-rw-r--r--  arch/arm/boot/dts/armada-38x.dtsi                     |   4
-rw-r--r--  arch/arm/boot/dts/armada-39x.dtsi                     |   4
-rw-r--r--  arch/arm/boot/dts/uniphier-ld4.dtsi                   |   9
-rw-r--r--  arch/arm/boot/dts/uniphier-pro4.dtsi                  |   6
-rw-r--r--  arch/arm/boot/dts/uniphier-sld8.dtsi                  |   9
-rw-r--r--  arch/arm/include/asm/Kbuild                           |   1
-rw-r--r--  arch/arm/include/asm/unaligned.h                      |  27
-rw-r--r--  arch/arm/kvm/emulate.c                                |   6
-rw-r--r--  arch/arm/kvm/hyp/Makefile                             |   2
-rw-r--r--  arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi      |   9
-rw-r--r--  arch/arm64/kvm/hyp/Makefile                           |   2
-rw-r--r--  arch/arm64/kvm/inject_fault.c                         |  16
-rw-r--r--  arch/ia64/include/asm/acpi.h                          |   2
-rw-r--r--  arch/mips/generic/board-ni169445.its.S                |   2
-rw-r--r--  arch/mips/generic/init.c                              |   2
-rw-r--r--  arch/mips/generic/kexec.c                             |   2
-rw-r--r--  arch/mips/include/asm/mips-cm.h                       |   4
-rw-r--r--  arch/mips/include/asm/stackframe.h                    |   8
-rw-r--r--  arch/mips/kernel/probes-common.h                      |   2
-rw-r--r--  arch/mips/kernel/smp-cmp.c                            |   6
-rw-r--r--  arch/mips/kernel/smp-cps.c                            |   2
-rw-r--r--  arch/mips/kernel/smp.c                                |  24
-rw-r--r--  arch/mips/mm/uasm-micromips.c                         |   2
-rw-r--r--  arch/mips/net/ebpf_jit.c                              |   2
-rw-r--r--  arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S      |  12
-rw-r--r--  arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S  |  12
-rw-r--r--  arch/x86/include/asm/acpi.h                           |   2
-rw-r--r--  arch/x86/kernel/cpu/mcheck/dev-mcelog.c               | 121
-rw-r--r--  arch/x86/kernel/kvmclock.c                            |   2
-rw-r--r--  arch/x86/kernel/module.c                              |  13
-rw-r--r--  arch/x86/kvm/lapic.c                                  |   5
-rw-r--r--  arch/x86/kvm/vmx.c                                    |   3
-rw-r--r--  arch/x86/mm/mem_encrypt.c                             |   2
-rw-r--r--  arch/x86/mm/tlb.c                                     |  17
37 files changed, 195 insertions(+), 162 deletions(-)
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 817e5cfef83a..36ae4454554c 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -44,10 +44,12 @@ endif
ifeq ($(CONFIG_CPU_BIG_ENDIAN),y)
KBUILD_CPPFLAGS += -mbig-endian
+CHECKFLAGS += -D__ARMEB__
AS += -EB
LD += -EB
else
KBUILD_CPPFLAGS += -mlittle-endian
+CHECKFLAGS += -D__ARMEL__
AS += -EL
LD += -EL
endif
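
The new CHECKFLAGS defines only affect sparse: gcc predefines __ARMEB__ or
__ARMEL__ by itself, but sparse sees only what CHECKFLAGS passes, so
endian-conditional code was previously analysed with neither macro set. A
minimal, hypothetical sketch (not taken from the kernel) of the kind of code
this makes sparse evaluate correctly:

#include <stdint.h>

/* Convert a CPU-native 32-bit value to big-endian wire order. */
static inline uint32_t cpu_to_wire32(uint32_t v)
{
#ifdef __ARMEB__
	return v;			/* big-endian build: already in wire order */
#else
	return __builtin_bswap32(v);	/* little-endian build: byte-swap */
#endif
}
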
diff --git a/arch/arm/boot/compressed/vmlinux.lds.S b/arch/arm/boot/compressed/vmlinux.lds.S
index 7a4c59154361..7d06aa19c3e6 100644
--- a/arch/arm/boot/compressed/vmlinux.lds.S
+++ b/arch/arm/boot/compressed/vmlinux.lds.S
@@ -85,6 +85,15 @@ SECTIONS
_edata = .;
+ /*
+ * The image_end section appears after any additional loadable sections
+ * that the linker may decide to insert in the binary image. Having
+ * this symbol allows further debug in the near future.
+ */
+ .image_end (NOLOAD) : {
+ _edata_real = .;
+ }
+
_magic_sig = ZIMAGE_MAGIC(0x016f2818);
_magic_start = ZIMAGE_MAGIC(_start);
_magic_end = ZIMAGE_MAGIC(_edata);
diff --git a/arch/arm/boot/dts/armada-375.dtsi b/arch/arm/boot/dts/armada-375.dtsi
index 7225c7ce9a8d..2cb1bcd30976 100644
--- a/arch/arm/boot/dts/armada-375.dtsi
+++ b/arch/arm/boot/dts/armada-375.dtsi
@@ -178,9 +178,9 @@
reg = <0x8000 0x1000>;
cache-unified;
cache-level = <2>;
- arm,double-linefill-incr = <1>;
+ arm,double-linefill-incr = <0>;
arm,double-linefill-wrap = <0>;
- arm,double-linefill = <1>;
+ arm,double-linefill = <0>;
prefetch-data = <1>;
};
diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi
index 4960722aab32..00ff549d4e39 100644
--- a/arch/arm/boot/dts/armada-38x.dtsi
+++ b/arch/arm/boot/dts/armada-38x.dtsi
@@ -143,9 +143,9 @@
reg = <0x8000 0x1000>;
cache-unified;
cache-level = <2>;
- arm,double-linefill-incr = <1>;
+ arm,double-linefill-incr = <0>;
arm,double-linefill-wrap = <0>;
- arm,double-linefill = <1>;
+ arm,double-linefill = <0>;
prefetch-data = <1>;
};
diff --git a/arch/arm/boot/dts/armada-39x.dtsi b/arch/arm/boot/dts/armada-39x.dtsi
index ea657071e278..5218bd2a248d 100644
--- a/arch/arm/boot/dts/armada-39x.dtsi
+++ b/arch/arm/boot/dts/armada-39x.dtsi
@@ -111,9 +111,9 @@
reg = <0x8000 0x1000>;
cache-unified;
cache-level = <2>;
- arm,double-linefill-incr = <1>;
+ arm,double-linefill-incr = <0>;
arm,double-linefill-wrap = <0>;
- arm,double-linefill = <1>;
+ arm,double-linefill = <0>;
prefetch-data = <1>;
};
diff --git a/arch/arm/boot/dts/uniphier-ld4.dtsi b/arch/arm/boot/dts/uniphier-ld4.dtsi
index 79183db5b386..93586faf950f 100644
--- a/arch/arm/boot/dts/uniphier-ld4.dtsi
+++ b/arch/arm/boot/dts/uniphier-ld4.dtsi
@@ -209,7 +209,8 @@
interrupts = <0 80 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usb0>;
- clocks = <&mio_clk 7>, <&mio_clk 8>, <&mio_clk 12>;
+ clocks = <&sys_clk 8>, <&mio_clk 7>, <&mio_clk 8>,
+ <&mio_clk 12>;
resets = <&sys_rst 8>, <&mio_rst 7>, <&mio_rst 8>,
<&mio_rst 12>;
};
@@ -221,7 +222,8 @@
interrupts = <0 81 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usb1>;
- clocks = <&mio_clk 7>, <&mio_clk 9>, <&mio_clk 13>;
+ clocks = <&sys_clk 8>, <&mio_clk 7>, <&mio_clk 9>,
+ <&mio_clk 13>;
resets = <&sys_rst 8>, <&mio_rst 7>, <&mio_rst 9>,
<&mio_rst 13>;
};
@@ -233,7 +235,8 @@
interrupts = <0 82 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usb2>;
- clocks = <&mio_clk 7>, <&mio_clk 10>, <&mio_clk 14>;
+ clocks = <&sys_clk 8>, <&mio_clk 7>, <&mio_clk 10>,
+ <&mio_clk 14>;
resets = <&sys_rst 8>, <&mio_rst 7>, <&mio_rst 10>,
<&mio_rst 14>;
};
diff --git a/arch/arm/boot/dts/uniphier-pro4.dtsi b/arch/arm/boot/dts/uniphier-pro4.dtsi
index b3dbbd9b6e39..2a9bd7f9f5db 100644
--- a/arch/arm/boot/dts/uniphier-pro4.dtsi
+++ b/arch/arm/boot/dts/uniphier-pro4.dtsi
@@ -241,7 +241,8 @@
interrupts = <0 80 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usb2>;
- clocks = <&mio_clk 7>, <&mio_clk 8>, <&mio_clk 12>;
+ clocks = <&sys_clk 8>, <&mio_clk 7>, <&mio_clk 8>,
+ <&mio_clk 12>;
resets = <&sys_rst 8>, <&mio_rst 7>, <&mio_rst 8>,
<&mio_rst 12>;
};
@@ -253,7 +254,8 @@
interrupts = <0 81 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usb3>;
- clocks = <&mio_clk 7>, <&mio_clk 9>, <&mio_clk 13>;
+ clocks = <&sys_clk 8>, <&mio_clk 7>, <&mio_clk 9>,
+ <&mio_clk 13>;
resets = <&sys_rst 8>, <&mio_rst 7>, <&mio_rst 9>,
<&mio_rst 13>;
};
diff --git a/arch/arm/boot/dts/uniphier-sld8.dtsi b/arch/arm/boot/dts/uniphier-sld8.dtsi
index b08390332971..ebd0c3f63e7f 100644
--- a/arch/arm/boot/dts/uniphier-sld8.dtsi
+++ b/arch/arm/boot/dts/uniphier-sld8.dtsi
@@ -209,7 +209,8 @@
interrupts = <0 80 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usb0>;
- clocks = <&mio_clk 7>, <&mio_clk 8>, <&mio_clk 12>;
+ clocks = <&sys_clk 8>, <&mio_clk 7>, <&mio_clk 8>,
+ <&mio_clk 12>;
resets = <&sys_rst 8>, <&mio_rst 7>, <&mio_rst 8>,
<&mio_rst 12>;
};
@@ -221,7 +222,8 @@
interrupts = <0 81 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usb1>;
- clocks = <&mio_clk 7>, <&mio_clk 9>, <&mio_clk 13>;
+ clocks = <&sys_clk 8>, <&mio_clk 7>, <&mio_clk 9>,
+ <&mio_clk 13>;
resets = <&sys_rst 8>, <&mio_rst 7>, <&mio_rst 9>,
<&mio_rst 13>;
};
@@ -233,7 +235,8 @@
interrupts = <0 82 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usb2>;
- clocks = <&mio_clk 7>, <&mio_clk 10>, <&mio_clk 14>;
+ clocks = <&sys_clk 8>, <&mio_clk 7>, <&mio_clk 10>,
+ <&mio_clk 14>;
resets = <&sys_rst 8>, <&mio_rst 7>, <&mio_rst 10>,
<&mio_rst 14>;
};
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index 721ab5ecfb9b..0f2c8a2a8131 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -20,7 +20,6 @@ generic-y += simd.h
generic-y += sizes.h
generic-y += timex.h
generic-y += trace_clock.h
-generic-y += unaligned.h
generated-y += mach-types.h
generated-y += unistd-nr.h
diff --git a/arch/arm/include/asm/unaligned.h b/arch/arm/include/asm/unaligned.h
new file mode 100644
index 000000000000..ab905ffcf193
--- /dev/null
+++ b/arch/arm/include/asm/unaligned.h
@@ -0,0 +1,27 @@
+#ifndef __ASM_ARM_UNALIGNED_H
+#define __ASM_ARM_UNALIGNED_H
+
+/*
+ * We generally want to set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS on ARMv6+,
+ * but we don't want to use linux/unaligned/access_ok.h since that can lead
+ * to traps on unaligned stm/ldm or strd/ldrd.
+ */
+#include <asm/byteorder.h>
+
+#if defined(__LITTLE_ENDIAN)
+# include <linux/unaligned/le_struct.h>
+# include <linux/unaligned/be_byteshift.h>
+# include <linux/unaligned/generic.h>
+# define get_unaligned __get_unaligned_le
+# define put_unaligned __put_unaligned_le
+#elif defined(__BIG_ENDIAN)
+# include <linux/unaligned/be_struct.h>
+# include <linux/unaligned/le_byteshift.h>
+# include <linux/unaligned/generic.h>
+# define get_unaligned __get_unaligned_be
+# define put_unaligned __put_unaligned_be
+#else
+# error need to define endianess
+#endif
+
+#endif /* __ASM_ARM_UNALIGNED_H */
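
For context, a small hypothetical caller (not part of this patch) showing why
the byteshift-based helpers matter: on ARM, get_unaligned()/put_unaligned()
from this header compile down to byte accesses, so reading a field at an odd
offset cannot trap the way an ldrd/ldm-based access could:

#include <linux/types.h>
#include <asm/unaligned.h>

/* Read a 32-bit length field that sits at offset 1 of a packet buffer. */
static u32 parse_pkt_len(const u8 *pkt)
{
	/* pkt + 1 is usually not 4-byte aligned; access it byte-wise. */
	return get_unaligned((const u32 *)(pkt + 1));
}
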
diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c
index 0064b86a2c87..30a13647c54c 100644
--- a/arch/arm/kvm/emulate.c
+++ b/arch/arm/kvm/emulate.c
@@ -227,7 +227,7 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
u32 return_offset = (is_thumb) ? 2 : 4;
kvm_update_psr(vcpu, UND_MODE);
- *vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) - return_offset;
+ *vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
/* Branch to exception vector */
*vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;
@@ -239,10 +239,8 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
*/
static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr)
{
- unsigned long cpsr = *vcpu_cpsr(vcpu);
- bool is_thumb = (cpsr & PSR_T_BIT);
u32 vect_offset;
- u32 return_offset = (is_thumb) ? 4 : 0;
+ u32 return_offset = (is_pabt) ? 4 : 8;
bool is_lpae;
kvm_update_psr(vcpu, ABT_MODE);
diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile
index 5fca24d52fe6..5638ce0c9524 100644
--- a/arch/arm/kvm/hyp/Makefile
+++ b/arch/arm/kvm/hyp/Makefile
@@ -3,7 +3,7 @@
# Makefile for Kernel-based Virtual Machine module, HYP part
#
-ccflags-y += -fno-stack-protector
+ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING
KVM=../../../../virt/kvm
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi b/arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi
index ee4aff53a5f5..09c429cb6d61 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi
+++ b/arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi
@@ -299,7 +299,8 @@
interrupts = <0 243 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usb0>;
- clocks = <&mio_clk 7>, <&mio_clk 8>, <&mio_clk 12>;
+ clocks = <&sys_clk 8>, <&mio_clk 7>, <&mio_clk 8>,
+ <&mio_clk 12>;
resets = <&sys_rst 8>, <&mio_rst 7>, <&mio_rst 8>,
<&mio_rst 12>;
};
@@ -311,7 +312,8 @@
interrupts = <0 244 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usb1>;
- clocks = <&mio_clk 7>, <&mio_clk 9>, <&mio_clk 13>;
+ clocks = <&sys_clk 8>, <&mio_clk 7>, <&mio_clk 9>,
+ <&mio_clk 13>;
resets = <&sys_rst 8>, <&mio_rst 7>, <&mio_rst 9>,
<&mio_rst 13>;
};
@@ -323,7 +325,8 @@
interrupts = <0 245 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usb2>;
- clocks = <&mio_clk 7>, <&mio_clk 10>, <&mio_clk 14>;
+ clocks = <&sys_clk 8>, <&mio_clk 7>, <&mio_clk 10>,
+ <&mio_clk 14>;
resets = <&sys_rst 8>, <&mio_rst 7>, <&mio_rst 10>,
<&mio_rst 14>;
};
diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
index 7c54d8fde855..f04400d494b7 100644
--- a/arch/arm64/kvm/hyp/Makefile
+++ b/arch/arm64/kvm/hyp/Makefile
@@ -3,7 +3,7 @@
# Makefile for Kernel-based Virtual Machine module, HYP part
#
-ccflags-y += -fno-stack-protector
+ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING
KVM=../../../../virt/kvm
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index da6a8cfa54a0..3556715a774e 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -33,12 +33,26 @@
#define LOWER_EL_AArch64_VECTOR 0x400
#define LOWER_EL_AArch32_VECTOR 0x600
+/*
+ * Table taken from ARMv8 ARM DDI0487B-B, table G1-10.
+ */
+static const u8 return_offsets[8][2] = {
+ [0] = { 0, 0 }, /* Reset, unused */
+ [1] = { 4, 2 }, /* Undefined */
+ [2] = { 0, 0 }, /* SVC, unused */
+ [3] = { 4, 4 }, /* Prefetch abort */
+ [4] = { 8, 8 }, /* Data abort */
+ [5] = { 0, 0 }, /* HVC, unused */
+ [6] = { 4, 4 }, /* IRQ, unused */
+ [7] = { 4, 4 }, /* FIQ, unused */
+};
+
static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
{
unsigned long cpsr;
unsigned long new_spsr_value = *vcpu_cpsr(vcpu);
bool is_thumb = (new_spsr_value & COMPAT_PSR_T_BIT);
- u32 return_offset = (is_thumb) ? 4 : 0;
+ u32 return_offset = return_offsets[vect_offset >> 2][is_thumb];
u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
cpsr = mode | COMPAT_PSR_I_BIT;
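
A quick standalone check of the new lookup (an illustration, not part of the
patch): the AArch32 vector offset divided by four selects the row and the
Thumb flag selects the column, so a data abort (vector 0x10) taken from ARM
state yields a return offset of 8, while an undefined-instruction exception
(vector 0x4) taken from Thumb state yields 2:

#include <stdio.h>

/* The table from the patch, reproduced so this compiles on its own. */
static const unsigned char return_offsets[8][2] = {
	{ 0, 0 }, { 4, 2 }, { 0, 0 }, { 4, 4 },
	{ 8, 8 }, { 0, 0 }, { 4, 4 }, { 4, 4 },
};

int main(void)
{
	printf("%d\n", return_offsets[0x10 >> 2][0]);	/* data abort, ARM state: 8 */
	printf("%d\n", return_offsets[0x04 >> 2][1]);	/* undef, Thumb state:    2 */
	return 0;
}
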
diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h
index c86a947f5368..a3d0211970e9 100644
--- a/arch/ia64/include/asm/acpi.h
+++ b/arch/ia64/include/asm/acpi.h
@@ -112,6 +112,8 @@ static inline void arch_acpi_set_pdc_bits(u32 *buf)
buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
}
+#define acpi_unlazy_tlb(x)
+
#ifdef CONFIG_ACPI_NUMA
extern cpumask_t early_cpu_possible_map;
#define for_each_possible_early_cpu(cpu) \
diff --git a/arch/mips/generic/board-ni169445.its.S b/arch/mips/generic/board-ni169445.its.S
index d12e12fe90be..e4cb4f95a8cc 100644
--- a/arch/mips/generic/board-ni169445.its.S
+++ b/arch/mips/generic/board-ni169445.its.S
@@ -1,4 +1,4 @@
-{
+/ {
images {
fdt@ni169445 {
description = "NI 169445 device tree";
diff --git a/arch/mips/generic/init.c b/arch/mips/generic/init.c
index cf409ba358a1..5ba6fcc26fa7 100644
--- a/arch/mips/generic/init.c
+++ b/arch/mips/generic/init.c
@@ -20,7 +20,7 @@
#include <asm/fw/fw.h>
#include <asm/irq_cpu.h>
#include <asm/machine.h>
-#include <asm/mips-cpc.h>
+#include <asm/mips-cps.h>
#include <asm/prom.h>
#include <asm/smp-ops.h>
#include <asm/time.h>
diff --git a/arch/mips/generic/kexec.c b/arch/mips/generic/kexec.c
index e9fb735299e3..1ca409f58929 100644
--- a/arch/mips/generic/kexec.c
+++ b/arch/mips/generic/kexec.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2016 Imagination Technologies
- * Author: Marcin Nowakowski <marcin.nowakowski@imgtec.com>
+ * Author: Marcin Nowakowski <marcin.nowakowski@mips.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h
index 3708b8ccc0b4..8bc5df49b0e1 100644
--- a/arch/mips/include/asm/mips-cm.h
+++ b/arch/mips/include/asm/mips-cm.h
@@ -142,8 +142,8 @@ GCR_ACCESSOR_RO(64, 0x000, config)
GCR_ACCESSOR_RW(64, 0x008, base)
#define CM_GCR_BASE_GCRBASE GENMASK_ULL(47, 15)
#define CM_GCR_BASE_CMDEFTGT GENMASK(1, 0)
-#define CM_GCR_BASE_CMDEFTGT_DISABLED 0
-#define CM_GCR_BASE_CMDEFTGT_MEM 1
+#define CM_GCR_BASE_CMDEFTGT_MEM 0
+#define CM_GCR_BASE_CMDEFTGT_RESERVED 1
#define CM_GCR_BASE_CMDEFTGT_IOCU0 2
#define CM_GCR_BASE_CMDEFTGT_IOCU1 3
diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h
index 5d3563c55e0c..2161357cc68f 100644
--- a/arch/mips/include/asm/stackframe.h
+++ b/arch/mips/include/asm/stackframe.h
@@ -199,6 +199,10 @@
sll k0, 3 /* extract cu0 bit */
.set noreorder
bltz k0, 8f
+ move k0, sp
+ .if \docfi
+ .cfi_register sp, k0
+ .endif
#ifdef CONFIG_EVA
/*
* Flush interAptiv's Return Prediction Stack (RPS) by writing
@@ -225,10 +229,6 @@
MTC0 k0, CP0_ENTRYHI
#endif
.set reorder
- move k0, sp
- .if \docfi
- .cfi_register sp, k0
- .endif
/* Called from user mode, new stack. */
get_saved_sp docfi=\docfi tosp=1
8:
diff --git a/arch/mips/kernel/probes-common.h b/arch/mips/kernel/probes-common.h
index dd08e41134b6..d2bf77b18822 100644
--- a/arch/mips/kernel/probes-common.h
+++ b/arch/mips/kernel/probes-common.h
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2016 Imagination Technologies
- * Author: Marcin Nowakowski <marcin.nowakowski@imgtec.com>
+ * Author: Marcin Nowakowski <marcin.nowakowski@mips.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c
index 05295a4909f1..a2322009cac3 100644
--- a/arch/mips/kernel/smp-cmp.c
+++ b/arch/mips/kernel/smp-cmp.c
@@ -19,7 +19,7 @@
#undef DEBUG
#include <linux/kernel.h>
-#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
@@ -50,8 +50,8 @@ static void cmp_init_secondary(void)
#ifdef CONFIG_MIPS_MT_SMP
if (cpu_has_mipsmt)
- c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) &
- TCBIND_CURVPE;
+ cpu_set_vpe_id(c, (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) &
+ TCBIND_CURVPE);
#endif
}
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
index 7d6af41888e8..ecc1a853f48d 100644
--- a/arch/mips/kernel/smp-cps.c
+++ b/arch/mips/kernel/smp-cps.c
@@ -306,7 +306,7 @@ static int cps_boot_secondary(int cpu, struct task_struct *idle)
int err;
/* We don't yet support booting CPUs in other clusters */
- if (cpu_cluster(&cpu_data[cpu]) != cpu_cluster(&current_cpu_data))
+ if (cpu_cluster(&cpu_data[cpu]) != cpu_cluster(&raw_current_cpu_data))
return -ENOSYS;
vpe_cfg->pc = (unsigned long)&smp_bootstrap;
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index bbe19b64def5..88be966d3e61 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -42,7 +42,7 @@
#include <asm/processor.h>
#include <asm/idle.h>
#include <asm/r4k-timer.h>
-#include <asm/mips-cpc.h>
+#include <asm/mips-cps.h>
#include <asm/mmu_context.h>
#include <asm/time.h>
#include <asm/setup.h>
@@ -66,6 +66,7 @@ EXPORT_SYMBOL(cpu_sibling_map);
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);
+static DECLARE_COMPLETION(cpu_starting);
static DECLARE_COMPLETION(cpu_running);
/*
@@ -374,6 +375,12 @@ asmlinkage void start_secondary(void)
cpumask_set_cpu(cpu, &cpu_coherent_mask);
notify_cpu_starting(cpu);
+ /* Notify boot CPU that we're starting & ready to sync counters */
+ complete(&cpu_starting);
+
+ synchronise_count_slave(cpu);
+
+ /* The CPU is running and counters synchronised, now mark it online */
set_cpu_online(cpu, true);
set_cpu_sibling_map(cpu);
@@ -381,8 +388,11 @@ asmlinkage void start_secondary(void)
calculate_cpu_foreign_map();
+ /*
+ * Notify boot CPU that we're up & online and it can safely return
+ * from __cpu_up
+ */
complete(&cpu_running);
- synchronise_count_slave(cpu);
/*
* irq will be enabled in ->smp_finish(), enabling it too early
@@ -445,17 +455,17 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
if (err)
return err;
- /*
- * We must check for timeout here, as the CPU will not be marked
- * online until the counters are synchronised.
- */
- if (!wait_for_completion_timeout(&cpu_running,
+ /* Wait for CPU to start and be ready to sync counters */
+ if (!wait_for_completion_timeout(&cpu_starting,
msecs_to_jiffies(1000))) {
pr_crit("CPU%u: failed to start\n", cpu);
return -EIO;
}
synchronise_count_master(cpu);
+
+ /* Wait for CPU to finish startup & mark itself online before return */
+ wait_for_completion(&cpu_running);
return 0;
}
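
The net effect is a two-stage handshake: the boot CPU waits on cpu_starting
before synchronising counters, and __cpu_up() only returns once the secondary
has marked itself online and completed cpu_running. A tiny user-space model of
that ordering (a sketch only, with POSIX semaphores standing in for the
kernel's completions; not kernel code):

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

static sem_t cpu_starting;	/* secondary is alive, ready to sync counters */
static sem_t cpu_running;	/* secondary has marked itself online */

static void *secondary_cpu(void *arg)
{
	(void)arg;
	sem_post(&cpu_starting);	/* complete(&cpu_starting) */
	/* synchronise_count_slave() would run here */
	puts("secondary: counters synced, marking online");
	sem_post(&cpu_running);		/* complete(&cpu_running) */
	return NULL;
}

int main(void)
{
	pthread_t t;

	sem_init(&cpu_starting, 0, 0);
	sem_init(&cpu_running, 0, 0);
	pthread_create(&t, NULL, secondary_cpu, NULL);

	sem_wait(&cpu_starting);	/* wait_for_completion_timeout(&cpu_starting, ...) */
	/* synchronise_count_master() would run here */
	sem_wait(&cpu_running);		/* wait_for_completion(&cpu_running) */
	puts("boot: secondary online, __cpu_up() returns");

	pthread_join(t, NULL);
	return 0;
}
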
diff --git a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c
index c28ff53c8da0..cdb5a191b9d5 100644
--- a/arch/mips/mm/uasm-micromips.c
+++ b/arch/mips/mm/uasm-micromips.c
@@ -80,7 +80,7 @@ static const struct insn const insn_table_MM[insn_invalid] = {
[insn_jr] = {M(mm_pool32a_op, 0, 0, 0, mm_jalr_op, mm_pool32axf_op), RS},
[insn_lb] = {M(mm_lb32_op, 0, 0, 0, 0, 0), RT | RS | SIMM},
[insn_ld] = {0, 0},
- [insn_lh] = {M(mm_lh32_op, 0, 0, 0, 0, 0), RS | RS | SIMM},
+ [insn_lh] = {M(mm_lh32_op, 0, 0, 0, 0, 0), RT | RS | SIMM},
[insn_ll] = {M(mm_pool32c_op, 0, 0, (mm_ll_func << 1), 0, 0), RS | RT | SIMM},
[insn_lld] = {0, 0},
[insn_lui] = {M(mm_pool32i_op, mm_lui_op, 0, 0, 0, 0), RS | SIMM},
diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c
index 01b7a87ea678..962b0259b4b6 100644
--- a/arch/mips/net/ebpf_jit.c
+++ b/arch/mips/net/ebpf_jit.c
@@ -1513,7 +1513,7 @@ ld_skb_common:
}
src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
if (src < 0)
- return dst;
+ return src;
if (BPF_MODE(insn->code) == BPF_XADD) {
switch (BPF_SIZE(insn->code)) {
case BPF_W:
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S
index 93b945597ecf..7cfba738f104 100644
--- a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S
+++ b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S
@@ -157,8 +157,8 @@ LABEL skip_ %I
.endr
# Find min length
- vmovdqa _lens+0*16(state), %xmm0
- vmovdqa _lens+1*16(state), %xmm1
+ vmovdqu _lens+0*16(state), %xmm0
+ vmovdqu _lens+1*16(state), %xmm1
vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
@@ -178,8 +178,8 @@ LABEL skip_ %I
vpsubd %xmm2, %xmm0, %xmm0
vpsubd %xmm2, %xmm1, %xmm1
- vmovdqa %xmm0, _lens+0*16(state)
- vmovdqa %xmm1, _lens+1*16(state)
+ vmovdqu %xmm0, _lens+0*16(state)
+ vmovdqu %xmm1, _lens+1*16(state)
# "state" and "args" are the same address, arg1
# len is arg2
@@ -235,8 +235,8 @@ ENTRY(sha1_mb_mgr_get_comp_job_avx2)
jc .return_null
# Find min length
- vmovdqa _lens(state), %xmm0
- vmovdqa _lens+1*16(state), %xmm1
+ vmovdqu _lens(state), %xmm0
+ vmovdqu _lens+1*16(state), %xmm1
vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
index 8fe6338bcc84..16c4ccb1f154 100644
--- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
+++ b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
@@ -155,8 +155,8 @@ LABEL skip_ %I
.endr
# Find min length
- vmovdqa _lens+0*16(state), %xmm0
- vmovdqa _lens+1*16(state), %xmm1
+ vmovdqu _lens+0*16(state), %xmm0
+ vmovdqu _lens+1*16(state), %xmm1
vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
@@ -176,8 +176,8 @@ LABEL skip_ %I
vpsubd %xmm2, %xmm0, %xmm0
vpsubd %xmm2, %xmm1, %xmm1
- vmovdqa %xmm0, _lens+0*16(state)
- vmovdqa %xmm1, _lens+1*16(state)
+ vmovdqu %xmm0, _lens+0*16(state)
+ vmovdqu %xmm1, _lens+1*16(state)
# "state" and "args" are the same address, arg1
# len is arg2
@@ -234,8 +234,8 @@ ENTRY(sha256_mb_mgr_get_comp_job_avx2)
jc .return_null
# Find min length
- vmovdqa _lens(state), %xmm0
- vmovdqa _lens+1*16(state), %xmm1
+ vmovdqu _lens(state), %xmm0
+ vmovdqu _lens+1*16(state), %xmm1
vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 72d867f6b518..8d0ec9df1cbe 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -150,6 +150,8 @@ static inline void disable_acpi(void) { }
extern int x86_acpi_numa_init(void);
#endif /* CONFIG_ACPI_NUMA */
+#define acpi_unlazy_tlb(x) leave_mm(x)
+
#ifdef CONFIG_ACPI_APEI
static inline pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr)
{
diff --git a/arch/x86/kernel/cpu/mcheck/dev-mcelog.c b/arch/x86/kernel/cpu/mcheck/dev-mcelog.c
index 10cec43aac38..7f85b76f43bc 100644
--- a/arch/x86/kernel/cpu/mcheck/dev-mcelog.c
+++ b/arch/x86/kernel/cpu/mcheck/dev-mcelog.c
@@ -24,14 +24,6 @@ static DEFINE_MUTEX(mce_chrdev_read_mutex);
static char mce_helper[128];
static char *mce_helper_argv[2] = { mce_helper, NULL };
-#define mce_log_get_idx_check(p) \
-({ \
- RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
- !lockdep_is_held(&mce_chrdev_read_mutex), \
- "suspicious mce_log_get_idx_check() usage"); \
- smp_load_acquire(&(p)); \
-})
-
/*
* Lockless MCE logging infrastructure.
* This avoids deadlocks on printk locks without having to break locks. Also
@@ -53,43 +45,32 @@ static int dev_mce_log(struct notifier_block *nb, unsigned long val,
void *data)
{
struct mce *mce = (struct mce *)data;
- unsigned int next, entry;
-
- wmb();
- for (;;) {
- entry = mce_log_get_idx_check(mcelog.next);
- for (;;) {
-
- /*
- * When the buffer fills up discard new entries.
- * Assume that the earlier errors are the more
- * interesting ones:
- */
- if (entry >= MCE_LOG_LEN) {
- set_bit(MCE_OVERFLOW,
- (unsigned long *)&mcelog.flags);
- return NOTIFY_OK;
- }
- /* Old left over entry. Skip: */
- if (mcelog.entry[entry].finished) {
- entry++;
- continue;
- }
- break;
- }
- smp_rmb();
- next = entry + 1;
- if (cmpxchg(&mcelog.next, entry, next) == entry)
- break;
+ unsigned int entry;
+
+ mutex_lock(&mce_chrdev_read_mutex);
+
+ entry = mcelog.next;
+
+ /*
+ * When the buffer fills up discard new entries. Assume that the
+ * earlier errors are the more interesting ones:
+ */
+ if (entry >= MCE_LOG_LEN) {
+ set_bit(MCE_OVERFLOW, (unsigned long *)&mcelog.flags);
+ goto unlock;
}
+
+ mcelog.next = entry + 1;
+
memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
- wmb();
mcelog.entry[entry].finished = 1;
- wmb();
/* wake processes polling /dev/mcelog */
wake_up_interruptible(&mce_chrdev_wait);
+unlock:
+ mutex_unlock(&mce_chrdev_read_mutex);
+
return NOTIFY_OK;
}
@@ -177,13 +158,6 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
return 0;
}
-static void collect_tscs(void *data)
-{
- unsigned long *cpu_tsc = (unsigned long *)data;
-
- cpu_tsc[smp_processor_id()] = rdtsc();
-}
-
static int mce_apei_read_done;
/* Collect MCE record of previous boot in persistent storage via APEI ERST. */
@@ -231,14 +205,9 @@ static ssize_t mce_chrdev_read(struct file *filp, char __user *ubuf,
size_t usize, loff_t *off)
{
char __user *buf = ubuf;
- unsigned long *cpu_tsc;
- unsigned prev, next;
+ unsigned next;
int i, err;
- cpu_tsc = kmalloc(nr_cpu_ids * sizeof(long), GFP_KERNEL);
- if (!cpu_tsc)
- return -ENOMEM;
-
mutex_lock(&mce_chrdev_read_mutex);
if (!mce_apei_read_done) {
@@ -247,65 +216,29 @@ static ssize_t mce_chrdev_read(struct file *filp, char __user *ubuf,
goto out;
}
- next = mce_log_get_idx_check(mcelog.next);
-
/* Only supports full reads right now */
err = -EINVAL;
if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce))
goto out;
+ next = mcelog.next;
err = 0;
- prev = 0;
- do {
- for (i = prev; i < next; i++) {
- unsigned long start = jiffies;
- struct mce *m = &mcelog.entry[i];
-
- while (!m->finished) {
- if (time_after_eq(jiffies, start + 2)) {
- memset(m, 0, sizeof(*m));
- goto timeout;
- }
- cpu_relax();
- }
- smp_rmb();
- err |= copy_to_user(buf, m, sizeof(*m));
- buf += sizeof(*m);
-timeout:
- ;
- }
-
- memset(mcelog.entry + prev, 0,
- (next - prev) * sizeof(struct mce));
- prev = next;
- next = cmpxchg(&mcelog.next, prev, 0);
- } while (next != prev);
-
- synchronize_sched();
- /*
- * Collect entries that were still getting written before the
- * synchronize.
- */
- on_each_cpu(collect_tscs, cpu_tsc, 1);
-
- for (i = next; i < MCE_LOG_LEN; i++) {
+ for (i = 0; i < next; i++) {
struct mce *m = &mcelog.entry[i];
- if (m->finished && m->tsc < cpu_tsc[m->cpu]) {
- err |= copy_to_user(buf, m, sizeof(*m));
- smp_rmb();
- buf += sizeof(*m);
- memset(m, 0, sizeof(*m));
- }
+ err |= copy_to_user(buf, m, sizeof(*m));
+ buf += sizeof(*m);
}
+ memset(mcelog.entry, 0, next * sizeof(struct mce));
+ mcelog.next = 0;
+
if (err)
err = -EFAULT;
out:
mutex_unlock(&mce_chrdev_read_mutex);
- kfree(cpu_tsc);
return err ? err : buf - ubuf;
}
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index d88967659098..5b609e28ce3f 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -79,7 +79,7 @@ static void kvm_get_wallclock(struct timespec *now)
static int kvm_set_wallclock(const struct timespec *now)
{
- return -1;
+ return -ENODEV;
}
static u64 kvm_clock_read(void)
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index 62e7d70aadd5..da0c160e5589 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -172,19 +172,27 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
case R_X86_64_NONE:
break;
case R_X86_64_64:
+ if (*(u64 *)loc != 0)
+ goto invalid_relocation;
*(u64 *)loc = val;
break;
case R_X86_64_32:
+ if (*(u32 *)loc != 0)
+ goto invalid_relocation;
*(u32 *)loc = val;
if (val != *(u32 *)loc)
goto overflow;
break;
case R_X86_64_32S:
+ if (*(s32 *)loc != 0)
+ goto invalid_relocation;
*(s32 *)loc = val;
if ((s64)val != *(s32 *)loc)
goto overflow;
break;
case R_X86_64_PC32:
+ if (*(u32 *)loc != 0)
+ goto invalid_relocation;
val -= (u64)loc;
*(u32 *)loc = val;
#if 0
@@ -200,6 +208,11 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
}
return 0;
+invalid_relocation:
+ pr_err("x86/modules: Skipping invalid relocation target, existing value is nonzero for type %d, loc %p, val %Lx\n",
+ (int)ELF64_R_TYPE(rel[i].r_info), loc, val);
+ return -ENOEXEC;
+
overflow:
pr_err("overflow in relocation type %d val %Lx\n",
(int)ELF64_R_TYPE(rel[i].r_info), val);
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 69c5612be786..36c90d631096 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1992,6 +1992,11 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
vcpu->arch.apic_base | MSR_IA32_APICBASE_BSP);
vcpu->arch.pv_eoi.msr_val = 0;
apic_update_ppr(apic);
+ if (vcpu->arch.apicv_active) {
+ kvm_x86_ops->apicv_post_state_restore(vcpu);
+ kvm_x86_ops->hwapic_irr_update(vcpu, -1);
+ kvm_x86_ops->hwapic_isr_update(vcpu, -1);
+ }
vcpu->arch.apic_arb_prio = 0;
vcpu->arch.apic_attention = 0;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 95a01609d7ee..a6f4f095f8f4 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -5619,9 +5619,6 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
- if (kvm_vcpu_apicv_active(vcpu))
- memset(&vmx->pi_desc, 0, sizeof(struct pi_desc));
-
if (vmx->vpid != 0)
vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 16c5f37933a2..0286327e65fa 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -40,7 +40,7 @@ static char sme_cmdline_off[] __initdata = "off";
* section is later cleared.
*/
u64 sme_me_mask __section(.data) = 0;
-EXPORT_SYMBOL_GPL(sme_me_mask);
+EXPORT_SYMBOL(sme_me_mask);
/* Buffer used for early in-place encryption by BSP, no locking needed */
static char sme_early_buffer[PAGE_SIZE] __aligned(PAGE_SIZE);
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 0f3d0cea4d00..3118392cdf75 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -85,6 +85,7 @@ void leave_mm(int cpu)
switch_mm(NULL, &init_mm, NULL);
}
+EXPORT_SYMBOL_GPL(leave_mm);
void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
@@ -195,12 +196,22 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
write_cr3(build_cr3(next, new_asid));
- trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH,
- TLB_FLUSH_ALL);
+
+ /*
+ * NB: This gets called via leave_mm() in the idle path
+ * where RCU functions differently. Tracing normally
+ * uses RCU, so we need to use the _rcuidle variant.
+ *
+ * (There is no good reason for this. The idle code should
+ * be rearranged to call this before rcu_idle_enter().)
+ */
+ trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
} else {
/* The new ASID is already up to date. */
write_cr3(build_cr3_noflush(next, new_asid));
- trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, 0);
+
+ /* See above wrt _rcuidle. */
+ trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);
}
this_cpu_write(cpu_tlbstate.loaded_mm, next);