author    Linus Torvalds <torvalds@linux-foundation.org>  2018-03-18 12:03:15 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2018-03-18 12:03:15 -0700
commit    9e1909b9da04fb582b20d3805e16fad3f6ebf984 (patch)
tree      3752e397308f50d9d42eb2b409a8b0b9f1336af3 /arch
parent    df4fe17802a306a17f5a6f4db9cdd2a3b661264d (diff)
parent    bb8c13d61a629276a162c1d2b1a20a815cbcfbb7 (diff)
Merge branch 'x86-pti-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86/pti updates from Thomas Gleixner:
 "Another set of melted spectrum updates:

   - Iron out the last late microcode loading issues by actually
     checking whether new microcode is present and preventing the CPU
     synchronization from running into a timeout-induced hang

   - Remove Skylake C2 from the microcode blacklist according to the
     latest Intel documentation

   - Fix the VM86 POPF emulation, which traps if VIP is set but VIF is
     not, and enhance the selftests to catch that kind of issue

   - Annotate indirect calls/jumps for objtool on 32-bit. This is not a
     functional issue, but for consistency's sake it's the right thing
     to do

   - Fix a jump label build warning observed on SPARC64, which uses
     32-bit storage for the code location that is cast to a 64-bit
     pointer without extending it to 64 bits first

   - Add two new cpufeature bits. Not really an urgent issue, but they
     are provided for both x86 and x86/kvm work. No impact on the
     current kernel"

* 'x86-pti-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/microcode: Fix CPU synchronization routine
  x86/microcode: Attempt late loading only when new microcode is present
  x86/speculation: Remove Skylake C2 from Speculation Control microcode blacklist
  jump_label: Fix sparc64 warning
  x86/speculation, objtool: Annotate indirect calls/jumps for objtool on 32-bit kernels
  x86/vm86/32: Fix POPF emulation
  selftests/x86/entry_from_vm86: Add test cases for POPF
  selftests/x86/entry_from_vm86: Exit with 1 if we fail
  x86/cpufeatures: Add Intel PCONFIG cpufeature
  x86/cpufeatures: Add Intel Total Memory Encryption cpufeature
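The SPARC64 jump label warning in the list above comes from a plain C pitfall. A minimal standalone illustration of the warning class (not the kernel's jump_label code):

    #include <stdint.h>

    /* On a 64-bit target, a code location kept in 32-bit storage must be
     * widened through unsigned long before it becomes a pointer; a direct
     * cast draws "cast to pointer from integer of different size". */
    static void *code_location(uint32_t stored)
    {
        /* return (void *)stored;  -- warns on 64-bit builds */
        return (void *)(unsigned long)stored;  /* explicit widening */
    }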
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/include/asm/cpufeatures.h    |  2
-rw-r--r--  arch/x86/include/asm/microcode.h      |  1
-rw-r--r--  arch/x86/include/asm/nospec-branch.h  |  5
-rw-r--r--  arch/x86/kernel/cpu/intel.c           |  3
-rw-r--r--  arch/x86/kernel/cpu/microcode/amd.c   | 34
-rw-r--r--  arch/x86/kernel/cpu/microcode/core.c  | 76
-rw-r--r--  arch/x86/kernel/cpu/microcode/intel.c |  4
-rw-r--r--  arch/x86/kernel/vm86_32.c             |  3
8 files changed, 78 insertions(+), 50 deletions(-)
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index f41079da38c5..d554c11e01ff 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -316,6 +316,7 @@
#define X86_FEATURE_VPCLMULQDQ (16*32+10) /* Carry-Less Multiplication Double Quadword */
#define X86_FEATURE_AVX512_VNNI (16*32+11) /* Vector Neural Network Instructions */
#define X86_FEATURE_AVX512_BITALG (16*32+12) /* Support for VPOPCNT[B,W] and VPSHUF-BITQMB instructions */
+#define X86_FEATURE_TME (16*32+13) /* Intel Total Memory Encryption */
#define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */
#define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */
#define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */
@@ -328,6 +329,7 @@
/* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
#define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */
#define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
+#define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */
#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
#define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
#define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
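Both new bits live in CPUID leaf 7, subleaf 0: word 16 is ECX (so TME is ECX bit 13) and word 18 is EDX (so PCONFIG is EDX bit 18). A minimal userspace probe, assuming GCC/Clang's cpuid.h:

    #include <stdio.h>
    #include <cpuid.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        /* CPUID leaf 7, subleaf 0 carries the structured feature flags */
        if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
            return 1;

        printf("TME:     %s\n", (ecx & (1u << 13)) ? "yes" : "no");
        printf("PCONFIG: %s\n", (edx & (1u << 18)) ? "yes" : "no");
        return 0;
    }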
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index 7fb1047d61c7..6cf0e4cb7b97 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -39,6 +39,7 @@ struct device;
enum ucode_state {
UCODE_OK = 0,
+ UCODE_NEW,
UCODE_UPDATED,
UCODE_NFOUND,
UCODE_ERROR,
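UCODE_NEW is deliberately slotted between UCODE_OK and UCODE_UPDATED: the loaders treat anything greater than UCODE_UPDATED as a failure (see the "ret > UCODE_UPDATED" test in the AMD hunk below). A sketch of the resulting ordering, with the implicit values written out:

    enum ucode_state {
        UCODE_OK = 0,   /* nothing newer than what is loaded */
        UCODE_NEW,      /* 1: new microcode found, not yet applied */
        UCODE_UPDATED,  /* 2: microcode was applied */
        UCODE_NFOUND,   /* 3: no matching microcode found */
        UCODE_ERROR,    /* 4: hard failure */
    };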
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index d0dabeae0505..f928ad9b143f 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -183,7 +183,10 @@
* otherwise we'll run out of registers. We don't care about CET
* here, anyway.
*/
-# define CALL_NOSPEC ALTERNATIVE("call *%[thunk_target]\n", \
+# define CALL_NOSPEC \
+ ALTERNATIVE( \
+ ANNOTATE_RETPOLINE_SAFE \
+ "call *%[thunk_target]\n", \
" jmp 904f;\n" \
" .align 16\n" \
"901: call 903f;\n" \
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 4aa9fd379390..c3af167d0a70 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -105,7 +105,7 @@ static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c)
/*
* Early microcode releases for the Spectre v2 mitigation were broken.
* Information taken from;
- * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/01/microcode-update-guidance.pdf
+ * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/03/microcode-update-guidance.pdf
* - https://kb.vmware.com/s/article/52345
* - Microcode revisions observed in the wild
* - Release note from 20180108 microcode release
@@ -123,7 +123,6 @@ static const struct sku_microcode spectre_bad_microcodes[] = {
{ INTEL_FAM6_KABYLAKE_MOBILE, 0x09, 0x80 },
{ INTEL_FAM6_SKYLAKE_X, 0x03, 0x0100013e },
{ INTEL_FAM6_SKYLAKE_X, 0x04, 0x0200003c },
- { INTEL_FAM6_SKYLAKE_DESKTOP, 0x03, 0xc2 },
{ INTEL_FAM6_BROADWELL_CORE, 0x04, 0x28 },
{ INTEL_FAM6_BROADWELL_GT3E, 0x01, 0x1b },
{ INTEL_FAM6_BROADWELL_XEON_D, 0x02, 0x14 },
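A standalone sketch of how the table above is consumed (modeled on bad_spectre_microcode() in the same file, names and types simplified): a CPU is flagged only when model, stepping and the running microcode revision all match a row, so deleting the Skylake C2 entry (model 0x5e, stepping 3, revision 0xc2) stops that combination from being blacklisted:

    #include <stdbool.h>
    #include <stddef.h>

    struct sku_microcode { unsigned char model, stepping; unsigned int microcode; };

    static const struct sku_microcode table[] = {
        { 0x5e /* SKYLAKE_DESKTOP */, 0x03, 0xc2 },  /* row this merge removes */
    };

    static bool is_blacklisted(unsigned char model, unsigned char stepping,
                               unsigned int rev)
    {
        for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
            if (model == table[i].model && stepping == table[i].stepping)
                return rev == table[i].microcode;
        return false;
    }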
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index a998e1a7d46f..48179928ff38 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -339,7 +339,7 @@ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
return -EINVAL;
ret = load_microcode_amd(true, x86_family(cpuid_1_eax), desc.data, desc.size);
- if (ret != UCODE_OK)
+ if (ret > UCODE_UPDATED)
return -EINVAL;
return 0;
@@ -683,27 +683,35 @@ static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
static enum ucode_state
load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
{
+ struct ucode_patch *p;
enum ucode_state ret;
/* free old equiv table */
free_equiv_cpu_table();
ret = __load_microcode_amd(family, data, size);
-
- if (ret != UCODE_OK)
+ if (ret != UCODE_OK) {
cleanup();
+ return ret;
+ }
-#ifdef CONFIG_X86_32
- /* save BSP's matching patch for early load */
- if (save) {
- struct ucode_patch *p = find_patch(0);
- if (p) {
- memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
- memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data),
- PATCH_MAX_SIZE));
- }
+ p = find_patch(0);
+ if (!p) {
+ return ret;
+ } else {
+ if (boot_cpu_data.microcode == p->patch_id)
+ return ret;
+
+ ret = UCODE_NEW;
}
-#endif
+
+ /* save BSP's matching patch for early load */
+ if (!save)
+ return ret;
+
+ memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
+ memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data), PATCH_MAX_SIZE));
+
return ret;
}
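The restructured function now distinguishes "no new microcode" from "new microcode staged". A sketch of the decision ladder, assuming boot_cpu_data.microcode holds the currently running patch level:

    /*
     *   parse/verify failed        -> error state, cleanup(), propagate
     *   no patch for this CPU      -> UCODE_OK   (nothing to do)
     *   patch_id == running level  -> UCODE_OK   (already applied)
     *   patch_id != running level  -> UCODE_NEW  (late load worthwhile)
     */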
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 70ecbc8099c9..10c4fc2c91f8 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -517,7 +517,29 @@ static int check_online_cpus(void)
return -EINVAL;
}
-static atomic_t late_cpus;
+static atomic_t late_cpus_in;
+static atomic_t late_cpus_out;
+
+static int __wait_for_cpus(atomic_t *t, long long timeout)
+{
+ int all_cpus = num_online_cpus();
+
+ atomic_inc(t);
+
+ while (atomic_read(t) < all_cpus) {
+ if (timeout < SPINUNIT) {
+ pr_err("Timeout while waiting for CPUs rendezvous, remaining: %d\n",
+ all_cpus - atomic_read(t));
+ return 1;
+ }
+
+ ndelay(SPINUNIT);
+ timeout -= SPINUNIT;
+
+ touch_nmi_watchdog();
+ }
+ return 0;
+}
/*
* Returns:
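The two counters replace the old single late_cpus counter: late_cpus_in gates entry into the update and late_cpus_out gates the exit, so the same atomic is never reused across phases. The rendezvous pattern itself is ordinary; a runnable userspace analogue with C11 atomics (a sketch only, without the timeout and NMI-watchdog handling the kernel version needs):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    #define NTHREADS 4

    static atomic_int checked_in;

    /* each thread checks in, then spins until all NTHREADS have arrived */
    static void *worker(void *arg)
    {
        (void)arg;
        atomic_fetch_add(&checked_in, 1);
        while (atomic_load(&checked_in) < NTHREADS)
            ;  /* the kernel version ndelay()s and pokes the NMI watchdog here */
        puts("all threads arrived");
        return NULL;
    }

    int main(void)
    {
        pthread_t t[NTHREADS];

        for (int i = 0; i < NTHREADS; i++)
            pthread_create(&t[i], NULL, worker, NULL);
        for (int i = 0; i < NTHREADS; i++)
            pthread_join(t[i], NULL);
        return 0;  /* build with: cc -pthread rendezvous.c */
    }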
@@ -527,30 +549,16 @@ static atomic_t late_cpus;
*/
static int __reload_late(void *info)
{
- unsigned int timeout = NSEC_PER_SEC;
- int all_cpus = num_online_cpus();
int cpu = smp_processor_id();
enum ucode_state err;
int ret = 0;
- atomic_dec(&late_cpus);
-
/*
* Wait for all CPUs to arrive. A load will not be attempted unless all
* CPUs show up.
* */
- while (atomic_read(&late_cpus)) {
- if (timeout < SPINUNIT) {
- pr_err("Timeout while waiting for CPUs rendezvous, remaining: %d\n",
- atomic_read(&late_cpus));
- return -1;
- }
-
- ndelay(SPINUNIT);
- timeout -= SPINUNIT;
-
- touch_nmi_watchdog();
- }
+ if (__wait_for_cpus(&late_cpus_in, NSEC_PER_SEC))
+ return -1;
spin_lock(&update_lock);
apply_microcode_local(&err);
@@ -558,15 +566,22 @@ static int __reload_late(void *info)
if (err > UCODE_NFOUND) {
pr_warn("Error reloading microcode on CPU %d\n", cpu);
- ret = -1;
- } else if (err == UCODE_UPDATED) {
+ return -1;
+ /* siblings return UCODE_OK because their engine got updated already */
+ } else if (err == UCODE_UPDATED || err == UCODE_OK) {
ret = 1;
+ } else {
+ return ret;
}
- atomic_inc(&late_cpus);
-
- while (atomic_read(&late_cpus) != all_cpus)
- cpu_relax();
+ /*
+ * Increase the wait timeout to a safe value here since we're
+ * serializing the microcode update and that could take a while on a
+ * large number of CPUs. And that is fine as the *actual* timeout will
+ * be determined by the last CPU finished updating and thus cut short.
+ */
+ if (__wait_for_cpus(&late_cpus_out, NSEC_PER_SEC * num_online_cpus()))
+ panic("Timeout during microcode update!\n");
return ret;
}
@@ -579,12 +594,11 @@ static int microcode_reload_late(void)
{
int ret;
- atomic_set(&late_cpus, num_online_cpus());
+ atomic_set(&late_cpus_in, 0);
+ atomic_set(&late_cpus_out, 0);
ret = stop_machine_cpuslocked(__reload_late, NULL, cpu_online_mask);
- if (ret < 0)
- return ret;
- else if (ret > 0)
+ if (ret > 0)
microcode_check();
return ret;
@@ -607,7 +621,7 @@ static ssize_t reload_store(struct device *dev,
return size;
tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev, true);
- if (tmp_ret != UCODE_OK)
+ if (tmp_ret != UCODE_NEW)
return size;
get_online_cpus();
@@ -691,10 +705,8 @@ static enum ucode_state microcode_init_cpu(int cpu, bool refresh_fw)
if (system_state != SYSTEM_RUNNING)
return UCODE_NFOUND;
- ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev,
- refresh_fw);
-
- if (ustate == UCODE_OK) {
+ ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, refresh_fw);
+ if (ustate == UCODE_NEW) {
pr_debug("CPU%d updated upon init\n", cpu);
apply_microcode_on_target(cpu);
}
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 2aded9db1d42..32b8e5724f96 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -862,6 +862,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
unsigned int leftover = size;
unsigned int curr_mc_size = 0, new_mc_size = 0;
unsigned int csig, cpf;
+ enum ucode_state ret = UCODE_OK;
while (leftover) {
struct microcode_header_intel mc_header;
@@ -903,6 +904,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
new_mc = mc;
new_mc_size = mc_size;
mc = NULL; /* trigger new vmalloc */
+ ret = UCODE_NEW;
}
ucode_ptr += mc_size;
@@ -932,7 +934,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
cpu, new_rev, uci->cpu_sig.rev);
- return UCODE_OK;
+ return ret;
}
static int get_ucode_fw(void *to, const void *from, size_t n)
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 5edb27f1a2c4..9d0b5af7db91 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -727,7 +727,8 @@ void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
return;
check_vip:
- if (VEFLAGS & X86_EFLAGS_VIP) {
+ if ((VEFLAGS & (X86_EFLAGS_VIP | X86_EFLAGS_VIF)) ==
+ (X86_EFLAGS_VIP | X86_EFLAGS_VIF)) {
save_v86_state(regs, VM86_STI);
return;
}
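This one-liner fixes the POPF emulation bug called out in the merge message: the old test forced a VM86 exit whenever an interrupt was pending (VIP set), even while the guest still had virtual interrupts disabled (VIF clear), so a POPF that left VIF clear trapped spuriously. A standalone restatement of the corrected predicate (helper name hypothetical):

    #include <stdbool.h>

    #define X86_EFLAGS_VIF  0x00080000  /* virtual interrupt flag */
    #define X86_EFLAGS_VIP  0x00100000  /* virtual interrupt pending */

    /* exit VM86 mode only when an interrupt is pending (VIP) AND the
     * guest has just re-enabled virtual interrupts (VIF) */
    static bool vm86_irq_window_open(unsigned long veflags)
    {
        return (veflags & (X86_EFLAGS_VIP | X86_EFLAGS_VIF)) ==
               (X86_EFLAGS_VIP | X86_EFLAGS_VIF);
    }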