Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/boot/compressed/mem.c          |   5
-rw-r--r--  arch/x86/boot/compressed/sev.c          |  67
-rw-r--r--  arch/x86/boot/compressed/sev.h          |   2
-rw-r--r--  arch/x86/entry/entry.S                  |   9
-rw-r--r--  arch/x86/events/intel/core.c            |  24
-rw-r--r--  arch/x86/events/intel/ds.c              |   8
-rw-r--r--  arch/x86/events/intel/uncore_snbep.c    | 107
-rw-r--r--  arch/x86/include/asm/intel-family.h     |   2
-rw-r--r--  arch/x86/include/asm/kvm_host.h         |   7
-rw-r--r--  arch/x86/include/asm/nospec-branch.h    |  12
-rw-r--r--  arch/x86/include/asm/smap.h             |  12
-rw-r--r--  arch/x86/kernel/acpi/boot.c             |  11
-rw-r--r--  arch/x86/kernel/cpu/amd.c               |  20
-rw-r--r--  arch/x86/kernel/cpu/bugs.c              | 101
-rw-r--r--  arch/x86/kernel/cpu/microcode/amd.c     |   9
-rw-r--r--  arch/x86/kernel/cpu/resctrl/rdtgroup.c  |  48
-rw-r--r--  arch/x86/kernel/e820.c                  |  17
-rw-r--r--  arch/x86/kernel/early_printk.c          |  10
-rw-r--r--  arch/x86/kernel/i8253.c                 |   3
-rw-r--r--  arch/x86/kvm/cpuid.c                    |   8
-rw-r--r--  arch/x86/kvm/mmu/tdp_mmu.c              |   8
-rw-r--r--  arch/x86/kvm/vmx/posted_intr.c          |  37
-rw-r--r--  arch/x86/kvm/x86.c                      |   4
-rw-r--r--  arch/x86/mm/tlb.c                       |   6
-rw-r--r--  arch/x86/power/hibernate_asm_64.S       |   4
-rw-r--r--  arch/x86/xen/enlighten.c                |  17
-rw-r--r--  arch/x86/xen/enlighten_pvh.c            |  19
-rw-r--r--  arch/x86/xen/multicalls.c               |  26
-rw-r--r--  arch/x86/xen/setup.c                    |   3
-rw-r--r--  arch/x86/xen/smp_pv.c                   |   1
-rw-r--r--  arch/x86/xen/xen-asm.S                  |   4
-rw-r--r--  arch/x86/xen/xen-ops.h                  |   3
32 files changed, 278 insertions(+), 336 deletions(-)
diff --git a/arch/x86/boot/compressed/mem.c b/arch/x86/boot/compressed/mem.c
index dbba332e4a12..f676156d9f3d 100644
--- a/arch/x86/boot/compressed/mem.c
+++ b/arch/x86/boot/compressed/mem.c
@@ -34,11 +34,14 @@ static bool early_is_tdx_guest(void)
void arch_accept_memory(phys_addr_t start, phys_addr_t end)
{
+ static bool sevsnp;
+
/* Platform-specific memory-acceptance call goes here */
if (early_is_tdx_guest()) {
if (!tdx_accept_memory(start, end))
panic("TDX: Failed to accept memory\n");
- } else if (sev_snp_enabled()) {
+ } else if (sevsnp || (sev_get_status() & MSR_AMD64_SEV_SNP_ENABLED)) {
+ sevsnp = true;
snp_accept_memory(start, end);
} else {
error("Cannot accept memory: unknown platform\n");
diff --git a/arch/x86/boot/compressed/sev.c b/arch/x86/boot/compressed/sev.c
index bb55934c1cee..89ba168f4f0f 100644
--- a/arch/x86/boot/compressed/sev.c
+++ b/arch/x86/boot/compressed/sev.c
@@ -164,10 +164,7 @@ bool sev_snp_enabled(void)
static void __page_state_change(unsigned long paddr, enum psc_op op)
{
- u64 val;
-
- if (!sev_snp_enabled())
- return;
+ u64 val, msr;
/*
* If private -> shared then invalidate the page before requesting the
@@ -176,6 +173,9 @@ static void __page_state_change(unsigned long paddr, enum psc_op op)
if (op == SNP_PAGE_STATE_SHARED)
pvalidate_4k_page(paddr, paddr, false);
+ /* Save the current GHCB MSR value */
+ msr = sev_es_rd_ghcb_msr();
+
/* Issue VMGEXIT to change the page state in RMP table. */
sev_es_wr_ghcb_msr(GHCB_MSR_PSC_REQ_GFN(paddr >> PAGE_SHIFT, op));
VMGEXIT();
@@ -185,6 +185,9 @@ static void __page_state_change(unsigned long paddr, enum psc_op op)
if ((GHCB_RESP_CODE(val) != GHCB_MSR_PSC_RESP) || GHCB_MSR_PSC_RESP_VAL(val))
sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
+ /* Restore the GHCB MSR value */
+ sev_es_wr_ghcb_msr(msr);
+
/*
* Now that page state is changed in the RMP table, validate it so that it is
* consistent with the RMP entry.
@@ -195,11 +198,17 @@ static void __page_state_change(unsigned long paddr, enum psc_op op)
void snp_set_page_private(unsigned long paddr)
{
+ if (!sev_snp_enabled())
+ return;
+
__page_state_change(paddr, SNP_PAGE_STATE_PRIVATE);
}
void snp_set_page_shared(unsigned long paddr)
{
+ if (!sev_snp_enabled())
+ return;
+
__page_state_change(paddr, SNP_PAGE_STATE_SHARED);
}
@@ -223,56 +232,10 @@ static bool early_setup_ghcb(void)
return true;
}
-static phys_addr_t __snp_accept_memory(struct snp_psc_desc *desc,
- phys_addr_t pa, phys_addr_t pa_end)
-{
- struct psc_hdr *hdr;
- struct psc_entry *e;
- unsigned int i;
-
- hdr = &desc->hdr;
- memset(hdr, 0, sizeof(*hdr));
-
- e = desc->entries;
-
- i = 0;
- while (pa < pa_end && i < VMGEXIT_PSC_MAX_ENTRY) {
- hdr->end_entry = i;
-
- e->gfn = pa >> PAGE_SHIFT;
- e->operation = SNP_PAGE_STATE_PRIVATE;
- if (IS_ALIGNED(pa, PMD_SIZE) && (pa_end - pa) >= PMD_SIZE) {
- e->pagesize = RMP_PG_SIZE_2M;
- pa += PMD_SIZE;
- } else {
- e->pagesize = RMP_PG_SIZE_4K;
- pa += PAGE_SIZE;
- }
-
- e++;
- i++;
- }
-
- if (vmgexit_psc(boot_ghcb, desc))
- sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
-
- pvalidate_pages(desc);
-
- return pa;
-}
-
void snp_accept_memory(phys_addr_t start, phys_addr_t end)
{
- struct snp_psc_desc desc = {};
- unsigned int i;
- phys_addr_t pa;
-
- if (!boot_ghcb && !early_setup_ghcb())
- sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
-
- pa = start;
- while (pa < end)
- pa = __snp_accept_memory(&desc, pa, end);
+ for (phys_addr_t pa = start; pa < end; pa += PAGE_SIZE)
+ __page_state_change(pa, SNP_PAGE_STATE_PRIVATE);
}
void sev_es_shutdown_ghcb(void)
diff --git a/arch/x86/boot/compressed/sev.h b/arch/x86/boot/compressed/sev.h
index fc725a981b09..4e463f33186d 100644
--- a/arch/x86/boot/compressed/sev.h
+++ b/arch/x86/boot/compressed/sev.h
@@ -12,11 +12,13 @@
bool sev_snp_enabled(void);
void snp_accept_memory(phys_addr_t start, phys_addr_t end);
+u64 sev_get_status(void);
#else
static inline bool sev_snp_enabled(void) { return false; }
static inline void snp_accept_memory(phys_addr_t start, phys_addr_t end) { }
+static inline u64 sev_get_status(void) { return 0; }
#endif
diff --git a/arch/x86/entry/entry.S b/arch/x86/entry/entry.S
index d3caa31240ed..175958b02f2b 100644
--- a/arch/x86/entry/entry.S
+++ b/arch/x86/entry/entry.S
@@ -17,19 +17,20 @@
.pushsection .noinstr.text, "ax"
-SYM_FUNC_START(entry_ibpb)
+/* Clobbers AX, CX, DX */
+SYM_FUNC_START(write_ibpb)
ANNOTATE_NOENDBR
movl $MSR_IA32_PRED_CMD, %ecx
- movl $PRED_CMD_IBPB, %eax
+ movl _ASM_RIP(x86_pred_cmd), %eax
xorl %edx, %edx
wrmsr
/* Make sure IBPB clears return stack preductions too. */
FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_BUG_IBPB_NO_RET
RET
-SYM_FUNC_END(entry_ibpb)
+SYM_FUNC_END(write_ibpb)
/* For KVM */
-EXPORT_SYMBOL_GPL(entry_ibpb);
+EXPORT_SYMBOL_GPL(write_ibpb);
.popsection
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 09d2d66c9f21..00dfe487bd00 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3049,7 +3049,6 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int bit;
int handled = 0;
- u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
inc_irq_stat(apic_perf_irqs);
@@ -3093,7 +3092,6 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
handled++;
x86_pmu_handle_guest_pebs(regs, &data);
static_call(x86_pmu_drain_pebs)(regs, &data);
- status &= intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI;
/*
* PMI throttle may be triggered, which stops the PEBS event.
@@ -3104,6 +3102,15 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
*/
if (pebs_enabled != cpuc->pebs_enabled)
wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
+
+ /*
+ * Above PEBS handler (PEBS counters snapshotting) has updated fixed
+ * counter 3 and perf metrics counts if they are in counter group,
+ * unnecessary to update again.
+ */
+ if (cpuc->events[INTEL_PMC_IDX_FIXED_SLOTS] &&
+ is_pebs_counter_event_group(cpuc->events[INTEL_PMC_IDX_FIXED_SLOTS]))
+ status &= ~GLOBAL_STATUS_PERF_METRICS_OVF_BIT;
}
/*
@@ -3123,6 +3130,8 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
static_call(intel_pmu_update_topdown_event)(NULL, NULL);
}
+ status &= hybrid(cpuc->pmu, intel_ctrl);
+
/*
* Checkpointed counters can lead to 'spurious' PMIs because the
* rollback caused by the PMI will have cleared the overflow status
@@ -7305,8 +7314,17 @@ __init int intel_pmu_init(void)
name = "meteorlake_hybrid";
break;
+ case INTEL_PANTHERLAKE_L:
+ pr_cont("Pantherlake Hybrid events, ");
+ name = "pantherlake_hybrid";
+ goto lnl_common;
+
case INTEL_LUNARLAKE_M:
case INTEL_ARROWLAKE:
+ pr_cont("Lunarlake Hybrid events, ");
+ name = "lunarlake_hybrid";
+
+ lnl_common:
intel_pmu_init_hybrid(hybrid_big_small);
x86_pmu.pebs_latency_data = lnl_latency_data;
@@ -7328,8 +7346,6 @@ __init int intel_pmu_init(void)
intel_pmu_init_skt(&pmu->pmu);
intel_pmu_pebs_data_source_lnl();
- pr_cont("Lunarlake Hybrid events, ");
- name = "lunarlake_hybrid";
break;
case INTEL_ARROWLAKE_H:
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 1f7e1a692a7a..18c3ab579b8b 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1399,8 +1399,10 @@ static u64 pebs_update_adaptive_cfg(struct perf_event *event)
* + precise_ip < 2 for the non event IP
* + For RTM TSX weight we need GPRs for the abort code.
*/
- gprs = (sample_type & PERF_SAMPLE_REGS_INTR) &&
- (attr->sample_regs_intr & PEBS_GP_REGS);
+ gprs = ((sample_type & PERF_SAMPLE_REGS_INTR) &&
+ (attr->sample_regs_intr & PEBS_GP_REGS)) ||
+ ((sample_type & PERF_SAMPLE_REGS_USER) &&
+ (attr->sample_regs_user & PEBS_GP_REGS));
tsx_weight = (sample_type & PERF_SAMPLE_WEIGHT_TYPE) &&
((attr->config & INTEL_ARCH_EVENT_MASK) ==
@@ -2123,7 +2125,7 @@ static void setup_pebs_adaptive_sample_data(struct perf_event *event,
regs->flags &= ~PERF_EFLAGS_EXACT;
}
- if (sample_type & PERF_SAMPLE_REGS_INTR)
+ if (sample_type & (PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER))
adaptive_pebs_save_regs(regs, gprs);
}
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index 60973c209c0e..76d96df1475a 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -4891,28 +4891,28 @@ static struct uncore_event_desc snr_uncore_iio_freerunning_events[] = {
INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"),
/* Free-Running IIO BANDWIDTH IN Counters */
INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"),
+ INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.0517578125e-5"),
INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"),
+ INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.0517578125e-5"),
INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"),
+ INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.0517578125e-5"),
INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"),
+ INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.0517578125e-5"),
INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port4, "event=0xff,umask=0x24"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.814697266e-6"),
+ INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.0517578125e-5"),
INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port5, "event=0xff,umask=0x25"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.814697266e-6"),
+ INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.0517578125e-5"),
INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port6, "event=0xff,umask=0x26"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.814697266e-6"),
+ INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.0517578125e-5"),
INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port7, "event=0xff,umask=0x27"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.814697266e-6"),
+ INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.0517578125e-5"),
INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit, "MiB"),
{ /* end: all zeroes */ },
};
@@ -5485,37 +5485,6 @@ static struct freerunning_counters icx_iio_freerunning[] = {
[ICX_IIO_MSR_BW_IN] = { 0xaa0, 0x1, 0x10, 8, 48, icx_iio_bw_freerunning_box_offsets },
};
-static struct uncore_event_desc icx_uncore_iio_freerunning_events[] = {
- /* Free-Running IIO CLOCKS Counter */
- INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"),
- /* Free-Running IIO BANDWIDTH IN Counters */
- INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port4, "event=0xff,umask=0x24"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port5, "event=0xff,umask=0x25"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port6, "event=0xff,umask=0x26"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port7, "event=0xff,umask=0x27"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit, "MiB"),
- { /* end: all zeroes */ },
-};
-
static struct intel_uncore_type icx_uncore_iio_free_running = {
.name = "iio_free_running",
.num_counters = 9,
@@ -5523,7 +5492,7 @@ static struct intel_uncore_type icx_uncore_iio_free_running = {
.num_freerunning_types = ICX_IIO_FREERUNNING_TYPE_MAX,
.freerunning = icx_iio_freerunning,
.ops = &skx_uncore_iio_freerunning_ops,
- .event_descs = icx_uncore_iio_freerunning_events,
+ .event_descs = snr_uncore_iio_freerunning_events,
.format_group = &skx_uncore_iio_freerunning_format_group,
};
@@ -6320,69 +6289,13 @@ static struct freerunning_counters spr_iio_freerunning[] = {
[SPR_IIO_MSR_BW_OUT] = { 0x3808, 0x1, 0x10, 8, 48 },
};
-static struct uncore_event_desc spr_uncore_iio_freerunning_events[] = {
- /* Free-Running IIO CLOCKS Counter */
- INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"),
- /* Free-Running IIO BANDWIDTH IN Counters */
- INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port4, "event=0xff,umask=0x24"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port5, "event=0xff,umask=0x25"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port6, "event=0xff,umask=0x26"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port7, "event=0xff,umask=0x27"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit, "MiB"),
- /* Free-Running IIO BANDWIDTH OUT Counters */
- INTEL_UNCORE_EVENT_DESC(bw_out_port0, "event=0xff,umask=0x30"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port1, "event=0xff,umask=0x31"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port2, "event=0xff,umask=0x32"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port3, "event=0xff,umask=0x33"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port4, "event=0xff,umask=0x34"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port4.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port4.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port5, "event=0xff,umask=0x35"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port5.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port5.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port6, "event=0xff,umask=0x36"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port6.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port6.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port7, "event=0xff,umask=0x37"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port7.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port7.unit, "MiB"),
- { /* end: all zeroes */ },
-};
-
static struct intel_uncore_type spr_uncore_iio_free_running = {
.name = "iio_free_running",
.num_counters = 17,
.num_freerunning_types = SPR_IIO_FREERUNNING_TYPE_MAX,
.freerunning = spr_iio_freerunning,
.ops = &skx_uncore_iio_freerunning_ops,
- .event_descs = spr_uncore_iio_freerunning_events,
+ .event_descs = snr_uncore_iio_freerunning_events,
.format_group = &skx_uncore_iio_freerunning_format_group,
};
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index 3a97a7eefb51..be10c188614f 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -126,6 +126,8 @@
#define INTEL_GRANITERAPIDS_X IFM(6, 0xAD) /* Redwood Cove */
#define INTEL_GRANITERAPIDS_D IFM(6, 0xAE)
+#define INTEL_BARTLETTLAKE IFM(6, 0xD7) /* Raptor Cove */
+
/* "Hybrid" Processors (P-Core/E-Core) */
#define INTEL_LAKEFIELD IFM(6, 0x8A) /* Sunny Cove / Tremont */
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index a884ab544335..3bdae454a959 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1472,8 +1472,13 @@ struct kvm_arch {
struct once nx_once;
#ifdef CONFIG_X86_64
- /* The number of TDP MMU pages across all roots. */
+#ifdef CONFIG_KVM_PROVE_MMU
+ /*
+ * The number of TDP MMU pages across all roots. Used only to sanity
+ * check that KVM isn't leaking TDP MMU pages.
+ */
atomic64_t tdp_mmu_pages;
+#endif
/*
* List of struct kvm_mmu_pages being used as roots.
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 8a5cc8e70439..5c43f145454d 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -269,7 +269,7 @@
* typically has NO_MELTDOWN).
*
* While retbleed_untrain_ret() doesn't clobber anything but requires stack,
- * entry_ibpb() will clobber AX, CX, DX.
+ * write_ibpb() will clobber AX, CX, DX.
*
* As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point
* where we have a stack but before any RET instruction.
@@ -279,7 +279,7 @@
VALIDATE_UNRET_END
CALL_UNTRAIN_RET
ALTERNATIVE_2 "", \
- "call entry_ibpb", \ibpb_feature, \
+ "call write_ibpb", \ibpb_feature, \
__stringify(\call_depth_insns), X86_FEATURE_CALL_DEPTH
#endif
.endm
@@ -368,7 +368,7 @@ extern void srso_return_thunk(void);
extern void srso_alias_return_thunk(void);
extern void entry_untrain_ret(void);
-extern void entry_ibpb(void);
+extern void write_ibpb(void);
#ifdef CONFIG_X86_64
extern void clear_bhb_loop(void);
@@ -514,11 +514,11 @@ void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
: "memory");
}
-extern u64 x86_pred_cmd;
-
static inline void indirect_branch_prediction_barrier(void)
{
- alternative_msr_write(MSR_IA32_PRED_CMD, x86_pred_cmd, X86_FEATURE_IBPB);
+ asm_inline volatile(ALTERNATIVE("", "call write_ibpb", X86_FEATURE_IBPB)
+ : ASM_CALL_CONSTRAINT
+ :: "rax", "rcx", "rdx", "memory");
}
/* The Intel SPEC CTRL MSR base value cache */
diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
index 55a5e656e4b9..4f84d421d1cf 100644
--- a/arch/x86/include/asm/smap.h
+++ b/arch/x86/include/asm/smap.h
@@ -16,23 +16,23 @@
#ifdef __ASSEMBLER__
#define ASM_CLAC \
- ALTERNATIVE __stringify(ANNOTATE_IGNORE_ALTERNATIVE), "clac", X86_FEATURE_SMAP
+ ALTERNATIVE "", "clac", X86_FEATURE_SMAP
#define ASM_STAC \
- ALTERNATIVE __stringify(ANNOTATE_IGNORE_ALTERNATIVE), "stac", X86_FEATURE_SMAP
+ ALTERNATIVE "", "stac", X86_FEATURE_SMAP
#else /* __ASSEMBLER__ */
static __always_inline void clac(void)
{
/* Note: a barrier is implicit in alternative() */
- alternative(ANNOTATE_IGNORE_ALTERNATIVE "", "clac", X86_FEATURE_SMAP);
+ alternative("", "clac", X86_FEATURE_SMAP);
}
static __always_inline void stac(void)
{
/* Note: a barrier is implicit in alternative() */
- alternative(ANNOTATE_IGNORE_ALTERNATIVE "", "stac", X86_FEATURE_SMAP);
+ alternative("", "stac", X86_FEATURE_SMAP);
}
static __always_inline unsigned long smap_save(void)
@@ -59,9 +59,9 @@ static __always_inline void smap_restore(unsigned long flags)
/* These macros can be used in asm() statements */
#define ASM_CLAC \
- ALTERNATIVE(ANNOTATE_IGNORE_ALTERNATIVE "", "clac", X86_FEATURE_SMAP)
+ ALTERNATIVE("", "clac", X86_FEATURE_SMAP)
#define ASM_STAC \
- ALTERNATIVE(ANNOTATE_IGNORE_ALTERNATIVE "", "stac", X86_FEATURE_SMAP)
+ ALTERNATIVE("", "stac", X86_FEATURE_SMAP)
#define ASM_CLAC_UNSAFE \
ALTERNATIVE("", ANNOTATE_IGNORE_ALTERNATIVE "clac", X86_FEATURE_SMAP)
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index dae6a73be40e..9fa321a95eb3 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -23,6 +23,8 @@
#include <linux/serial_core.h>
#include <linux/pgtable.h>
+#include <xen/xen.h>
+
#include <asm/e820/api.h>
#include <asm/irqdomain.h>
#include <asm/pci_x86.h>
@@ -1729,6 +1731,15 @@ int __init acpi_mps_check(void)
{
#if defined(CONFIG_X86_LOCAL_APIC) && !defined(CONFIG_X86_MPPARSE)
/* mptable code is not built-in*/
+
+ /*
+ * Xen disables ACPI in PV DomU guests but it still emulates APIC and
+ * supports SMP. Returning early here ensures that APIC is not disabled
+ * unnecessarily and the guest is not limited to a single vCPU.
+ */
+ if (xen_pv_domain() && !xen_initial_domain())
+ return 0;
+
if (acpi_disabled || acpi_noirq) {
pr_warn("MPS support code is not built-in, using acpi=off or acpi=noirq or pci=noacpi may have problem\n");
return 1;
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 79569f72b8ee..2b36379ff675 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -805,6 +805,7 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
static const struct x86_cpu_id erratum_1386_microcode[] = {
X86_MATCH_VFM_STEPS(VFM_MAKE(X86_VENDOR_AMD, 0x17, 0x01), 0x2, 0x2, 0x0800126e),
X86_MATCH_VFM_STEPS(VFM_MAKE(X86_VENDOR_AMD, 0x17, 0x31), 0x0, 0x0, 0x08301052),
+ {}
};
static void fix_erratum_1386(struct cpuinfo_x86 *c)
@@ -868,6 +869,16 @@ static void init_amd_zen1(struct cpuinfo_x86 *c)
pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n");
setup_force_cpu_bug(X86_BUG_DIV0);
+
+ /*
+ * Turn off the Instructions Retired free counter on machines that are
+ * susceptible to erratum #1054 "Instructions Retired Performance
+ * Counter May Be Inaccurate".
+ */
+ if (c->x86_model < 0x30) {
+ msr_clear_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);
+ clear_cpu_cap(c, X86_FEATURE_IRPERF);
+ }
}
static bool cpu_has_zenbleed_microcode(void)
@@ -1051,13 +1062,8 @@ static void init_amd(struct cpuinfo_x86 *c)
if (!cpu_feature_enabled(X86_FEATURE_XENPV))
set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
- /*
- * Turn on the Instructions Retired free counter on machines not
- * susceptible to erratum #1054 "Instructions Retired Performance
- * Counter May Be Inaccurate".
- */
- if (cpu_has(c, X86_FEATURE_IRPERF) &&
- (boot_cpu_has(X86_FEATURE_ZEN1) && c->x86_model > 0x2f))
+ /* Enable the Instructions Retired free counter */
+ if (cpu_has(c, X86_FEATURE_IRPERF))
msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);
check_null_seg_clears_base(c);
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 4386aa6c69e1..362602b705cc 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -59,7 +59,6 @@ DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
EXPORT_PER_CPU_SYMBOL_GPL(x86_spec_ctrl_current);
u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
-EXPORT_SYMBOL_GPL(x86_pred_cmd);
static u64 __ro_after_init x86_arch_cap_msr;
@@ -1142,7 +1141,7 @@ do_cmd_auto:
setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
/*
- * There is no need for RSB filling: entry_ibpb() ensures
+ * There is no need for RSB filling: write_ibpb() ensures
* all predictions, including the RSB, are invalidated,
* regardless of IBPB implementation.
*/
@@ -1592,51 +1591,54 @@ static void __init spec_ctrl_disable_kernel_rrsba(void)
rrsba_disabled = true;
}
-static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_mitigation mode)
+static void __init spectre_v2_select_rsb_mitigation(enum spectre_v2_mitigation mode)
{
/*
- * Similar to context switches, there are two types of RSB attacks
- * after VM exit:
+ * WARNING! There are many subtleties to consider when changing *any*
+ * code related to RSB-related mitigations. Before doing so, carefully
+ * read the following document, and update if necessary:
*
- * 1) RSB underflow
+ * Documentation/admin-guide/hw-vuln/rsb.rst
*
- * 2) Poisoned RSB entry
+ * In an overly simplified nutshell:
*
- * When retpoline is enabled, both are mitigated by filling/clearing
- * the RSB.
+ * - User->user RSB attacks are conditionally mitigated during
+ * context switches by cond_mitigation -> write_ibpb().
*
- * When IBRS is enabled, while #1 would be mitigated by the IBRS branch
- * prediction isolation protections, RSB still needs to be cleared
- * because of #2. Note that SMEP provides no protection here, unlike
- * user-space-poisoned RSB entries.
+ * - User->kernel and guest->host attacks are mitigated by eIBRS or
+ * RSB filling.
*
- * eIBRS should protect against RSB poisoning, but if the EIBRS_PBRSB
- * bug is present then a LITE version of RSB protection is required,
- * just a single call needs to retire before a RET is executed.
+ * Though, depending on config, note that other alternative
+ * mitigations may end up getting used instead, e.g., IBPB on
+ * entry/vmexit, call depth tracking, or return thunks.
*/
+
switch (mode) {
case SPECTRE_V2_NONE:
- return;
+ break;
- case SPECTRE_V2_EIBRS_LFENCE:
case SPECTRE_V2_EIBRS:
+ case SPECTRE_V2_EIBRS_LFENCE:
+ case SPECTRE_V2_EIBRS_RETPOLINE:
if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
- setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE);
pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n");
+ setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE);
}
- return;
+ break;
- case SPECTRE_V2_EIBRS_RETPOLINE:
case SPECTRE_V2_RETPOLINE:
case SPECTRE_V2_LFENCE:
case SPECTRE_V2_IBRS:
+ pr_info("Spectre v2 / SpectreRSB: Filling RSB on context switch and VMEXIT\n");
+ setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT);
- pr_info("Spectre v2 / SpectreRSB : Filling RSB on VMEXIT\n");
- return;
- }
+ break;
- pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation at VM exit");
- dump_stack();
+ default:
+ pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation\n");
+ dump_stack();
+ break;
+ }
}
/*
@@ -1830,48 +1832,7 @@ static void __init spectre_v2_select_mitigation(void)
spectre_v2_enabled = mode;
pr_info("%s\n", spectre_v2_strings[mode]);
- /*
- * If Spectre v2 protection has been enabled, fill the RSB during a
- * context switch. In general there are two types of RSB attacks
- * across context switches, for which the CALLs/RETs may be unbalanced.
- *
- * 1) RSB underflow
- *
- * Some Intel parts have "bottomless RSB". When the RSB is empty,
- * speculated return targets may come from the branch predictor,
- * which could have a user-poisoned BTB or BHB entry.
- *
- * AMD has it even worse: *all* returns are speculated from the BTB,
- * regardless of the state of the RSB.
- *
- * When IBRS or eIBRS is enabled, the "user -> kernel" attack
- * scenario is mitigated by the IBRS branch prediction isolation
- * properties, so the RSB buffer filling wouldn't be necessary to
- * protect against this type of attack.
- *
- * The "user -> user" attack scenario is mitigated by RSB filling.
- *
- * 2) Poisoned RSB entry
- *
- * If the 'next' in-kernel return stack is shorter than 'prev',
- * 'next' could be tricked into speculating with a user-poisoned RSB
- * entry.
- *
- * The "user -> kernel" attack scenario is mitigated by SMEP and
- * eIBRS.
- *
- * The "user -> user" scenario, also known as SpectreBHB, requires
- * RSB clearing.
- *
- * So to mitigate all cases, unconditionally fill RSB on context
- * switches.
- *
- * FIXME: Is this pointless for retbleed-affected AMD?
- */
- setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
- pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
-
- spectre_v2_determine_rsb_fill_type_at_vmexit(mode);
+ spectre_v2_select_rsb_mitigation(mode);
/*
* Retpoline protects the kernel, but doesn't protect firmware. IBRS
@@ -2676,7 +2637,7 @@ static void __init srso_select_mitigation(void)
setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
/*
- * There is no need for RSB filling: entry_ibpb() ensures
+ * There is no need for RSB filling: write_ibpb() ensures
* all predictions, including the RSB, are invalidated,
* regardless of IBPB implementation.
*/
@@ -2701,7 +2662,7 @@ ibpb_on_vmexit:
srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
/*
- * There is no need for RSB filling: entry_ibpb() ensures
+ * There is no need for RSB filling: write_ibpb() ensures
* all predictions, including the RSB, are invalidated,
* regardless of IBPB implementation.
*/
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index b61028cf5c8a..4a10d35e70aa 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -199,6 +199,12 @@ static bool need_sha_check(u32 cur_rev)
case 0xa70c0: return cur_rev <= 0xa70C009; break;
case 0xaa001: return cur_rev <= 0xaa00116; break;
case 0xaa002: return cur_rev <= 0xaa00218; break;
+ case 0xb0021: return cur_rev <= 0xb002146; break;
+ case 0xb1010: return cur_rev <= 0xb101046; break;
+ case 0xb2040: return cur_rev <= 0xb204031; break;
+ case 0xb4040: return cur_rev <= 0xb404031; break;
+ case 0xb6000: return cur_rev <= 0xb600031; break;
+ case 0xb7000: return cur_rev <= 0xb700031; break;
default: break;
}
@@ -214,8 +220,7 @@ static bool verify_sha256_digest(u32 patch_id, u32 cur_rev, const u8 *data, unsi
struct sha256_state s;
int i;
- if (x86_family(bsp_cpuid_1_eax) < 0x17 ||
- x86_family(bsp_cpuid_1_eax) > 0x19)
+ if (x86_family(bsp_cpuid_1_eax) < 0x17)
return true;
if (!need_sha_check(cur_rev))
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index 93ec829015f1..cc4a54145c83 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -3553,6 +3553,22 @@ static void mkdir_rdt_prepare_rmid_free(struct rdtgroup *rgrp)
free_rmid(rgrp->closid, rgrp->mon.rmid);
}
+/*
+ * We allow creating mon groups only with in a directory called "mon_groups"
+ * which is present in every ctrl_mon group. Check if this is a valid
+ * "mon_groups" directory.
+ *
+ * 1. The directory should be named "mon_groups".
+ * 2. The mon group itself should "not" be named "mon_groups".
+ * This makes sure "mon_groups" directory always has a ctrl_mon group
+ * as parent.
+ */
+static bool is_mon_groups(struct kernfs_node *kn, const char *name)
+{
+ return (!strcmp(rdt_kn_name(kn), "mon_groups") &&
+ strcmp(name, "mon_groups"));
+}
+
static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
const char *name, umode_t mode,
enum rdt_group_type rtype, struct rdtgroup **r)
@@ -3568,6 +3584,15 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
goto out_unlock;
}
+ /*
+ * Check that the parent directory for a monitor group is a "mon_groups"
+ * directory.
+ */
+ if (rtype == RDTMON_GROUP && !is_mon_groups(parent_kn, name)) {
+ ret = -EPERM;
+ goto out_unlock;
+ }
+
if (rtype == RDTMON_GROUP &&
(prdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
prdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)) {
@@ -3751,22 +3776,6 @@ out_unlock:
return ret;
}
-/*
- * We allow creating mon groups only with in a directory called "mon_groups"
- * which is present in every ctrl_mon group. Check if this is a valid
- * "mon_groups" directory.
- *
- * 1. The directory should be named "mon_groups".
- * 2. The mon group itself should "not" be named "mon_groups".
- * This makes sure "mon_groups" directory always has a ctrl_mon group
- * as parent.
- */
-static bool is_mon_groups(struct kernfs_node *kn, const char *name)
-{
- return (!strcmp(rdt_kn_name(kn), "mon_groups") &&
- strcmp(name, "mon_groups"));
-}
-
static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
umode_t mode)
{
@@ -3782,11 +3791,8 @@ static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
if (resctrl_arch_alloc_capable() && parent_kn == rdtgroup_default.kn)
return rdtgroup_mkdir_ctrl_mon(parent_kn, name, mode);
- /*
- * If RDT monitoring is supported and the parent directory is a valid
- * "mon_groups" directory, add a monitoring subdirectory.
- */
- if (resctrl_arch_mon_capable() && is_mon_groups(parent_kn, name))
+ /* Else, attempt to add a monitoring subdirectory. */
+ if (resctrl_arch_mon_capable())
return rdtgroup_mkdir_mon(parent_kn, name, mode);
return -EPERM;
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 57120f0749cc..9d8dd8deb2a7 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -753,22 +753,21 @@ void __init e820__memory_setup_extended(u64 phys_addr, u32 data_len)
void __init e820__register_nosave_regions(unsigned long limit_pfn)
{
int i;
- unsigned long pfn = 0;
+ u64 last_addr = 0;
for (i = 0; i < e820_table->nr_entries; i++) {
struct e820_entry *entry = &e820_table->entries[i];
- if (pfn < PFN_UP(entry->addr))
- register_nosave_region(pfn, PFN_UP(entry->addr));
-
- pfn = PFN_DOWN(entry->addr + entry->size);
-
if (entry->type != E820_TYPE_RAM)
- register_nosave_region(PFN_UP(entry->addr), pfn);
+ continue;
- if (pfn >= limit_pfn)
- break;
+ if (last_addr < entry->addr)
+ register_nosave_region(PFN_DOWN(last_addr), PFN_UP(entry->addr));
+
+ last_addr = entry->addr + entry->size;
}
+
+ register_nosave_region(PFN_DOWN(last_addr), limit_pfn);
}
#ifdef CONFIG_ACPI
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
index 611f27e3890c..3aad78bfcb26 100644
--- a/arch/x86/kernel/early_printk.c
+++ b/arch/x86/kernel/early_printk.c
@@ -389,10 +389,10 @@ static int __init setup_early_printk(char *buf)
keep = (strstr(buf, "keep") != NULL);
while (*buf != '\0') {
- if (!strncmp(buf, "mmio", 4)) {
- early_mmio_serial_init(buf + 4);
+ if (!strncmp(buf, "mmio32", 6)) {
+ buf += 6;
+ early_mmio_serial_init(buf);
early_console_register(&early_serial_console, keep);
- buf += 4;
}
if (!strncmp(buf, "serial", 6)) {
buf += 6;
@@ -407,9 +407,9 @@ static int __init setup_early_printk(char *buf)
}
#ifdef CONFIG_PCI
if (!strncmp(buf, "pciserial", 9)) {
- early_pci_serial_init(buf + 9);
+ buf += 9; /* Keep from match the above "pciserial" */
+ early_pci_serial_init(buf);
early_console_register(&early_serial_console, keep);
- buf += 9; /* Keep from match the above "serial" */
}
#endif
if (!strncmp(buf, "vga", 3) &&
diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c
index 80e262bb627f..cb9852ad6098 100644
--- a/arch/x86/kernel/i8253.c
+++ b/arch/x86/kernel/i8253.c
@@ -46,7 +46,8 @@ bool __init pit_timer_init(void)
* VMMs otherwise steal CPU time just to pointlessly waggle
* the (masked) IRQ.
*/
- clockevent_i8253_disable();
+ scoped_guard(irq)
+ clockevent_i8253_disable();
return false;
}
clockevent_i8253_init(true);
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 5e4d4934c0d3..571c906ffcbf 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -1427,8 +1427,8 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
}
break;
case 0xa: { /* Architectural Performance Monitoring */
- union cpuid10_eax eax;
- union cpuid10_edx edx;
+ union cpuid10_eax eax = { };
+ union cpuid10_edx edx = { };
if (!enable_pmu || !static_cpu_has(X86_FEATURE_ARCH_PERFMON)) {
entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
@@ -1444,8 +1444,6 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
if (kvm_pmu_cap.version)
edx.split.anythread_deprecated = 1;
- edx.split.reserved1 = 0;
- edx.split.reserved2 = 0;
entry->eax = eax.full;
entry->ebx = kvm_pmu_cap.events_mask;
@@ -1763,7 +1761,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
break;
/* AMD Extended Performance Monitoring and Debug */
case 0x80000022: {
- union cpuid_0x80000022_ebx ebx;
+ union cpuid_0x80000022_ebx ebx = { };
entry->ecx = entry->edx = 0;
if (!enable_pmu || !kvm_cpu_cap_has(X86_FEATURE_PERFMON_V2)) {
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 7cc0564f5f97..21a3b8166242 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -40,7 +40,9 @@ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
kvm_tdp_mmu_invalidate_roots(kvm, KVM_VALID_ROOTS);
kvm_tdp_mmu_zap_invalidated_roots(kvm, false);
- WARN_ON(atomic64_read(&kvm->arch.tdp_mmu_pages));
+#ifdef CONFIG_KVM_PROVE_MMU
+ KVM_MMU_WARN_ON(atomic64_read(&kvm->arch.tdp_mmu_pages));
+#endif
WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
/*
@@ -325,13 +327,17 @@ static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
static void tdp_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
kvm_account_pgtable_pages((void *)sp->spt, +1);
+#ifdef CONFIG_KVM_PROVE_MMU
atomic64_inc(&kvm->arch.tdp_mmu_pages);
+#endif
}
static void tdp_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
kvm_account_pgtable_pages((void *)sp->spt, -1);
+#ifdef CONFIG_KVM_PROVE_MMU
atomic64_dec(&kvm->arch.tdp_mmu_pages);
+#endif
}
/**
diff --git a/arch/x86/kvm/vmx/posted_intr.c b/arch/x86/kvm/vmx/posted_intr.c
index ec08fa3caf43..51116fe69a50 100644
--- a/arch/x86/kvm/vmx/posted_intr.c
+++ b/arch/x86/kvm/vmx/posted_intr.c
@@ -31,6 +31,8 @@ static DEFINE_PER_CPU(struct list_head, wakeup_vcpus_on_cpu);
*/
static DEFINE_PER_CPU(raw_spinlock_t, wakeup_vcpus_on_cpu_lock);
+#define PI_LOCK_SCHED_OUT SINGLE_DEPTH_NESTING
+
static inline struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
{
return &(to_vmx(vcpu)->pi_desc);
@@ -89,9 +91,20 @@ void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
* current pCPU if the task was migrated.
*/
if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR) {
- raw_spin_lock(&per_cpu(wakeup_vcpus_on_cpu_lock, vcpu->cpu));
+ raw_spinlock_t *spinlock = &per_cpu(wakeup_vcpus_on_cpu_lock, vcpu->cpu);
+
+ /*
+ * In addition to taking the wakeup lock for the regular/IRQ
+ * context, tell lockdep it is being taken for the "sched out"
+ * context as well. vCPU loads happens in task context, and
+ * this is taking the lock of the *previous* CPU, i.e. can race
+ * with both the scheduler and the wakeup handler.
+ */
+ raw_spin_lock(spinlock);
+ spin_acquire(&spinlock->dep_map, PI_LOCK_SCHED_OUT, 0, _RET_IP_);
list_del(&vmx->pi_wakeup_list);
- raw_spin_unlock(&per_cpu(wakeup_vcpus_on_cpu_lock, vcpu->cpu));
+ spin_release(&spinlock->dep_map, _RET_IP_);
+ raw_spin_unlock(spinlock);
}
dest = cpu_physical_id(cpu);
@@ -148,11 +161,23 @@ static void pi_enable_wakeup_handler(struct kvm_vcpu *vcpu)
struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct pi_desc old, new;
- unsigned long flags;
- local_irq_save(flags);
+ lockdep_assert_irqs_disabled();
- raw_spin_lock(&per_cpu(wakeup_vcpus_on_cpu_lock, vcpu->cpu));
+ /*
+ * Acquire the wakeup lock using the "sched out" context to workaround
+ * a lockdep false positive. When this is called, schedule() holds
+ * various per-CPU scheduler locks. When the wakeup handler runs, it
+ * holds this CPU's wakeup lock while calling try_to_wake_up(), which
+ * can eventually take the aforementioned scheduler locks, which causes
+ * lockdep to assume there is deadlock.
+ *
+ * Deadlock can't actually occur because IRQs are disabled for the
+ * entirety of the sched_out critical section, i.e. the wakeup handler
+ * can't run while the scheduler locks are held.
+ */
+ raw_spin_lock_nested(&per_cpu(wakeup_vcpus_on_cpu_lock, vcpu->cpu),
+ PI_LOCK_SCHED_OUT);
list_add_tail(&vmx->pi_wakeup_list,
&per_cpu(wakeup_vcpus_on_cpu, vcpu->cpu));
raw_spin_unlock(&per_cpu(wakeup_vcpus_on_cpu_lock, vcpu->cpu));
@@ -176,8 +201,6 @@ static void pi_enable_wakeup_handler(struct kvm_vcpu *vcpu)
*/
if (pi_test_on(&new))
__apic_send_IPI_self(POSTED_INTR_WAKEUP_VECTOR);
-
- local_irq_restore(flags);
}
static bool vmx_needs_pi_wakeup(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c841817a914a..3712dde0bf9d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -11786,6 +11786,8 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
if (kvm_mpx_supported())
kvm_load_guest_fpu(vcpu);
+ kvm_vcpu_srcu_read_lock(vcpu);
+
r = kvm_apic_accept_events(vcpu);
if (r < 0)
goto out;
@@ -11799,6 +11801,8 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
mp_state->mp_state = vcpu->arch.mp_state;
out:
+ kvm_vcpu_srcu_read_unlock(vcpu);
+
if (kvm_mpx_supported())
kvm_put_guest_fpu(vcpu);
vcpu_put(vcpu);
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index e459d97ef397..eb83348f9305 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -667,9 +667,9 @@ static void cond_mitigation(struct task_struct *next)
prev_mm = this_cpu_read(cpu_tlbstate.last_user_mm_spec);
/*
- * Avoid user/user BTB poisoning by flushing the branch predictor
- * when switching between processes. This stops one process from
- * doing Spectre-v2 attacks on another.
+ * Avoid user->user BTB/RSB poisoning by flushing them when switching
+ * between processes. This stops one process from doing Spectre-v2
+ * attacks on another.
*
* Both, the conditional and the always IBPB mode use the mm
* pointer to avoid the IBPB when switching between tasks of the
diff --git a/arch/x86/power/hibernate_asm_64.S b/arch/x86/power/hibernate_asm_64.S
index 8c534c36adfa..66f066b8feda 100644
--- a/arch/x86/power/hibernate_asm_64.S
+++ b/arch/x86/power/hibernate_asm_64.S
@@ -26,7 +26,7 @@
/* code below belongs to the image kernel */
.align PAGE_SIZE
SYM_FUNC_START(restore_registers)
- ANNOTATE_NOENDBR
+ ENDBR
/* go back to the original page tables */
movq %r9, %cr3
@@ -120,7 +120,7 @@ SYM_FUNC_END(restore_image)
/* code below has been relocated to a safe page */
SYM_FUNC_START(core_restore_code)
- ANNOTATE_NOENDBR
+ ENDBR
/* switch to temporary page tables */
movq %rax, %cr3
/* flush TLB */
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 43dcd8c7badc..53282dc7d5ac 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -70,6 +70,9 @@ EXPORT_SYMBOL(xen_start_flags);
*/
struct shared_info *HYPERVISOR_shared_info = &xen_dummy_shared_info;
+/* Number of pages released from the initial allocation. */
+unsigned long xen_released_pages;
+
static __ref void xen_get_vendor(void)
{
init_cpu_devs();
@@ -100,10 +103,6 @@ noinstr void *__xen_hypercall_setfunc(void)
void (*func)(void);
/*
- * Xen is supported only on CPUs with CPUID, so testing for
- * X86_FEATURE_CPUID is a test for early_cpu_init() having been
- * run.
- *
* Note that __xen_hypercall_setfunc() is noinstr only due to a nasty
* dependency chain: it is being called via the xen_hypercall static
* call when running as a PVH or HVM guest. Hypercalls need to be
@@ -115,8 +114,7 @@ noinstr void *__xen_hypercall_setfunc(void)
*/
instrumentation_begin();
- if (!boot_cpu_has(X86_FEATURE_CPUID))
- xen_get_vendor();
+ xen_get_vendor();
if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
boot_cpu_data.x86_vendor == X86_VENDOR_HYGON))
@@ -466,6 +464,13 @@ int __init arch_xen_unpopulated_init(struct resource **res)
xen_free_unpopulated_pages(1, &pg);
}
+ /*
+ * Account for the region being in the physmap but unpopulated.
+ * The value in xen_released_pages is used by the balloon
+ * driver to know how much of the physmap is unpopulated and
+ * set an accurate initial memory target.
+ */
+ xen_released_pages += xen_extra_mem[i].n_pfns;
/* Zero so region is not also added to the balloon driver. */
xen_extra_mem[i].n_pfns = 0;
}
diff --git a/arch/x86/xen/enlighten_pvh.c b/arch/x86/xen/enlighten_pvh.c
index 0e3d930bcb89..9d25d9373945 100644
--- a/arch/x86/xen/enlighten_pvh.c
+++ b/arch/x86/xen/enlighten_pvh.c
@@ -1,5 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/acpi.h>
+#include <linux/cpufreq.h>
+#include <linux/cpuidle.h>
#include <linux/export.h>
#include <linux/mm.h>
@@ -123,8 +125,23 @@ static void __init pvh_arch_setup(void)
{
pvh_reserve_extra_memory();
- if (xen_initial_domain())
+ if (xen_initial_domain()) {
xen_add_preferred_consoles();
+
+ /*
+ * Disable usage of CPU idle and frequency drivers: when
+ * running as hardware domain the exposed native ACPI tables
+ * causes idle and/or frequency drivers to attach and
+ * malfunction. It's Xen the entity that controls the idle and
+ * frequency states.
+ *
+ * For unprivileged domains the exposed ACPI tables are
+ * fabricated and don't contain such data.
+ */
+ disable_cpuidle();
+ disable_cpufreq();
+ WARN_ON(xen_set_default_idle());
+ }
}
void __init xen_pvh_init(struct boot_params *boot_params)
diff --git a/arch/x86/xen/multicalls.c b/arch/x86/xen/multicalls.c
index 10c660fae8b3..7237d56a9d3f 100644
--- a/arch/x86/xen/multicalls.c
+++ b/arch/x86/xen/multicalls.c
@@ -54,14 +54,20 @@ struct mc_debug_data {
static DEFINE_PER_CPU(struct mc_buffer, mc_buffer);
static struct mc_debug_data mc_debug_data_early __initdata;
-static DEFINE_PER_CPU(struct mc_debug_data *, mc_debug_data) =
- &mc_debug_data_early;
static struct mc_debug_data __percpu *mc_debug_data_ptr;
DEFINE_PER_CPU(unsigned long, xen_mc_irq_flags);
static struct static_key mc_debug __ro_after_init;
static bool mc_debug_enabled __initdata;
+static struct mc_debug_data * __ref get_mc_debug(void)
+{
+ if (!mc_debug_data_ptr)
+ return &mc_debug_data_early;
+
+ return this_cpu_ptr(mc_debug_data_ptr);
+}
+
static int __init xen_parse_mc_debug(char *arg)
{
mc_debug_enabled = true;
@@ -71,20 +77,16 @@ static int __init xen_parse_mc_debug(char *arg)
}
early_param("xen_mc_debug", xen_parse_mc_debug);
-void mc_percpu_init(unsigned int cpu)
-{
- per_cpu(mc_debug_data, cpu) = per_cpu_ptr(mc_debug_data_ptr, cpu);
-}
-
static int __init mc_debug_enable(void)
{
unsigned long flags;
+ struct mc_debug_data __percpu *mcdb;
if (!mc_debug_enabled)
return 0;
- mc_debug_data_ptr = alloc_percpu(struct mc_debug_data);
- if (!mc_debug_data_ptr) {
+ mcdb = alloc_percpu(struct mc_debug_data);
+ if (!mcdb) {
pr_err("xen_mc_debug inactive\n");
static_key_slow_dec(&mc_debug);
return -ENOMEM;
@@ -93,7 +95,7 @@ static int __init mc_debug_enable(void)
/* Be careful when switching to percpu debug data. */
local_irq_save(flags);
xen_mc_flush();
- mc_percpu_init(0);
+ mc_debug_data_ptr = mcdb;
local_irq_restore(flags);
pr_info("xen_mc_debug active\n");
@@ -155,7 +157,7 @@ void xen_mc_flush(void)
trace_xen_mc_flush(b->mcidx, b->argidx, b->cbidx);
if (static_key_false(&mc_debug)) {
- mcdb = __this_cpu_read(mc_debug_data);
+ mcdb = get_mc_debug();
memcpy(mcdb->entries, b->entries,
b->mcidx * sizeof(struct multicall_entry));
}
@@ -235,7 +237,7 @@ struct multicall_space __xen_mc_entry(size_t args)
ret.mc = &b->entries[b->mcidx];
if (static_key_false(&mc_debug)) {
- struct mc_debug_data *mcdb = __this_cpu_read(mc_debug_data);
+ struct mc_debug_data *mcdb = get_mc_debug();
mcdb->caller[b->mcidx] = __builtin_return_address(0);
mcdb->argsz[b->mcidx] = args;
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index c3db71d96c43..3823e52aef52 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -37,9 +37,6 @@
#define GB(x) ((uint64_t)(x) * 1024 * 1024 * 1024)
-/* Number of pages released from the initial allocation. */
-unsigned long xen_released_pages;
-
/* Memory map would allow PCI passthrough. */
bool xen_pv_pci_possible;
diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
index 688ff59318ae..9bb8ff8bff30 100644
--- a/arch/x86/xen/smp_pv.c
+++ b/arch/x86/xen/smp_pv.c
@@ -305,7 +305,6 @@ static int xen_pv_kick_ap(unsigned int cpu, struct task_struct *idle)
return rc;
xen_pmu_init(cpu);
- mc_percpu_init(cpu);
/*
* Why is this a BUG? If the hypercall fails then everything can be
diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S
index 109af12f7647..461bb1526502 100644
--- a/arch/x86/xen/xen-asm.S
+++ b/arch/x86/xen/xen-asm.S
@@ -226,9 +226,7 @@ SYM_CODE_END(xen_early_idt_handler_array)
push %rax
mov $__HYPERVISOR_iret, %eax
syscall /* Do the IRET. */
-#ifdef CONFIG_MITIGATION_SLS
- int3
-#endif
+ ud2 /* The SYSCALL should never return. */
.endm
SYM_CODE_START(xen_iret)
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 63c13a2ccf55..25e318ef27d6 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -261,9 +261,6 @@ void xen_mc_callback(void (*fn)(void *), void *data);
*/
struct multicall_space xen_mc_extend_args(unsigned long op, size_t arg_size);
-/* Do percpu data initialization for multicalls. */
-void mc_percpu_init(unsigned int cpu);
-
extern bool is_xen_pmu;
irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id);