author     Nicolai Stange <nstange@suse.de>   2018-07-18 19:07:38 +0200
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>   2018-08-15 17:37:31 +0200
commit     62de9c5ed2a73c1692829e83e7aa72a73742247a (patch)
tree       96d326c01fc4e92dbec7f5945a1bd19ab78ed346
parent     b8b75ff14fb09357937e1781f5bc830ed14b7033 (diff)
x86/KVM/VMX: Initialize the vmx_l1d_flush_pages' content
commit 288d152c23dcf3c09da46c5c481903ca10ebfef7 upstream.

The slow path in vmx_l1d_flush() reads from vmx_l1d_flush_pages in order
to evict the L1d cache.

However, these pages are never cleared and, in theory, their data could be
leaked.

More importantly, KSM could merge a nested hypervisor's vmx_l1d_flush_pages
to fewer than 1 << L1D_CACHE_ORDER host physical pages and this would break
the L1d flushing algorithm: L1D on x86_64 is tagged by physical addresses.

Fix this by initializing the individual vmx_l1d_flush_pages with a
different pattern each.

Rename the "empty_zp" asm constraint identifier in vmx_l1d_flush() to
"flush_pages" to reflect this change.

Fixes: a47dd5f06714 ("x86/KVM/VMX: Add L1D flush algorithm")
Signed-off-by: Nicolai Stange <nstange@suse.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
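[Editor's note] The distinct-pattern idea can be pictured with a small userspace
sketch. This is illustrative only, not the kernel code: the function name
alloc_flush_pages is made up, and the PAGE_SIZE and L1D_CACHE_ORDER values are
assumptions chosen to mirror the patch. Each page of the flush buffer gets its
own non-zero byte pattern, so no two pages compare equal and a same-page-merging
mechanism such as KSM has nothing to collapse.

#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE        4096            /* assumed page size */
#define L1D_CACHE_ORDER  4               /* assumed order: 16 pages */

static void *alloc_flush_pages(void)
{
	unsigned int i, nr_pages = 1u << L1D_CACHE_ORDER;
	void *buf;

	if (posix_memalign(&buf, PAGE_SIZE, (size_t)nr_pages * PAGE_SIZE))
		return NULL;

	/* Distinct, non-zero pattern per page: i + 1, as in the patch. */
	for (i = 0; i < nr_pages; ++i)
		memset((char *)buf + (size_t)i * PAGE_SIZE, i + 1, PAGE_SIZE);

	return buf;
}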
-rw-r--r--   arch/x86/kvm/vmx.c   17
1 file changed, 14 insertions, 3 deletions
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 505b314c1f5f..b2f595afc1f4 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -211,6 +211,7 @@ static void *vmx_l1d_flush_pages;
static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
{
struct page *page;
+ unsigned int i;
if (!enable_ept) {
l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
@@ -243,6 +244,16 @@ static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
if (!page)
return -ENOMEM;
vmx_l1d_flush_pages = page_address(page);
+
+ /*
+ * Initialize each page with a different pattern in
+ * order to protect against KSM in the nested
+ * virtualization case.
+ */
+ for (i = 0; i < 1u << L1D_CACHE_ORDER; ++i) {
+ memset(vmx_l1d_flush_pages + i * PAGE_SIZE, i + 1,
+ PAGE_SIZE);
+ }
}
l1tf_vmx_mitigation = l1tf;
@@ -9737,7 +9748,7 @@ static void vmx_l1d_flush(struct kvm_vcpu *vcpu)
/* First ensure the pages are in the TLB */
"xorl %%eax, %%eax\n"
".Lpopulate_tlb:\n\t"
- "movzbl (%[empty_zp], %%" _ASM_AX "), %%ecx\n\t"
+ "movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
"addl $4096, %%eax\n\t"
"cmpl %%eax, %[size]\n\t"
"jne .Lpopulate_tlb\n\t"
@@ -9746,12 +9757,12 @@ static void vmx_l1d_flush(struct kvm_vcpu *vcpu)
/* Now fill the cache */
"xorl %%eax, %%eax\n"
".Lfill_cache:\n"
- "movzbl (%[empty_zp], %%" _ASM_AX "), %%ecx\n\t"
+ "movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
"addl $64, %%eax\n\t"
"cmpl %%eax, %[size]\n\t"
"jne .Lfill_cache\n\t"
"lfence\n"
- :: [empty_zp] "r" (vmx_l1d_flush_pages),
+ :: [flush_pages] "r" (vmx_l1d_flush_pages),
[size] "r" (size)
: "eax", "ebx", "ecx", "edx");
}
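
[Editor's note] For readers following the asm above, here is a rough C rendition
of the access pattern, a sketch only: the function name l1d_flush_sw_sketch is
hypothetical, the 4096-byte page and 64-byte cache-line strides are assumptions
taken from the constants in the asm, and the real flush stays in hand-written
asm (ending in lfence) so that exactly the intended loads are executed.

static void l1d_flush_sw_sketch(const volatile char *flush_pages,
				unsigned int size)
{
	unsigned int offset;

	/* First ensure the pages are in the TLB: one load per page. */
	for (offset = 0; offset < size; offset += 4096)
		(void)flush_pages[offset];

	/* Now fill the cache: one load per 64-byte cache line. */
	for (offset = 0; offset < size; offset += 64)
		(void)flush_pages[offset];
}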