author     Avi Kivity <avi@qumranet.com>  2007-07-10 17:50:55 +0300
committer  Avi Kivity <avi@qumranet.com>  2007-07-20 20:16:29 +0300
commit     d55e2cb20123cdb5020ec4a2b2f1eace5038c292 (patch)
tree       6a02bb893638ded6448ee78687ee25072ea82360
parent     2cb7e714229681408e323852bed939989faf6991 (diff)
KVM: MMU: Store nx bit for large page shadows
We need to distinguish between large page shadows which have the nx bit set and those which don't. The problem shows up when booting a newer smp Linux kernel, where the trampoline page (which is in real mode, which uses the same shadow pages as large pages) is using the same mapping as a kernel data page, which is mapped using nx, causing kvm to spin on that page.

Signed-off-by: Avi Kivity <avi@qumranet.com>
-rw-r--r--  drivers/kvm/kvm.h          4
-rw-r--r--  drivers/kvm/paging_tmpl.h  2
2 files changed, 4 insertions, 2 deletions
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index a7c5e6bee034..65ab268d4256 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -121,7 +121,7 @@ struct kvm_pte_chain {
  * bits 4:7 - page table level for this shadow (1-4)
  * bits 8:9 - page table quadrant for 2-level guests
  * bit 16 - "metaphysical" - gfn is not a real page (huge page/real mode)
- * bits 17:18 - "access" - the user and writable bits of a huge page pde
+ * bits 17:19 - "access" - the user, writable, and nx bits of a huge page pde
  */
 union kvm_mmu_page_role {
 	unsigned word;
@@ -131,7 +131,7 @@ union kvm_mmu_page_role {
 		unsigned quadrant : 2;
 		unsigned pad_for_nice_hex_output : 6;
 		unsigned metaphysical : 1;
-		unsigned hugepage_access : 2;
+		unsigned hugepage_access : 3;
 	};
 };
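
For context, a minimal standalone sketch (illustration only, not kernel code; it copies the bit layout from the comment above and assumes the usual gcc bitfield packing) of why the extra bit matters: the shadow page lookup in kvm_mmu_get_page keys on the guest frame number plus the packed role word, so a huge-page mapping with nx and one without it now carry different hugepage_access values and therefore different role words. The access values 1 and 3 used here match the packing shown after the paging_tmpl.h hunk below.

/* Illustration only: a cut-down copy of kvm_mmu_page_role. */
#include <stdio.h>

union mmu_page_role {
	unsigned word;
	struct {
		unsigned glevels : 4;
		unsigned level : 4;
		unsigned quadrant : 2;
		unsigned pad_for_nice_hex_output : 6;
		unsigned metaphysical : 1;
		unsigned hugepage_access : 3;	/* packed user/writable/nx bits */
	};
};

int main(void)
{
	union mmu_page_role plain = { 0 }, nx = { 0 };

	plain.metaphysical = nx.metaphysical = 1;
	plain.hugepage_access = 1;	/* writable kernel huge page, no nx */
	nx.hugepage_access = 3;		/* the same page with nx folded in  */

	/* Different role words, so the MMU keeps two separate shadow pages. */
	printf("plain=%#x nx=%#x distinct=%d\n",
	       plain.word, nx.word, plain.word != nx.word);
	return 0;
}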
diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h
index a7c5cb0319ea..4b5391c717f8 100644
--- a/drivers/kvm/paging_tmpl.h
+++ b/drivers/kvm/paging_tmpl.h
@@ -366,6 +366,8 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 			metaphysical = 1;
 			hugepage_access = *guest_ent;
 			hugepage_access &= PT_USER_MASK | PT_WRITABLE_MASK;
+			if (*guest_ent & PT64_NX_MASK)
+				hugepage_access |= (1 << 2);
 			hugepage_access >>= PT_WRITABLE_SHIFT;
 			table_gfn = (*guest_ent & PT_BASE_ADDR_MASK)
 				>> PAGE_SHIFT;