author    Sean Christopherson <sean.j.christopherson@intel.com>  2019-02-05 13:01:15 -0800
committer Paolo Bonzini <pbonzini@redhat.com>  2019-02-20 22:48:35 +0100
commit 5192f9b976f9687569a90602b8a6c053da4498f6
tree   13152edf16a5627d5ca9ade3704e01b6120a6c7a /arch/x86/kvm/mmu.c
parent 361209e054a2c9f34da090ee1ee4c1e8bfe76a64
KVM: x86: Use a u64 when passing the MMIO gen around
KVM currently uses an 'unsigned int' for the MMIO generation number despite it being derived from the 64-bit memslots generation and being propagated to (potentially) 64-bit sptes. There is no hidden agenda behind using an 'unsigned int', it's done simply because the MMIO generation will never set bits above bit 19.

Passing a u64 will allow the "update in-progress" flag to be relocated from bit 0 to bit 63 and removes the need to cast the generation back to a u64 when propagating it to a spte.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
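As context for the diff below, here is a minimal, self-contained sketch of the bit layout being manipulated. The shift constants follow the mmu.c of this era, but not all of them are visible in the hunks below, so treat the concrete values as assumptions; the sample main() is purely illustrative.

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t u64;

    /* Values as in arch/x86/kvm/mmu.c around this commit (assumed). */
    #define MMIO_SPTE_GEN_LOW_SHIFT   2
    #define MMIO_SPTE_GEN_HIGH_SHIFT  52

    #define MMIO_GEN_SHIFT            20
    #define MMIO_GEN_LOW_SHIFT        10
    /* "- 2" rather than "- 1": bit 0 of the memslots generation is the
     * "update in-progress" flag, so it is excluded from the low half. */
    #define MMIO_GEN_LOW_MASK         ((1 << MMIO_GEN_LOW_SHIFT) - 2)
    #define MMIO_GEN_MASK             ((1 << MMIO_GEN_SHIFT) - 1)

    /* Post-patch shape: gen is a u64 throughout, so building the high
     * half of the mask no longer needs a (u64) cast before shifting. */
    static u64 generation_mmio_spte_mask(u64 gen)
    {
        u64 mask;

        mask = (gen & MMIO_GEN_LOW_MASK) << MMIO_SPTE_GEN_LOW_SHIFT;
        mask |= (gen >> MMIO_GEN_LOW_SHIFT) << MMIO_SPTE_GEN_HIGH_SHIFT;
        return mask;
    }

    int main(void)
    {
        u64 gen = 0xabcde & MMIO_GEN_MASK;  /* arbitrary 20-bit sample */

        printf("gen 0x%llx -> spte gen bits 0x%llx\n",
               (unsigned long long)gen,
               (unsigned long long)generation_mmio_spte_mask(gen));
        return 0;
    }

Note why the old prototype needed the cast the commit message mentions: with an 'unsigned int' parameter, '(gen >> MMIO_GEN_LOW_SHIFT) << MMIO_SPTE_GEN_HIGH_SHIFT' would shift a 32-bit value by 52 bits, which is undefined behavior in C, so the intermediate had to be widened to u64 first. With a u64 parameter the cast is redundant, which is exactly what the first hunk removes.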
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--  arch/x86/kvm/mmu.c  14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index c2f2c9de63ed..54bb706e40e8 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -348,20 +348,20 @@ static inline bool is_access_track_spte(u64 spte)
#define MMIO_GEN_LOW_MASK ((1 << MMIO_GEN_LOW_SHIFT) - 2)
#define MMIO_GEN_MASK ((1 << MMIO_GEN_SHIFT) - 1)
-static u64 generation_mmio_spte_mask(unsigned int gen)
+static u64 generation_mmio_spte_mask(u64 gen)
{
u64 mask;
WARN_ON(gen & ~MMIO_GEN_MASK);
mask = (gen & MMIO_GEN_LOW_MASK) << MMIO_SPTE_GEN_LOW_SHIFT;
- mask |= ((u64)gen >> MMIO_GEN_LOW_SHIFT) << MMIO_SPTE_GEN_HIGH_SHIFT;
+ mask |= (gen >> MMIO_GEN_LOW_SHIFT) << MMIO_SPTE_GEN_HIGH_SHIFT;
return mask;
}
-static unsigned int get_mmio_spte_generation(u64 spte)
+static u64 get_mmio_spte_generation(u64 spte)
{
- unsigned int gen;
+ u64 gen;
spte &= ~shadow_mmio_mask;
@@ -370,7 +370,7 @@ static unsigned int get_mmio_spte_generation(u64 spte)
return gen;
}
-static unsigned int kvm_current_mmio_generation(struct kvm_vcpu *vcpu)
+static u64 kvm_current_mmio_generation(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_memslots(vcpu)->generation & MMIO_GEN_MASK;
}
@@ -378,7 +378,7 @@ static unsigned int kvm_current_mmio_generation(struct kvm_vcpu *vcpu)
static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
unsigned access)
{
- unsigned int gen = kvm_current_mmio_generation(vcpu);
+ u64 gen = kvm_current_mmio_generation(vcpu);
u64 mask = generation_mmio_spte_mask(gen);
u64 gpa = gfn << PAGE_SHIFT;
@@ -426,7 +426,7 @@ static bool set_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
{
- unsigned int kvm_gen, spte_gen;
+ u64 kvm_gen, spte_gen;
kvm_gen = kvm_current_mmio_generation(vcpu);
spte_gen = get_mmio_spte_generation(spte);
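For completeness, a hedged sketch of the decode path that the second hunk elides (the lines between 'spte &= ~shadow_mmio_mask;' and 'return gen;'). It is reconstructed to mirror the mmu.c of this era and to show that it inverts generation_mmio_spte_mask(); it is not quoted verbatim from the patch.

    /* Reassemble the generation from the two spte bit ranges; inverse of
     * generation_mmio_spte_mask() above. The shadow_mmio_mask stripping
     * and surrounding kernel context are omitted from this sketch. */
    static u64 get_mmio_spte_generation(u64 spte)
    {
        u64 gen;

        gen = (spte >> MMIO_SPTE_GEN_LOW_SHIFT) & MMIO_GEN_LOW_MASK;
        gen |= (spte >> MMIO_SPTE_GEN_HIGH_SHIFT) << MMIO_GEN_LOW_SHIFT;
        return gen;
    }

For any generation within MMIO_GEN_MASK and with bit 0 clear (as MMIO_GEN_LOW_MASK guarantees for the encoded half), get_mmio_spte_generation(generation_mmio_spte_mask(gen)) == gen, which is the round-trip property check_mmio_spte() relies on when comparing kvm_gen against spte_gen.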