| author | Paolo Bonzini <pbonzini@redhat.com> | 2017-05-02 16:20:18 +0200 |
|---|---|---|
| committer | Paolo Bonzini <pbonzini@redhat.com> | 2017-05-03 16:30:26 +0200 |
| commit | 4e335d9e7ddbcf83d03e7fbe65797ebed2272c18 (patch) | |
| tree | ef791b6623e77a8d3abef05ff0410709def329d8 /include/linux/kvm_host.h | |
| parent | ee5f7d79a80550179b258417442b7bdbccaf476a (diff) | |
Revert "KVM: Support vCPU-based gfn->hva cache"
This reverts commit bbd6411513aa8ef3ea02abab61318daf87c1af1e.
I've been sitting on this revert for too long and it unfortunately
missed 4.11. It's also the reason why I haven't merged ring-based
dirty tracking for 4.12.
Using kvm_vcpu_memslots in kvm_gfn_to_hva_cache_init and
kvm_vcpu_write_guest_offset_cached means that the MSR value can
now be used to access SMRAM, simply by making it point to an SMRAM
physical address. This is problematic because it lets the guest
OS overwrite memory that it shouldn't be able to touch.
Cc: stable@vger.kernel.org
Fixes: bbd6411513aa8ef3ea02abab61318daf87c1af1e
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
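
For context, the two lookup paths differ only in which set of memslots they consult. A simplified sketch of the two helpers, paraphrased from include/linux/kvm_host.h of that era rather than copied verbatim, illustrates why the vCPU-based variant can reach SMRAM:

```c
/* Simplified paraphrase of the lookup helpers, not verbatim kernel code. */

/* VM-wide lookup: always consults address space 0, i.e. ordinary guest RAM. */
static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
	return __kvm_memslots(kvm, 0);
}

/*
 * vCPU-based lookup: the address-space id depends on the vCPU's state.
 * On x86, a vCPU in SMM selects the SMM address space, so a gfn->hva
 * cache resolved through this path can map a guest-written MSR value
 * onto an SMRAM slot.
 */
static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
{
	return __kvm_memslots(vcpu->kvm, kvm_arch_vcpu_memslots_id(vcpu));
}
```

Reverting to the kvm_* variants pins the cache to the default address space regardless of the vCPU's SMM state.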
Diffstat (limited to 'include/linux/kvm_host.h')
-rw-r--r-- | include/linux/kvm_host.h | 16 |
1 file changed, 8 insertions, 8 deletions
```diff
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 25cf258a1c9b..3727afdf614d 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -650,18 +650,18 @@ int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
 int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len);
 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
-int kvm_vcpu_read_guest_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *ghc,
-			       void *data, unsigned long len);
+int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+			  void *data, unsigned long len);
 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len);
 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len);
-int kvm_vcpu_write_guest_cached(struct kvm_vcpu *v, struct gfn_to_hva_cache *ghc,
-				void *data, unsigned long len);
-int kvm_vcpu_write_guest_offset_cached(struct kvm_vcpu *v, struct gfn_to_hva_cache *ghc,
-				       void *data, int offset, unsigned long len);
-int kvm_vcpu_gfn_to_hva_cache_init(struct kvm_vcpu *v, struct gfn_to_hva_cache *ghc,
-				   gpa_t gpa, unsigned long len);
+int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+			   void *data, unsigned long len);
+int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+				  void *data, int offset, unsigned long len);
+int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+			      gpa_t gpa, unsigned long len);
 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
```
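
With the vCPU-based variants removed, a caller running in vCPU context passes the VM pointer explicitly, so the cache is always initialized and used against the ordinary memslots. A hypothetical caller (the names my_setup_cache, my_update_guest, struct my_guest_data, and my_msr_gpa are made up for illustration, only the kvm_* prototypes come from the header above) might use the restored interface like this:

```c
/*
 * Illustrative sketch of the restored VM-based cache API; not actual
 * kernel code.  "struct my_guest_data" and "my_msr_gpa" stand in for
 * whatever guest-registered structure a real caller tracks.
 */
struct my_guest_data {
	u64 counter;
};

static int my_setup_cache(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *ghc,
			  gpa_t my_msr_gpa)
{
	/* Initialize against the VM's ordinary memslots, not the vCPU's. */
	return kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, my_msr_gpa,
					 sizeof(struct my_guest_data));
}

static int my_update_guest(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *ghc,
			   struct my_guest_data *val)
{
	/* Cached write; offset 0 overwrites the whole structure. */
	return kvm_write_guest_offset_cached(vcpu->kvm, ghc, val, 0,
					     sizeof(*val));
}
```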