author    Marcelo Tosatti <mtosatti@redhat.com>  2009-09-01 12:15:12 -0300
committer Greg Kroah-Hartman <gregkh@suse.de>   2009-09-08 20:33:36 -0700
commit    64fdbdd67cf0779b79ba763ee3b4d1ca560006d1 (patch)
tree      872ffa7d25e9c40d1b78135e2d92b978b884ed4e /arch
parent    a6582e7e2ab9b52b331e40fc6d0226ff0707a9f4 (diff)
KVM: MMU: handle n_free_mmu_pages > n_alloc_mmu_pages in kvm_mmu_change_mmu_pages
(cherry picked from commit 025dbbf36a7680bffe54d9dcbf0a8bc01a7cbd10)

kvm_mmu_change_mmu_pages mishandles the case where n_alloc_mmu_pages is
smaller than n_free_mmu_pages: it never checks whether the result of the
subtraction is negative. That is a valid condition, which can occur when a
large number of pages has recently been freed.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
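The failure mode is easiest to see with concrete numbers. Below is a minimal userspace sketch, not the kernel code: the field names mirror kvm->arch, the values are invented for illustration. Because both counters are unsigned in the kernel, the old check's subtraction wraps to a huge value when n_free_mmu_pages exceeds n_alloc_mmu_pages instead of going negative, so the comparison against the requested limit spuriously succeeds:

	#include <stdio.h>

	/*
	 * Standalone sketch of the bug (not kernel code). Field names
	 * mirror kvm->arch; the values are made up for illustration.
	 */
	int main(void)
	{
		unsigned int n_alloc_mmu_pages = 40; /* pages currently allocated */
		unsigned int n_free_mmu_pages  = 50; /* can exceed n_alloc after a burst of frees */
		unsigned int kvm_nr_mmu_pages  = 64; /* requested limit */

		/* Old check: unsigned subtraction wraps to a huge value... */
		unsigned int unclamped = n_alloc_mmu_pages - n_free_mmu_pages;
		if (unclamped > kvm_nr_mmu_pages)
			printf("buggy: %u > %u, would zap pages needlessly\n",
			       unclamped, kvm_nr_mmu_pages);

		/* Fixed check: compute as signed and clamp at zero, as the patch does. */
		int used_pages = (int)n_alloc_mmu_pages - (int)n_free_mmu_pages;
		if (used_pages < 0)
			used_pages = 0;
		if (used_pages > (int)kvm_nr_mmu_pages)
			printf("would free pages down to the limit\n");
		else
			printf("fixed: used_pages = %d, nothing to zap\n", used_pages);

		return 0;
	}

With the patch applied, the difference is computed once into a signed int and clamped via max(0, used_pages), so the zap loop only runs when pages are genuinely in use beyond the new limit.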
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kvm/mmu.c  15
1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index b13758aed5af..6d66a6ff5443 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1417,24 +1417,25 @@ static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
*/
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
{
+ int used_pages;
+
+ used_pages = kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages;
+ used_pages = max(0, used_pages);
+
/*
* If we set the number of mmu pages to be smaller be than the
* number of actived pages , we must to free some mmu pages before we
* change the value
*/
- if ((kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages) >
- kvm_nr_mmu_pages) {
- int n_used_mmu_pages = kvm->arch.n_alloc_mmu_pages
- - kvm->arch.n_free_mmu_pages;
-
- while (n_used_mmu_pages > kvm_nr_mmu_pages) {
+ if (used_pages > kvm_nr_mmu_pages) {
+ while (used_pages > kvm_nr_mmu_pages) {
struct kvm_mmu_page *page;
page = container_of(kvm->arch.active_mmu_pages.prev,
struct kvm_mmu_page, link);
kvm_mmu_zap_page(kvm, page);
- n_used_mmu_pages--;
+ used_pages--;
}
kvm->arch.n_free_mmu_pages = 0;
}