author    Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>  2010-04-23 17:48:35 +0900
committer Avi Kivity <avi@redhat.com>  2010-05-17 12:19:13 +0300
commit    d14769377a247d4e7b570592a090474c8a059938 (patch)
tree      fd7e0de9712a29f99962b789ab3ab70a1338087f /virt/kvm/kvm_main.c
parent    039091875ce4629d83db64c055528e7b86337d50 (diff)
KVM: Remove test-before-set optimization for dirty bits
As Avi pointed out, testing the bit before setting it in mark_page_dirty() was important in the days of shadow paging, but EPT and NPT have since become common and the chance of faulting a page more than once per iteration is small. So remove the test to avoid the extra access.

Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Signed-off-by: Avi Kivity <avi@redhat.com>
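For context, the dropped optimization read the bitmap word and only stored when the bit was still clear, trading an extra load for a skipped store; the patch makes the set unconditional. Below is a minimal, illustrative userspace sketch of the two patterns, using plain bit operations on an unsigned long array as a stand-in for the kernel's generic_test_le_bit()/generic___set_le_bit() helpers (the function names here are hypothetical):

#include <limits.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

/* Test-before-set: the extra load lets us skip the store (and the
 * read-modify-write it implies) when the bit is already set. */
static void mark_dirty_test_first(unsigned long *bitmap, unsigned long nr)
{
	unsigned long mask = 1UL << (nr % BITS_PER_LONG);
	unsigned long *word = &bitmap[nr / BITS_PER_LONG];

	if (!(*word & mask))
		*word |= mask;
}

/* Unconditional set, as after this patch: one read-modify-write,
 * no separate test. */
static void mark_dirty_set(unsigned long *bitmap, unsigned long nr)
{
	bitmap[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

With hardware-assisted paging (EPT/NPT) a page rarely faults more than once per dirty-log iteration, so the bit is rarely already set and the test mostly just adds an extra read.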
Diffstat (limited to 'virt/kvm/kvm_main.c')
-rw-r--r--  virt/kvm/kvm_main.c  4
1 file changed, 1 insertion(+), 3 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 6dc940455e8b..9ab1a77941ef 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1192,9 +1192,7 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
 	if (memslot && memslot->dirty_bitmap) {
 		unsigned long rel_gfn = gfn - memslot->base_gfn;
 
-		/* avoid RMW */
-		if (!generic_test_le_bit(rel_gfn, memslot->dirty_bitmap))
-			generic___set_le_bit(rel_gfn, memslot->dirty_bitmap);
+		generic___set_le_bit(rel_gfn, memslot->dirty_bitmap);
 	}
 }