author		Ingo Molnar <mingo@kernel.org>	2016-03-17 09:44:57 +0100
committer	Ingo Molnar <mingo@kernel.org>	2016-03-17 09:44:57 +0100
commit	00f526850151e91fdad0896a1436341687ad2582 (patch)
tree	67c28260020440ade83d6f4aa003ae4f0c683e67 /arch/x86/kvm/mmu.c
parent	cbf8b5a2b649a501758291cb4d4ba1e5711771ba (diff)
parent	d89abe2a1f0c92499eedc815e4a9b2881f4959a5 (diff)
Merge branch 'x86/cleanups' into x86/urgent
Pull in some merge window leftovers.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--	arch/x86/kvm/mmu.c	4
1 file changed, 2 insertions, 2 deletions
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 1e7a49bfc94f..ddb3291d49c9 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -478,7 +478,7 @@ static bool spte_is_locklessly_modifiable(u64 spte)
 static bool spte_has_volatile_bits(u64 spte)
 {
 	/*
-	 * Always atomicly update spte if it can be updated
+	 * Always atomically update spte if it can be updated
 	 * out of mmu-lock, it can ensure dirty bit is not lost,
 	 * also, it can help us to get a stable is_writable_pte()
 	 * to ensure tlb flush is not missed.
@@ -549,7 +549,7 @@ static bool mmu_spte_update(u64 *sptep, u64 new_spte)
 
 	/*
 	 * For the spte updated out of mmu-lock is safe, since
-	 * we always atomicly update it, see the comments in
+	 * we always atomically update it, see the comments in
 	 * spte_has_volatile_bits().
 	 */
 	if (spte_is_locklessly_modifiable(old_spte) &&
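
For context on the comment being touched here: an SPTE that can be modified outside mmu_lock (for example hardware setting the dirty bit, or the lockless write-protection fast path) has to be replaced with an atomic exchange, so the old bits come back to the caller instead of being overwritten by a plain store. The fragment below is only a minimal userspace sketch of that pattern in C11; the names, bit layout, and helpers are invented for illustration and are not KVM's.

/* Illustrative only -- hypothetical names and bit layout, not KVM's code. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PTE_WRITABLE (1ull << 1)	/* made-up bit positions */
#define PTE_DIRTY    (1ull << 6)

/* Fast path: a plain store, safe only while nothing else can touch the PTE. */
static void pte_update_fast(_Atomic uint64_t *ptep, uint64_t new_pte)
{
	atomic_store_explicit(ptep, new_pte, memory_order_relaxed);
}

/*
 * Slow path: atomically swap in the new value and hand back the old one,
 * so a dirty bit set concurrently (e.g. by "hardware") is observed by the
 * caller rather than silently overwritten.
 */
static uint64_t pte_update_slow(_Atomic uint64_t *ptep, uint64_t new_pte)
{
	return atomic_exchange(ptep, new_pte);
}

int main(void)
{
	_Atomic uint64_t pte = PTE_WRITABLE;
	bool page_dirty = false;

	/* Simulate hardware setting the dirty bit behind our back. */
	atomic_fetch_or(&pte, PTE_DIRTY);

	/* Zap the PTE; the atomic exchange hands the old bits back to us. */
	uint64_t old = pte_update_slow(&pte, 0);
	if (old & PTE_DIRTY)
		page_dirty = true;	/* the dirty bit was not lost */

	/* Later, with exclusive access again, a plain store is enough. */
	pte_update_fast(&pte, PTE_WRITABLE);

	printf("old pte = %#llx, page_dirty = %d\n",
	       (unsigned long long)old, page_dirty);
	return 0;
}

The slow path returns the old value precisely so the caller can fold a concurrently set dirty bit into its own bookkeeping, which is what the kernel comment means by the dirty bit not being lost and by getting a stable view of the old writable bit.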