Diffstat (limited to 'virt')
 virt/kvm/arm/mmu.c | 17 +++++++++++++----
 1 file changed, 13 insertions(+), 4 deletions(-)
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index 704e35f312a4..a2d63247d1bb 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -295,6 +295,13 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
 	assert_spin_locked(&kvm->mmu_lock);
 	pgd = kvm->arch.pgd + stage2_pgd_index(addr);
 	do {
+		/*
+		 * Make sure the page table is still active, as another thread
+		 * could have possibly freed the page table, while we released
+		 * the lock.
+		 */
+		if (!READ_ONCE(kvm->arch.pgd))
+			break;
 		next = stage2_pgd_addr_end(addr, end);
 		if (!stage2_pgd_none(*pgd))
 			unmap_stage2_puds(kvm, pgd, addr, next);
@@ -1170,11 +1177,13 @@ static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
 		 * large. Otherwise, we may see kernel panics with
 		 * CONFIG_DETECT_HUNG_TASK, CONFIG_LOCKUP_DETECTOR,
 		 * CONFIG_LOCKDEP. Additionally, holding the lock too long
-		 * will also starve other vCPUs.
+		 * will also starve other vCPUs. We have to also make sure
+		 * that the page tables are not freed while we released
+		 * the lock.
 		 */
-		if (need_resched() || spin_needbreak(&kvm->mmu_lock))
-			cond_resched_lock(&kvm->mmu_lock);
-
+		cond_resched_lock(&kvm->mmu_lock);
+		if (!READ_ONCE(kvm->arch.pgd))
+			break;
 		next = stage2_pgd_addr_end(addr, end);
 		if (stage2_pgd_present(*pgd))
 			stage2_wp_puds(pgd, addr, next);
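
Both hunks apply the same pattern: a long stage2 page-table walk periodically drops kvm->mmu_lock via cond_resched_lock() so it does not hog the CPU or starve other vCPUs, and after reacquiring the lock it must re-check kvm->arch.pgd with READ_ONCE() before walking further, because another thread may have torn down and freed the whole stage2 table while the lock was dropped. The user-space C sketch below mirrors that re-check-after-relock pattern with a pthread mutex; it is an illustration only, not kernel code, and the names table, table_walk, and table_destroy are hypothetical.

	/*
	 * Minimal user-space sketch (NOT kernel code) of the pattern in the
	 * patch: a long walk over a shared table periodically yields its
	 * lock, and must re-check the table pointer after reacquiring the
	 * lock, because a concurrent teardown may have freed the table in
	 * the meantime. Build with: cc -pthread sketch.c
	 */
	#include <pthread.h>
	#include <sched.h>
	#include <stdlib.h>

	#define TABLE_ENTRIES 4096

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static long *table;	/* protected by 'lock'; NULL once destroyed */

	static void table_walk(size_t entries)
	{
		pthread_mutex_lock(&lock);
		for (size_t i = 0; i < entries; i++) {
			if (i % 64 == 0) {
				/* Drop the lock so others can make progress. */
				pthread_mutex_unlock(&lock);
				sched_yield();
				pthread_mutex_lock(&lock);
				/*
				 * Mirror of the patch's READ_ONCE(kvm->arch.pgd)
				 * check: teardown may have freed the table while
				 * the lock was dropped, so bail out rather than
				 * touch freed memory.
				 */
				if (!table)
					break;
			}
			table[i] = (long)i;	/* safe: checked under the lock */
		}
		pthread_mutex_unlock(&lock);
	}

	static void *table_destroy(void *unused)
	{
		(void)unused;
		pthread_mutex_lock(&lock);
		free(table);
		table = NULL;	/* walkers observe this on their next re-check */
		pthread_mutex_unlock(&lock);
		return NULL;
	}

	int main(void)
	{
		pthread_t destroyer;

		table = calloc(TABLE_ENTRIES, sizeof(*table));
		if (!table)
			return 1;
		/* Run teardown concurrently with the walk to exercise the re-check. */
		pthread_create(&destroyer, NULL, table_destroy, NULL);
		table_walk(TABLE_ENTRIES);
		pthread_join(destroyer, NULL);
		return 0;
	}

Note also that the second hunk drops the open-coded need_resched()/spin_needbreak() guard and calls cond_resched_lock() unconditionally; cond_resched_lock() performs equivalent contention and reschedule checks internally, so the explicit test was redundant.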