Diffstat (limited to 'mm/rmap.c')
-rw-r--r--  mm/rmap.c | 30 ++++++++----------------------
1 file changed, 8 insertions(+), 22 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
index c399a0d41b31..307b555024ef 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -569,19 +569,6 @@ void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
 }
 
 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
-static void percpu_flush_tlb_batch_pages(void *data)
-{
-	/*
-	 * All TLB entries are flushed on the assumption that it is
-	 * cheaper to flush all TLBs and let them be refilled than
-	 * flushing individual PFNs. Note that we do not track mm's
-	 * to flush as that might simply be multiple full TLB flushes
-	 * for no gain.
-	 */
-	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
-	flush_tlb_local();
-}
-
 /*
  * Flush TLB entries for recently unmapped pages from remote CPUs. It is
  * important if a PTE was dirty when it was unmapped that it's flushed
@@ -598,15 +585,14 @@ void try_to_unmap_flush(void)
 
 	cpu = get_cpu();
 
-	trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, -1UL);
-
-	if (cpumask_test_cpu(cpu, &tlb_ubc->cpumask))
-		percpu_flush_tlb_batch_pages(&tlb_ubc->cpumask);
-
-	if (cpumask_any_but(&tlb_ubc->cpumask, cpu) < nr_cpu_ids) {
-		smp_call_function_many(&tlb_ubc->cpumask,
-			percpu_flush_tlb_batch_pages, (void *)tlb_ubc, true);
+	if (cpumask_test_cpu(cpu, &tlb_ubc->cpumask)) {
+		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
+		local_flush_tlb();
+		trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
 	}
+
+	if (cpumask_any_but(&tlb_ubc->cpumask, cpu) < nr_cpu_ids)
+		flush_tlb_others(&tlb_ubc->cpumask, NULL, 0, TLB_FLUSH_ALL);
 	cpumask_clear(&tlb_ubc->cpumask);
 	tlb_ubc->flush_required = false;
 	tlb_ubc->writable = false;
@@ -1555,7 +1541,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 
 discard:
 	page_remove_rmap(page, PageHuge(page));
-	page_cache_release(page);
+	put_page(page);
 
 out_unmap:
 	pte_unmap_unlock(pte, ptl);
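Taken together, the first two hunks replace the open-coded IPI broadcast (smp_call_function_many() invoking percpu_flush_tlb_batch_pages() on each remote CPU) with the architecture's existing shootdown primitive. For reference, this is roughly how try_to_unmap_flush() reads with the patch applied; the lines outside the hunks (the tlb_ubc declaration, the flush_required early return, and the closing put_cpu()) are not part of this diff and are reconstructed from context, so treat this as a sketch rather than a verbatim excerpt:

/*
 * Sketch of try_to_unmap_flush() after this patch. Lines not shown in
 * the hunks above (declarations, early return, put_cpu()) are
 * reconstructed from the surrounding source and may differ from the tree.
 */
void try_to_unmap_flush(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
	int cpu;

	if (!tlb_ubc->flush_required)
		return;

	cpu = get_cpu();

	/*
	 * Flush the local TLB if this CPU holds stale entries for the
	 * batch. Everything is flushed on the assumption that a full
	 * flush is cheaper than flushing individual PFNs.
	 */
	if (cpumask_test_cpu(cpu, &tlb_ubc->cpumask)) {
		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
		local_flush_tlb();
		trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
	}

	/*
	 * Shoot down any remaining CPUs through the architecture's
	 * existing flush_tlb_others() path instead of open-coded IPIs
	 * via smp_call_function_many().
	 */
	if (cpumask_any_but(&tlb_ubc->cpumask, cpu) < nr_cpu_ids)
		flush_tlb_others(&tlb_ubc->cpumask, NULL, 0, TLB_FLUSH_ALL);
	cpumask_clear(&tlb_ubc->cpumask);
	tlb_ubc->flush_required = false;
	tlb_ubc->writable = false;
	put_cpu();
}

Going through flush_tlb_others() means platforms that override that hook (paravirtualized guests, for instance) see the batched flush rather than raw IPIs, and the accounting now matches what happens: the local flush is counted as NR_TLB_LOCAL_FLUSH_ALL and traced as TLB_LOCAL_SHOOTDOWN instead of a remote shootdown. The page_cache_release(page) to put_page(page) change in the last hunk is mechanical: page_cache_release() was a thin alias for put_page() that was being removed tree-wide.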