Diffstat (limited to 'mm')
-rw-r--r--  mm/memory.c  12
-rw-r--r--  mm/mmap.c    23
2 files changed, 16 insertions, 19 deletions
diff --git a/mm/memory.c b/mm/memory.c
index 24ba688876d6..4ea89a2e3a83 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -260,6 +260,12 @@ void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
 		struct vm_area_struct *next = vma->vm_next;
 		unsigned long addr = vma->vm_start;
 
+		/*
+		 * Hide vma from rmap and vmtruncate before freeing pgtables
+		 */
+		anon_vma_unlink(vma);
+		unlink_file_vma(vma);
+
 		if (is_hugepage_only_range(vma->vm_mm, addr, HPAGE_SIZE)) {
 			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
 				floor, next? next->vm_start: ceiling);
@@ -272,6 +278,8 @@ void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
 						HPAGE_SIZE)) {
 				vma = next;
 				next = vma->vm_next;
+				anon_vma_unlink(vma);
+				unlink_file_vma(vma);
 			}
 			free_pgd_range(tlb, addr, vma->vm_end,
 				floor, next? next->vm_start: ceiling);
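
For reference, free_pgtables() as it reads with both hunks above applied: every vma is now unlinked from rmap and from its file mapping before its page tables are freed. The outer loop, the else branch, and the final vma = next step are not visible in the hunks; they are reconstructed from the kernel source of this period and should be treated as an assumption rather than as part of the diff:

	void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
			unsigned long floor, unsigned long ceiling)
	{
		while (vma) {
			struct vm_area_struct *next = vma->vm_next;
			unsigned long addr = vma->vm_start;

			/*
			 * Hide vma from rmap and vmtruncate before freeing pgtables
			 */
			anon_vma_unlink(vma);
			unlink_file_vma(vma);

			if (is_hugepage_only_range(vma->vm_mm, addr, HPAGE_SIZE)) {
				hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
					floor, next? next->vm_start: ceiling);
			} else {
				/* Batch adjacent vmas into one free_pgd_range() call */
				while (next && next->vm_start <= vma->vm_end + PMD_SIZE
				    && !is_hugepage_only_range(vma->vm_mm,
						next->vm_start, HPAGE_SIZE)) {
					vma = next;
					next = vma->vm_next;
					anon_vma_unlink(vma);
					unlink_file_vma(vma);
				}
				free_pgd_range(tlb, addr, vma->vm_end,
					floor, next? next->vm_start: ceiling);
			}
			vma = next;
		}
	}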
@@ -798,12 +806,12 @@ unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
 	}
 
 	lru_add_drain();
-	spin_lock(&mm->page_table_lock);
 	tlb = tlb_gather_mmu(mm, 0);
 	update_hiwater_rss(mm);
+	spin_lock(&mm->page_table_lock);
 	end = unmap_vmas(&tlb, mm, vma, address, end, &nr_accounted, details);
-	tlb_finish_mmu(tlb, address, end);
 	spin_unlock(&mm->page_table_lock);
+	tlb_finish_mmu(tlb, address, end);
 	return end;
 }
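
The whole of zap_page_range() after the hunk above: page_table_lock now covers only unmap_vmas(), while tlb_gather_mmu() and tlb_finish_mmu() run outside it. The local declarations and the initial hugetlb special case (visible only as closing-brace context in the hunk) are reconstructed from the kernel source of this period, so treat them as an assumption:

	unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
			unsigned long size, struct zap_details *details)
	{
		struct mm_struct *mm = vma->vm_mm;	/* reconstructed declarations */
		struct mmu_gather *tlb;
		unsigned long end = address + size;
		unsigned long nr_accounted = 0;

		if (is_vm_hugetlb_page(vma)) {		/* reconstructed special case */
			zap_hugepage_range(vma, address, size);
			return end;
		}

		lru_add_drain();
		tlb = tlb_gather_mmu(mm, 0);		/* mmu_gather set up unlocked */
		update_hiwater_rss(mm);
		spin_lock(&mm->page_table_lock);	/* lock only around unmap_vmas */
		end = unmap_vmas(&tlb, mm, vma, address, end, &nr_accounted, details);
		spin_unlock(&mm->page_table_lock);
		tlb_finish_mmu(tlb, address, end);	/* TLB teardown after unlock */
		return end;
	}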
diff --git a/mm/mmap.c b/mm/mmap.c
index d931d7e49ac9..fa35323a3c5b 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -203,14 +203,6 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
 {
 	struct vm_area_struct *next = vma->vm_next;
 
-	/*
-	 * Hide vma from rmap and vmtruncate before freeing page tables:
-	 * to be moved into free_pgtables once page_table_lock is lifted
-	 * from it, but until then lock ordering forbids that move.
-	 */
-	anon_vma_unlink(vma);
-	unlink_file_vma(vma);
-
 	might_sleep();
 	if (vma->vm_ops && vma->vm_ops->close)
 		vma->vm_ops->close(vma);
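
With the unlink calls moved out, remove_vma() is reduced to the work that may sleep. Only its first lines appear in the hunk; the tail below (fput and the final frees) is reconstructed from the kernel source of this period and is an assumption:

	static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
	{
		struct vm_area_struct *next = vma->vm_next;

		might_sleep();
		if (vma->vm_ops && vma->vm_ops->close)
			vma->vm_ops->close(vma);
		if (vma->vm_file)			/* reconstructed tail */
			fput(vma->vm_file);
		mpol_free(vma_policy(vma));
		kmem_cache_free(vm_area_cachep, vma);
		return next;
	}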
@@ -1679,15 +1671,15 @@ static void unmap_region(struct mm_struct *mm,
 	unsigned long nr_accounted = 0;
 
 	lru_add_drain();
-	spin_lock(&mm->page_table_lock);
 	tlb = tlb_gather_mmu(mm, 0);
 	update_hiwater_rss(mm);
+	spin_lock(&mm->page_table_lock);
 	unmap_vmas(&tlb, mm, vma, start, end, &nr_accounted, NULL);
+	spin_unlock(&mm->page_table_lock);
 	vm_unacct_memory(nr_accounted);
 	free_pgtables(&tlb, vma, prev? prev->vm_end: FIRST_USER_ADDRESS,
 				 next? next->vm_start: 0);
 	tlb_finish_mmu(tlb, start, end);
-	spin_unlock(&mm->page_table_lock);
 }
 
 /*
@@ -1962,23 +1954,20 @@ void exit_mmap(struct mm_struct *mm)
 	unsigned long end;
 
 	lru_add_drain();
-
-	spin_lock(&mm->page_table_lock);
-
 	flush_cache_mm(mm);
 	tlb = tlb_gather_mmu(mm, 1);
 	/* Don't update_hiwater_rss(mm) here, do_exit already did */
 	/* Use -1 here to ensure all VMAs in the mm are unmapped */
+	spin_lock(&mm->page_table_lock);
 	end = unmap_vmas(&tlb, mm, vma, 0, -1, &nr_accounted, NULL);
+	spin_unlock(&mm->page_table_lock);
 	vm_unacct_memory(nr_accounted);
 	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
 	tlb_finish_mmu(tlb, 0, end);
-	spin_unlock(&mm->page_table_lock);
-
 	/*
-	 * Walk the list again, actually closing and freeing it
-	 * without holding any MM locks.
+	 * Walk the list again, actually closing and freeing it,
+	 * with preemption enabled, without holding any MM locks.
 	 */
 	while (vma)
 		vma = remove_vma(vma);
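
And exit_mmap() after the hunk above: page_table_lock is taken only around unmap_vmas(), while tlb_gather_mmu(), free_pgtables() (which now unlinks each vma itself) and tlb_finish_mmu() all run unlocked, and remove_vma() is called with no MM locks held at all. The declarations are reconstructed from the kernel source of this period and are an assumption:

	void exit_mmap(struct mm_struct *mm)
	{
		struct mmu_gather *tlb;
		struct vm_area_struct *vma = mm->mmap;	/* reconstructed declarations */
		unsigned long nr_accounted = 0;
		unsigned long end;

		lru_add_drain();
		flush_cache_mm(mm);
		tlb = tlb_gather_mmu(mm, 1);
		/* Don't update_hiwater_rss(mm) here, do_exit already did */
		/* Use -1 here to ensure all VMAs in the mm are unmapped */
		spin_lock(&mm->page_table_lock);
		end = unmap_vmas(&tlb, mm, vma, 0, -1, &nr_accounted, NULL);
		spin_unlock(&mm->page_table_lock);
		vm_unacct_memory(nr_accounted);
		free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
		tlb_finish_mmu(tlb, 0, end);

		/*
		 * Walk the list again, actually closing and freeing it,
		 * with preemption enabled, without holding any MM locks.
		 */
		while (vma)
			vma = remove_vma(vma);
	}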