author    Linus Torvalds <torvalds@linux-foundation.org>  2021-02-21 12:19:56 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2021-02-21 12:19:56 -0800
commit    7b15c27e2f7b6d114770c2922b2c49d2e8f3867c
tree      96be1dfb322e23fca3de86b1e27244c74a6132fc /mm
parent    9eef02334505411667a7b51a8f349f8c6c4f3b66
parent    8cf55f24ce6cf90eb8828421e15e9efcd508bd2c
Merge tag 'core-mm-2021-02-17' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull tlb gather updates from Ingo Molnar:
 "These fix mm (soft-)dirty bit management in the procfs code & clean
  up the TLB gather API."

* tag 'core-mm-2021-02-17' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/ldt: Use tlb_gather_mmu_fullmm() when freeing LDT page-tables
  tlb: arch: Remove empty __tlb_remove_tlb_entry() stubs
  tlb: mmu_gather: Remove start/end arguments from tlb_gather_mmu()
  tlb: mmu_gather: Introduce tlb_gather_mmu_fullmm()
  tlb: mmu_gather: Remove unused start/end arguments from tlb_finish_mmu()
  mm: proc: Invalidate TLB after clearing soft-dirty page state
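For callers, the conversion is mechanical. A minimal sketch of the before/after pattern (my_zap_range() is a hypothetical caller invented for illustration; unmap_page_range() is used as in the mm/oom_kill.c hunk below):

/* Hypothetical caller illustrating the API change. */
static void my_zap_range(struct vm_area_struct *vma,
			 unsigned long start, unsigned long end)
{
	struct mmu_gather tlb;

	/* Before this series: tlb_gather_mmu(&tlb, vma->vm_mm, start, end); */
	tlb_gather_mmu(&tlb, vma->vm_mm);
	unmap_page_range(&tlb, vma, start, end, NULL);
	/* Before this series: tlb_finish_mmu(&tlb, start, end); */
	tlb_finish_mmu(&tlb);
}

The mmu_gather structure tracks the affected range itself as entries are gathered, so the explicit start/end hints are no longer needed, and the 0..-1 convention for full address-space teardown is replaced by the explicit tlb_gather_mmu_fullmm() introduced below.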
Diffstat (limited to 'mm')
 mm/hugetlb.c    | 18
 mm/madvise.c    | 12
 mm/memory.c     |  8
 mm/mmap.c       |  8
 mm/mmu_gather.c | 31
 mm/oom_kill.c   |  6
 6 files changed, 36 insertions(+), 47 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 4bdb58ab14cb..905a7d549b00 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4008,25 +4008,11 @@ void __unmap_hugepage_range_final(struct mmu_gather *tlb,
 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 			  unsigned long end, struct page *ref_page)
 {
-	struct mm_struct *mm;
 	struct mmu_gather tlb;
-	unsigned long tlb_start = start;
-	unsigned long tlb_end = end;
 
-	/*
-	 * If shared PMDs were possibly used within this vma range, adjust
-	 * start/end for worst case tlb flushing.
-	 * Note that we can not be sure if PMDs are shared until we try to
-	 * unmap pages. However, we want to make sure TLB flushing covers
-	 * the largest possible range.
-	 */
-	adjust_range_if_pmd_sharing_possible(vma, &tlb_start, &tlb_end);
-
-	mm = vma->vm_mm;
-
-	tlb_gather_mmu(&tlb, mm, tlb_start, tlb_end);
+	tlb_gather_mmu(&tlb, vma->vm_mm);
 	__unmap_hugepage_range(&tlb, vma, start, end, ref_page);
-	tlb_finish_mmu(&tlb, tlb_start, tlb_end);
+	tlb_finish_mmu(&tlb);
 }
 
 /*
diff --git a/mm/madvise.c b/mm/madvise.c
index 6a660858784b..0938fd3ad228 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -506,9 +506,9 @@ static long madvise_cold(struct vm_area_struct *vma,
 		return -EINVAL;
 
 	lru_add_drain();
-	tlb_gather_mmu(&tlb, mm, start_addr, end_addr);
+	tlb_gather_mmu(&tlb, mm);
 	madvise_cold_page_range(&tlb, vma, start_addr, end_addr);
-	tlb_finish_mmu(&tlb, start_addr, end_addr);
+	tlb_finish_mmu(&tlb);
 
 	return 0;
 }
@@ -558,9 +558,9 @@ static long madvise_pageout(struct vm_area_struct *vma,
 		return 0;
 
 	lru_add_drain();
-	tlb_gather_mmu(&tlb, mm, start_addr, end_addr);
+	tlb_gather_mmu(&tlb, mm);
 	madvise_pageout_page_range(&tlb, vma, start_addr, end_addr);
-	tlb_finish_mmu(&tlb, start_addr, end_addr);
+	tlb_finish_mmu(&tlb);
 
 	return 0;
 }
@@ -723,7 +723,7 @@ static int madvise_free_single_vma(struct vm_area_struct *vma,
 				range.start, range.end);
 
 	lru_add_drain();
-	tlb_gather_mmu(&tlb, mm, range.start, range.end);
+	tlb_gather_mmu(&tlb, mm);
 	update_hiwater_rss(mm);
 
 	mmu_notifier_invalidate_range_start(&range);
@@ -732,7 +732,7 @@ static int madvise_free_single_vma(struct vm_area_struct *vma,
 			&madvise_free_walk_ops, &tlb);
 	tlb_end_vma(&tlb, vma);
 	mmu_notifier_invalidate_range_end(&range);
-	tlb_finish_mmu(&tlb, range.start, range.end);
+	tlb_finish_mmu(&tlb);
 
 	return 0;
 }
diff --git a/mm/memory.c b/mm/memory.c
index feff48e1465a..9e8576a83147 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1534,13 +1534,13 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
 	lru_add_drain();
 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
 				start, start + size);
-	tlb_gather_mmu(&tlb, vma->vm_mm, start, range.end);
+	tlb_gather_mmu(&tlb, vma->vm_mm);
 	update_hiwater_rss(vma->vm_mm);
 	mmu_notifier_invalidate_range_start(&range);
 	for ( ; vma && vma->vm_start < range.end; vma = vma->vm_next)
 		unmap_single_vma(&tlb, vma, start, range.end, NULL);
 	mmu_notifier_invalidate_range_end(&range);
-	tlb_finish_mmu(&tlb, start, range.end);
+	tlb_finish_mmu(&tlb);
 }
 
 /**
@@ -1561,12 +1561,12 @@ static void zap_page_range_single(struct vm_area_struct *vma, unsigned long addr
 	lru_add_drain();
 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
 				address, address + size);
-	tlb_gather_mmu(&tlb, vma->vm_mm, address, range.end);
+	tlb_gather_mmu(&tlb, vma->vm_mm);
 	update_hiwater_rss(vma->vm_mm);
 	mmu_notifier_invalidate_range_start(&range);
 	unmap_single_vma(&tlb, vma, address, range.end, details);
 	mmu_notifier_invalidate_range_end(&range);
-	tlb_finish_mmu(&tlb, address, range.end);
+	tlb_finish_mmu(&tlb);
 }
 
 /**
diff --git a/mm/mmap.c b/mm/mmap.c
index dc7206032387..90673febce6a 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2671,12 +2671,12 @@ static void unmap_region(struct mm_struct *mm,
 	struct mmu_gather tlb;
 
 	lru_add_drain();
-	tlb_gather_mmu(&tlb, mm, start, end);
+	tlb_gather_mmu(&tlb, mm);
 	update_hiwater_rss(mm);
 	unmap_vmas(&tlb, vma, start, end);
 	free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
 				 next ? next->vm_start : USER_PGTABLES_CEILING);
-	tlb_finish_mmu(&tlb, start, end);
+	tlb_finish_mmu(&tlb);
 }
 
 /*
@@ -3214,12 +3214,12 @@ void exit_mmap(struct mm_struct *mm)
 	lru_add_drain();
 	flush_cache_mm(mm);
-	tlb_gather_mmu(&tlb, mm, 0, -1);
+	tlb_gather_mmu_fullmm(&tlb, mm);
 	/* update_hiwater_rss(mm) here? but nobody should be looking */
 	/* Use -1 here to ensure all VMAs in the mm are unmapped */
 	unmap_vmas(&tlb, vma, 0, -1);
 	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
-	tlb_finish_mmu(&tlb, 0, -1);
+	tlb_finish_mmu(&tlb);
 
 	/*
 	 * Walk the list again, actually closing and freeing it,
diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
index 03c33c93a582..0dc7149b0c61 100644
--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -253,21 +253,17 @@ void tlb_flush_mmu(struct mmu_gather *tlb)
  * tlb_gather_mmu - initialize an mmu_gather structure for page-table tear-down
  * @tlb: the mmu_gather structure to initialize
  * @mm: the mm_struct of the target address space
- * @start: start of the region that will be removed from the page-table
- * @end: end of the region that will be removed from the page-table
+ * @fullmm: @mm is without users and we're going to destroy the full address
+ *	    space (exit/execve)
  *
  * Called to initialize an (on-stack) mmu_gather structure for page-table
- * tear-down from @mm. The @start and @end are set to 0 and -1
- * respectively when @mm is without users and we're going to destroy
- * the full address space (exit/execve).
+ * tear-down from @mm.
  */
-void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
-		    unsigned long start, unsigned long end)
+static void __tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+			     bool fullmm)
 {
 	tlb->mm = mm;
-
-	/* Is it from 0 to ~0? */
-	tlb->fullmm = !(start | (end+1));
+	tlb->fullmm = fullmm;
 
 #ifndef CONFIG_MMU_GATHER_NO_GATHER
 	tlb->need_flush_all = 0;
@@ -287,17 +283,24 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
 	inc_tlb_flush_pending(tlb->mm);
 }
 
+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm)
+{
+	__tlb_gather_mmu(tlb, mm, false);
+}
+
+void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm)
+{
+	__tlb_gather_mmu(tlb, mm, true);
+}
+
 /**
  * tlb_finish_mmu - finish an mmu_gather structure
  * @tlb: the mmu_gather structure to finish
- * @start: start of the region that will be removed from the page-table
- * @end: end of the region that will be removed from the page-table
 *
 * Called at the end of the shootdown operation to free up any resources that
 * were required.
 */
-void tlb_finish_mmu(struct mmu_gather *tlb,
-		    unsigned long start, unsigned long end)
+void tlb_finish_mmu(struct mmu_gather *tlb)
 {
 	/*
	 * If there are parallel threads are doing PTE changes on same range
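The fullmm case deserves a usage note: instead of encoding full address-space teardown in the magic 0..-1 range, callers now request it explicitly. A sketch of the exit-time pattern (my_exit_teardown() is hypothetical and invented for illustration; the real caller is exit_mmap() in the mm/mmap.c hunk above):

/* Hypothetical full address-space teardown, mirroring exit_mmap(). */
static void my_exit_teardown(struct mm_struct *mm)
{
	struct mmu_gather tlb;

	/* Before this series: tlb_gather_mmu(&tlb, mm, 0, -1); */
	tlb_gather_mmu_fullmm(&tlb, mm);
	unmap_vmas(&tlb, mm->mmap, 0, -1);
	free_pgtables(&tlb, mm->mmap, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
	/* Before this series: tlb_finish_mmu(&tlb, 0, -1); */
	tlb_finish_mmu(&tlb);
}

Setting tlb->fullmm at initialization lets the gather code take its address-space-wide teardown path directly, rather than inferring it from the range arguments as the removed "Is it from 0 to ~0?" check did.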
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 04b19b7b5435..c9a33ffe38b7 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -546,15 +546,15 @@ bool __oom_reap_task_mm(struct mm_struct *mm)
 			mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0,
 						vma, mm, vma->vm_start,
 						vma->vm_end);
-			tlb_gather_mmu(&tlb, mm, range.start, range.end);
+			tlb_gather_mmu(&tlb, mm);
 			if (mmu_notifier_invalidate_range_start_nonblock(&range)) {
-				tlb_finish_mmu(&tlb, range.start, range.end);
+				tlb_finish_mmu(&tlb);
 				ret = false;
 				continue;
 			}
 			unmap_page_range(&tlb, vma, range.start, range.end, NULL);
 			mmu_notifier_invalidate_range_end(&range);
-			tlb_finish_mmu(&tlb, range.start, range.end);
+			tlb_finish_mmu(&tlb);
 		}
 	}