Diffstat (limited to 'mm/memory.c')
-rw-r--r--  mm/memory.c  376
1 file changed, 254 insertions(+), 122 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index d10e616d7389..4bcd79619574 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -365,6 +365,8 @@ void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
struct vm_area_struct *vma, unsigned long floor,
unsigned long ceiling, bool mm_wr_locked)
{
+ struct unlink_vma_file_batch vb;
+
do {
unsigned long addr = vma->vm_start;
struct vm_area_struct *next;
@@ -384,12 +386,15 @@ void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
if (mm_wr_locked)
vma_start_write(vma);
unlink_anon_vmas(vma);
- unlink_file_vma(vma);
if (is_vm_hugetlb_page(vma)) {
+ unlink_file_vma(vma);
hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
floor, next ? next->vm_start : ceiling);
} else {
+ unlink_file_vma_batch_init(&vb);
+ unlink_file_vma_batch_add(&vb, vma);
+
/*
* Optimization: gather nearby vmas into one call down
*/
@@ -402,8 +407,9 @@ void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
if (mm_wr_locked)
vma_start_write(vma);
unlink_anon_vmas(vma);
- unlink_file_vma(vma);
+ unlink_file_vma_batch_add(&vb, vma);
}
+ unlink_file_vma_batch_final(&vb);
free_pgd_range(tlb, addr, vma->vm_end,
floor, next ? next->vm_start : ceiling);
}
@@ -575,10 +581,13 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
* VM_MIXEDMAP mappings can likewise contain memory with or without "struct
* page" backing, however the difference is that _all_ pages with a struct
* page (that is, those where pfn_valid is true) are refcounted and considered
- * normal pages by the VM. The disadvantage is that pages are refcounted
- * (which can be slower and simply not an option for some PFNMAP users). The
- * advantage is that we don't have to follow the strict linearity rule of
- * PFNMAP mappings in order to support COWable mappings.
+ * normal pages by the VM. The only exceptions are zeropages, which are
+ * *never* refcounted.
+ *
+ * The disadvantage is that pages are refcounted (which can be slower and
+ * simply not an option for some PFNMAP users). The advantage is that we
+ * don't have to follow the strict linearity rule of PFNMAP mappings in
+ * order to support COWable mappings.
*
*/
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
@@ -616,6 +625,8 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
if (vma->vm_flags & VM_MIXEDMAP) {
if (!pfn_valid(pfn))
return NULL;
+ if (is_zero_pfn(pfn))
+ return NULL;
goto out;
} else {
unsigned long off;
@@ -641,6 +652,7 @@ check_pfn:
* eg. VDSO mappings can cause them to exist.
*/
out:
+ VM_WARN_ON_ONCE(is_zero_pfn(pfn));
return pfn_to_page(pfn);
}
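
As background for the zeropage special-casing added above, the "zeropages are *never* refcounted" rule has a directly observable effect from userspace: read faults on an untouched private anonymous mapping are satisfied by the shared zeropage and are not charged to the anonymous RSS counter, while writes trigger COW and allocate real, refcounted pages. The following standalone program is only an illustration (not part of this patch); exact numbers depend on page size, THP and kernel configuration, and it assumes 4 KiB pages.

/*
 * Illustration only: reads map the shared zeropage (RSS barely moves),
 * writes COW in real pages (RSS grows by roughly the mapping size).
 */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

static long vm_rss_kb(void)
{
	char line[256];
	long kb = -1;
	FILE *f = fopen("/proc/self/status", "r");

	while (f && fgets(line, sizeof(line), f))
		if (sscanf(line, "VmRSS: %ld kB", &kb) == 1)
			break;
	if (f)
		fclose(f);
	return kb;
}

int main(void)
{
	size_t len = 64UL << 20;		/* 64 MiB */
	volatile unsigned char sink = 0;
	unsigned char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
				MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	for (size_t i = 0; i < len; i += 4096)	/* read faults: zeropage */
		sink += p[i];
	printf("RSS after reads:  %ld kB\n", vm_rss_kb());

	memset(p, 1, len);			/* write faults: real pages */
	printf("RSS after writes: %ld kB\n", vm_rss_kb());
	munmap(p, len);
	return 0;
}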
@@ -918,7 +930,7 @@ copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
*prealloc = NULL;
copy_user_highpage(&new_folio->page, page, addr, src_vma);
__folio_mark_uptodate(new_folio);
- folio_add_new_anon_rmap(new_folio, dst_vma, addr);
+ folio_add_new_anon_rmap(new_folio, dst_vma, addr, RMAP_EXCLUSIVE);
folio_add_lru_vma(new_folio, dst_vma);
rss[MM_ANONPAGES]++;
@@ -1977,10 +1989,48 @@ pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
return pte_alloc_map_lock(mm, pmd, addr, ptl);
}
-static int validate_page_before_insert(struct page *page)
+static bool vm_mixed_zeropage_allowed(struct vm_area_struct *vma)
+{
+ VM_WARN_ON_ONCE(vma->vm_flags & VM_PFNMAP);
+ /*
+ * Whoever wants to forbid the zeropage after some zeropages
+ * might already have been mapped has to scan the page tables and
+ * bail out on any zeropages. Zeropages in COW mappings can
+ * be unshared using FAULT_FLAG_UNSHARE faults.
+ */
+ if (mm_forbids_zeropage(vma->vm_mm))
+ return false;
+ /* zeropages in COW mappings are common and unproblematic. */
+ if (is_cow_mapping(vma->vm_flags))
+ return true;
+ /* Mappings that do not allow for writable PTEs are unproblematic. */
+ if (!(vma->vm_flags & (VM_WRITE | VM_MAYWRITE)))
+ return true;
+ /*
+ * Why not allow any VMA that has vm_ops->pfn_mkwrite? GUP could
+ * find the shared zeropage and longterm-pin it, which would
+ * be problematic as soon as the zeropage gets replaced by a different
+ * page due to vma->vm_ops->pfn_mkwrite, because what's mapped would
+ * now differ from what GUP looked up. FSDAX is incompatible with
+ * FOLL_LONGTERM and VM_IO is incompatible with GUP completely (see
+ * check_vma_flags).
+ */
+ return vma->vm_ops && vma->vm_ops->pfn_mkwrite &&
+ (vma_is_fsdax(vma) || vma->vm_flags & VM_IO);
+}
+
+static int validate_page_before_insert(struct vm_area_struct *vma,
+ struct page *page)
{
struct folio *folio = page_folio(page);
+ if (!folio_ref_count(folio))
+ return -EINVAL;
+ if (unlikely(is_zero_folio(folio))) {
+ if (!vm_mixed_zeropage_allowed(vma))
+ return -EINVAL;
+ return 0;
+ }
if (folio_test_anon(folio) || folio_test_slab(folio) ||
page_has_type(page))
return -EINVAL;
@@ -1992,24 +2042,23 @@ static int insert_page_into_pte_locked(struct vm_area_struct *vma, pte_t *pte,
unsigned long addr, struct page *page, pgprot_t prot)
{
struct folio *folio = page_folio(page);
+ pte_t pteval;
if (!pte_none(ptep_get(pte)))
return -EBUSY;
/* Ok, finally just insert the thing.. */
- folio_get(folio);
- inc_mm_counter(vma->vm_mm, mm_counter_file(folio));
- folio_add_file_rmap_pte(folio, page, vma);
- set_pte_at(vma->vm_mm, addr, pte, mk_pte(page, prot));
+ pteval = mk_pte(page, prot);
+ if (unlikely(is_zero_folio(folio))) {
+ pteval = pte_mkspecial(pteval);
+ } else {
+ folio_get(folio);
+ inc_mm_counter(vma->vm_mm, mm_counter_file(folio));
+ folio_add_file_rmap_pte(folio, page, vma);
+ }
+ set_pte_at(vma->vm_mm, addr, pte, pteval);
return 0;
}
-/*
- * This is the old fallback for page remapping.
- *
- * For historical reasons, it only allows reserved pages. Only
- * old drivers should use this, and they needed to mark their
- * pages reserved for the old functions anyway.
- */
static int insert_page(struct vm_area_struct *vma, unsigned long addr,
struct page *page, pgprot_t prot)
{
@@ -2017,7 +2066,7 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
pte_t *pte;
spinlock_t *ptl;
- retval = validate_page_before_insert(page);
+ retval = validate_page_before_insert(vma, page);
if (retval)
goto out;
retval = -ENOMEM;
@@ -2035,9 +2084,7 @@ static int insert_page_in_batch_locked(struct vm_area_struct *vma, pte_t *pte,
{
int err;
- if (!page_count(page))
- return -EINVAL;
- err = validate_page_before_insert(page);
+ err = validate_page_before_insert(vma, page);
if (err)
return err;
return insert_page_into_pte_locked(vma, pte, addr, page, prot);
@@ -2143,7 +2190,8 @@ EXPORT_SYMBOL(vm_insert_pages);
* @page: source kernel page
*
* This allows drivers to insert individual pages they've allocated
- * into a user vma.
+ * into a user vma. The zeropage is supported in some VMAs,
+ * see vm_mixed_zeropage_allowed().
*
* The page has to be a nice clean _individual_ kernel allocation.
* If you allocate a compound page, you need to have marked it as
@@ -2170,8 +2218,6 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
{
if (addr < vma->vm_start || addr >= vma->vm_end)
return -EFAULT;
- if (!page_count(page))
- return -EINVAL;
if (!(vma->vm_flags & VM_MIXEDMAP)) {
BUG_ON(mmap_read_trylock(vma->vm_mm));
BUG_ON(vma->vm_flags & VM_PFNMAP);
@@ -2189,6 +2235,8 @@ EXPORT_SYMBOL(vm_insert_page);
* @offset: user's requested vm_pgoff
*
* This allows drivers to map range of kernel pages into a user vma.
+ * The zeropage is supported in some VMAs, see
+ * vm_mixed_zeropage_allowed().
*
* Return: 0 on success and error code otherwise.
*/
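
For context on the vm_insert_page() contract restated above, the typical caller is a driver's mmap handler mapping pages the driver allocated itself. The fragment below is only a sketch of that documented pattern; the my_dev structure, the my_mmap name and the pages[] array are hypothetical and are not taken from this patch.

#include <linux/fs.h>
#include <linux/mm.h>

struct my_dev {				/* hypothetical driver state */
	struct page **pages;		/* individually allocated with alloc_page() */
	unsigned long nr_pages;
};

static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_dev *dev = file->private_data;
	unsigned long npages = vma_pages(vma);
	unsigned long i;
	int err;

	if (vma->vm_pgoff || npages > dev->nr_pages)
		return -EINVAL;

	for (i = 0; i < npages; i++) {
		/* pages must not be compound; vm_insert_page() takes a reference */
		err = vm_insert_page(vma, vma->vm_start + i * PAGE_SIZE,
				     dev->pages[i]);
		if (err)
			return err;
	}
	/* vm_insert_page() marks the vma VM_MIXEDMAP on first use */
	return 0;
}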
@@ -2404,8 +2452,11 @@ vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
}
EXPORT_SYMBOL(vmf_insert_pfn);
-static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn)
+static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn, bool mkwrite)
{
+ if (unlikely(is_zero_pfn(pfn_t_to_pfn(pfn))) &&
+ (mkwrite || !vm_mixed_zeropage_allowed(vma)))
+ return false;
/* these checks mirror the abort conditions in vm_normal_page */
if (vma->vm_flags & VM_MIXEDMAP)
return true;
@@ -2424,7 +2475,8 @@ static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
pgprot_t pgprot = vma->vm_page_prot;
int err;
- BUG_ON(!vm_mixed_ok(vma, pfn));
+ if (!vm_mixed_ok(vma, pfn, mkwrite))
+ return VM_FAULT_SIGBUS;
if (addr < vma->vm_start || addr >= vma->vm_end)
return VM_FAULT_SIGBUS;
@@ -2481,7 +2533,6 @@ vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
{
return __vm_insert_mixed(vma, addr, pfn, true);
}
-EXPORT_SYMBOL(vmf_insert_mixed_mkwrite);
/*
* maps a range of physical memory into the requested pages. the old
@@ -2970,10 +3021,8 @@ static inline int __wp_page_copy_user(struct page *dst, struct page *src,
unsigned long addr = vmf->address;
if (likely(src)) {
- if (copy_mc_user_highpage(dst, src, addr, vma)) {
- memory_failure_queue(page_to_pfn(src), 0);
+ if (copy_mc_user_highpage(dst, src, addr, vma))
return -EHWPOISON;
- }
return 0;
}
@@ -3172,6 +3221,7 @@ static inline void wp_page_reuse(struct vm_fault *vmf, struct folio *folio)
pte_t entry;
VM_BUG_ON(!(vmf->flags & FAULT_FLAG_WRITE));
+ VM_WARN_ON(is_zero_pfn(pte_pfn(vmf->orig_pte)));
if (folio) {
VM_BUG_ON(folio_test_anon(folio) &&
@@ -3349,7 +3399,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
* some TLBs while the old PTE remains in others.
*/
ptep_clear_flush(vma, vmf->address, vmf->pte);
- folio_add_new_anon_rmap(new_folio, vma, vmf->address);
+ folio_add_new_anon_rmap(new_folio, vma, vmf->address, RMAP_EXCLUSIVE);
folio_add_lru_vma(new_folio, vma);
BUG_ON(unshare && pte_write(entry));
set_pte_at(mm, vmf->address, vmf->pte, entry);
@@ -3866,7 +3916,7 @@ static inline bool should_try_to_free_swap(struct folio *folio,
* reference only in case it's likely that we'll be the exclusive user.
*/
return (fault_flags & FAULT_FLAG_WRITE) && !folio_test_ksm(folio) &&
- folio_ref_count(folio) == 2;
+ folio_ref_count(folio) == (1 + folio_nr_pages(folio));
}
static vm_fault_t pte_marker_clear(struct vm_fault *vmf)
@@ -3957,6 +4007,10 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
pte_t pte;
vm_fault_t ret = 0;
void *shadow = NULL;
+ int nr_pages;
+ unsigned long page_idx;
+ unsigned long address;
+ pte_t *ptep;
if (!pte_unmap_same(vmf))
goto out;
@@ -4058,7 +4112,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
/* To provide entry to swap_read_folio() */
folio->swap = entry;
- swap_read_folio(folio, true, NULL);
+ swap_read_folio(folio, NULL);
folio->private = NULL;
}
} else {
@@ -4155,6 +4209,38 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
goto out_nomap;
}
+ nr_pages = 1;
+ page_idx = 0;
+ address = vmf->address;
+ ptep = vmf->pte;
+ if (folio_test_large(folio) && folio_test_swapcache(folio)) {
+ int nr = folio_nr_pages(folio);
+ unsigned long idx = folio_page_idx(folio, page);
+ unsigned long folio_start = address - idx * PAGE_SIZE;
+ unsigned long folio_end = folio_start + nr * PAGE_SIZE;
+ pte_t *folio_ptep;
+ pte_t folio_pte;
+
+ if (unlikely(folio_start < max(address & PMD_MASK, vma->vm_start)))
+ goto check_folio;
+ if (unlikely(folio_end > pmd_addr_end(address, vma->vm_end)))
+ goto check_folio;
+
+ folio_ptep = vmf->pte - idx;
+ folio_pte = ptep_get(folio_ptep);
+ if (!pte_same(folio_pte, pte_move_swp_offset(vmf->orig_pte, -idx)) ||
+ swap_pte_batch(folio_ptep, nr, folio_pte) != nr)
+ goto check_folio;
+
+ page_idx = idx;
+ address = folio_start;
+ ptep = folio_ptep;
+ nr_pages = nr;
+ entry = folio->swap;
+ page = &folio->page;
+ }
+
+check_folio:
/*
* PG_anon_exclusive reuses PG_mappedtodisk for anon pages. A swap pte
* must never point at an anonymous page in the swapcache that is
@@ -4214,13 +4300,17 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
* We're already holding a reference on the page but haven't mapped it
* yet.
*/
- swap_free(entry);
+ swap_free_nr(entry, nr_pages);
if (should_try_to_free_swap(folio, vma, vmf->flags))
folio_free_swap(folio);
- inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
- dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
+ add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages);
+ add_mm_counter(vma->vm_mm, MM_SWAPENTS, -nr_pages);
pte = mk_pte(page, vma->vm_page_prot);
+ if (pte_swp_soft_dirty(vmf->orig_pte))
+ pte = pte_mksoft_dirty(pte);
+ if (pte_swp_uffd_wp(vmf->orig_pte))
+ pte = pte_mkuffd_wp(pte);
/*
* Same logic as in do_wp_page(); however, optimize for pages that are
@@ -4230,32 +4320,43 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
*/
if (!folio_test_ksm(folio) &&
(exclusive || folio_ref_count(folio) == 1)) {
- if (vmf->flags & FAULT_FLAG_WRITE) {
- pte = maybe_mkwrite(pte_mkdirty(pte), vma);
- vmf->flags &= ~FAULT_FLAG_WRITE;
+ if ((vma->vm_flags & VM_WRITE) && !userfaultfd_pte_wp(vma, pte) &&
+ !pte_needs_soft_dirty_wp(vma, pte)) {
+ pte = pte_mkwrite(pte, vma);
+ if (vmf->flags & FAULT_FLAG_WRITE) {
+ pte = pte_mkdirty(pte);
+ vmf->flags &= ~FAULT_FLAG_WRITE;
+ }
}
rmap_flags |= RMAP_EXCLUSIVE;
}
- flush_icache_page(vma, page);
- if (pte_swp_soft_dirty(vmf->orig_pte))
- pte = pte_mksoft_dirty(pte);
- if (pte_swp_uffd_wp(vmf->orig_pte))
- pte = pte_mkuffd_wp(pte);
- vmf->orig_pte = pte;
+ folio_ref_add(folio, nr_pages - 1);
+ flush_icache_pages(vma, page, nr_pages);
+ vmf->orig_pte = pte_advance_pfn(pte, page_idx);
/* ksm created a completely new copy */
if (unlikely(folio != swapcache && swapcache)) {
- folio_add_new_anon_rmap(folio, vma, vmf->address);
+ folio_add_new_anon_rmap(folio, vma, address, RMAP_EXCLUSIVE);
folio_add_lru_vma(folio, vma);
+ } else if (!folio_test_anon(folio)) {
+ /*
+ * We currently only expect small !anon folios, which are either
+ * fully exclusive or fully shared. If we ever get large folios
+ * here, we have to be careful.
+ */
+ VM_WARN_ON_ONCE(folio_test_large(folio));
+ VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
+ folio_add_new_anon_rmap(folio, vma, address, rmap_flags);
} else {
- folio_add_anon_rmap_pte(folio, page, vma, vmf->address,
+ folio_add_anon_rmap_ptes(folio, page, nr_pages, vma, address,
rmap_flags);
}
VM_BUG_ON(!folio_test_anon(folio) ||
(pte_write(pte) && !PageAnonExclusive(page)));
- set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
- arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte);
+ set_ptes(vma->vm_mm, address, ptep, pte, nr_pages);
+ arch_do_swap_page_nr(vma->vm_mm, vma, address,
+ pte, pte, nr_pages);
folio_unlock(folio);
if (folio != swapcache && swapcache) {
@@ -4279,7 +4380,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
}
/* No need to invalidate - it was non-present before */
- update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
+ update_mmu_cache_range(vmf, vma, address, ptep, nr_pages);
unlock:
if (vmf->pte)
pte_unmap_unlock(vmf->pte, vmf->ptl);
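
The batched swap-in hunks above only map a whole large folio when the folio-sized range around the fault address fits inside both the VMA and the current PMD-sized page-table range (and the swap PTEs form one contiguous batch, which the sketch below does not model). The following standalone program is a rough illustration of just the address-range check; the page and PMD sizes are assumptions for a common x86-64 configuration, where the kernel uses PAGE_SIZE, PMD_MASK and pmd_addr_end().

/*
 * Standalone sketch of the range check: batch-map the folio only if
 * [folio_start, folio_end) lies inside the VMA and one PMD range.
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PMD_SIZE	(512 * PAGE_SIZE)	/* 2 MiB, assumed */
#define PMD_MASK	(~(PMD_SIZE - 1))

static bool can_batch_map(unsigned long addr, unsigned long idx,
			  unsigned long nr, unsigned long vm_start,
			  unsigned long vm_end)
{
	unsigned long folio_start = addr - idx * PAGE_SIZE;
	unsigned long folio_end = folio_start + nr * PAGE_SIZE;
	/* pmd_addr_end(): next PMD boundary after addr, clamped to vm_end */
	unsigned long pmd_end = (addr & PMD_MASK) + PMD_SIZE;

	if (pmd_end > vm_end)
		pmd_end = vm_end;
	if (folio_start < (addr & PMD_MASK) || folio_start < vm_start)
		return false;
	if (folio_end > pmd_end)
		return false;
	return true;
}

int main(void)
{
	/* 16-page folio, fault on its 4th page, well inside a 2 MiB VMA */
	printf("%d\n", can_batch_map(0x200000UL + 3 * PAGE_SIZE, 3, 16,
				     0x200000UL, 0x400000UL));	/* 1 */
	/* same folio, but the VMA starts one page into the folio */
	printf("%d\n", can_batch_map(0x200000UL + 3 * PAGE_SIZE, 3, 16,
				     0x200000UL + PAGE_SIZE, 0x400000UL)); /* 0 */
	return 0;
}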
@@ -4384,7 +4485,7 @@ static struct folio *alloc_anon_folio(struct vm_fault *vmf)
goto next;
}
folio_throttle_swaprate(folio, gfp);
- clear_huge_page(&folio->page, vmf->address, 1 << order);
+ folio_zero_user(folio, vmf->address);
return folio;
}
next:
@@ -4410,7 +4511,6 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
vm_fault_t ret = 0;
int nr_pages = 1;
pte_t entry;
- int i;
/* File mapping without ->vm_ops ? */
if (vma->vm_flags & VM_SHARED)
@@ -4480,8 +4580,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
update_mmu_tlb(vma, addr, vmf->pte);
goto release;
} else if (nr_pages > 1 && !pte_range_none(vmf->pte, nr_pages)) {
- for (i = 0; i < nr_pages; i++)
- update_mmu_tlb(vma, addr + PAGE_SIZE * i, vmf->pte + i);
+ update_mmu_tlb_range(vma, addr, vmf->pte, nr_pages);
goto release;
}
@@ -4501,7 +4600,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
count_mthp_stat(folio_order(folio), MTHP_STAT_ANON_FAULT_ALLOC);
#endif
- folio_add_new_anon_rmap(folio, vma, addr);
+ folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE);
folio_add_lru_vma(folio, vma);
setpte:
if (vmf_orig_pte_uffd_wp(vmf))
@@ -4541,7 +4640,7 @@ static vm_fault_t __do_fault(struct vm_fault *vmf)
* lock_page(B)
* lock_page(B)
* pte_alloc_one
- * shrink_page_list
+ * shrink_folio_list
* wait_on_page_writeback(A)
* SetPageWriteback(B)
* unlock_page(B)
@@ -4699,7 +4798,7 @@ void set_pte_range(struct vm_fault *vmf, struct folio *folio,
/* copy-on-write page */
if (write && !(vma->vm_flags & VM_SHARED)) {
VM_BUG_ON_FOLIO(nr != 1, folio);
- folio_add_new_anon_rmap(folio, vma, addr);
+ folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE);
folio_add_lru_vma(folio, vma);
} else {
folio_add_file_rmap_ptes(folio, page, nr, vma);
@@ -4737,9 +4836,12 @@ vm_fault_t finish_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct page *page;
+ struct folio *folio;
vm_fault_t ret;
bool is_cow = (vmf->flags & FAULT_FLAG_WRITE) &&
!(vma->vm_flags & VM_SHARED);
+ int type, nr_pages;
+ unsigned long addr = vmf->address;
/* Did we COW the page? */
if (is_cow)
@@ -4770,24 +4872,62 @@ vm_fault_t finish_fault(struct vm_fault *vmf)
return VM_FAULT_OOM;
}
+ folio = page_folio(page);
+ nr_pages = folio_nr_pages(folio);
+
+ /*
+ * Use per-page faults to maintain the uffd semantics; the same
+ * approach also applies to non-anonymous-shmem faults to avoid
+ * inflating the RSS of the process.
+ */
+ if (!vma_is_anon_shmem(vma) || unlikely(userfaultfd_armed(vma))) {
+ nr_pages = 1;
+ } else if (nr_pages > 1) {
+ pgoff_t idx = folio_page_idx(folio, page);
+ /* The page offset of vmf->address within the VMA. */
+ pgoff_t vma_off = vmf->pgoff - vmf->vma->vm_pgoff;
+ /* The index of the pagetable entry for the fault page. */
+ pgoff_t pte_off = pte_index(vmf->address);
+
+ /*
+ * Fall back to per-page faults in case the folio in the page
+ * cache extends beyond the VMA limits or the PMD pagetable limits.
+ */
+ if (unlikely(vma_off < idx ||
+ vma_off + (nr_pages - idx) > vma_pages(vma) ||
+ pte_off < idx ||
+ pte_off + (nr_pages - idx) > PTRS_PER_PTE)) {
+ nr_pages = 1;
+ } else {
+ /* Now we can set mappings for the whole large folio. */
+ addr = vmf->address - idx * PAGE_SIZE;
+ page = &folio->page;
+ }
+ }
+
vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
- vmf->address, &vmf->ptl);
+ addr, &vmf->ptl);
if (!vmf->pte)
return VM_FAULT_NOPAGE;
/* Re-check under ptl */
- if (likely(!vmf_pte_changed(vmf))) {
- struct folio *folio = page_folio(page);
- int type = is_cow ? MM_ANONPAGES : mm_counter_file(folio);
-
- set_pte_range(vmf, folio, page, 1, vmf->address);
- add_mm_counter(vma->vm_mm, type, 1);
- ret = 0;
- } else {
- update_mmu_tlb(vma, vmf->address, vmf->pte);
+ if (nr_pages == 1 && unlikely(vmf_pte_changed(vmf))) {
+ update_mmu_tlb(vma, addr, vmf->pte);
+ ret = VM_FAULT_NOPAGE;
+ goto unlock;
+ } else if (nr_pages > 1 && !pte_range_none(vmf->pte, nr_pages)) {
+ update_mmu_tlb_range(vma, addr, vmf->pte, nr_pages);
ret = VM_FAULT_NOPAGE;
+ goto unlock;
}
+ folio_ref_add(folio, nr_pages - 1);
+ set_pte_range(vmf, folio, page, nr_pages, addr);
+ type = is_cow ? MM_ANONPAGES : mm_counter_file(folio);
+ add_mm_counter(vma->vm_mm, type, nr_pages);
+ ret = 0;
+
+unlock:
pte_unmap_unlock(vmf->pte, vmf->ptl);
return ret;
}
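
The finish_fault() changes above apply the same kind of fit check in units of pages: the whole folio is mapped only if it starts and ends inside both the VMA and the current PTE table, otherwise the code falls back to a single-page mapping. Below is a rough standalone sketch of that check, with made-up example values and PTRS_PER_PTE assumed to be 512.

/* Sketch of the finish_fault() fit check, in page units. */
#include <stdbool.h>
#include <stdio.h>

#define PTRS_PER_PTE	512UL			/* assumed */

static bool folio_fits(unsigned long idx,	/* page's index in folio */
		       unsigned long nr_pages,	/* folio size in pages   */
		       unsigned long vma_off,	/* fault offset in VMA   */
		       unsigned long vma_len,	/* vma_pages(vma)        */
		       unsigned long pte_off)	/* pte_index(address)    */
{
	if (vma_off < idx || vma_off + (nr_pages - idx) > vma_len)
		return false;
	if (pte_off < idx || pte_off + (nr_pages - idx) > PTRS_PER_PTE)
		return false;
	return true;
}

int main(void)
{
	/* 16-page folio, faulting page is its 6th page (idx 5) */
	printf("%d\n", folio_fits(5, 16, 40, 256, 45));	/* fits: 1 */
	printf("%d\n", folio_fits(5, 16, 40, 256, 3));	/* folio would start before this PTE table: 0 */
	return 0;
}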
@@ -5067,8 +5207,6 @@ int numa_migrate_prep(struct folio *folio, struct vm_fault *vmf,
{
struct vm_area_struct *vma = vmf->vma;
- folio_get(folio);
-
/* Record the current PID accessing the VMA */
vma_set_access_pid_bit(vma);
@@ -5205,16 +5343,19 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
else
last_cpupid = folio_last_cpupid(folio);
target_nid = numa_migrate_prep(folio, vmf, vmf->address, nid, &flags);
- if (target_nid == NUMA_NO_NODE) {
- folio_put(folio);
+ if (target_nid == NUMA_NO_NODE)
+ goto out_map;
+ if (migrate_misplaced_folio_prepare(folio, vma, target_nid)) {
+ flags |= TNF_MIGRATE_FAIL;
goto out_map;
}
+ /* The folio is isolated and isolation code holds a folio reference. */
pte_unmap_unlock(vmf->pte, vmf->ptl);
writable = false;
ignore_writable = true;
/* Migrate to the requested node */
- if (migrate_misplaced_folio(folio, vma, target_nid)) {
+ if (!migrate_misplaced_folio(folio, vma, target_nid)) {
nid = target_nid;
flags |= TNF_MIGRATED;
} else {
@@ -6244,23 +6385,23 @@ EXPORT_SYMBOL(__might_fault);
* cache lines hot.
*/
static inline int process_huge_page(
- unsigned long addr_hint, unsigned int pages_per_huge_page,
+ unsigned long addr_hint, unsigned int nr_pages,
int (*process_subpage)(unsigned long addr, int idx, void *arg),
void *arg)
{
int i, n, base, l, ret;
unsigned long addr = addr_hint &
- ~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
+ ~(((unsigned long)nr_pages << PAGE_SHIFT) - 1);
/* Process target subpage last to keep its cache lines hot */
might_sleep();
n = (addr_hint - addr) / PAGE_SIZE;
- if (2 * n <= pages_per_huge_page) {
+ if (2 * n <= nr_pages) {
/* If target subpage in first half of huge page */
base = 0;
l = n;
/* Process subpages at the end of huge page */
- for (i = pages_per_huge_page - 1; i >= 2 * n; i--) {
+ for (i = nr_pages - 1; i >= 2 * n; i--) {
cond_resched();
ret = process_subpage(addr + i * PAGE_SIZE, i, arg);
if (ret)
@@ -6268,8 +6409,8 @@ static inline int process_huge_page(
}
} else {
/* If target subpage in second half of huge page */
- base = pages_per_huge_page - 2 * (pages_per_huge_page - n);
- l = pages_per_huge_page - n;
+ base = nr_pages - 2 * (nr_pages - n);
+ l = nr_pages - n;
/* Process subpages at the beginning of the huge page */
for (i = 0; i < base; i++) {
cond_resched();
@@ -6298,102 +6439,93 @@ static inline int process_huge_page(
return 0;
}
-static void clear_gigantic_page(struct page *page,
- unsigned long addr,
- unsigned int pages_per_huge_page)
+static void clear_gigantic_page(struct folio *folio, unsigned long addr,
+ unsigned int nr_pages)
{
int i;
- struct page *p;
might_sleep();
- for (i = 0; i < pages_per_huge_page; i++) {
- p = nth_page(page, i);
+ for (i = 0; i < nr_pages; i++) {
cond_resched();
- clear_user_highpage(p, addr + i * PAGE_SIZE);
+ clear_user_highpage(folio_page(folio, i), addr + i * PAGE_SIZE);
}
}
static int clear_subpage(unsigned long addr, int idx, void *arg)
{
- struct page *page = arg;
+ struct folio *folio = arg;
- clear_user_highpage(nth_page(page, idx), addr);
+ clear_user_highpage(folio_page(folio, idx), addr);
return 0;
}
-void clear_huge_page(struct page *page,
- unsigned long addr_hint, unsigned int pages_per_huge_page)
+/**
+ * folio_zero_user - Zero a folio which will be mapped to userspace.
+ * @folio: The folio to zero.
+ * @addr_hint: The address that will be accessed, or the base address if unclear.
+ */
+void folio_zero_user(struct folio *folio, unsigned long addr_hint)
{
- unsigned long addr = addr_hint &
- ~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
-
- if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
- clear_gigantic_page(page, addr, pages_per_huge_page);
- return;
- }
+ unsigned int nr_pages = folio_nr_pages(folio);
- process_huge_page(addr_hint, pages_per_huge_page, clear_subpage, page);
+ if (unlikely(nr_pages > MAX_ORDER_NR_PAGES))
+ clear_gigantic_page(folio, addr_hint, nr_pages);
+ else
+ process_huge_page(addr_hint, nr_pages, clear_subpage, folio);
}
static int copy_user_gigantic_page(struct folio *dst, struct folio *src,
- unsigned long addr,
- struct vm_area_struct *vma,
- unsigned int pages_per_huge_page)
+ unsigned long addr,
+ struct vm_area_struct *vma,
+ unsigned int nr_pages)
{
int i;
struct page *dst_page;
struct page *src_page;
- for (i = 0; i < pages_per_huge_page; i++) {
+ for (i = 0; i < nr_pages; i++) {
dst_page = folio_page(dst, i);
src_page = folio_page(src, i);
cond_resched();
if (copy_mc_user_highpage(dst_page, src_page,
- addr + i*PAGE_SIZE, vma)) {
- memory_failure_queue(page_to_pfn(src_page), 0);
+ addr + i*PAGE_SIZE, vma))
return -EHWPOISON;
- }
}
return 0;
}
struct copy_subpage_arg {
- struct page *dst;
- struct page *src;
+ struct folio *dst;
+ struct folio *src;
struct vm_area_struct *vma;
};
static int copy_subpage(unsigned long addr, int idx, void *arg)
{
struct copy_subpage_arg *copy_arg = arg;
- struct page *dst = nth_page(copy_arg->dst, idx);
- struct page *src = nth_page(copy_arg->src, idx);
+ struct page *dst = folio_page(copy_arg->dst, idx);
+ struct page *src = folio_page(copy_arg->src, idx);
- if (copy_mc_user_highpage(dst, src, addr, copy_arg->vma)) {
- memory_failure_queue(page_to_pfn(src), 0);
+ if (copy_mc_user_highpage(dst, src, addr, copy_arg->vma))
return -EHWPOISON;
- }
return 0;
}
int copy_user_large_folio(struct folio *dst, struct folio *src,
unsigned long addr_hint, struct vm_area_struct *vma)
{
- unsigned int pages_per_huge_page = folio_nr_pages(dst);
- unsigned long addr = addr_hint &
- ~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
+ unsigned int nr_pages = folio_nr_pages(dst);
struct copy_subpage_arg arg = {
- .dst = &dst->page,
- .src = &src->page,
+ .dst = dst,
+ .src = src,
.vma = vma,
};
- if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES))
- return copy_user_gigantic_page(dst, src, addr, vma,
- pages_per_huge_page);
+ if (unlikely(nr_pages > MAX_ORDER_NR_PAGES))
+ return copy_user_gigantic_page(dst, src, addr_hint, vma, nr_pages);
- return process_huge_page(addr_hint, pages_per_huge_page, copy_subpage, &arg);
+ return process_huge_page(addr_hint, nr_pages, copy_subpage, &arg);
}
long copy_folio_from_user(struct folio *dst_folio,
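
The process_huge_page() hunks above keep the comment about processing the target subpage last so its cache lines stay hot. The toy program below reproduces that ordering idea in userspace: it handles the half of the huge page far from the target first, then converges on the target from both ends. The converging loop paraphrases the part of the function not visible in these hunks and is an approximation of the idea, not the kernel code itself.

/* Toy illustration of the "target subpage last" processing order. */
#include <stdio.h>

static void process_order(int nr_pages, int target)
{
	int base, l, i;

	if (2 * target <= nr_pages) {
		/* target in first half: handle the far tail first */
		base = 0;
		l = target;
		for (i = nr_pages - 1; i >= 2 * target; i--)
			printf("%d ", i);
	} else {
		/* target in second half: handle the far head first */
		base = nr_pages - 2 * (nr_pages - target);
		l = nr_pages - target;
		for (i = 0; i < base; i++)
			printf("%d ", i);
	}
	/* remaining 2*l subpages: converge on the target from both sides */
	for (i = 0; i < l; i++)
		printf("%d %d ", base + i, base + 2 * l - 1 - i);
	printf("\n");
}

int main(void)
{
	process_order(8, 2);	/* prints: 7 6 5 4 0 3 1 2 (target last) */
	return 0;
}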