author		Vishal Moola (Oracle) <vishal.moola@gmail.com>	2022-11-01 10:53:24 -0700
committer	Andrew Morton <akpm@linux-foundation.org>	2022-12-11 18:12:13 -0800
commit		28965f0f8be62e1ed8296fe0240b5d5dc064b681
tree		fda658fdc4bfe705bd9c9d6509548ebca74e7dbc	/mm/userfaultfd.c
parent		063aaad792eef49a11d7575dc9914b43c0fa3792
userfaultfd: replace lru_cache functions with folio_add functions
Replaces lru_cache_add() and lru_cache_add_inactive_or_unevictable() with
folio_add_lru() and folio_add_lru_vma(). This is in preparation for the
removal of lru_cache_add().

Link: https://lkml.kernel.org/r/20221101175326.13265-4-vishal.moola@gmail.com
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Miklos Szeredi <mszeredi@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
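For readers new to the folio work, the conversion applied here follows a simple
pattern: resolve the struct page to its containing folio once with page_folio(),
then call the folio-based LRU helpers in place of the old page-based ones. The
sketch below only illustrates that mapping; example_add_to_lru() is a
hypothetical helper written for this page and is not part of the patch.

/*
 * Minimal sketch of the page -> folio LRU conversion pattern.
 * example_add_to_lru() is hypothetical, not from the patch.
 */
#include <linux/mm.h>
#include <linux/swap.h>

static void example_add_to_lru(struct page *page,
                               struct vm_area_struct *vma, bool anon)
{
        /* Look up the containing folio; no extra reference is taken. */
        struct folio *folio = page_folio(page);

        if (anon)
                /* Was lru_cache_add_inactive_or_unevictable(page, vma). */
                folio_add_lru_vma(folio, vma);
        else
                /* Was lru_cache_add(page). */
                folio_add_lru(folio);
}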
Diffstat (limited to 'mm/userfaultfd.c')
-rw-r--r--	mm/userfaultfd.c	6
1 file changed, 4 insertions, 2 deletions
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 650ab6cfd5f4..b7a9479bece2 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -66,6 +66,7 @@ int mfill_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
         bool vm_shared = dst_vma->vm_flags & VM_SHARED;
         bool page_in_cache = page_mapping(page);
         spinlock_t *ptl;
+        struct folio *folio;
         struct inode *inode;
         pgoff_t offset, max_off;
 
@@ -113,14 +114,15 @@ int mfill_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
         if (!pte_none_mostly(*dst_pte))
                 goto out_unlock;
 
+        folio = page_folio(page);
         if (page_in_cache) {
                 /* Usually, cache pages are already added to LRU */
                 if (newly_allocated)
-                        lru_cache_add(page);
+                        folio_add_lru(folio);
                 page_add_file_rmap(page, dst_vma, false);
         } else {
                 page_add_new_anon_rmap(page, dst_vma, dst_addr);
-                lru_cache_add_inactive_or_unevictable(page, dst_vma);
+                folio_add_lru_vma(folio, dst_vma);
         }
 
         /*
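
With the patch applied, the LRU-insertion section of mfill_atomic_install_pte()
reads roughly as follows (reassembled from the hunk above): the folio is looked
up once before the branch, the rmap calls still operate on the struct page, and
only the LRU insertions switch to the folio API.

        folio = page_folio(page);
        if (page_in_cache) {
                /* Usually, cache pages are already added to LRU */
                if (newly_allocated)
                        folio_add_lru(folio);
                page_add_file_rmap(page, dst_vma, false);
        } else {
                page_add_new_anon_rmap(page, dst_vma, dst_addr);
                folio_add_lru_vma(folio, dst_vma);
        }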