| field | value | date |
|---|---|---|
| author | Matthew Wilcox (Oracle) <willy@infradead.org> | 2021-11-28 14:53:35 -0500 |
| committer | Matthew Wilcox (Oracle) <willy@infradead.org> | 2022-01-08 00:28:32 -0500 |
| commit | 3506659e18a61ae525f3b9b4f5af23b4b149d4db (patch) | |
| tree | a6edd50d2dcb142c0e1d2a0c3f5fb8afcaf0d91a /mm/internal.h | |
| parent | efe99bba2862aef24f1b05b786f6bf5acb076209 (diff) | |
mm: Add unmap_mapping_folio()
Convert both callers of unmap_mapping_page() to call unmap_mapping_folio()
instead. Also move zap_details from linux/mm.h to mm/memory.c.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
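
The call-site conversion the message describes is mechanical. Below is an illustrative sketch only, not code from this patch: the helper name is hypothetical, and it assumes a kernel build where `folio_mapped()` comes from linux/mm.h and `unmap_mapping_folio()` is declared in mm/internal.h as added here.

```c
/*
 * Illustrative sketch only (not taken from this patch): what a caller
 * conversion from unmap_mapping_page() to unmap_mapping_folio() typically
 * looks like.  The helper name is hypothetical.
 */
#include <linux/mm.h>
#include "internal.h"		/* declares unmap_mapping_folio() after this patch */

static void example_unmap_if_mapped(struct folio *folio)
{
	if (folio_mapped(folio))		/* any process still maps this folio? */
		unmap_mapping_folio(folio);	/* was: unmap_mapping_page(&folio->page) */
}
```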
Diffstat (limited to 'mm/internal.h')
-rw-r--r-- | mm/internal.h | 4 |
1 file changed, 3 insertions(+), 1 deletion(-)
```diff
diff --git a/mm/internal.h b/mm/internal.h
index 3b79a5c9427a..1ca93c6cb18c 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -74,6 +74,7 @@ static inline bool can_madv_lru_vma(struct vm_area_struct *vma)
 	return !(vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP));
 }
 
+struct zap_details;
 void unmap_page_range(struct mmu_gather *tlb,
 			     struct vm_area_struct *vma,
 			     unsigned long addr, unsigned long end,
@@ -388,6 +389,7 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
 void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma);
 
 #ifdef CONFIG_MMU
+void unmap_mapping_folio(struct folio *folio);
 extern long populate_vma_page_range(struct vm_area_struct *vma,
 		unsigned long start, unsigned long end, int *locked);
 extern long faultin_vma_page_range(struct vm_area_struct *vma,
@@ -491,8 +493,8 @@ static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
 	}
 	return fpin;
 }
-
 #else /* !CONFIG_MMU */
+static inline void unmap_mapping_folio(struct folio *folio) { }
 static inline void clear_page_mlock(struct page *page) { }
 static inline void mlock_vma_page(struct page *page) { }
 static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
```
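
Two details in the hunks above are easy to miss. `struct zap_details;` is only a forward declaration: after this change the full definition lives in mm/memory.c, and mm/internal.h merely passes pointers to the incomplete type. And the empty `unmap_mapping_folio()` stub under `#else /* !CONFIG_MMU */` lets nommu configurations compile the same callers without any #ifdef at the call site. A minimal sketch of that declaration/stub pattern follows; the caller is hypothetical, not from this patch.

```c
/*
 * Sketch of the header pattern the hunks above rely on; the caller below is
 * hypothetical.  With CONFIG_MMU the real unmap_mapping_folio() (defined in
 * mm/memory.c) is called; without it the empty static inline stub compiles
 * away, so call sites need no #ifdef of their own.
 */
struct folio;				/* incomplete type is enough for a pointer */

#ifdef CONFIG_MMU
void unmap_mapping_folio(struct folio *folio);
#else
static inline void unmap_mapping_folio(struct folio *folio) { }
#endif

static void hypothetical_caller(struct folio *folio)
{
	unmap_mapping_folio(folio);	/* becomes a no-op on !CONFIG_MMU builds */
}
```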