author     Yang Shi <shy828301@gmail.com>      2022-05-19 14:08:50 -0700
committer  akpm <akpm@linux-foundation.org>    2022-05-19 14:08:50 -0700
commit     c791576c60288c89b351ea2d2098f6a872d78fa7
tree       3e901ba5f6e94c631960f9cf10fa575fbaee8ff5
parent     2647d11b9e71d495b2d4985ebaf7a734373a4b80
mm: khugepaged: introduce khugepaged_enter_vma() helper
khugepaged_enter_vma_merge() does the same thing as the open-coded
khugepaged_enter() block in shmem_mmap(), so consolidate them into one
helper and rename it to khugepaged_enter_vma().
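The block this patch removes from shmem_mmap() (and from shmem_zero_setup())
rounds vm_start up and vm_end down to a PMD boundary and enters khugepaged
only when at least one fully aligned huge-page-sized range remains in
between; khugepaged_enter_vma() keeps that check in one place. A minimal
userspace sketch of the arithmetic, assuming the common x86-64 2MiB PMD size
(the helper name is invented for illustration):

#include <stdio.h>

/* Illustration only: assume a 2MiB PMD-sized huge page, as on x86-64;
 * the real value is architecture dependent.
 */
#define HPAGE_PMD_SIZE	(1UL << 21)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

/* Hypothetical helper wrapping the check removed from shmem_mmap():
 * round start up and end down to a PMD boundary, then test whether a
 * fully aligned huge-page-sized range remains in between.
 */
static int vma_spans_a_huge_page(unsigned long start, unsigned long end)
{
	return ((start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
	       (end & HPAGE_PMD_MASK);
}

int main(void)
{
	/* 4MiB range on a 2MiB boundary: two full huge pages fit. */
	printf("%d\n", vma_spans_a_huge_page(0x200000, 0x600000)); /* 1 */
	/* 1MiB range: too small for any aligned 2MiB huge page. */
	printf("%d\n", vma_spans_a_huge_page(0x200000, 0x300000)); /* 0 */
	/* 2.5MiB range crossing a boundary, but no full page fits. */
	printf("%d\n", vma_spans_a_huge_page(0x100000, 0x380000)); /* 0 */
	return 0;
}

Only the first range qualifies: after rounding, the other two leave no fully
aligned 2MiB span, which is why the old shmem code skipped khugepaged for
such mappings.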
Link: https://lkml.kernel.org/r/20220510203222.24246-8-shy828301@gmail.com
Signed-off-by: Yang Shi <shy828301@gmail.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Song Liu <song@kernel.org>
Cc: Song Liu <songliubraving@fb.com>
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-rw-r--r--  include/linux/khugepaged.h |  8 ++++----
-rw-r--r--  mm/khugepaged.c            |  6 +++---
-rw-r--r--  mm/mmap.c                  |  8 ++++----
-rw-r--r--  mm/shmem.c                 | 12 ++----------
4 files changed, 13 insertions, 21 deletions
diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h
index c340f6bb39d6..392d34c3c59a 100644
--- a/include/linux/khugepaged.h
+++ b/include/linux/khugepaged.h
@@ -14,8 +14,8 @@ extern bool hugepage_vma_check(struct vm_area_struct *vma,
 			       unsigned long vm_flags);
 extern void __khugepaged_enter(struct mm_struct *mm);
 extern void __khugepaged_exit(struct mm_struct *mm);
-extern void khugepaged_enter_vma_merge(struct vm_area_struct *vma,
-				       unsigned long vm_flags);
+extern void khugepaged_enter_vma(struct vm_area_struct *vma,
+				 unsigned long vm_flags);
 extern void khugepaged_min_free_kbytes_update(void);
 #ifdef CONFIG_SHMEM
 extern void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr);
@@ -72,8 +72,8 @@ static inline void khugepaged_enter(struct vm_area_struct *vma,
 				    unsigned long vm_flags)
 {
 }
-static inline void khugepaged_enter_vma_merge(struct vm_area_struct *vma,
-					      unsigned long vm_flags)
+static inline void khugepaged_enter_vma(struct vm_area_struct *vma,
+					unsigned long vm_flags)
 {
 }
 static inline void collapse_pte_mapped_thp(struct mm_struct *mm,
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 9ef626e2c750..16be62d493cd 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -365,7 +365,7 @@ int hugepage_madvise(struct vm_area_struct *vma,
 		 * register it here without waiting a page fault that
 		 * may not happen any time soon.
 		 */
-		khugepaged_enter_vma_merge(vma, *vm_flags);
+		khugepaged_enter_vma(vma, *vm_flags);
 		break;
 	case MADV_NOHUGEPAGE:
 		*vm_flags &= ~VM_HUGEPAGE;
@@ -505,8 +505,8 @@ void __khugepaged_enter(struct mm_struct *mm)
 	wake_up_interruptible(&khugepaged_wait);
 }
 
-void khugepaged_enter_vma_merge(struct vm_area_struct *vma,
-			       unsigned long vm_flags)
+void khugepaged_enter_vma(struct vm_area_struct *vma,
+			  unsigned long vm_flags)
 {
 	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
 	    khugepaged_enabled() &&
diff --git a/mm/mmap.c b/mm/mmap.c
index 7f7d982721b9..4456bf81e11e 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1223,7 +1223,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
 					 end, prev->vm_pgoff, NULL, prev);
 		if (err)
 			return NULL;
-		khugepaged_enter_vma_merge(prev, vm_flags);
+		khugepaged_enter_vma(prev, vm_flags);
 		return prev;
 	}
 
@@ -1250,7 +1250,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
 		}
 		if (err)
 			return NULL;
-		khugepaged_enter_vma_merge(area, vm_flags);
+		khugepaged_enter_vma(area, vm_flags);
 		return area;
 	}
 
@@ -2450,7 +2450,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 		}
 	}
 	anon_vma_unlock_write(vma->anon_vma);
-	khugepaged_enter_vma_merge(vma, vma->vm_flags);
+	khugepaged_enter_vma(vma, vma->vm_flags);
 	validate_mm(mm);
 	return error;
 }
@@ -2528,7 +2528,7 @@ int expand_downwards(struct vm_area_struct *vma,
 		}
 	}
 	anon_vma_unlock_write(vma->anon_vma);
-	khugepaged_enter_vma_merge(vma, vma->vm_flags);
+	khugepaged_enter_vma(vma, vma->vm_flags);
 	validate_mm(mm);
 	return error;
 }
diff --git a/mm/shmem.c b/mm/shmem.c
index 29701be579f8..89f6f4fec3f9 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2232,11 +2232,7 @@ static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
 
 	file_accessed(file);
 	vma->vm_ops = &shmem_vm_ops;
-	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
-	    ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
-	    (vma->vm_end & HPAGE_PMD_MASK)) {
-		khugepaged_enter(vma, vma->vm_flags);
-	}
+	khugepaged_enter_vma(vma, vma->vm_flags);
 	return 0;
 }
 
@@ -4137,11 +4133,7 @@ int shmem_zero_setup(struct vm_area_struct *vma)
 	vma->vm_file = file;
 	vma->vm_ops = &shmem_vm_ops;
 
-	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
-	    ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
-	    (vma->vm_end & HPAGE_PMD_MASK)) {
-		khugepaged_enter(vma, vma->vm_flags);
-	}
+	khugepaged_enter_vma(vma, vma->vm_flags);
 	return 0;
 }
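After the patch, every caller, including shmem, funnels through this single
helper. A sketch of how the consolidated khugepaged_enter_vma() plausibly
reads: the hunk above shows only its opening lines, so the tail (the
PMD-span test and the __khugepaged_enter() call) is reconstructed here from
the old khugepaged_enter_vma_merge() logic and is an assumption, not the
verbatim upstream source.

void khugepaged_enter_vma(struct vm_area_struct *vma,
			  unsigned long vm_flags)
{
	/* Skip mms already registered with khugepaged and configurations
	 * with khugepaged disabled (these two tests are visible in the
	 * hunk above).
	 */
	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
	    khugepaged_enabled() &&
	    /* Reconstructed tail: the VMA must span at least one
	     * PMD-aligned huge-page-sized range...
	     */
	    ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
	    (vma->vm_end & HPAGE_PMD_MASK))
		/* ...and pass the suitability checks before the mm is
		 * handed to the khugepaged daemon.
		 */
		if (hugepage_vma_check(vma, vm_flags))
			__khugepaged_enter(vma->vm_mm);
}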