Diffstat (limited to 'mm/damon/vaddr.c')
-rw-r--r-- | mm/damon/vaddr.c | 68 |
1 file changed, 33 insertions, 35 deletions
diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
index 15f03df66db6..1fec16d7263e 100644
--- a/mm/damon/vaddr.c
+++ b/mm/damon/vaddr.c
@@ -335,9 +335,9 @@ static void damon_hugetlb_mkold(pte_t *pte, struct mm_struct *mm,
 {
 	bool referenced = false;
 	pte_t entry = huge_ptep_get(pte);
-	struct page *page = pte_page(entry);
+	struct folio *folio = pfn_folio(pte_pfn(entry));
 
-	get_page(page);
+	folio_get(folio);
 
 	if (pte_young(entry)) {
 		referenced = true;
@@ -352,10 +352,10 @@ static void damon_hugetlb_mkold(pte_t *pte, struct mm_struct *mm,
 #endif /* CONFIG_MMU_NOTIFIER */
 
 	if (referenced)
-		set_page_young(page);
+		folio_set_young(folio);
 
-	set_page_idle(page);
-	put_page(page);
+	folio_set_idle(folio);
+	folio_put(folio);
 }
 
 static int damon_mkold_hugetlb_entry(pte_t *pte, unsigned long hmask,
@@ -422,7 +422,8 @@ static void damon_va_prepare_access_checks(struct damon_ctx *ctx)
 }
 
 struct damon_young_walk_private {
-	unsigned long *page_sz;
+	/* size of the folio for the access checked virtual memory address */
+	unsigned long *folio_sz;
 	bool young;
 };
 
@@ -431,7 +432,7 @@ static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
 {
 	pte_t *pte;
 	spinlock_t *ptl;
-	struct page *page;
+	struct folio *folio;
 	struct damon_young_walk_private *priv = walk->private;
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -446,16 +447,15 @@ static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
 			spin_unlock(ptl);
 			goto regular_page;
 		}
-		page = damon_get_page(pmd_pfn(*pmd));
-		if (!page)
+		folio = damon_get_folio(pmd_pfn(*pmd));
+		if (!folio)
 			goto huge_out;
-		if (pmd_young(*pmd) || !page_is_idle(page) ||
+		if (pmd_young(*pmd) || !folio_test_idle(folio) ||
 		    mmu_notifier_test_young(walk->mm,
-				addr)) {
-			*priv->page_sz = HPAGE_PMD_SIZE;
+				addr))
 			priv->young = true;
-		}
-		put_page(page);
+		*priv->folio_sz = HPAGE_PMD_SIZE;
+		folio_put(folio);
 huge_out:
 		spin_unlock(ptl);
 		return 0;
@@ -469,15 +469,14 @@ regular_page:
 	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
 	if (!pte_present(*pte))
 		goto out;
-	page = damon_get_page(pte_pfn(*pte));
-	if (!page)
+	folio = damon_get_folio(pte_pfn(*pte));
+	if (!folio)
 		goto out;
-	if (pte_young(*pte) || !page_is_idle(page) ||
-			mmu_notifier_test_young(walk->mm, addr)) {
-		*priv->page_sz = PAGE_SIZE;
+	if (pte_young(*pte) || !folio_test_idle(folio) ||
+			mmu_notifier_test_young(walk->mm, addr))
 		priv->young = true;
-	}
-	put_page(page);
+	*priv->folio_sz = folio_size(folio);
+	folio_put(folio);
 out:
 	pte_unmap_unlock(pte, ptl);
 	return 0;
@@ -490,7 +489,7 @@ static int damon_young_hugetlb_entry(pte_t *pte, unsigned long hmask,
 {
 	struct damon_young_walk_private *priv = walk->private;
 	struct hstate *h = hstate_vma(walk->vma);
-	struct page *page;
+	struct folio *folio;
 	spinlock_t *ptl;
 	pte_t entry;
 
@@ -499,16 +498,15 @@ static int damon_young_hugetlb_entry(pte_t *pte, unsigned long hmask,
 	if (!pte_present(entry))
 		goto out;
 
-	page = pte_page(entry);
-	get_page(page);
+	folio = pfn_folio(pte_pfn(entry));
+	folio_get(folio);
 
-	if (pte_young(entry) || !page_is_idle(page) ||
-	    mmu_notifier_test_young(walk->mm, addr)) {
-		*priv->page_sz = huge_page_size(h);
+	if (pte_young(entry) || !folio_test_idle(folio) ||
+	    mmu_notifier_test_young(walk->mm, addr))
 		priv->young = true;
-	}
+	*priv->folio_sz = huge_page_size(h);
 
-	put_page(page);
+	folio_put(folio);
 
 out:
 	spin_unlock(ptl);
@@ -524,10 +522,10 @@ static const struct mm_walk_ops damon_young_ops = {
 };
 
 static bool damon_va_young(struct mm_struct *mm, unsigned long addr,
-		unsigned long *page_sz)
+		unsigned long *folio_sz)
 {
 	struct damon_young_walk_private arg = {
-		.page_sz = page_sz,
+		.folio_sz = folio_sz,
 		.young = false,
 	};
 
@@ -547,18 +545,18 @@ static void __damon_va_check_access(struct mm_struct *mm,
 				struct damon_region *r, bool same_target)
 {
 	static unsigned long last_addr;
-	static unsigned long last_page_sz = PAGE_SIZE;
+	static unsigned long last_folio_sz = PAGE_SIZE;
 	static bool last_accessed;
 
 	/* If the region is in the last checked page, reuse the result */
-	if (same_target && (ALIGN_DOWN(last_addr, last_page_sz) ==
-				ALIGN_DOWN(r->sampling_addr, last_page_sz))) {
+	if (same_target && (ALIGN_DOWN(last_addr, last_folio_sz) ==
+				ALIGN_DOWN(r->sampling_addr, last_folio_sz))) {
 		if (last_accessed)
 			r->nr_accesses++;
 		return;
 	}
 
-	last_accessed = damon_va_young(mm, r->sampling_addr, &last_page_sz);
+	last_accessed = damon_va_young(mm, r->sampling_addr, &last_folio_sz);
 	if (last_accessed)
 		r->nr_accesses++;
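
For readers following the conversion: every access-check path above ends up with the same shape. Below is a minimal sketch of that shape; it is illustrative only and not part of the patch, and the helper name folio_accessed_sketch is invented here. damon_get_folio(), folio_test_idle(), folio_size(), and folio_put() are the helpers the patch itself uses; the real walkers additionally test the page-table young bit (pte_young()/pmd_young()), which a PFN-only helper cannot see.

/*
 * Illustrative sketch, not from the patch: the post-conversion
 * access-check shape. Take a reference on the folio backing a PFN,
 * test its access state with the folio_* helpers, report the folio's
 * size so the caller can reuse the result for nearby addresses, then
 * drop the reference.
 */
static bool folio_accessed_sketch(struct mm_struct *mm, unsigned long pfn,
				  unsigned long addr, unsigned long *sz)
{
	struct folio *folio = damon_get_folio(pfn);
	bool young = false;

	if (!folio)
		return false;
	if (!folio_test_idle(folio) || mmu_notifier_test_young(mm, addr))
		young = true;
	*sz = folio_size(folio);	/* whole folio, not just PAGE_SIZE */
	folio_put(folio);
	return young;
}

Two behavioral details stand out in the hunks above: the PTE-mapped case now records folio_size(folio) rather than a fixed PAGE_SIZE, so __damon_va_check_access() can reuse one check across every address falling in the same large folio; and *folio_sz is now set whether or not the mapping was found young, so last_folio_sz always describes the folio most recently examined.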