From 4aed23a2f8aaaafad0232d3392afcf493c3c3df3 Mon Sep 17 00:00:00 2001
From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Date: Sat, 29 Jan 2022 15:53:59 -0500
Subject: mm/page_idle: Convert page_idle_clear_pte_refs() to use a folio

The PG_idle and PG_young bits are ignored if they're set on tail pages,
so ensure we're passing a folio around.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
---
 mm/page_idle.c | 21 ++++++++++++---------
 1 file changed, 12 insertions(+), 9 deletions(-)

diff --git a/mm/page_idle.c b/mm/page_idle.c
index 3e05bf1ce825..2427d832f5d6 100644
--- a/mm/page_idle.c
+++ b/mm/page_idle.c
@@ -13,6 +13,8 @@
 #include <linux/page_ext.h>
 #include <linux/page_idle.h>
 
+#include "internal.h"
+
 #define BITMAP_CHUNK_SIZE	sizeof(u64)
 #define BITMAP_CHUNK_BITS	(BITMAP_CHUNK_SIZE * BITS_PER_BYTE)
 
@@ -48,7 +50,8 @@ static bool page_idle_clear_pte_refs_one(struct page *page,
 					struct vm_area_struct *vma,
 					unsigned long addr, void *arg)
 {
-	DEFINE_PAGE_VMA_WALK(pvmw, page, vma, addr, 0);
+	struct folio *folio = page_folio(page);
+	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
 	bool referenced = false;
 
 	while (page_vma_mapped_walk(&pvmw)) {
@@ -70,19 +73,20 @@ static bool page_idle_clear_pte_refs_one(struct page *page,
 	}
 
 	if (referenced) {
-		clear_page_idle(page);
+		folio_clear_idle(folio);
 		/*
 		 * We cleared the referenced bit in a mapping to this page. To
 		 * avoid interference with page reclaim, mark it young so that
 		 * page_referenced() will return > 0.
 		 */
-		set_page_young(page);
+		folio_set_young(folio);
 	}
 
 	return true;
 }
 
 static void page_idle_clear_pte_refs(struct page *page)
 {
+	struct folio *folio = page_folio(page);
 	/*
 	 * Since rwc.arg is unused, rwc is effectively immutable, so we
 	 * can make it static const to save some cycles and stack.
@@ -93,18 +97,17 @@ static void page_idle_clear_pte_refs(struct page *page)
 	};
 	bool need_lock;
 
-	if (!page_mapped(page) ||
-	    !page_rmapping(page))
+	if (!folio_mapped(folio) || !folio_raw_mapping(folio))
 		return;
 
-	need_lock = !PageAnon(page) || PageKsm(page);
-	if (need_lock && !trylock_page(page))
+	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
+	if (need_lock && !folio_trylock(folio))
 		return;
 
-	rmap_walk(page, (struct rmap_walk_control *)&rwc);
+	rmap_walk(&folio->page, (struct rmap_walk_control *)&rwc);
 
 	if (need_lock)
-		unlock_page(page);
+		folio_unlock(folio);
 }
 
 static ssize_t page_idle_bitmap_read(struct file *file, struct kobject *kobj,
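
Why the conversion fixes the problem: page_folio() resolves any page, head
or tail, to the folio whose flags the idle/young helpers actually test and
modify, so a reference observed through a tail-page mapping is no longer
recorded where nothing will ever read it. The program below is a standalone
userspace sketch of that relationship, not kernel code: struct page,
struct folio, and page_folio() here are simplified stand-ins for the real
kernel definitions, and the flag bits are arbitrary.

	/* Standalone model of head/tail flag routing; compile with any C99
	 * compiler. All names mirror the kernel API but are stand-ins. */
	#include <assert.h>
	#include <stdio.h>

	struct page {
		unsigned long flags;	/* models PG_idle / PG_young */
		struct page *head;	/* NULL for a head page, else the head */
	};

	/* Simplified folio: in the kernel it overlays the head page. */
	struct folio {
		struct page page;
	};

	#define PG_young	(1UL << 0)
	#define PG_idle		(1UL << 1)

	/* Resolve head or tail page to its folio, mimicking page_folio(). */
	static struct folio *page_folio(struct page *page)
	{
		return (struct folio *)(page->head ? page->head : page);
	}

	static void folio_set_young(struct folio *folio)
	{
		folio->page.flags |= PG_young;
	}

	static void folio_clear_idle(struct folio *folio)
	{
		folio->page.flags &= ~PG_idle;
	}

	int main(void)
	{
		struct page head = { .flags = PG_idle, .head = NULL };
		struct page tail = { .flags = 0, .head = &head };
		struct folio *folio = page_folio(&tail);	/* -> head */

		/* Old bug pattern: a bit set on the tail page is ignored,
		 * because readers only ever consult the head page's flags. */
		tail.flags |= PG_young;
		assert(!(head.flags & PG_young));

		/* Folio pattern: the same update lands on the head page. */
		folio_set_young(folio);
		folio_clear_idle(folio);
		assert(head.flags & PG_young);
		assert(!(head.flags & PG_idle));

		printf("flags updated on the head page via the folio\n");
		return 0;
	}

In the kernel itself the young/idle bits live in the folio's flags (or in
page_ext on configurations without spare page flags), so resolving the folio
once at the top of page_idle_clear_pte_refs_one() ensures every subsequent
flag operation lands where the reclaim-side referenced checks will look.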