| author | Matthew Wilcox (Oracle) <willy@infradead.org> | 2023-08-16 16:11:56 +0100 |
| --- | --- | --- |
| committer | Andrew Morton <akpm@linux-foundation.org> | 2023-08-21 14:28:44 -0700 |
| commit | de53c05f2ae3d47d30db58e9c4e54e3bbc868377 (patch) | |
| tree | 6ffd037a95b93907bb1d8e01e3a2d995f545b061 /mm | |
| parent | 9c5ccf2db04b8d7c3df363fdd4856c2b79ab2c6a (diff) | |
mm: add large_rmappable page flag
Stored in the first tail page's flags, this flag replaces the destructor.
That removes the last of the destructors, so remove all references to
folio_dtor and compound_dtor.
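To illustrate the idea in the commit message, here is a small standalone C sketch. It is illustrative only, not the kernel's page-flags.h machinery: every `fake_*` name is a hypothetical stand-in. It models how one bit in the first tail page's flags can mark a folio as large_rmappable, replacing a destructor enum stored in that same tail page.

```c
/*
 * Illustrative sketch only -- not kernel code. It models the patch's idea:
 * mark a large folio by setting one bit in the first tail page's flags,
 * instead of storing a destructor id there.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_page {
	unsigned long flags;
};

/* A toy "folio": a head page followed by its tail pages. */
#define FAKE_NR_PAGES 4
struct fake_folio {
	struct fake_page pages[FAKE_NR_PAGES];
};

/* Arbitrary bit chosen for this sketch. */
#define FAKE_PG_LARGE_RMAPPABLE (1UL << 0)

/* The flag lives in the first tail page (pages[1]), as the patch describes. */
static void fake_folio_set_large_rmappable(struct fake_folio *folio)
{
	folio->pages[1].flags |= FAKE_PG_LARGE_RMAPPABLE;
}

static bool fake_folio_test_large_rmappable(const struct fake_folio *folio)
{
	return folio->pages[1].flags & FAKE_PG_LARGE_RMAPPABLE;
}

int main(void)
{
	struct fake_folio folio = { 0 };

	/* Preparing a THP-style folio now sets the flag... */
	fake_folio_set_large_rmappable(&folio);

	/* ...and teardown tests the flag instead of comparing a dtor enum. */
	if (fake_folio_test_large_rmappable(&folio))
		printf("folio is large_rmappable\n");
	return 0;
}
```

In the real patch below, folio_set_large_rmappable() plays the role of the setter and folio_test_large_rmappable() the role of the test, so destroy_large_folio() no longer needs to read a _folio_dtor field.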
Link: https://lkml.kernel.org/r/20230816151201.3655946-9-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Cc: Yanteng Si <siyanteng@loongson.cn>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/huge_memory.c | 4
-rw-r--r-- | mm/internal.h | 1
-rw-r--r-- | mm/page_alloc.c | 7
3 files changed, 3 insertions, 9 deletions
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 5817bf77f1f0..2fe1ea187b6b 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -581,7 +581,7 @@ void folio_prep_large_rmappable(struct folio *folio)
 {
 	VM_BUG_ON_FOLIO(folio_order(folio) < 2, folio);
 	INIT_LIST_HEAD(&folio->_deferred_list);
-	folio_set_compound_dtor(folio, TRANSHUGE_PAGE_DTOR);
+	folio_set_large_rmappable(folio);
 }
 
 static inline bool is_transparent_hugepage(struct page *page)
@@ -593,7 +593,7 @@ static inline bool is_transparent_hugepage(struct page *page)
 
 	folio = page_folio(page);
 	return is_huge_zero_page(&folio->page) ||
-	       folio->_folio_dtor == TRANSHUGE_PAGE_DTOR;
+	       folio_test_large_rmappable(folio);
 }
 
 static unsigned long __thp_get_unmapped_area(struct file *filp,
diff --git a/mm/internal.h b/mm/internal.h
index 30bbfcacc909..5c0daea731f3 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -419,7 +419,6 @@ static inline void prep_compound_head(struct page *page, unsigned int order)
 {
 	struct folio *folio = (struct folio *)page;
 
-	folio_set_compound_dtor(folio, COMPOUND_PAGE_DTOR);
 	folio_set_order(folio, order);
 	atomic_set(&folio->_entire_mapcount, -1);
 	atomic_set(&folio->_nr_pages_mapped, 0);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d96dc6a3077a..452459836b71 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -572,9 +572,6 @@ static inline void free_the_page(struct page *page, unsigned int order)
  * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
  * in bit 0 of page->compound_head. The rest of bits is pointer to head page.
  *
- * The first tail page's ->compound_dtor describes how to destroy the
- * compound page.
- *
  * The first tail page's ->compound_order holds the order of allocation.
  * This usage means that zero-order pages may not be compound.
  */
@@ -593,14 +590,12 @@ void prep_compound_page(struct page *page, unsigned int order)
 
 void destroy_large_folio(struct folio *folio)
 {
-	enum compound_dtor_id dtor = folio->_folio_dtor;
-
 	if (folio_test_hugetlb(folio)) {
 		free_huge_folio(folio);
 		return;
 	}
 
-	if (folio_test_transhuge(folio) && dtor == TRANSHUGE_PAGE_DTOR)
+	if (folio_test_large_rmappable(folio))
 		folio_undo_large_rmappable(folio);
 
 	mem_cgroup_uncharge(folio);