author | Matthew Wilcox (Oracle) <willy@infradead.org> | 2023-08-16 16:11:51 +0100
---|---|---
committer | Andrew Morton <akpm@linux-foundation.org> | 2023-08-21 14:28:43 -0700
commit | 454a00c40a21c59e99c526fe8cc57bd029cf8f0e (patch)
tree | 7295e10b862852ef94483d0343371daa9c7cbb91 /mm
parent | dd6fa0b61814492476463149c91110e529364e82 (diff)
mm: convert free_huge_page() to free_huge_folio()
Pass a folio instead of the head page to save a few instructions. Update
the documentation, at least in English.
Link: https://lkml.kernel.org/r/20230816151201.3655946-4-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Cc: Yanteng Si <siyanteng@loongson.cn>
Cc: David Hildenbrand <david@redhat.com>
Cc: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/hugetlb.c | 48
-rw-r--r-- | mm/page_alloc.c | 2
2 files changed, 24 insertions, 26 deletions
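As orientation before reading the diff: the patch changes the hugetlb freeing entry point to take a struct folio * instead of the head struct page *. The sketch below shows the old and new signatures and how a caller holding only a struct page could adapt; the example caller is hypothetical, while free_huge_folio() and page_folio() are the kernel helpers the patch itself uses.

/* Before this patch: the hugetlb destructor took the head page. */
void free_huge_page(struct page *page);

/* After this patch: it takes the folio directly, so the function no
 * longer needs its own page_folio() lookup.
 */
void free_huge_folio(struct folio *folio);

/* Hypothetical caller that still holds a struct page: convert once at
 * the call site with page_folio() instead of passing &folio->page.
 */
static void example_free_hugetlb(struct page *page)
{
	free_huge_folio(page_folio(page));
}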
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 5f498e8025cc..6a3c80026ab3 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1706,10 +1706,10 @@ static void add_hugetlb_folio(struct hstate *h, struct folio *folio,
 	zeroed = folio_put_testzero(folio);
 	if (unlikely(!zeroed))
 		/*
-		 * It is VERY unlikely soneone else has taken a ref on
-		 * the page. In this case, we simply return as the
-		 * hugetlb destructor (free_huge_page) will be called
-		 * when this other ref is dropped.
+		 * It is VERY unlikely soneone else has taken a ref
+		 * on the folio. In this case, we simply return as
+		 * free_huge_folio() will be called when this other ref
+		 * is dropped.
 		 */
 		return;
 
@@ -1875,13 +1875,12 @@ struct hstate *size_to_hstate(unsigned long size)
 	return NULL;
 }
 
-void free_huge_page(struct page *page)
+void free_huge_folio(struct folio *folio)
 {
 	/*
 	 * Can't pass hstate in here because it is called from the
 	 * compound page destructor.
 	 */
-	struct folio *folio = page_folio(page);
 	struct hstate *h = folio_hstate(folio);
 	int nid = folio_nid(folio);
 	struct hugepage_subpool *spool = hugetlb_folio_subpool(folio);
@@ -1936,7 +1935,7 @@ void free_huge_page(struct page *page)
 		spin_unlock_irqrestore(&hugetlb_lock, flags);
 		update_and_free_hugetlb_folio(h, folio, true);
 	} else {
-		arch_clear_hugepage_flags(page);
+		arch_clear_hugepage_flags(&folio->page);
 		enqueue_hugetlb_folio(h, folio);
 		spin_unlock_irqrestore(&hugetlb_lock, flags);
 	}
@@ -2246,7 +2245,7 @@ static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
 		folio = alloc_fresh_hugetlb_folio(h, gfp_mask, node,
 					nodes_allowed, node_alloc_noretry);
 		if (folio) {
-			free_huge_page(&folio->page); /* free it into the hugepage allocator */
+			free_huge_folio(folio); /* free it into the hugepage allocator */
 			return 1;
 		}
 	}
@@ -2429,13 +2428,13 @@ static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
 	 * We could have raced with the pool size change.
 	 * Double check that and simply deallocate the new page
 	 * if we would end up overcommiting the surpluses. Abuse
-	 * temporary page to workaround the nasty free_huge_page
+	 * temporary page to workaround the nasty free_huge_folio
 	 * codeflow
 	 */
 	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
 		folio_set_hugetlb_temporary(folio);
 		spin_unlock_irq(&hugetlb_lock);
-		free_huge_page(&folio->page);
+		free_huge_folio(folio);
 		return NULL;
 	}
 
@@ -2547,8 +2546,7 @@ static int gather_surplus_pages(struct hstate *h, long delta)
 	__must_hold(&hugetlb_lock)
 {
 	LIST_HEAD(surplus_list);
-	struct folio *folio;
-	struct page *page, *tmp;
+	struct folio *folio, *tmp;
 	int ret;
 	long i;
 	long needed, allocated;
@@ -2608,21 +2606,21 @@ retry:
 	ret = 0;
 
 	/* Free the needed pages to the hugetlb pool */
-	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
+	list_for_each_entry_safe(folio, tmp, &surplus_list, lru) {
 		if ((--needed) < 0)
 			break;
 		/* Add the page to the hugetlb allocator */
-		enqueue_hugetlb_folio(h, page_folio(page));
+		enqueue_hugetlb_folio(h, folio);
 	}
 free:
 	spin_unlock_irq(&hugetlb_lock);
 
 	/*
 	 * Free unnecessary surplus pages to the buddy allocator.
-	 * Pages have no ref count, call free_huge_page directly.
+	 * Pages have no ref count, call free_huge_folio directly.
 	 */
-	list_for_each_entry_safe(page, tmp, &surplus_list, lru)
-		free_huge_page(page);
+	list_for_each_entry_safe(folio, tmp, &surplus_list, lru)
+		free_huge_folio(folio);
 
 	spin_lock_irq(&hugetlb_lock);
 	return ret;
@@ -2836,11 +2834,11 @@ static long vma_del_reservation(struct hstate *h,
 * 2) No reservation was in place for the page, so hugetlb_restore_reserve is
 *    not set. However, alloc_hugetlb_folio always updates the reserve map.
 *
-* In case 1, free_huge_page later in the error path will increment the
-* global reserve count. But, free_huge_page does not have enough context
+* In case 1, free_huge_folio later in the error path will increment the
+* global reserve count. But, free_huge_folio does not have enough context
 * to adjust the reservation map. This case deals primarily with private
 * mappings. Adjust the reserve map here to be consistent with global
-* reserve count adjustments to be made by free_huge_page. Make sure the
+* reserve count adjustments to be made by free_huge_folio. Make sure the
 * reserve map indicates there is a reservation present.
 *
 * In case 2, simply undo reserve map modifications done by alloc_hugetlb_folio.
@@ -2856,7 +2854,7 @@ void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
 			 * Rare out of memory condition in reserve map
 			 * manipulation. Clear hugetlb_restore_reserve so
 			 * that global reserve count will not be incremented
-			 * by free_huge_page. This will make it appear
+			 * by free_huge_folio. This will make it appear
 			 * as though the reservation for this folio was
 			 * consumed. This may prevent the task from
 			 * faulting in the folio at a later time. This
@@ -3232,7 +3230,7 @@ static void __init gather_bootmem_prealloc(void)
 		if (prep_compound_gigantic_folio(folio, huge_page_order(h))) {
 			WARN_ON(folio_test_reserved(folio));
 			prep_new_hugetlb_folio(h, folio, folio_nid(folio));
-			free_huge_page(page); /* add to the hugepage allocator */
+			free_huge_folio(folio); /* add to the hugepage allocator */
 		} else {
 			/* VERY unlikely inflated ref count on a tail page */
 			free_gigantic_folio(folio, huge_page_order(h));
@@ -3264,7 +3262,7 @@ static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid)
 					&node_states[N_MEMORY], NULL);
 			if (!folio)
 				break;
-			free_huge_page(&folio->page); /* free it into the hugepage allocator */
+			free_huge_folio(folio); /* free it into the hugepage allocator */
 		}
 		cond_resched();
 	}
@@ -3542,7 +3540,7 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
 	while (count > persistent_huge_pages(h)) {
 		/*
 		 * If this allocation races such that we no longer need the
-		 * page, free_huge_page will handle it by freeing the page
+		 * page, free_huge_folio will handle it by freeing the page
 		 * and reducing the surplus.
 		 */
 		spin_unlock_irq(&hugetlb_lock);
@@ -3658,7 +3656,7 @@ static int demote_free_hugetlb_folio(struct hstate *h, struct folio *folio)
 		prep_compound_page(subpage, target_hstate->order);
 		folio_change_private(inner_folio, NULL);
 		prep_new_hugetlb_folio(target_hstate, inner_folio, nid);
-		free_huge_page(subpage);
+		free_huge_folio(inner_folio);
 	}
 	mutex_unlock(&target_hstate->resize_lock);
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 744848593360..30dc444436cc 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -610,7 +610,7 @@ void destroy_large_folio(struct folio *folio)
 	enum compound_dtor_id dtor = folio->_folio_dtor;
 
 	if (folio_test_hugetlb(folio)) {
-		free_huge_page(&folio->page);
+		free_huge_folio(folio);
 		return;
 	}