Diffstat (limited to 'mm/internal.h')
-rw-r--r-- | mm/internal.h | 27 | ++++++++++++++++++++++++++-
1 file changed, 26 insertions, 1 deletion
diff --git a/mm/internal.h b/mm/internal.h
index ef8d787a510c..b30907537801 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -413,7 +413,30 @@ static inline void folio_set_order(struct folio *folio, unsigned int order)
 #endif
 }
 
-void folio_undo_large_rmappable(struct folio *folio);
+bool __folio_unqueue_deferred_split(struct folio *folio);
+static inline bool folio_unqueue_deferred_split(struct folio *folio)
+{
+	if (folio_order(folio) <= 1 || !folio_test_large_rmappable(folio))
+		return false;
+
+	/*
+	 * At this point, there is no one trying to add the folio to
+	 * deferred_list. If folio is not in deferred_list, it's safe
+	 * to check without acquiring the split_queue_lock.
+	 */
+	if (data_race(list_empty(&folio->_deferred_list)))
+		return false;
+
+	return __folio_unqueue_deferred_split(folio);
+}
+
+static inline struct folio *page_rmappable_folio(struct page *page)
+{
+	struct folio *folio = (struct folio *)page;
+
+	folio_prep_large_rmappable(folio);
+	return folio;
+}
 
 static inline void prep_compound_head(struct page *page, unsigned int order)
 {
@@ -423,6 +446,8 @@ static inline void prep_compound_head(struct page *page, unsigned int order)
 	atomic_set(&folio->_entire_mapcount, -1);
 	atomic_set(&folio->_nr_pages_mapped, 0);
 	atomic_set(&folio->_pincount, 0);
+	if (order > 1)
+		INIT_LIST_HEAD(&folio->_deferred_list);
 }
 
 static inline void prep_compound_tail(struct page *head, int tail_idx)
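For context: the inline folio_unqueue_deferred_split() above is a lock-free fast path; the out-of-line __folio_unqueue_deferred_split() it falls back to is declared here but defined in mm/huge_memory.c, outside this diff. The following is only a sketch of the locked slow path that declaration implies, assuming a per-queue deferred_split structure with the split_queue_lock named in the comment above; identifiers such as get_deferred_split_queue() and split_queue_len are taken from mm/huge_memory.c and the exact upstream code may differ.

/*
 * Sketch only -- the real definition lives in mm/huge_memory.c.
 * Recheck list_empty() under split_queue_lock: another unqueuer
 * (e.g. the deferred split shrinker) may have taken the folio off
 * the list since the unlocked data_race() check in the wrapper.
 */
bool __folio_unqueue_deferred_split(struct folio *folio)
{
	struct deferred_split *ds_queue = get_deferred_split_queue(folio);
	unsigned long flags;
	bool unqueued = false;

	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
	if (!list_empty(&folio->_deferred_list)) {
		ds_queue->split_queue_len--;
		list_del_init(&folio->_deferred_list);
		unqueued = true;
	}
	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);

	return unqueued;
}

The split keeps the common case cheap: a large folio that was never queued for deferred split fails the unlocked list_empty() check in the inline wrapper and never touches the spinlock; only folios actually sitting on a deferred split queue pay for the locked removal.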