author	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2024-11-17 15:08:59 +0100
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2024-11-17 15:08:59 +0100
commit	8bdd75d4a6f9680d77c703b07b8fb7a539a52766 (patch)
tree	18075fe1080e73efc3247d6818c0e74e6a96b941 /mm/internal.h
parent	0e651c03eabd082f8b8e16cd2da9664740899796 (diff)
parent	c1036e4f14d03aba549cdd9b186148d331013056 (diff)
Merge v6.6.62 (linux-rolling-lts)
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'mm/internal.h')
-rw-r--r--	mm/internal.h	27
1 file changed, 26 insertions(+), 1 deletion(-)
diff --git a/mm/internal.h b/mm/internal.h
index ef8d787a510c..b30907537801 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -413,7 +413,30 @@ static inline void folio_set_order(struct folio *folio, unsigned int order)
 #endif
 }
 
-void folio_undo_large_rmappable(struct folio *folio);
+bool __folio_unqueue_deferred_split(struct folio *folio);
+static inline bool folio_unqueue_deferred_split(struct folio *folio)
+{
+	if (folio_order(folio) <= 1 || !folio_test_large_rmappable(folio))
+		return false;
+
+	/*
+	 * At this point, there is no one trying to add the folio to
+	 * deferred_list. If folio is not in deferred_list, it's safe
+	 * to check without acquiring the split_queue_lock.
+	 */
+	if (data_race(list_empty(&folio->_deferred_list)))
+		return false;
+
+	return __folio_unqueue_deferred_split(folio);
+}
+
+static inline struct folio *page_rmappable_folio(struct page *page)
+{
+	struct folio *folio = (struct folio *)page;
+
+	folio_prep_large_rmappable(folio);
+	return folio;
+}
 
 static inline void prep_compound_head(struct page *page, unsigned int order)
 {
@@ -423,6 +446,8 @@ static inline void prep_compound_head(struct page *page, unsigned int order)
 	atomic_set(&folio->_entire_mapcount, -1);
 	atomic_set(&folio->_nr_pages_mapped, 0);
 	atomic_set(&folio->_pincount, 0);
+	if (order > 1)
+		INIT_LIST_HEAD(&folio->_deferred_list);
 }
 
 static inline void prep_compound_tail(struct page *head, int tail_idx)
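
Note (not part of the commit): the comment inside folio_unqueue_deferred_split() above relies on an unlocked list_empty() pre-check to skip the queue lock whenever the folio was never put on the deferred-split list. Below is a minimal user-space sketch of that same lock-avoidance pattern, using pthreads and a simplified intrusive list; every name in it (item_unqueue, queue_lock, etc.) is hypothetical and not a kernel API.

/* Illustrative sketch of the "check list_empty() before taking the lock"
 * pattern; this is ordinary user-space C, not kernel code. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }
static bool list_empty(const struct list_head *h) { return h->next == h; }

static void list_add(struct list_head *n, struct list_head *h)
{
	n->next = h->next;
	n->prev = h;
	h->next->prev = n;
	h->next = n;
}

static void list_del_init(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	INIT_LIST_HEAD(n);
}

struct item { struct list_head node; };

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static struct list_head queue = { &queue, &queue };

/* Slow path: take the lock, re-check membership, then remove. */
static bool __item_unqueue(struct item *it)
{
	bool unqueued = false;

	pthread_mutex_lock(&queue_lock);
	if (!list_empty(&it->node)) {
		list_del_init(&it->node);
		unqueued = true;
	}
	pthread_mutex_unlock(&queue_lock);
	return unqueued;
}

/*
 * Fast path: once no other thread can still be adding this item to the
 * queue, an unlocked emptiness check is enough to skip the lock entirely.
 */
static bool item_unqueue(struct item *it)
{
	if (list_empty(&it->node))
		return false;
	return __item_unqueue(it);
}

int main(void)
{
	struct item a;

	INIT_LIST_HEAD(&a.node);
	printf("unqueue before add: %d\n", item_unqueue(&a));	/* 0: lock never taken */

	pthread_mutex_lock(&queue_lock);
	list_add(&a.node, &queue);
	pthread_mutex_unlock(&queue_lock);

	printf("unqueue after add:  %d\n", item_unqueue(&a));	/* 1: slow path removes it */
	return 0;
}

The unlocked check is only valid because, by the time item_unqueue() runs, no other thread can still be queueing the item; the locked slow path still re-checks membership before deleting, mirroring what __folio_unqueue_deferred_split() does under split_queue_lock.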