Diffstat (limited to 'mm')
 mm/filemap.c        |  8
 mm/page-writeback.c | 36
 mm/page_io.c        | 15
 mm/readahead.c      |  2
 mm/rmap.c           |  4
 mm/secretmem.c      |  2
 mm/shmem.c          |  2
 mm/swap_state.c     |  2
 mm/truncate.c       | 42
 9 files changed, 54 insertions(+), 59 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 1752ef1266f3..d2e6a79fe69d 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -72,7 +72,7 @@
* Lock ordering:
*
* ->i_mmap_rwsem (truncate_pagecache)
- * ->private_lock (__free_pte->__set_page_dirty_buffers)
+ * ->private_lock (__free_pte->block_dirty_folio)
* ->swap_lock (exclusive_swap_page, others)
* ->i_pages lock
*
@@ -115,7 +115,7 @@
* ->memcg->move_lock (page_remove_rmap->lock_page_memcg)
* bdi.wb->list_lock (zap_pte_range->set_page_dirty)
* ->inode->i_lock (zap_pte_range->set_page_dirty)
- * ->private_lock (zap_pte_range->__set_page_dirty_buffers)
+ * ->private_lock (zap_pte_range->block_dirty_folio)
*
* ->i_mmap_rwsem
* ->tasklist_lock (memory_failure, collect_procs_ao)
@@ -2464,7 +2464,7 @@ static bool filemap_range_uptodate(struct address_space *mapping,
pos -= folio_pos(folio);
}
- return mapping->a_ops->is_partially_uptodate(&folio->page, pos, count);
+ return mapping->a_ops->is_partially_uptodate(folio, pos, count);
}
static int filemap_update_page(struct kiocb *iocb,
@@ -2856,7 +2856,7 @@ static inline loff_t folio_seek_hole_data(struct xa_state *xas,
offset = offset_in_folio(folio, start) & ~(bsz - 1);
do {
- if (ops->is_partially_uptodate(&folio->page, offset, bsz) ==
+ if (ops->is_partially_uptodate(folio, offset, bsz) ==
seek_data)
break;
start = (start + bsz) & ~(bsz - 1);
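The is_partially_uptodate change above passes the folio itself instead
of &folio->page. As a minimal sketch (assuming the 5.18-era declaration
in include/linux/fs.h), the hook now has this shape:

	/*
	 * Report whether the byte range [from, from + count) within the
	 * folio is uptodate, without requiring the whole folio to be.
	 */
	bool (*is_partially_uptodate)(struct folio *folio, size_t from,
				      size_t count);

Block-based filesystems typically point this at
block_is_partially_uptodate(), which walks the buffer_heads attached to
the folio and checks each one that overlaps the range.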
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index f13ed7639941..435c02630593 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2418,13 +2418,13 @@ EXPORT_SYMBOL(folio_write_one);
/*
* For address_spaces which neither use buffers nor write back.
*/
-int __set_page_dirty_no_writeback(struct page *page)
+bool noop_dirty_folio(struct address_space *mapping, struct folio *folio)
{
- if (!PageDirty(page))
- return !TestSetPageDirty(page);
- return 0;
+ if (!folio_test_dirty(folio))
+ return !folio_test_set_dirty(folio);
+ return false;
}
-EXPORT_SYMBOL(__set_page_dirty_no_writeback);
+EXPORT_SYMBOL(noop_dirty_folio);
/*
* Helper function for the set_page_dirty family.
@@ -2518,7 +2518,7 @@ void __folio_mark_dirty(struct folio *folio, struct address_space *mapping,
* This is also sometimes used by filesystems which use buffer_heads when
* a single buffer is being dirtied: we want to set the folio dirty in
* that case, but not all the buffers. This is a "bottom-up" dirtying,
- * whereas __set_page_dirty_buffers() is a "top-down" dirtying.
+ * whereas block_dirty_folio() is a "top-down" dirtying.
*
* The caller must ensure this doesn't race with truncation. Most will
* simply hold the folio lock, but e.g. zap_pte_range() calls with the
@@ -2604,7 +2604,7 @@ EXPORT_SYMBOL(folio_redirty_for_writepage);
* folio_mark_dirty - Mark a folio as being modified.
* @folio: The folio.
*
- * For folios with a mapping this should be done under the page lock
+ * For folios with a mapping this should be done with the folio lock held
* for the benefit of asynchronous memory errors that prefer a consistent
* dirty state. This rule can be broken in some special cases,
* but it is better not to.
@@ -2618,23 +2618,21 @@ bool folio_mark_dirty(struct folio *folio)
if (likely(mapping)) {
/*
* readahead/lru_deactivate_page could remain
- * PG_readahead/PG_reclaim due to race with end_page_writeback
- * About readahead, if the page is written, the flags would be
+ * PG_readahead/PG_reclaim due to a race with folio_end_writeback.
+ * About readahead, if the folio is written, the flags would be
* reset. So no problem.
- * About lru_deactivate_page, if the page is redirty, the flag
- * will be reset. So no problem. but if the page is used by readahead
- * it will confuse readahead and make it restart the size rampup
- * process. But it's a trivial problem.
+ * About lru_deactivate_page, if the folio is redirtied,
+ * the flag will be reset. So no problem. But if the
+ * folio is used by readahead it will confuse readahead
+ * and make it restart the size ramp-up process. That is
+ * only a trivial problem.
*/
if (folio_test_reclaim(folio))
folio_clear_reclaim(folio);
- return mapping->a_ops->set_page_dirty(&folio->page);
- }
- if (!folio_test_dirty(folio)) {
- if (!folio_test_set_dirty(folio))
- return true;
+ return mapping->a_ops->dirty_folio(mapping, folio);
}
- return false;
+
+ return noop_dirty_folio(mapping, folio);
}
EXPORT_SYMBOL(folio_mark_dirty);
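For reference, a sketch of the replacement hook's shape as this file
uses it (illustrative; the authoritative declaration is in
include/linux/fs.h):

	/*
	 * Return true if this call newly dirtied the folio, false if it
	 * was already dirty.  The mapping is passed explicitly, so
	 * implementations no longer have to derive it from the page.
	 */
	bool (*dirty_folio)(struct address_space *mapping,
			    struct folio *folio);

Two details worth noting: the return type changes from int to bool, and
the "no mapping" tail of folio_mark_dirty() collapses into a call to
noop_dirty_folio(), which is safe with a NULL mapping because it never
dereferences its mapping argument.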
diff --git a/mm/page_io.c b/mm/page_io.c
index f6296ee25014..b417f000b49e 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -439,16 +439,19 @@ out:
return ret;
}
-int swap_set_page_dirty(struct page *page)
+bool swap_dirty_folio(struct address_space *mapping, struct folio *folio)
{
- struct swap_info_struct *sis = page_swap_info(page);
+ struct swap_info_struct *sis = swp_swap_info(folio_swap_entry(folio));
if (data_race(sis->flags & SWP_FS_OPS)) {
- struct address_space *mapping = sis->swap_file->f_mapping;
+ const struct address_space_operations *aops;
+
+ mapping = sis->swap_file->f_mapping;
+ aops = mapping->a_ops;
- VM_BUG_ON_PAGE(!PageSwapCache(page), page);
- return mapping->a_ops->set_page_dirty(page);
+ VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
+ return aops->dirty_folio(mapping, folio);
} else {
- return __set_page_dirty_no_writeback(page);
+ return noop_dirty_folio(mapping, folio);
}
}
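A design note on swap_dirty_folio(): it is called with the swap
address_space as @mapping, but for SWP_FS_OPS swap files it rebinds
@mapping to the swap file's own f_mapping so the backing filesystem's
dirty_folio hook sees the mapping it expects; otherwise the folio is
simply flagged dirty, with no writeback bookkeeping, via
noop_dirty_folio().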
diff --git a/mm/readahead.c b/mm/readahead.c
index 21e5f9161cf2..d3a47546d17d 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -156,7 +156,7 @@ static void read_cache_pages_invalidate_page(struct address_space *mapping,
if (!trylock_page(page))
BUG();
page->mapping = mapping;
- do_invalidatepage(page, 0, PAGE_SIZE);
+ folio_invalidate(page_folio(page), 0, PAGE_SIZE);
page->mapping = NULL;
unlock_page(page);
}
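The read_cache_pages_invalidate_page() path above still handles order-0
pages, so it bridges into the folio API with page_folio(); offset 0 and
length PAGE_SIZE then cover the entire (single-page) folio.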
diff --git a/mm/rmap.c b/mm/rmap.c
index 9bdca9308e2f..615b5d323ee2 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -31,8 +31,8 @@
* mm->page_table_lock or pte_lock
* swap_lock (in swap_duplicate, swap_info_get)
* mmlist_lock (in mmput, drain_mmlist and others)
- * mapping->private_lock (in __set_page_dirty_buffers)
- * lock_page_memcg move_lock (in __set_page_dirty_buffers)
+ * mapping->private_lock (in block_dirty_folio)
+ * folio_lock_memcg move_lock (in block_dirty_folio)
* i_pages lock (widely used)
* lruvec->lru_lock (in folio_lruvec_lock_irq)
* inode->i_lock (in set_page_dirty's __mark_inode_dirty)
diff --git a/mm/secretmem.c b/mm/secretmem.c
index 22b310adb53d..098638d3b8a4 100644
--- a/mm/secretmem.c
+++ b/mm/secretmem.c
@@ -152,7 +152,7 @@ static void secretmem_freepage(struct page *page)
}
const struct address_space_operations secretmem_aops = {
- .set_page_dirty = __set_page_dirty_no_writeback,
+ .dirty_folio = noop_dirty_folio,
.freepage = secretmem_freepage,
.migratepage = secretmem_migratepage,
.isolate_page = secretmem_isolate_page,
diff --git a/mm/shmem.c b/mm/shmem.c
index 01fd227b6947..529c9ad3e926 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -3756,7 +3756,7 @@ static int shmem_error_remove_page(struct address_space *mapping,
const struct address_space_operations shmem_aops = {
.writepage = shmem_writepage,
- .set_page_dirty = __set_page_dirty_no_writeback,
+ .dirty_folio = noop_dirty_folio,
#ifdef CONFIG_TMPFS
.write_begin = shmem_write_begin,
.write_end = shmem_write_end,
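As with secretmem above, shmem folios are RAM-backed and need no
buffered writeback through the page cache, so the shared no-op handler
is sufficient: dirtying only sets the folio's dirty flag and never
schedules I/O.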
diff --git a/mm/swap_state.c b/mm/swap_state.c
index ee67164531c0..013856004825 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -30,7 +30,7 @@
*/
static const struct address_space_operations swap_aops = {
.writepage = swap_writepage,
- .set_page_dirty = swap_set_page_dirty,
+ .dirty_folio = swap_dirty_folio,
#ifdef CONFIG_MIGRATION
.migratepage = migrate_page,
#endif
diff --git a/mm/truncate.c b/mm/truncate.c
index cace6e3e4e8c..ab50d0d59a2a 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -19,8 +19,7 @@
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
-#include <linux/buffer_head.h> /* grr. try_to_release_page,
- do_invalidatepage */
+#include <linux/buffer_head.h> /* grr. try_to_release_page */
#include <linux/shmem_fs.h>
#include <linux/rmap.h>
#include "internal.h"
@@ -138,33 +137,28 @@ static int invalidate_exceptional_entry2(struct address_space *mapping,
}
/**
- * do_invalidatepage - invalidate part or all of a page
- * @page: the page which is affected
+ * folio_invalidate - Invalidate part or all of a folio.
+ * @folio: The folio which is affected.
* @offset: start of the range to invalidate
* @length: length of the range to invalidate
*
- * do_invalidatepage() is called when all or part of the page has become
+ * folio_invalidate() is called when all or part of the folio has become
* invalidated by a truncate operation.
*
- * do_invalidatepage() does not have to release all buffers, but it must
+ * folio_invalidate() does not have to release all buffers, but it must
* ensure that no dirty buffer is left outside @offset and that no I/O
* is underway against any of the blocks which are outside the truncation
* point, because the caller is about to free (and possibly reuse) those
* blocks on-disk.
*/
-void do_invalidatepage(struct page *page, unsigned int offset,
- unsigned int length)
+void folio_invalidate(struct folio *folio, size_t offset, size_t length)
{
- void (*invalidatepage)(struct page *, unsigned int, unsigned int);
-
- invalidatepage = page->mapping->a_ops->invalidatepage;
-#ifdef CONFIG_BLOCK
- if (!invalidatepage)
- invalidatepage = block_invalidatepage;
-#endif
- if (invalidatepage)
- (*invalidatepage)(page, offset, length);
+ const struct address_space_operations *aops = folio->mapping->a_ops;
+
+ if (aops->invalidate_folio)
+ aops->invalidate_folio(folio, offset, length);
}
+EXPORT_SYMBOL_GPL(folio_invalidate);
/*
* If truncate cannot remove the fs-private metadata from the page, the page
@@ -182,7 +176,7 @@ static void truncate_cleanup_folio(struct folio *folio)
unmap_mapping_folio(folio);
if (folio_has_private(folio))
- do_invalidatepage(&folio->page, 0, folio_size(folio));
+ folio_invalidate(folio, 0, folio_size(folio));
/*
* Some filesystems seem to re-dirty the page even after
@@ -243,7 +237,7 @@ bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end)
folio_zero_range(folio, offset, length);
if (folio_has_private(folio))
- do_invalidatepage(&folio->page, offset, length);
+ folio_invalidate(folio, offset, length);
if (!folio_test_large(folio))
return true;
if (split_huge_page(&folio->page) == 0)
@@ -329,7 +323,7 @@ long invalidate_inode_page(struct page *page)
* mapping is large, it is probably the case that the final pages are the most
* recently touched, and freeing happens in ascending file offset order.
*
- * Note that since ->invalidatepage() accepts range to invalidate
+ * Note that since ->invalidate_folio() accepts a range to invalidate,
* truncate_inode_pages_range is able to handle cases where lend + 1 is not
* properly page aligned.
*/
@@ -611,13 +605,13 @@ failed:
return 0;
}
-static int do_launder_folio(struct address_space *mapping, struct folio *folio)
+static int folio_launder(struct address_space *mapping, struct folio *folio)
{
if (!folio_test_dirty(folio))
return 0;
- if (folio->mapping != mapping || mapping->a_ops->launder_page == NULL)
+ if (folio->mapping != mapping || mapping->a_ops->launder_folio == NULL)
return 0;
- return mapping->a_ops->launder_page(&folio->page);
+ return mapping->a_ops->launder_folio(folio);
}
/**
@@ -683,7 +677,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
unmap_mapping_folio(folio);
BUG_ON(folio_mapped(folio));
- ret2 = do_launder_folio(mapping, folio);
+ ret2 = folio_launder(mapping, folio);
if (ret2 == 0) {
if (!invalidate_complete_folio2(mapping, folio))
ret2 = -EBUSY;
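To illustrate the two folio-based hooks mm/truncate.c now dispatches to,
here is a hypothetical filesystem sketch; the "myfs_" names are
invented, and the prototypes are assumed to match the 5.18-era
declarations (void (*invalidate_folio)(struct folio *, size_t, size_t)
and int (*launder_folio)(struct folio *)) in include/linux/fs.h:

	#include <linux/fs.h>		/* struct address_space_operations */
	#include <linux/mm.h>		/* folio_size() */
	#include <linux/pagemap.h>	/* folio_detach_private() */

	/* Hypothetical: drop fs-private state covering the range. */
	static void myfs_invalidate_folio(struct folio *folio,
					  size_t offset, size_t length)
	{
		/* Whole-folio invalidation may release private data. */
		if (offset == 0 && length == folio_size(folio))
			folio_detach_private(folio);
	}

	static const struct address_space_operations myfs_aops = {
		.invalidate_folio	= myfs_invalidate_folio,
		/* .launder_folio, if set, writes back a dirty folio
		 * before invalidate_inode_pages2_range() drops it. */
	};

Buffer_head-based filesystems would set .invalidate_folio to
block_invalidate_folio() instead, which is why folio_invalidate() above
can drop the old CONFIG_BLOCK fallback: filesystems that relied on it
are expected to set the hook explicitly.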