author    Matthew Wilcox (Oracle) <willy@infradead.org>  2024-02-15 07:36:42 +0100
committer Andrew Morton <akpm@linux-foundation.org>      2024-02-23 17:48:36 -0800
commit    b1793929b7dc17842a5e4377796b005d3efd61bc (patch)
tree      cd6784ec220ef5e3121ee6a8c5fe56d6c5ac26c1 /mm/page-writeback.c
parent    f946e0d22e22813ef78af000486d1d7332cf14e4 (diff)
writeback: factor folio_prepare_writeback() out of write_cache_pages()
Reduce write_cache_pages() by about 30 lines; much of it is commentary,
but it all bundles nicely into an obvious function.

[hch@lst.de: rename should_writeback_folio to folio_prepare_writeback per Jan]
Link: https://lkml.kernel.org/r/20240215063649.2164017-8-hch@lst.de
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Brian Foster <bfoster@redhat.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Acked-by: Dave Chinner <dchinner@redhat.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
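For context, write_cache_pages() is the generic helper that filesystems call from their ->writepages() methods; the per-folio loop this patch trims is the one that walks the dirty folios and hands each to the caller's callback. A minimal, hypothetical caller could look roughly like the sketch below (example_writepages and example_writepage are made-up names, not part of this patch or of any in-tree filesystem):

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>

/* Hypothetical per-folio callback; a real filesystem submits I/O here. */
static int example_writepage(struct folio *folio,
		struct writeback_control *wbc, void *data)
{
	/* ... build and submit a bio for the folio's dirty range ... */
	folio_unlock(folio);
	return 0;
}

/* Hypothetical ->writepages(): hand the dirty-folio walk to the core. */
static int example_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	/*
	 * write_cache_pages() locks each dirty folio, runs the checks now
	 * collected in folio_prepare_writeback(), and calls the callback
	 * above for every folio that still needs writing.
	 */
	return write_cache_pages(mapping, wbc, example_writepage, NULL);
}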
Diffstat (limited to 'mm/page-writeback.c')
-rw-r--r--  mm/page-writeback.c  61
1 file changed, 34 insertions(+), 27 deletions(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 3a1e23bed405..4bc55ce9597d 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2360,6 +2360,38 @@ void tag_pages_for_writeback(struct address_space *mapping,
}
EXPORT_SYMBOL(tag_pages_for_writeback);
+static bool folio_prepare_writeback(struct address_space *mapping,
+ struct writeback_control *wbc, struct folio *folio)
+{
+ /*
+ * Folio truncated or invalidated. We can freely skip it then,
+ * even for data integrity operations: the folio has disappeared
+ * concurrently, so there could be no real expectation of this
+ * data integrity operation even if there is now a new, dirty
+ * folio at the same pagecache index.
+ */
+ if (unlikely(folio->mapping != mapping))
+ return false;
+
+ /*
+ * Did somebody else write it for us?
+ */
+ if (!folio_test_dirty(folio))
+ return false;
+
+ if (folio_test_writeback(folio)) {
+ if (wbc->sync_mode == WB_SYNC_NONE)
+ return false;
+ folio_wait_writeback(folio);
+ }
+ BUG_ON(folio_test_writeback(folio));
+
+ if (!folio_clear_dirty_for_io(folio))
+ return false;
+
+ return true;
+}
+
/**
* write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
* @mapping: address space structure to write
@@ -2430,38 +2462,13 @@ int write_cache_pages(struct address_space *mapping,
for (i = 0; i < nr_folios; i++) {
folio = fbatch.folios[i];
folio_lock(folio);
-
- /*
- * Page truncated or invalidated. We can freely skip it
- * then, even for data integrity operations: the page
- * has disappeared concurrently, so there could be no
- * real expectation of this data integrity operation
- * even if there is now a new, dirty page at the same
- * pagecache address.
- */
- if (unlikely(folio->mapping != mapping)) {
-continue_unlock:
+ if (!folio_prepare_writeback(mapping, wbc, folio)) {
folio_unlock(folio);
continue;
}
- if (!folio_test_dirty(folio)) {
- /* someone wrote it for us */
- goto continue_unlock;
- }
-
- if (folio_test_writeback(folio)) {
- if (wbc->sync_mode != WB_SYNC_NONE)
- folio_wait_writeback(folio);
- else
- goto continue_unlock;
- }
-
- BUG_ON(folio_test_writeback(folio));
- if (!folio_clear_dirty_for_io(folio))
- goto continue_unlock;
-
trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
+
error = writepage(folio, wbc, data);
wbc->nr_to_write -= folio_nr_pages(folio);