author		Ritesh Harjani (IBM) <ritesh.list@gmail.com>	2023-07-10 14:12:24 -0700
committer	Ritesh Harjani (IBM) <ritesh.list@gmail.com>	2023-07-25 10:55:56 +0530
commit		7f79d85b525b3494ab0d28ba1fd2a01874d0fc8a (patch)
tree		4abade11ee8d4d3add238e7c4ce8aab510db80f9 /fs/iomap
parent		0af2b37d8e7a3da90c94862abb18c8b2853f81ea (diff)
iomap: Refactor iomap_write_delalloc_punch() function out
This patch factors the iomap_write_delalloc_punch() function out. This function is responsible for the actual punch-out operation.

The reason for doing this is to avoid deep indentation when we bring punch-out of individual non-dirty blocks within a dirty folio in a later patch (which adds per-block dirty status handling to iomap) to avoid delalloc block leaks.

Signed-off-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
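For context, the iomap_punch_t type taken by the new helper is a function-pointer typedef used by the delalloc punch machinery in buffered-io.c. A minimal sketch of its likely shape, together with a hypothetical filesystem-side punch callback (example_punch_delalloc() is illustrative only and not part of this patch):

	/* Callback shape used by the delalloc punch helpers in buffered-io.c. */
	typedef int (*iomap_punch_t)(struct inode *inode, loff_t offset,
			loff_t length);

	/*
	 * Hypothetical callback: release delalloc extents backing
	 * [offset, offset + length) that were reserved but never written.
	 * A real filesystem would call into its own extent-removal code
	 * here; XFS, for instance, punches delalloc extents via
	 * xfs_bmap_punch_delalloc_range().
	 */
	static int example_punch_delalloc(struct inode *inode, loff_t offset,
			loff_t length)
	{
		/* filesystem-specific delalloc extent removal goes here */
		return 0;
	}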
Diffstat (limited to 'fs/iomap')
-rw-r--r--	fs/iomap/buffered-io.c	54
1 file changed, 34 insertions(+), 20 deletions(-)
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 864a2358e5d2..baed68e2b03a 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -883,6 +883,33 @@ iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
 }
 EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
 
+static int iomap_write_delalloc_punch(struct inode *inode, struct folio *folio,
+		loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte,
+		iomap_punch_t punch)
+{
+	int ret = 0;
+
+	if (!folio_test_dirty(folio))
+		return ret;
+
+	/* if dirty, punch up to offset */
+	if (start_byte > *punch_start_byte) {
+		ret = punch(inode, *punch_start_byte,
+				start_byte - *punch_start_byte);
+		if (ret)
+			return ret;
+	}
+
+	/*
+	 * Make sure the next punch start is correctly bound to
+	 * the end of this data range, not the end of the folio.
+	 */
+	*punch_start_byte = min_t(loff_t, end_byte,
+			folio_pos(folio) + folio_size(folio));
+
+	return ret;
+}
+
 /*
  * Scan the data range passed to us for dirty page cache folios. If we find a
  * dirty folio, punch out the preceding range and update the offset from which
@@ -906,6 +933,7 @@ static int iomap_write_delalloc_scan(struct inode *inode,
 {
 	while (start_byte < end_byte) {
 		struct folio *folio;
+		int ret;
 
 		/* grab locked page */
 		folio = filemap_lock_folio(inode->i_mapping,
@@ -916,26 +944,12 @@ static int iomap_write_delalloc_scan(struct inode *inode,
 			continue;
 		}
 
-		/* if dirty, punch up to offset */
-		if (folio_test_dirty(folio)) {
-			if (start_byte > *punch_start_byte) {
-				int error;
-
-				error = punch(inode, *punch_start_byte,
-					start_byte - *punch_start_byte);
-				if (error) {
-					folio_unlock(folio);
-					folio_put(folio);
-					return error;
-				}
-			}
-
-			/*
-			 * Make sure the next punch start is correctly bound to
-			 * the end of this data range, not the end of the folio.
-			 */
-			*punch_start_byte = min_t(loff_t, end_byte,
-					folio_pos(folio) + folio_size(folio));
+		ret = iomap_write_delalloc_punch(inode, folio, punch_start_byte,
+				start_byte, end_byte, punch);
+		if (ret) {
+			folio_unlock(folio);
+			folio_put(folio);
+			return ret;
 		}
 
 		/* move offset to start of next folio in range */
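For a sense of where this helper sits in the call chain: iomap_write_delalloc_scan() is driven from iomap_file_buffered_write_punch_delalloc(), which a filesystem calls from its ->iomap_end handler when a buffered write ends short, so that delalloc blocks reserved for the unwritten tail get punched out. A hedged sketch of such a handler (the handler name is hypothetical; the pattern follows what XFS does with its own punch callback):

	/*
	 * Hypothetical ->iomap_end handler: on a short buffered write over a
	 * delalloc mapping, punch out delalloc blocks that were reserved but
	 * never written, using the example_punch_delalloc() callback sketched
	 * earlier.
	 */
	static int example_buffered_write_iomap_end(struct inode *inode,
			loff_t pos, loff_t length, ssize_t written,
			unsigned flags, struct iomap *iomap)
	{
		if (iomap->type != IOMAP_DELALLOC)
			return 0;

		return iomap_file_buffered_write_punch_delalloc(inode, iomap,
				pos, length, written, example_punch_delalloc);
	}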