 fs/iomap/buffered-io.c | 28 ++++++++++++++++++++++++----
 1 file changed, 24 insertions(+), 4 deletions(-)
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index cb5aa3cded0e..0708be776740 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -1403,6 +1403,10 @@ iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
 		.len		= len,
 		.flags		= IOMAP_ZERO,
 	};
+	struct address_space *mapping = inode->i_mapping;
+	unsigned int blocksize = i_blocksize(inode);
+	unsigned int off = pos & (blocksize - 1);
+	loff_t plen = min_t(loff_t, len, blocksize - off);
 	int ret;
 	bool range_dirty;
 
@@ -1412,12 +1416,28 @@ iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
 	 * mapping converts on writeback completion and so must be zeroed.
 	 *
	 * The simplest way to deal with this across a range is to flush
-	 * pagecache and process the updated mappings. To avoid an unconditional
-	 * flush, check pagecache state and only flush if dirty and the fs
-	 * returns a mapping that might convert on writeback.
+	 * pagecache and process the updated mappings. To avoid excessive
+	 * flushing on partial eof zeroing, special case it to zero the
+	 * unaligned start portion if already dirty in pagecache.
+	 */
+	if (off &&
+	    filemap_range_needs_writeback(mapping, pos, pos + plen - 1)) {
+		iter.len = plen;
+		while ((ret = iomap_iter(&iter, ops)) > 0)
+			iter.processed = iomap_zero_iter(&iter, did_zero);
+
+		iter.len = len - (iter.pos - pos);
+		if (ret || !iter.len)
+			return ret;
+	}
+
+	/*
+	 * To avoid an unconditional flush, check pagecache state and only flush
+	 * if dirty and the fs returns a mapping that might convert on
+	 * writeback.
 	 */
 	range_dirty = filemap_range_needs_writeback(inode->i_mapping,
-			pos, pos + len - 1);
+			iter.pos, iter.pos + iter.len - 1);
 	while ((ret = iomap_iter(&iter, ops)) > 0) {
 		const struct iomap *srcmap = iomap_iter_srcmap(&iter);
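
For context, a minimal user-space sketch of the sub-block arithmetic the patch introduces: off is the byte offset of pos within its block, and plen caps the first zeroing pass at the end of that partial block. Only the off/plen expressions mirror the patch; the sample values and the main() harness are illustrative assumptions, not kernel code, and a power-of-two block size is assumed (as i_blocksize() provides).

/*
 * Illustrative user-space sketch (not kernel code) of the off/plen
 * computation added above. Assumes blocksize is a power of two.
 */
#include <stdio.h>

int main(void)
{
	unsigned int blocksize = 4096;	/* stand-in for i_blocksize(inode) */
	long long pos = 12388;		/* unaligned start of the zero range */
	long long len = 10000;		/* total length of the zero range */

	/* Offset of pos within its block; nonzero means a partial first block. */
	unsigned int off = pos & (blocksize - 1);

	/* Length of the partial leading portion, as min_t(loff_t, ...) does. */
	long long plen = len;
	if (plen > (long long)(blocksize - off))
		plen = blocksize - off;

	printf("off=%u plen=%lld\n", off, plen);	/* off=100 plen=3996 */

	/*
	 * The patch zeroes [pos, pos + plen) through pagecache first when
	 * that sub-range is dirty, then handles the remainder
	 * [pos + plen, pos + len) via the flush-checked path.
	 */
	return 0;
}

After that first pass, iter.pos has advanced through the partial block, so iter.len = len - (iter.pos - pos) leaves exactly the remaining range for the flush-checked loop in the rest of the function.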