author		David Howells <dhowells@redhat.com>	2024-02-23 08:04:33 +0000
committer	David Howells <dhowells@redhat.com>	2024-04-29 15:01:42 +0100
commit		5f24162f873f08681804059e6de70d77c3e4cea2 (patch)
tree		dfec3afaa1c7f65815dc458700d1fdd7efdd70e2 /fs
parent		e67572cd2204894179d89bd7b984072f19313b03 (diff)
netfs: Update i_blocks when write committed to pagecache
Update i_blocks along with i_size when we finish making a write to the
pagecache, so that it reflects the amount of space we expect the write to
consume until the server tells us the real values.

This preserves the behaviour of cifs commit
dbfdff402d89854126658376cbcb08363194d3cd ("smb3: update allocation size more
accurately on write completion"), which would otherwise be lost by the cifs
part of the netfs writeback rewrite.
Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
cc: Steve French <sfrench@samba.org>
cc: Shyam Prasad N <nspmangalore@gmail.com>
cc: Rohith Surabattula <rohiths.msft@gmail.com>
cc: linux-cifs@vger.kernel.org
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org
cc: linux-mm@kvack.org
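In the generic path added below, the i_blocks estimate is plain sector
arithmetic: only the bytes that spill past the sector boundary containing the
old i_size add new 512-byte blocks, and the result is clamped to the number of
sectors the new file size can occupy. The following is a minimal,
self-contained userspace sketch of that calculation; SECTOR_SIZE,
div_round_up() and estimate_i_blocks() are illustrative stand-ins here, not
the kernel's definitions.

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SIZE 512ULL	/* i_blocks is counted in 512-byte units */

static uint64_t div_round_up(uint64_t n, uint64_t d)
{
	return (n + d - 1) / d;
}

/*
 * Estimate the new block count after 'copied' bytes have been written so
 * that the file now ends at 'pos', given the old i_size and old i_blocks,
 * mirroring the arithmetic in netfs_update_i_size() from the patch below.
 */
static uint64_t estimate_i_blocks(uint64_t i_size, uint64_t pos,
				  uint64_t copied, uint64_t i_blocks)
{
	/* Room between the old i_size and the next sector boundary
	 * (a full sector when i_size is already aligned), as the
	 * patch computes it; bytes landing there add no new blocks. */
	uint64_t gap = SECTOR_SIZE - (i_size & (SECTOR_SIZE - 1));

	if (copied > gap) {
		/* Whole new sectors consumed beyond that partial sector. */
		uint64_t add = div_round_up(copied - gap, SECTOR_SIZE);
		/* Never claim more sectors than the new EOF can occupy. */
		uint64_t cap = div_round_up(pos, SECTOR_SIZE);

		i_blocks = i_blocks + add < cap ? i_blocks + add : cap;
	}
	return i_blocks;
}

int main(void)
{
	/* 100 bytes appended when i_size was 1000 and i_blocks was 2. */
	printf("new i_blocks = %llu\n",
	       (unsigned long long)estimate_i_blocks(1000, 1100, 100, 2));
	return 0;
}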
Diffstat (limited to 'fs')
-rw-r--r--	fs/netfs/buffered_write.c	45
1 file changed, 34 insertions, 11 deletions
diff --git a/fs/netfs/buffered_write.c b/fs/netfs/buffered_write.c
index 267b622d923b..f7455a579f21 100644
--- a/fs/netfs/buffered_write.c
+++ b/fs/netfs/buffered_write.c
@@ -130,6 +130,37 @@ static struct folio *netfs_grab_folio_for_write(struct address_space *mapping,
 				   mapping_gfp_mask(mapping));
 }
 
+/*
+ * Update i_size and estimate the update to i_blocks to reflect the additional
+ * data written into the pagecache until we can find out from the server what
+ * the values actually are.
+ */
+static void netfs_update_i_size(struct netfs_inode *ctx, struct inode *inode,
+				loff_t i_size, loff_t pos, size_t copied)
+{
+	blkcnt_t add;
+	size_t gap;
+
+	if (ctx->ops->update_i_size) {
+		ctx->ops->update_i_size(inode, pos);
+		return;
+	}
+
+	i_size_write(inode, pos);
+#if IS_ENABLED(CONFIG_FSCACHE)
+	fscache_update_cookie(ctx->cache, NULL, &pos);
+#endif
+
+	gap = SECTOR_SIZE - (i_size & (SECTOR_SIZE - 1));
+	if (copied > gap) {
+		add = DIV_ROUND_UP(copied - gap, SECTOR_SIZE);
+
+		inode->i_blocks = min_t(blkcnt_t,
+					DIV_ROUND_UP(pos, SECTOR_SIZE),
+					inode->i_blocks + add);
+	}
+}
+
 /**
  * netfs_perform_write - Copy data into the pagecache.
  * @iocb: The operation parameters
@@ -351,18 +382,10 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
 		trace_netfs_folio(folio, trace);
 
 		/* Update the inode size if we moved the EOF marker */
-		i_size = i_size_read(inode);
 		pos += copied;
-		if (pos > i_size) {
-			if (ctx->ops->update_i_size) {
-				ctx->ops->update_i_size(inode, pos);
-			} else {
-				i_size_write(inode, pos);
-#if IS_ENABLED(CONFIG_FSCACHE)
-				fscache_update_cookie(ctx->cache, NULL, &pos);
-#endif
-			}
-		}
+		i_size = i_size_read(inode);
+		if (pos > i_size)
+			netfs_update_i_size(ctx, inode, i_size, pos, copied);
 		written += copied;
 
 		if (likely(!wreq)) {
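As a worked example of the estimate (the numbers are illustrative, not taken
from the patch): with i_size = 1000 and i_blocks = 2, a 100-byte copy moves
pos to 1100. The gap to the next sector boundary is 512 - (1000 & 511) = 24
bytes; since copied (100) exceeds that, add = DIV_ROUND_UP(100 - 24, 512) = 1,
and i_blocks becomes min(DIV_ROUND_UP(1100, 512), 2 + 1) = 3, which is what
the sketch above prints. A copy that fits entirely within the 24-byte gap
leaves i_blocks untouched, and a filesystem that supplies ->update_i_size()
bypasses the generic estimate altogether, just as it did before the refactor.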