author     Matthew Wilcox (Oracle) <willy@infradead.org>   2023-08-14 18:52:08 +0100
committer  David Sterba <dsterba@suse.com>                 2023-09-13 18:40:54 +0200
commit     06ed09351b67eb1114ae106a87a0ee3ea9adb3db (patch)
tree       446f1bdc227effc4435c2fc322d521d03093f333 /fs/btrfs/verity.c
parent     5facccc9402301d67d48bef06159b91f7e41efc0 (diff)
btrfs: convert btrfs_read_merkle_tree_page() to use a folio
Remove a number of hidden calls to compound_head() by using a folio throughout. Also follow core kernel coding style by adding the folio to the page cache immediately after allocation instead of doing the read first, then adding it to the page cache. This ordering makes subsequent readers block waiting for the first reader instead of duplicating the work only to throw it away when they find out they lost the race.

Reviewed-by: Boris Burkov <boris@bur.io>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: David Sterba <dsterba@suse.com>
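As an illustration of the ordering the message describes, here is a minimal sketch against the generic page cache API, not the btrfs code itself; the helper name read_one_folio() and the fill() callback are invented for this example, and GFP_NOFS simply mirrors the flags used in the patch below.

#include <linux/pagemap.h>

/* Sketch only: insert-then-read ordering, not the btrfs function. */
static struct folio *read_one_folio(struct address_space *mapping, pgoff_t index,
                                    int (*fill)(struct folio *folio))
{
        struct folio *folio;
        int ret;

        /* Allocate an order-0 folio using the mapping's GFP policy. */
        folio = filemap_alloc_folio(mapping_gfp_mask(mapping), 0);
        if (!folio)
                return ERR_PTR(-ENOMEM);

        /*
         * Insert before reading: on success the folio is locked and already
         * visible in the page cache, so a concurrent lookup finds it and
         * sleeps in folio_lock() instead of repeating the read.
         */
        ret = filemap_add_folio(mapping, folio, index, GFP_NOFS);
        if (ret) {
                folio_put(folio);
                return ERR_PTR(ret);    /* -EEXIST: another reader beat us */
        }

        ret = fill(folio);              /* the expensive read happens here */
        if (ret) {
                folio_unlock(folio);
                folio_put(folio);
                return ERR_PTR(ret);
        }

        folio_mark_uptodate(folio);
        folio_unlock(folio);
        return folio;
}

Compare this with the removed path in the hunks below, where the data was read into a private page first and add_to_page_cache_lru() only ran afterwards, so a racing reader could repeat the read and then fail the insert with -EEXIST.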
Diffstat (limited to 'fs/btrfs/verity.c')
-rw-r--r--  fs/btrfs/verity.c  64
1 file changed, 31 insertions, 33 deletions
diff --git a/fs/btrfs/verity.c b/fs/btrfs/verity.c
index c5ff16f9e9fa..744f4f4d4c68 100644
--- a/fs/btrfs/verity.c
+++ b/fs/btrfs/verity.c
@@ -715,7 +715,7 @@ static struct page *btrfs_read_merkle_tree_page(struct inode *inode,
                                                 pgoff_t index,
                                                 unsigned long num_ra_pages)
 {
-        struct page *page;
+        struct folio *folio;
         u64 off = (u64)index << PAGE_SHIFT;
         loff_t merkle_pos = merkle_file_pos(inode);
         int ret;
@@ -726,29 +726,36 @@ static struct page *btrfs_read_merkle_tree_page(struct inode *inode,
                 return ERR_PTR(-EFBIG);
         index += merkle_pos >> PAGE_SHIFT;
 again:
-        page = find_get_page_flags(inode->i_mapping, index, FGP_ACCESSED);
-        if (page) {
-                if (PageUptodate(page))
-                        return page;
+        folio = __filemap_get_folio(inode->i_mapping, index, FGP_ACCESSED, 0);
+        if (!IS_ERR(folio)) {
+                if (folio_test_uptodate(folio))
+                        goto out;
 
-                lock_page(page);
-                /*
-                 * We only insert uptodate pages, so !Uptodate has to be
-                 * an error
-                 */
-                if (!PageUptodate(page)) {
-                        unlock_page(page);
-                        put_page(page);
+                folio_lock(folio);
+                /* If it's not uptodate after we have the lock, we got a read error. */
+                if (!folio_test_uptodate(folio)) {
+                        folio_unlock(folio);
+                        folio_put(folio);
                         return ERR_PTR(-EIO);
                 }
-                unlock_page(page);
-                return page;
+                folio_unlock(folio);
+                goto out;
         }
 
-        page = __page_cache_alloc(mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));
-        if (!page)
+        folio = filemap_alloc_folio(mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS),
+                                    0);
+        if (!folio)
                 return ERR_PTR(-ENOMEM);
 
+        ret = filemap_add_folio(inode->i_mapping, folio, index, GFP_NOFS);
+        if (ret) {
+                folio_put(folio);
+                /* Did someone else insert a folio here? */
+                if (ret == -EEXIST)
+                        goto again;
+                return ERR_PTR(ret);
+        }
+
         /*
          * Merkle item keys are indexed from byte 0 in the merkle tree.
          * They have the form:
@@ -756,28 +763,19 @@ again:
          * [ inode objectid, BTRFS_MERKLE_ITEM_KEY, offset in bytes ]
          */
         ret = read_key_bytes(BTRFS_I(inode), BTRFS_VERITY_MERKLE_ITEM_KEY, off,
-                             page_address(page), PAGE_SIZE, page);
+                             folio_address(folio), PAGE_SIZE, &folio->page);
         if (ret < 0) {
-                put_page(page);
+                folio_put(folio);
                 return ERR_PTR(ret);
         }
         if (ret < PAGE_SIZE)
-                memzero_page(page, ret, PAGE_SIZE - ret);
+                folio_zero_segment(folio, ret, PAGE_SIZE);
 
-        SetPageUptodate(page);
-        ret = add_to_page_cache_lru(page, inode->i_mapping, index, GFP_NOFS);
+        folio_mark_uptodate(folio);
+        folio_unlock(folio);
 
-        if (!ret) {
-                /* Inserted and ready for fsverity */
-                unlock_page(page);
-        } else {
-                put_page(page);
-                /* Did someone race us into inserting this page? */
-                if (ret == -EEXIST)
-                        goto again;
-                page = ERR_PTR(ret);
-        }
-        return page;
+out:
+        return folio_file_page(folio, index);
 }
 
 /*
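For context beyond this patch: the struct page produced by folio_file_page() is handed back to fs/verity, which calls this function through the read_merkle_tree_page hook of struct fsverity_operations (include/linux/fsverity.h). A hedged sketch of that wiring; only the member relevant to this patch is shown, and the remaining btrfs verity callbacks are elided rather than reproduced from the file.

#include <linux/fsverity.h>

/* Sketch of the hookup in fs/btrfs/verity.c; other hooks omitted here. */
const struct fsverity_operations btrfs_verityops = {
        .read_merkle_tree_page  = btrfs_read_merkle_tree_page,
        /* ... enable/descriptor/write hooks ... */
};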