author    Qu Wenruo <wqu@suse.com>          2023-12-07 09:39:27 +1030
committer David Sterba <dsterba@suse.com>   2023-12-15 23:01:04 +0100
commit    082d5bb9b336d533b7b968f4f8712e7755a9876a (patch)
tree      458c3d9216a955d8247df19648c49b43b7cc83b5 /fs/btrfs/disk-io.c
parent    09e6cef19c9fc0e10547135476865b5272aa0406 (diff)
btrfs: migrate extent_buffer::pages[] to folio
For now extent_buffer::pages[] still only accepts single page pointers, thus we can migrate to folios pretty easily.

For a single page, page and folio are 1:1 mapped, including their page flags.

This patch only does the conversion from struct page to struct folio, providing the first step towards higher order folios in the future.

The conversion is pretty simple:

- extent_buffer::pages[] -> extent_buffer::folios[]
- page_address(eb->pages[i]) -> folio_address(eb->folios[i])
- eb->pages[i] -> folio_page(eb->folios[i], 0)

There will be more specific cleanups preparing for the incoming higher order folio support.

Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
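As an illustration only (not part of the patch), here is a minimal sketch of the access pattern after the conversion, assuming order-0 folios where page and folio are mapped 1:1. The helper name dump_eb_folios_sketch() is invented for this example; extent_buffer::folios[], num_extent_pages(), folio_address(), folio_pos() and folio_page() are the real fields and helpers used in the diff below.

/*
 * Illustrative sketch only: with order-0 folios, folio_address(),
 * folio_pos() and folio_page(folio, 0) are drop-in replacements for
 * page_address(), page_offset() and the raw eb->pages[i] pointer.
 */
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/printk.h>
#include "extent_io.h"	/* btrfs-local: struct extent_buffer, num_extent_pages() */

static void dump_eb_folios_sketch(const struct extent_buffer *eb)
{
	int num_pages = num_extent_pages(eb);
	int i;

	for (i = 0; i < num_pages; i++) {
		struct folio *folio = eb->folios[i];	  /* was eb->pages[i] */
		void *kaddr = folio_address(folio);	  /* was page_address(eb->pages[i]) */
		struct page *page = folio_page(folio, 0); /* where a struct page is still needed */

		pr_info("eb %llu folio %d: pos %llu kaddr %p page %p\n",
			eb->start, i, (u64)folio_pos(folio), kaddr, page);
	}
}

Because the folios are order-0, folio_page(folio, 0) always returns the folio's single page, which is why the conversion can be done mechanically before any higher order folio support lands.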
Diffstat (limited to 'fs/btrfs/disk-io.c')
-rw-r--r--  fs/btrfs/disk-io.c | 19 ++++++++++---------
1 file changed, 10 insertions(+), 9 deletions(-)
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 1b6afff66c32..74ccf43d47bc 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -89,7 +89,7 @@ static void csum_tree_block(struct extent_buffer *buf, u8 *result)
first_page_part = fs_info->nodesize;
num_pages = 1;
} else {
- kaddr = page_address(buf->pages[0]);
+ kaddr = folio_address(buf->folios[0]);
first_page_part = min_t(u32, PAGE_SIZE, fs_info->nodesize);
num_pages = num_extent_pages(buf);
}
@@ -98,7 +98,7 @@ static void csum_tree_block(struct extent_buffer *buf, u8 *result)
first_page_part - BTRFS_CSUM_SIZE);
for (i = 1; i < num_pages && INLINE_EXTENT_BUFFER_PAGES > 1; i++) {
- kaddr = page_address(buf->pages[i]);
+ kaddr = folio_address(buf->folios[i]);
crypto_shash_update(shash, kaddr, PAGE_SIZE);
}
memset(result, 0, BTRFS_CSUM_SIZE);
@@ -184,13 +184,14 @@ static int btrfs_repair_eb_io_failure(const struct extent_buffer *eb,
return -EROFS;
for (i = 0; i < num_pages; i++) {
- struct page *p = eb->pages[i];
- u64 start = max_t(u64, eb->start, page_offset(p));
- u64 end = min_t(u64, eb->start + eb->len, page_offset(p) + PAGE_SIZE);
+ u64 start = max_t(u64, eb->start, folio_pos(eb->folios[i]));
+ u64 end = min_t(u64, eb->start + eb->len,
+ folio_pos(eb->folios[i]) + PAGE_SIZE);
u32 len = end - start;
ret = btrfs_repair_io_failure(fs_info, 0, start, len,
- start, p, offset_in_page(start), mirror_num);
+ start, folio_page(eb->folios[i], 0),
+ offset_in_page(start), mirror_num);
if (ret)
break;
}
@@ -277,8 +278,8 @@ blk_status_t btree_csum_one_bio(struct btrfs_bio *bbio)
if (WARN_ON_ONCE(found_start != eb->start))
return BLK_STS_IOERR;
- if (WARN_ON(!btrfs_page_test_uptodate(fs_info, eb->pages[0], eb->start,
- eb->len)))
+ if (WARN_ON(!btrfs_page_test_uptodate(fs_info, folio_page(eb->folios[0], 0),
+ eb->start, eb->len)))
return BLK_STS_IOERR;
ASSERT(memcmp_extent_buffer(eb, fs_info->fs_devices->metadata_uuid,
@@ -387,7 +388,7 @@ int btrfs_validate_extent_buffer(struct extent_buffer *eb,
}
csum_tree_block(eb, result);
- header_csum = page_address(eb->pages[0]) +
+ header_csum = folio_address(eb->folios[0]) +
get_eb_offset_in_page(eb, offsetof(struct btrfs_header, csum));
if (memcmp(result, header_csum, csum_size) != 0) {