summaryrefslogtreecommitdiffstats
path: root/fs/btrfs/extent_io.c
diff options
context:
space:
mode:
authorQu Wenruo <wqu@suse.com>2023-12-07 09:39:27 +1030
committerDavid Sterba <dsterba@suse.com>2023-12-15 23:01:04 +0100
commit082d5bb9b336d533b7b968f4f8712e7755a9876a (patch)
tree458c3d9216a955d8247df19648c49b43b7cc83b5 /fs/btrfs/extent_io.c
parent09e6cef19c9fc0e10547135476865b5272aa0406 (diff)
downloadlinux-stable-082d5bb9b336d533b7b968f4f8712e7755a9876a.tar.gz
linux-stable-082d5bb9b336d533b7b968f4f8712e7755a9876a.tar.bz2
linux-stable-082d5bb9b336d533b7b968f4f8712e7755a9876a.zip
btrfs: migrate extent_buffer::pages[] to folio
For now extent_buffer::pages[] are still only accepting single page pointers, thus we can migrate to folios pretty easily. As for single page, page and folio are 1:1 mapped, including their page flags. This patch would just do the conversion from struct page to struct folio, providing the first step to higher order folio in the future. This conversion is pretty simple: - extent_buffer::pages[] -> extent_buffer::folios[] - page_address(eb->pages[i]) -> folio_address(eb->folios[i]) - eb->pages[i] -> folio_page(eb->folios[i], 0) There would be more specific cleanups preparing for the incoming higher order folio support. Signed-off-by: Qu Wenruo <wqu@suse.com> Reviewed-by: David Sterba <dsterba@suse.com> Signed-off-by: David Sterba <dsterba@suse.com>
Diffstat (limited to 'fs/btrfs/extent_io.c')
-rw-r--r--fs/btrfs/extent_io.c125
1 files changed, 73 insertions, 52 deletions
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index cecd8939e99a..557b9c65840e 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -712,6 +712,26 @@ int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array,
return 0;
}
+/*
+ * Populate needed folios for the extent buffer.
+ *
+ * For now, the folios populated are always in order 0 (aka, single page).
+ */
+static int alloc_eb_folio_array(struct extent_buffer *eb, gfp_t extra_gfp)
+{
+ struct page *page_array[INLINE_EXTENT_BUFFER_PAGES] = { 0 };
+ int num_pages = num_extent_pages(eb);
+ int ret;
+
+ ret = btrfs_alloc_page_array(num_pages, page_array, extra_gfp);
+ if (ret < 0)
+ return ret;
+
+ for (int i = 0; i < num_pages; i++)
+ eb->folios[i] = page_folio(page_array[i]);
+ return 0;
+}
+
static bool btrfs_bio_is_contig(struct btrfs_bio_ctrl *bio_ctrl,
struct page *page, u64 disk_bytenr,
unsigned int pg_offset)
@@ -1688,7 +1708,7 @@ static noinline_for_stack void write_one_eb(struct extent_buffer *eb,
bbio->inode = BTRFS_I(eb->fs_info->btree_inode);
bbio->file_offset = eb->start;
if (fs_info->nodesize < PAGE_SIZE) {
- struct page *p = eb->pages[0];
+ struct page *p = folio_page(eb->folios[0], 0);
lock_page(p);
btrfs_subpage_set_writeback(fs_info, p, eb->start, eb->len);
@@ -1702,7 +1722,7 @@ static noinline_for_stack void write_one_eb(struct extent_buffer *eb,
unlock_page(p);
} else {
for (int i = 0; i < num_extent_pages(eb); i++) {
- struct page *p = eb->pages[i];
+ struct page *p = folio_page(eb->folios[i], 0);
lock_page(p);
clear_page_dirty_for_io(p);
@@ -3160,7 +3180,7 @@ static void btrfs_release_extent_buffer_pages(struct extent_buffer *eb)
num_pages = num_extent_pages(eb);
for (i = 0; i < num_pages; i++) {
- struct page *page = eb->pages[i];
+ struct page *page = folio_page(eb->folios[i], 0);
if (!page)
continue;
@@ -3222,7 +3242,7 @@ struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src)
*/
set_bit(EXTENT_BUFFER_UNMAPPED, &new->bflags);
- ret = btrfs_alloc_page_array(num_pages, new->pages, 0);
+ ret = alloc_eb_folio_array(new, 0);
if (ret) {
btrfs_release_extent_buffer(new);
return NULL;
@@ -3230,7 +3250,7 @@ struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src)
for (i = 0; i < num_pages; i++) {
int ret;
- struct page *p = new->pages[i];
+ struct page *p = folio_page(new->folios[i], 0);
ret = attach_extent_buffer_page(new, p, NULL);
if (ret < 0) {
@@ -3258,12 +3278,12 @@ struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
return NULL;
num_pages = num_extent_pages(eb);
- ret = btrfs_alloc_page_array(num_pages, eb->pages, 0);
+ ret = alloc_eb_folio_array(eb, 0);
if (ret)
goto err;
for (i = 0; i < num_pages; i++) {
- struct page *p = eb->pages[i];
+ struct page *p = folio_page(eb->folios[i], 0);
ret = attach_extent_buffer_page(eb, p, NULL);
if (ret < 0)
@@ -3277,9 +3297,9 @@ struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
return eb;
err:
for (i = 0; i < num_pages; i++) {
- if (eb->pages[i]) {
- detach_extent_buffer_page(eb, eb->pages[i]);
- __free_page(eb->pages[i]);
+ if (eb->folios[i]) {
+ detach_extent_buffer_page(eb, folio_page(eb->folios[i], 0));
+ __free_page(folio_page(eb->folios[i], 0));
}
}
__free_extent_buffer(eb);
@@ -3337,7 +3357,7 @@ static void mark_extent_buffer_accessed(struct extent_buffer *eb,
num_pages = num_extent_pages(eb);
for (i = 0; i < num_pages; i++) {
- struct page *p = eb->pages[i];
+ struct page *p = folio_page(eb->folios[i], 0);
if (p != accessed)
mark_page_accessed(p);
@@ -3480,8 +3500,8 @@ static int check_eb_alignment(struct btrfs_fs_info *fs_info, u64 start)
/*
- * Return 0 if eb->pages[i] is attached to btree inode successfully.
- * Return >0 if there is already annother extent buffer for the range,
+ * Return 0 if eb->folios[i] is attached to btree inode successfully.
+ * Return >0 if there is already another extent buffer for the range,
* and @found_eb_ret would be updated.
*/
static int attach_eb_page_to_filemap(struct extent_buffer *eb, int i,
@@ -3496,11 +3516,11 @@ static int attach_eb_page_to_filemap(struct extent_buffer *eb, int i,
ASSERT(found_eb_ret);
- /* Caller should ensure the page exists. */
- ASSERT(eb->pages[i]);
+ /* Caller should ensure the folio exists. */
+ ASSERT(eb->folios[i]);
retry:
- ret = filemap_add_folio(mapping, page_folio(eb->pages[i]), index + i,
+ ret = filemap_add_folio(mapping, eb->folios[i], index + i,
GFP_NOFS | __GFP_NOFAIL);
if (!ret)
return 0;
@@ -3518,8 +3538,8 @@ retry:
* We're going to reuse the existing page, can drop our page
* and subpage structure now.
*/
- __free_page(eb->pages[i]);
- eb->pages[i] = folio_page(existing_folio, 0);
+ __free_page(folio_page(eb->folios[i], 0));
+ eb->folios[i] = existing_folio;
} else {
struct extent_buffer *existing_eb;
@@ -3533,8 +3553,8 @@ retry:
return 1;
}
/* The extent buffer no longer exists, we can reuse the folio. */
- __free_page(eb->pages[i]);
- eb->pages[i] = folio_page(existing_folio, 0);
+ __free_page(folio_page(eb->folios[i], 0));
+ eb->folios[i] = existing_folio;
}
return 0;
}
@@ -3603,7 +3623,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
}
/* Allocate all pages first. */
- ret = btrfs_alloc_page_array(num_pages, eb->pages, __GFP_NOFAIL);
+ ret = alloc_eb_folio_array(eb, __GFP_NOFAIL);
if (ret < 0) {
btrfs_free_subpage(prealloc);
goto out;
@@ -3621,11 +3641,11 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
attached++;
/*
- * Only after attach_eb_page_to_filemap(), eb->pages[] is
+ * Only after attach_eb_page_to_filemap(), eb->folios[] is
* reliable, as we may choose to reuse the existing page cache
* and free the allocated page.
*/
- p = eb->pages[i];
+ p = folio_page(eb->folios[i], 0);
spin_lock(&mapping->private_lock);
/* Should not fail, as we have preallocated the memory */
ret = attach_extent_buffer_page(eb, p, prealloc);
@@ -3648,7 +3668,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
* Check if the current page is physically contiguous with previous eb
* page.
*/
- if (i && eb->pages[i - 1] + 1 != p)
+ if (i && folio_page(eb->folios[i - 1], 0) + 1 != p)
page_contig = false;
if (!btrfs_page_test_uptodate(fs_info, p, eb->start, eb->len))
@@ -3666,7 +3686,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
/* All pages are physically contiguous, can skip cross page handling. */
if (page_contig)
- eb->addr = page_address(eb->pages[0]) + offset_in_page(eb->start);
+ eb->addr = folio_address(eb->folios[0]) + offset_in_page(eb->start);
again:
ret = radix_tree_preload(GFP_NOFS);
if (ret)
@@ -3695,15 +3715,15 @@ again:
* live buffer and won't free them prematurely.
*/
for (int i = 0; i < num_pages; i++)
- unlock_page(eb->pages[i]);
+ unlock_page(folio_page(eb->folios[i], 0));
return eb;
out:
WARN_ON(!atomic_dec_and_test(&eb->refs));
for (int i = 0; i < attached; i++) {
- ASSERT(eb->pages[i]);
- detach_extent_buffer_page(eb, eb->pages[i]);
- unlock_page(eb->pages[i]);
+ ASSERT(eb->folios[i]);
+ detach_extent_buffer_page(eb, folio_page(eb->folios[i], 0));
+ unlock_page(folio_page(eb->folios[i], 0));
}
/*
* Now all pages of that extent buffer is unmapped, set UNMAPPED flag,
@@ -3822,7 +3842,7 @@ static void btree_clear_page_dirty(struct page *page)
static void clear_subpage_extent_buffer_dirty(const struct extent_buffer *eb)
{
struct btrfs_fs_info *fs_info = eb->fs_info;
- struct page *page = eb->pages[0];
+ struct page *page = folio_page(eb->folios[0], 0);
bool last;
/* btree_clear_page_dirty() needs page locked */
@@ -3874,7 +3894,7 @@ void btrfs_clear_buffer_dirty(struct btrfs_trans_handle *trans,
num_pages = num_extent_pages(eb);
for (i = 0; i < num_pages; i++) {
- page = eb->pages[i];
+ page = folio_page(eb->folios[i], 0);
if (!PageDirty(page))
continue;
lock_page(page);
@@ -3913,19 +3933,19 @@ void set_extent_buffer_dirty(struct extent_buffer *eb)
* the above race.
*/
if (subpage)
- lock_page(eb->pages[0]);
+ lock_page(folio_page(eb->folios[0], 0));
for (i = 0; i < num_pages; i++)
- btrfs_page_set_dirty(eb->fs_info, eb->pages[i],
+ btrfs_page_set_dirty(eb->fs_info, folio_page(eb->folios[i], 0),
eb->start, eb->len);
if (subpage)
- unlock_page(eb->pages[0]);
+ unlock_page(folio_page(eb->folios[0], 0));
percpu_counter_add_batch(&eb->fs_info->dirty_metadata_bytes,
eb->len,
eb->fs_info->dirty_metadata_batch);
}
#ifdef CONFIG_BTRFS_DEBUG
for (i = 0; i < num_pages; i++)
- ASSERT(PageDirty(eb->pages[i]));
+ ASSERT(PageDirty(folio_page(eb->folios[i], 0)));
#endif
}
@@ -3939,7 +3959,7 @@ void clear_extent_buffer_uptodate(struct extent_buffer *eb)
clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
num_pages = num_extent_pages(eb);
for (i = 0; i < num_pages; i++) {
- page = eb->pages[i];
+ page = folio_page(eb->folios[i], 0);
if (!page)
continue;
@@ -3965,7 +3985,7 @@ void set_extent_buffer_uptodate(struct extent_buffer *eb)
set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
num_pages = num_extent_pages(eb);
for (i = 0; i < num_pages; i++) {
- page = eb->pages[i];
+ page = folio_page(eb->folios[i], 0);
/*
* This is special handling for metadata subpage, as regular
@@ -4056,11 +4076,12 @@ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num,
bbio->file_offset = eb->start;
memcpy(&bbio->parent_check, check, sizeof(*check));
if (eb->fs_info->nodesize < PAGE_SIZE) {
- __bio_add_page(&bbio->bio, eb->pages[0], eb->len,
- eb->start - page_offset(eb->pages[0]));
+ __bio_add_page(&bbio->bio, folio_page(eb->folios[0], 0), eb->len,
+ eb->start - folio_pos(eb->folios[0]));
} else {
for (i = 0; i < num_pages; i++)
- __bio_add_page(&bbio->bio, eb->pages[i], PAGE_SIZE, 0);
+ __bio_add_page(&bbio->bio, folio_page(eb->folios[i], 0),
+ PAGE_SIZE, 0);
}
btrfs_submit_bio(bbio, mirror_num);
@@ -4131,7 +4152,7 @@ void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
offset = get_eb_offset_in_page(eb, start);
while (len > 0) {
- page = eb->pages[i];
+ page = folio_page(eb->folios[i], 0);
cur = min(len, (PAGE_SIZE - offset));
kaddr = page_address(page);
@@ -4168,7 +4189,7 @@ int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
offset = get_eb_offset_in_page(eb, start);
while (len > 0) {
- page = eb->pages[i];
+ page = folio_page(eb->folios[i], 0);
cur = min(len, (PAGE_SIZE - offset));
kaddr = page_address(page);
@@ -4206,7 +4227,7 @@ int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
offset = get_eb_offset_in_page(eb, start);
while (len > 0) {
- page = eb->pages[i];
+ page = folio_page(eb->folios[i], 0);
cur = min(len, (PAGE_SIZE - offset));
@@ -4281,7 +4302,7 @@ static void __write_extent_buffer(const struct extent_buffer *eb,
offset = get_eb_offset_in_page(eb, start);
while (len > 0) {
- page = eb->pages[i];
+ page = folio_page(eb->folios[i], 0);
if (check_uptodate)
assert_eb_page_uptodate(eb, page);
@@ -4319,7 +4340,7 @@ static void memset_extent_buffer(const struct extent_buffer *eb, int c,
unsigned long index = get_eb_page_index(cur);
unsigned int offset = get_eb_offset_in_page(eb, cur);
unsigned int cur_len = min(start + len - cur, PAGE_SIZE - offset);
- struct page *page = eb->pages[index];
+ struct page *page = folio_page(eb->folios[index], 0);
assert_eb_page_uptodate(eb, page);
memset_page(page, offset, c, cur_len);
@@ -4347,7 +4368,7 @@ void copy_extent_buffer_full(const struct extent_buffer *dst,
unsigned long index = get_eb_page_index(cur);
unsigned long offset = get_eb_offset_in_page(src, cur);
unsigned long cur_len = min(src->len, PAGE_SIZE - offset);
- void *addr = page_address(src->pages[index]) + offset;
+ void *addr = folio_address(src->folios[index]) + offset;
write_extent_buffer(dst, addr, cur, cur_len);
@@ -4376,7 +4397,7 @@ void copy_extent_buffer(const struct extent_buffer *dst,
offset = get_eb_offset_in_page(dst, dst_offset);
while (len > 0) {
- page = dst->pages[i];
+ page = folio_page(dst->folios[i], 0);
assert_eb_page_uptodate(dst, page);
cur = min(len, (unsigned long)(PAGE_SIZE - offset));
@@ -4439,7 +4460,7 @@ int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start,
size_t offset;
eb_bitmap_offset(eb, start, nr, &i, &offset);
- page = eb->pages[i];
+ page = folio_page(eb->folios[i], 0);
assert_eb_page_uptodate(eb, page);
kaddr = page_address(page);
return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1)));
@@ -4451,7 +4472,7 @@ static u8 *extent_buffer_get_byte(const struct extent_buffer *eb, unsigned long
if (check_eb_range(eb, bytenr, 1))
return NULL;
- return page_address(eb->pages[index]) + get_eb_offset_in_page(eb, bytenr);
+ return folio_address(eb->folios[index]) + get_eb_offset_in_page(eb, bytenr);
}
/*
@@ -4558,7 +4579,7 @@ void memcpy_extent_buffer(const struct extent_buffer *dst,
unsigned long pg_off = get_eb_offset_in_page(dst, cur_src);
unsigned long cur_len = min(src_offset + len - cur_src,
PAGE_SIZE - pg_off);
- void *src_addr = page_address(dst->pages[pg_index]) + pg_off;
+ void *src_addr = folio_address(dst->folios[pg_index]) + pg_off;
const bool use_memmove = areas_overlap(src_offset + cur_off,
dst_offset + cur_off, cur_len);
@@ -4605,8 +4626,8 @@ void memmove_extent_buffer(const struct extent_buffer *dst,
cur = min_t(unsigned long, len, src_off_in_page + 1);
cur = min(cur, dst_off_in_page + 1);
- src_addr = page_address(dst->pages[src_i]) + src_off_in_page -
- cur + 1;
+ src_addr = folio_address(dst->folios[src_i]) + src_off_in_page -
+ cur + 1;
use_memmove = areas_overlap(src_end - cur + 1, dst_end - cur + 1,
cur);