path: root/fs/btrfs/compression.c
author     Christoph Hellwig <hch@lst.de>    2023-03-14 17:51:10 +0100
committer  David Sterba <dsterba@suse.com>   2023-04-17 18:01:18 +0200
commit     43fa4219bcf012385150de299364b5044de6500d (patch)
tree       19a4566c6a3e461488bbb22dc9b4ecd8a943c84a /fs/btrfs/compression.c
parent     4513cb0c40d79599f72a5d1a6ab2fb279b63500d (diff)
btrfs: simplify adding pages in btrfs_add_compressed_bio_pages
btrfs_add_compressed_bio_pages is needlessly complicated. Instead of
iterating over the logical disk offset just to add pages to the bio, use a
simple offset starting at 0, which also removes most of the clamping.
Additionally, __bio_add_page already takes care of asserting that the bio
is always properly sized, and btrfs_submit_bio, called right after, asserts
that the bio size is non-zero.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
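For illustration, here is a minimal standalone sketch of the offset-based
iteration the patch introduces. This is ordinary userspace C, not kernel
code: the PAGE_SHIFT/PAGE_SIZE values and the add_page() helper are
stand-ins for the kernel page constants and __bio_add_page().

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1u << PAGE_SHIFT)

/* Stand-in for __bio_add_page(): just report what would be added. */
static void add_page(unsigned int page_index, unsigned int len)
{
	printf("add page %u, len %u\n", page_index, len);
}

/*
 * Walk the compressed data from offset 0, adding one chunk per iteration:
 * a full page, or the shorter tail at the end of the extent.  The page
 * index is simply offset >> PAGE_SHIFT, so no bookkeeping of the logical
 * disk byte number is needed.
 */
static void add_compressed_pages(unsigned int compressed_len)
{
	unsigned int offset = 0;

	while (offset < compressed_len) {
		unsigned int len = compressed_len - offset;

		if (len > PAGE_SIZE)
			len = PAGE_SIZE;
		add_page(offset >> PAGE_SHIFT, len);
		offset += len;
	}
}

int main(void)
{
	/* Three full pages plus a 512-byte tail. */
	add_compressed_pages(3 * PAGE_SIZE + 512);
	return 0;
}

Run as is, this prints three full-page additions (pages 0-2, 4096 bytes
each) followed by a 512-byte tail on page 3, mirroring what the new loop
in the patch below hands to __bio_add_page.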
Diffstat (limited to 'fs/btrfs/compression.c')
-rw-r--r--   fs/btrfs/compression.c   34
1 file changed, 7 insertions, 27 deletions
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 1487c9413e69..44c4276741ce 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -258,37 +258,17 @@ static void end_compressed_bio_write(struct btrfs_bio *bbio)
 static void btrfs_add_compressed_bio_pages(struct compressed_bio *cb)
 {
-	struct btrfs_fs_info *fs_info = cb->bbio.inode->root->fs_info;
 	struct bio *bio = &cb->bbio.bio;
-	u64 disk_bytenr = bio->bi_iter.bi_sector << SECTOR_SHIFT;
-	u64 cur_disk_byte = disk_bytenr;
+	u32 offset = 0;
 
-	while (cur_disk_byte < disk_bytenr + cb->compressed_len) {
-		u64 offset = cur_disk_byte - disk_bytenr;
-		unsigned int index = offset >> PAGE_SHIFT;
-		unsigned int real_size;
-		unsigned int added;
-		struct page *page = cb->compressed_pages[index];
+	while (offset < cb->compressed_len) {
+		u32 len = min_t(u32, cb->compressed_len - offset, PAGE_SIZE);
 
-		/*
-		 * We have various limit on the real read size:
-		 * - page boundary
-		 * - compressed length boundary
-		 */
-		real_size = min_t(u64, U32_MAX, PAGE_SIZE - offset_in_page(offset));
-		real_size = min_t(u64, real_size, cb->compressed_len - offset);
-		ASSERT(IS_ALIGNED(real_size, fs_info->sectorsize));
-
-		added = bio_add_page(bio, page, real_size, offset_in_page(offset));
-		/*
-		 * Maximum compressed extent is smaller than bio size limit,
-		 * thus bio_add_page() should always success.
-		 */
-		ASSERT(added == real_size);
-		cur_disk_byte += added;
+		/* Maximum compressed extent is smaller than bio size limit. */
+		__bio_add_page(bio, cb->compressed_pages[offset >> PAGE_SHIFT],
+			       len, 0);
+		offset += len;
 	}
-
-	ASSERT(bio->bi_iter.bi_size);
 }
 
 /*