path: root/fs/btrfs/disk-io.c
author    Chris Mason <clm@fb.com>    2019-07-10 12:28:14 -0700
committer David Sterba <dsterba@suse.com>    2019-11-18 12:46:52 +0100
commit    08635bae0b4ceb08fe4c156a11c83baec397d36d (patch)
tree      f4f5e77f3f7b40e25720ca7f13e8f285404186bb /fs/btrfs/disk-io.c
parent    e1f60a6580c04d0d2492bb6034e968b8c29c78cf (diff)
Btrfs: stop using btrfs_schedule_bio()
btrfs_schedule_bio() hands IO off to a helper thread to do the actual submit_bio() call. This has been used to make sure async crc and compression helpers don't get stuck on IO submission.

To maintain good performance, over time the IO submission threads duplicated some IO scheduler characteristics, such as high- and low-priority IOs, and they also made some ugly assumptions about request allocation batch sizes. All of this costs at least one extra context switch during IO submission and doesn't fit well with the modern blkmq IO stack.

So, this commit stops using btrfs_schedule_bio(). We may need to adjust the number of async helper threads for crcs and compression, but long term it's a better path.

Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: Nikolay Borisov <nborisov@suse.com>
Signed-off-by: Chris Mason <clm@fb.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
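The hunks below drop the trailing async_submit flag from btrfs_map_bio(), so every caller now maps and submits the bio in its own context instead of optionally deferring to btrfs_schedule_bio(). The following compile-only sketch uses stubbed stand-in types and a stubbed btrfs_map_bio(), not the real kernel structures or implementation; it only illustrates the caller-side shape of that change.

/* Compile-only sketch: stubbed types, NOT the btrfs kernel code. */
#include <stdio.h>

struct btrfs_fs_info { int dummy; };   /* stand-in for the real struct */
struct bio { int bi_status; };         /* stand-in for the real struct */

/* New-style signature: no async_submit flag; the bio is mapped and
 * submitted in the caller's context (one less context switch). */
static int btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
			 int mirror_num)
{
	(void)fs_info;
	(void)mirror_num;
	printf("submitting bio inline, status %d\n", bio->bi_status);
	return 0;
}

int main(void)
{
	struct btrfs_fs_info fs_info = { 0 };
	struct bio bio = { 0 };

	/* Callers used to pass a 4th argument (0 = submit inline,
	 * 1 = hand the bio to the btrfs_schedule_bio() helper thread);
	 * after this commit every call site looks like this: */
	return btrfs_map_bio(&fs_info, &bio, /* mirror_num */ 0);
}

Built with a plain C compiler the stub just prints a line; in the kernel, the equivalent direct call is what removes the queue-to-helper hop described above.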
Diffstat (limited to 'fs/btrfs/disk-io.c')
-rw-r--r--  fs/btrfs/disk-io.c | 7
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 307861d0a040..bad83867c519 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -791,8 +791,7 @@ static void run_one_async_done(struct btrfs_work *work)
return;
}
- ret = btrfs_map_bio(btrfs_sb(inode->i_sb), async->bio,
- async->mirror_num, 1);
+ ret = btrfs_map_bio(btrfs_sb(inode->i_sb), async->bio, async->mirror_num);
if (ret) {
async->bio->bi_status = ret;
bio_endio(async->bio);
@@ -892,12 +891,12 @@ static blk_status_t btree_submit_bio_hook(struct inode *inode, struct bio *bio,
BTRFS_WQ_ENDIO_METADATA);
if (ret)
goto out_w_error;
- ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
+ ret = btrfs_map_bio(fs_info, bio, mirror_num);
} else if (!async) {
ret = btree_csum_one_bio(bio);
if (ret)
goto out_w_error;
- ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
+ ret = btrfs_map_bio(fs_info, bio, mirror_num);
} else {
/*
* kthread helpers are used to submit writes so that