author     Qu Wenruo <wqu@suse.com>             2022-03-11 15:38:45 +0800
committer  David Sterba <dsterba@suse.com>      2022-05-16 17:17:30 +0200
commit     e430c4287ebdafef961948c31c0b8c18dae7ee5e
tree       e762667581436f7ba0e4dbcf7cca7a21640e77dd /fs/btrfs
parent     8557635ed2b04bd54c26b203a3ae43c0e5d6f5af
btrfs: scrub: cleanup the non-RAID56 branches in scrub_stripe()
Since we have moved the handling of all other profiles into their own
functions, the main body of scrub_stripe() now only handles RAID56
profiles.
There is no need to address other profiles in the main loop of
scrub_stripe(), so we can remove those dead branches.
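For illustration only, here is a small user-space model of the dispatch
order that scrub_stripe() is left with after this cleanup. The BG_* values
are local stand-ins that mirror the BTRFS_BLOCK_GROUP_* bits, and the
strings stand in for the real kernel helpers; this is a sketch of the flow
shown in the diff below, not kernel code.

#include <stdio.h>
#include <stdint.h>

/* Local stand-ins mirroring the BTRFS_BLOCK_GROUP_* profile bits */
#define BG_RAID0	(1ULL << 3)
#define BG_RAID1	(1ULL << 4)
#define BG_DUP		(1ULL << 5)
#define BG_RAID10	(1ULL << 6)
#define BG_RAID5	(1ULL << 7)
#define BG_RAID6	(1ULL << 8)
#define BG_RAID56_MASK	(BG_RAID5 | BG_RAID6)

static const char *scrub_dispatch(uint64_t profile)
{
	/* Mirror based profiles (SINGLE/DUP/RAID1*) have their own helper */
	if (!(profile & (BG_RAID0 | BG_RAID10 | BG_RAID56_MASK)))
		return "scrub_simple_mirror()";
	/* Simple stripe based profiles (RAID0/RAID10) have their own helper */
	if (profile & (BG_RAID0 | BG_RAID10))
		return "scrub_simple_stripe()";
	/* Only RAID56 is left for the main loop of scrub_stripe() */
	return "RAID56 main loop";
}

int main(void)
{
	printf("DUP    -> %s\n", scrub_dispatch(BG_DUP));
	printf("RAID10 -> %s\n", scrub_dispatch(BG_RAID10));
	printf("RAID6  -> %s\n", scrub_dispatch(BG_RAID6));
	return 0;
}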
While we are here, also slightly change when variables like @offset,
@increment and @logical are initialized.
In particular, @logical no longer needs to be initialized before the
btrfs_extent_root()/btrfs_csum_root() lookups; we can use bg->start for
that purpose instead.
Now those variables are only initialized for the RAID56 branch.
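As a hedged illustration of the one piece of arithmetic that stays in the
RAID56 branch (increment = map->stripe_len * nr_data_stripes(map), visible
in the diff below), here is a small stand-alone sketch. The 64KiB stripe
length and the parity counts (one stripe for RAID5, two for RAID6) are the
usual btrfs values, not something introduced by this patch.

#include <stdio.h>
#include <stdint.h>

/* Stand-in for BTRFS_STRIPE_LEN (64KiB) */
#define STRIPE_LEN	(64ULL * 1024)

/*
 * increment = stripe_len * nr_data_stripes: the logical distance covered
 * by one full data stripe, i.e. how far the RAID56 loop advances per step.
 */
static uint64_t raid56_increment(int num_stripes, int nr_parity)
{
	int nr_data_stripes = num_stripes - nr_parity;

	return STRIPE_LEN * nr_data_stripes;
}

int main(void)
{
	/* 4-device RAID5: 3 data stripes -> 192KiB of logical space per step */
	printf("RAID5, 4 devices: %llu KiB\n",
	       (unsigned long long)(raid56_increment(4, 1) / 1024));
	/* 6-device RAID6: 4 data stripes -> 256KiB of logical space per step */
	printf("RAID6, 6 devices: %llu KiB\n",
	       (unsigned long long)(raid56_increment(6, 2) / 1024));
	return 0;
}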
Signed-off-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Diffstat (limited to 'fs/btrfs')
-rw-r--r-- | fs/btrfs/scrub.c | 128 |
1 file changed, 51 insertions(+), 77 deletions(-)
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index b63e9219dd14..cc2991ade2bd 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -3501,14 +3501,12 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
 	u64 flags;
 	int ret;
 	int slot;
-	u64 nstripes;
 	struct extent_buffer *l;
 	u64 physical = map->stripes[stripe_index].physical;
 	u64 logical;
 	u64 logic_end;
 	const u64 physical_end = physical + dev_extent_len;
 	u64 generation;
-	int mirror_num;
 	struct btrfs_key key;
 	u64 increment;
 	u64 offset;
@@ -3525,28 +3523,6 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
 	int extent_mirror_num;
 	int stop_loop = 0;
 
-	offset = 0;
-	nstripes = div64_u64(dev_extent_len, map->stripe_len);
-	mirror_num = 1;
-	increment = map->stripe_len;
-	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
-		offset = map->stripe_len * stripe_index;
-		increment = map->stripe_len * map->num_stripes;
-	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
-		int factor = map->num_stripes / map->sub_stripes;
-		offset = map->stripe_len * (stripe_index / map->sub_stripes);
-		increment = map->stripe_len * factor;
-		mirror_num = stripe_index % map->sub_stripes + 1;
-	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) {
-		mirror_num = stripe_index % map->num_stripes + 1;
-	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
-		mirror_num = stripe_index % map->num_stripes + 1;
-	} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
-		get_raid56_logic_offset(physical, stripe_index, map, &offset,
-					NULL);
-		increment = map->stripe_len * nr_data_stripes(map);
-	}
-
 	path = btrfs_alloc_path();
 	if (!path)
 		return -ENOMEM;
@@ -3560,20 +3536,12 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
 	path->skip_locking = 1;
 	path->reada = READA_FORWARD;
 
-	logical = chunk_logical + offset;
-	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
-		get_raid56_logic_offset(physical_end, stripe_index,
-					map, &logic_end, NULL);
-		logic_end += chunk_logical;
-	} else {
-		logic_end = logical + increment * nstripes;
-	}
 	wait_event(sctx->list_wait,
 		   atomic_read(&sctx->bios_in_flight) == 0);
 	scrub_blocked_if_needed(fs_info);
 
-	root = btrfs_extent_root(fs_info, logical);
-	csum_root = btrfs_csum_root(fs_info, logical);
+	root = btrfs_extent_root(fs_info, bg->start);
+	csum_root = btrfs_csum_root(fs_info, bg->start);
 
 	/*
 	 * collect all data csums for the stripe to avoid seeking during
@@ -3610,17 +3578,29 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
 					  bg->start, bg->length, scrub_dev,
 					  map->stripes[stripe_index].physical,
 					  stripe_index + 1);
+		offset = 0;
 		goto out;
 	}
 	if (profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
 		ret = scrub_simple_stripe(sctx, root, csum_root, bg, map,
 					  scrub_dev, stripe_index);
+		offset = map->stripe_len * (stripe_index / map->sub_stripes);
 		goto out;
 	}
 
 	/* Only RAID56 goes through the old code */
 	ASSERT(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK);
 	ret = 0;
+
+	/* Calculate the logical end of the stripe */
+	get_raid56_logic_offset(physical_end, stripe_index,
+				map, &logic_end, NULL);
+	logic_end += chunk_logical;
+
+	/* Initialize @offset in case we need to go to out: label */
+	get_raid56_logic_offset(physical, stripe_index, map, &offset, NULL);
+	increment = map->stripe_len * nr_data_stripes(map);
+
 	while (physical < physical_end) {
 		/*
 		 * canceled?
@@ -3646,22 +3626,20 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
 			scrub_blocked_if_needed(fs_info);
 		}
 
-		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
-			ret = get_raid56_logic_offset(physical, stripe_index,
-						      map, &logical,
-						      &stripe_logical);
-			logical += chunk_logical;
-			if (ret) {
-				/* it is parity strip */
-				stripe_logical += chunk_logical;
-				stripe_end = stripe_logical + increment;
-				ret = scrub_raid56_parity(sctx, map, scrub_dev,
-							  stripe_logical,
-							  stripe_end);
-				if (ret)
-					goto out;
-				goto skip;
-			}
+		ret = get_raid56_logic_offset(physical, stripe_index,
+					      map, &logical,
+					      &stripe_logical);
+		logical += chunk_logical;
+		if (ret) {
+			/* it is parity strip */
+			stripe_logical += chunk_logical;
+			stripe_end = stripe_logical + increment;
+			ret = scrub_raid56_parity(sctx, map, scrub_dev,
+						  stripe_logical,
+						  stripe_end);
+			if (ret)
+				goto out;
+			goto skip;
 		}
 
 		if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
@@ -3779,7 +3757,8 @@ again:
 
 			extent_physical = extent_logical - logical + physical;
 			extent_dev = scrub_dev;
-			extent_mirror_num = mirror_num;
+			/* For RAID56 data stripes, mirror_num is fixed to 1 */
+			extent_mirror_num = 1;
 			if (sctx->is_dev_replace)
 				scrub_remap_extent(fs_info, extent_logical,
 						   extent_len, &extent_physical,
@@ -3810,33 +3789,28 @@ again:
 
 			if (extent_logical + extent_len <
 			    key.objectid + bytes) {
-				if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
-					/*
-					 * loop until we find next data stripe
-					 * or we have finished all stripes.
-					 */
+				/*
+				 * loop until we find next data stripe
+				 * or we have finished all stripes.
+				 */
loop:
-					physical += map->stripe_len;
-					ret = get_raid56_logic_offset(physical,
-							stripe_index, map,
-							&logical, &stripe_logical);
-					logical += chunk_logical;
-
-					if (ret && physical < physical_end) {
-						stripe_logical += chunk_logical;
-						stripe_end = stripe_logical +
-								increment;
-						ret = scrub_raid56_parity(sctx,
-							map, scrub_dev,
-							stripe_logical,
-							stripe_end);
-						if (ret)
-							goto out;
-						goto loop;
-					}
-				} else {
-					physical += map->stripe_len;
-					logical += increment;
+				physical += map->stripe_len;
+				ret = get_raid56_logic_offset(physical,
+						stripe_index, map,
+						&logical, &stripe_logical);
+				logical += chunk_logical;
+
+				if (ret && physical < physical_end) {
+					stripe_logical += chunk_logical;
+					stripe_end = stripe_logical +
+							increment;
+					ret = scrub_raid56_parity(sctx,
+						map, scrub_dev,
+						stripe_logical,
+						stripe_end);
+					if (ret)
+						goto out;
+					goto loop;
 				}
 				if (logical < key.objectid + bytes) {
 					cond_resched();