author		Miao Xie <miaox@cn.fujitsu.com>	2014-10-15 11:18:44 +0800
committer	Miao Xie <miaox@cn.fujitsu.com>	2014-12-03 10:18:44 +0800
commit		b89e1b012c7f81123344058d5f245b844464d30c (patch)
tree		cffad28619632e95a9a59aa7ec50d774f012222c /fs
parent		6de65650758e819d3dfdc621010dcd6117e8d186 (diff)
Btrfs, raid56: don't change bbio and raid_map
Because we will reuse the bbio and raid_map during scrub later, it is better that we don't change any member of the bbio and don't free it at the end of the IO request. So we introduce equivalent members into the raid bio and stop accessing the bbio's members for them.

Signed-off-by: Miao Xie <miaox@cn.fujitsu.com>
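To illustrate the pattern the patch follows (not the kernel code itself): the per-request structure carries its own error and stripes_pending counters, so the shared stripe-mapping structure stays read-only and can be reused by a later scrub. A minimal userspace sketch with C11 atomics; the names block_mapping, raid_request and stripe_end_io are illustrative only, standing in for bbio/raid_map, btrfs_raid_bio and the end_io callbacks:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Shared, read-only description of the stripes (stands in for bbio/raid_map). */
    struct block_mapping {
        int num_stripes;
        int max_errors;            /* how many stripe failures are tolerable */
    };

    /* Per-request state (stands in for btrfs_raid_bio): it owns the counters,
     * so the shared mapping is never written to and can be reused afterwards. */
    struct raid_request {
        const struct block_mapping *map;
        atomic_int stripes_pending;    /* outstanding per-stripe IOs */
        atomic_int error;              /* failed per-stripe IOs */
    };

    /* Per-stripe completion: decrement the request's own counter and finish
     * the request when the last stripe IO comes back. */
    static void stripe_end_io(struct raid_request *req, int failed)
    {
        if (failed)
            atomic_fetch_add(&req->error, 1);

        /* old value 1 means this was the last pending stripe IO,
         * mirroring the atomic_dec_and_test() idiom in the patch */
        if (atomic_fetch_sub(&req->stripes_pending, 1) != 1)
            return;

        if (atomic_load(&req->error) > req->map->max_errors)
            printf("request failed: too many stripe errors\n");
        else
            printf("request completed\n");
    }

    int main(void)
    {
        struct block_mapping map = { .num_stripes = 3, .max_errors = 1 };
        struct raid_request req = { .map = &map };

        atomic_store(&req.error, 0);
        atomic_store(&req.stripes_pending, map.num_stripes);

        /* Simulate completions: one stripe fails, two succeed. */
        stripe_end_io(&req, 1);
        stripe_end_io(&req, 0);
        stripe_end_io(&req, 0);
        return 0;
    }

The patch below does exactly this move: stripes_pending and error become fields of struct btrfs_raid_bio, and the end_io paths count against the rbio instead of the bbio.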
Diffstat (limited to 'fs')
-rw-r--r--	fs/btrfs/raid56.c	42
1 file changed, 23 insertions(+), 19 deletions(-)
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index 6a41631cb959..c54b0e64c590 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -58,7 +58,6 @@
*/
#define RBIO_CACHE_READY_BIT 3
-
#define RBIO_CACHE_SIZE 1024
struct btrfs_raid_bio {
@@ -146,6 +145,10 @@ struct btrfs_raid_bio {
atomic_t refs;
+
+ atomic_t stripes_pending;
+
+ atomic_t error;
/*
* these are two arrays of pointers. We allocate the
* rbio big enough to hold them both and setup their
@@ -858,13 +861,13 @@ static void raid_write_end_io(struct bio *bio, int err)
bio_put(bio);
- if (!atomic_dec_and_test(&rbio->bbio->stripes_pending))
+ if (!atomic_dec_and_test(&rbio->stripes_pending))
return;
err = 0;
/* OK, we have read all the stripes we need to. */
- if (atomic_read(&rbio->bbio->error) > rbio->bbio->max_errors)
+ if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
err = -EIO;
rbio_orig_end_io(rbio, err, 0);
@@ -949,6 +952,8 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
rbio->faila = -1;
rbio->failb = -1;
atomic_set(&rbio->refs, 1);
+ atomic_set(&rbio->error, 0);
+ atomic_set(&rbio->stripes_pending, 0);
/*
* the stripe_pages and bio_pages array point to the extra
@@ -1169,7 +1174,7 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
spin_unlock_irq(&rbio->bio_list_lock);
- atomic_set(&rbio->bbio->error, 0);
+ atomic_set(&rbio->error, 0);
/*
* now that we've set rmw_locked, run through the
@@ -1245,8 +1250,8 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
}
}
- atomic_set(&bbio->stripes_pending, bio_list_size(&bio_list));
- BUG_ON(atomic_read(&bbio->stripes_pending) == 0);
+ atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list));
+ BUG_ON(atomic_read(&rbio->stripes_pending) == 0);
while (1) {
bio = bio_list_pop(&bio_list);
@@ -1331,11 +1336,11 @@ static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
if (rbio->faila == -1) {
/* first failure on this rbio */
rbio->faila = failed;
- atomic_inc(&rbio->bbio->error);
+ atomic_inc(&rbio->error);
} else if (rbio->failb == -1) {
/* second failure on this rbio */
rbio->failb = failed;
- atomic_inc(&rbio->bbio->error);
+ atomic_inc(&rbio->error);
} else {
ret = -EIO;
}
@@ -1394,11 +1399,11 @@ static void raid_rmw_end_io(struct bio *bio, int err)
bio_put(bio);
- if (!atomic_dec_and_test(&rbio->bbio->stripes_pending))
+ if (!atomic_dec_and_test(&rbio->stripes_pending))
return;
err = 0;
- if (atomic_read(&rbio->bbio->error) > rbio->bbio->max_errors)
+ if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
goto cleanup;
/*
@@ -1439,7 +1444,6 @@ static void async_read_rebuild(struct btrfs_raid_bio *rbio)
static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
{
int bios_to_read = 0;
- struct btrfs_bio *bbio = rbio->bbio;
struct bio_list bio_list;
int ret;
int nr_pages = DIV_ROUND_UP(rbio->stripe_len, PAGE_CACHE_SIZE);
@@ -1455,7 +1459,7 @@ static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
index_rbio_pages(rbio);
- atomic_set(&rbio->bbio->error, 0);
+ atomic_set(&rbio->error, 0);
/*
* build a list of bios to read all the missing parts of this
* stripe
@@ -1503,7 +1507,7 @@ static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
* the bbio may be freed once we submit the last bio. Make sure
* not to touch it after that
*/
- atomic_set(&bbio->stripes_pending, bios_to_read);
+ atomic_set(&rbio->stripes_pending, bios_to_read);
while (1) {
bio = bio_list_pop(&bio_list);
if (!bio)
@@ -1917,10 +1921,10 @@ static void raid_recover_end_io(struct bio *bio, int err)
set_bio_pages_uptodate(bio);
bio_put(bio);
- if (!atomic_dec_and_test(&rbio->bbio->stripes_pending))
+ if (!atomic_dec_and_test(&rbio->stripes_pending))
return;
- if (atomic_read(&rbio->bbio->error) > rbio->bbio->max_errors)
+ if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
rbio_orig_end_io(rbio, -EIO, 0);
else
__raid_recover_end_io(rbio);
@@ -1951,7 +1955,7 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
if (ret)
goto cleanup;
- atomic_set(&rbio->bbio->error, 0);
+ atomic_set(&rbio->error, 0);
/*
* read everything that hasn't failed. Thanks to the
@@ -1960,7 +1964,7 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
*/
for (stripe = 0; stripe < bbio->num_stripes; stripe++) {
if (rbio->faila == stripe || rbio->failb == stripe) {
- atomic_inc(&rbio->bbio->error);
+ atomic_inc(&rbio->error);
continue;
}
@@ -1990,7 +1994,7 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
* were up to date, or we might have no bios to read because
* the devices were gone.
*/
- if (atomic_read(&rbio->bbio->error) <= rbio->bbio->max_errors) {
+ if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) {
__raid_recover_end_io(rbio);
goto out;
} else {
@@ -2002,7 +2006,7 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
* the bbio may be freed once we submit the last bio. Make sure
* not to touch it after that
*/
- atomic_set(&bbio->stripes_pending, bios_to_read);
+ atomic_set(&rbio->stripes_pending, bios_to_read);
while (1) {
bio = bio_list_pop(&bio_list);
if (!bio)