author | NeilBrown <neilb@suse.de> | 2011-07-28 11:39:22 +1000
---|---|---
committer | NeilBrown <neilb@suse.de> | 2011-07-28 11:39:22 +1000
commit | 73e92e51b7969ef5477dd28fe2ae4d77675896f4 |
tree | 05f3d45d6c686abed79c0e5fac1d0b61ae9f93bd /drivers |
parent | bc2607f393bd4fb844c1886a02af929ca0372056 |
md/raid5. Don't write to known bad block on doubtful devices.
If a device has seen write errors, don't write to any known
bad blocks on that device.
Signed-off-by: NeilBrown <neilb@suse.de>
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/md/raid5.c | 31 |
1 files changed, 30 insertions, 1 deletions
```diff
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 9768a7d67148..3fa3f20dc5f5 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -526,6 +526,36 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 			atomic_inc(&rdev->nr_pending);
 		rcu_read_unlock();
 
+		/* We have already checked bad blocks for reads.  Now
+		 * need to check for writes.
+		 */
+		while ((rw & WRITE) && rdev &&
+		       test_bit(WriteErrorSeen, &rdev->flags)) {
+			sector_t first_bad;
+			int bad_sectors;
+			int bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
+					      &first_bad, &bad_sectors);
+			if (!bad)
+				break;
+
+			if (bad < 0) {
+				set_bit(BlockedBadBlocks, &rdev->flags);
+				if (!conf->mddev->external &&
+				    conf->mddev->flags) {
+					/* It is very unlikely, but we might
+					 * still need to write out the
+					 * bad block log - better give it
+					 * a chance*/
+					md_check_recovery(conf->mddev);
+				}
+				md_wait_for_blocked_rdev(rdev, conf->mddev);
+			} else {
+				/* Acknowledged bad block - skip the write */
+				rdev_dec_pending(rdev, conf->mddev);
+				rdev = NULL;
+			}
+		}
+
 		if (rdev) {
 			if (s->syncing || s->expanding || s->expanded)
 				md_sync_acct(rdev->bdev, STRIPE_SECTORS);
@@ -3317,7 +3347,6 @@ finish:
 
 	ops_run_io(sh, &s);
 
-
 	if (s.dec_preread_active) {
 		/* We delay this until after ops_run_io so that if make_request
 		 * is waiting on a flush, it won't continue until the writes
```
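For readers skimming the hunk above: the new while loop branches three ways on the result of the bad-block lookup. The sketch below models that decision as a standalone userspace program; `check_bad_range()`, `decide()`, and the simplified return convention (0 = no known bad blocks, 1 = bad blocks present but all acknowledged, -1 = bad blocks not yet acknowledged) are illustrative assumptions that mirror how the hunk treats the result of is_badblock(), not the kernel API itself.

```c
/*
 * Minimal sketch of the write-path decision added in the hunk above.
 * check_bad_range() is a hypothetical stand-in for is_badblock();
 * its return convention here is an assumption for illustration only.
 */
#include <stdio.h>

enum write_action {
	ISSUE_WRITE,	/* no known bad blocks - write normally          */
	SKIP_WRITE,	/* acknowledged bad block - do not write at all  */
	WAIT_FOR_LOG,	/* unacknowledged - the bad block log must be    */
			/* written out before this write can proceed     */
};

/* Hypothetical stand-in for is_badblock() on a single device. */
static int check_bad_range(long long sector, int sectors)
{
	(void)sector;
	(void)sectors;
	return 0;		/* pretend the range is clean */
}

static enum write_action decide(long long sector, int sectors,
				int write_error_seen)
{
	int bad;

	/* Devices that have never seen a write error are written normally. */
	if (!write_error_seen)
		return ISSUE_WRITE;

	bad = check_bad_range(sector, sectors);
	if (bad == 0)
		return ISSUE_WRITE;
	if (bad < 0)
		return WAIT_FOR_LOG;	/* patch sets BlockedBadBlocks and
					 * waits via md_wait_for_blocked_rdev() */
	return SKIP_WRITE;		/* patch drops the rdev reference */
}

int main(void)
{
	printf("action = %d\n", decide(1024, 8, 1));
	return 0;
}
```

In the real patch the WAIT_FOR_LOG case corresponds to setting BlockedBadBlocks and calling md_wait_for_blocked_rdev(), so the write is only issued after the bad block log has had a chance to be written out; the SKIP_WRITE case sets rdev to NULL and skips the write entirely.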