author    Mike Snitzer <snitzer@redhat.com>  2017-05-01 18:18:04 -0400
committer Mike Snitzer <snitzer@redhat.com>  2017-05-01 18:18:04 -0400
commit    7e25a7606147bfe29a7421ff2cb332b07d3cee3a
tree      77047d7f4969712f8dc67e72283edc7900eaebf9  /drivers/md/dm-mpath.c
parent    9438b3e080beccf6022138ea62192d55cc7dc4ed
parent    390020ad2af9ca04844c4f3b1f299ad8746d84c8
Merge branch 'dm-4.12' into dm-4.12-post-merge
Diffstat (limited to 'drivers/md/dm-mpath.c')
 drivers/md/dm-mpath.c | 171
 1 file changed, 78 insertions(+), 93 deletions(-)
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 2950b145443d..52cd3f1608b3 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -90,7 +90,7 @@ struct multipath {
atomic_t pg_init_in_progress; /* Only one pg_init allowed at once */
atomic_t pg_init_count; /* Number of times pg_init called */
- unsigned queue_mode;
+ enum dm_queue_mode queue_mode;
struct mutex work_mutex;
struct work_struct trigger_event;
@@ -111,7 +111,8 @@ typedef int (*action_fn) (struct pgpath *pgpath);
static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
static void trigger_event(struct work_struct *work);
-static void activate_path(struct work_struct *work);
+static void activate_or_offline_path(struct pgpath *pgpath);
+static void activate_path_work(struct work_struct *work);
static void process_queued_bios(struct work_struct *work);
/*-----------------------------------------------
@@ -136,7 +137,7 @@ static struct pgpath *alloc_pgpath(void)
if (pgpath) {
pgpath->is_active = true;
- INIT_DELAYED_WORK(&pgpath->activate_path, activate_path);
+ INIT_DELAYED_WORK(&pgpath->activate_path, activate_path_work);
}
return pgpath;
@@ -297,6 +298,8 @@ static int __pg_init_all_paths(struct multipath *m)
struct pgpath *pgpath;
unsigned long pg_init_delay = 0;
+ lockdep_assert_held(&m->lock);
+
if (atomic_read(&m->pg_init_in_progress) || test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
return 0;
@@ -321,13 +324,16 @@ static int __pg_init_all_paths(struct multipath *m)
return atomic_read(&m->pg_init_in_progress);
}
-static void pg_init_all_paths(struct multipath *m)
+static int pg_init_all_paths(struct multipath *m)
{
+ int ret;
unsigned long flags;
spin_lock_irqsave(&m->lock, flags);
- __pg_init_all_paths(m);
+ ret = __pg_init_all_paths(m);
spin_unlock_irqrestore(&m->lock, flags);
+
+ return ret;
}
static void __switch_pg(struct multipath *m, struct priority_group *pg)
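In the hunks above, pg_init_all_paths() now propagates the count of in-flight initializations from __pg_init_all_paths() to its caller, and the new lockdep_assert_held() makes the double-underscore helper's locking contract checkable at runtime. For illustration only, a minimal userspace sketch of the same locked-wrapper pattern (pthread-based; start_init_all() and friends are hypothetical names, not driver code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int init_in_progress;

/* Caller must hold 'lock' (the kernel version asserts this with
 * lockdep_assert_held()); returns how many initializations are
 * now in flight so the caller can pick a requeue strategy. */
static int __start_init_all(void)
{
	/* ... kick off per-path initialization here ... */
	return init_in_progress;
}

/* Locked wrapper, mirroring pg_init_all_paths(): take the lock,
 * delegate, and propagate the helper's return value. */
static int start_init_all(void)
{
	int ret;

	pthread_mutex_lock(&lock);
	ret = __start_init_all();
	pthread_mutex_unlock(&lock);

	return ret;
}

int main(void)
{
	printf("in progress: %d\n", start_init_all());
	return 0;
}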
@@ -436,45 +442,21 @@ failed:
}
/*
- * Check whether bios must be queued in the device-mapper core rather
- * than here in the target.
- *
- * If m->queue_if_no_path and m->saved_queue_if_no_path hold the
- * same value then we are not between multipath_presuspend()
- * and multipath_resume() calls and we have no need to check
- * for the DMF_NOFLUSH_SUSPENDING flag.
+ * dm_report_EIO() is a macro instead of a function to make pr_debug()
+ * report the function name and line number of the function from which
+ * it has been invoked.
*/
-static bool __must_push_back(struct multipath *m)
-{
- return ((test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) !=
- test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags)) &&
- dm_noflush_suspending(m->ti));
-}
-
-static bool must_push_back_rq(struct multipath *m)
-{
- bool r;
- unsigned long flags;
-
- spin_lock_irqsave(&m->lock, flags);
- r = (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) ||
- __must_push_back(m));
- spin_unlock_irqrestore(&m->lock, flags);
-
- return r;
-}
-
-static bool must_push_back_bio(struct multipath *m)
-{
- bool r;
- unsigned long flags;
-
- spin_lock_irqsave(&m->lock, flags);
- r = __must_push_back(m);
- spin_unlock_irqrestore(&m->lock, flags);
-
- return r;
-}
+#define dm_report_EIO(m) \
+({ \
+ struct mapped_device *md = dm_table_get_md((m)->ti->table); \
+ \
+ pr_debug("%s: returning EIO; QIFNP = %d; SQIFNP = %d; DNFS = %d\n", \
+ dm_device_name(md), \
+ test_bit(MPATHF_QUEUE_IF_NO_PATH, &(m)->flags), \
+ test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &(m)->flags), \
+ dm_noflush_suspending((m)->ti)); \
+ -EIO; \
+})
/*
* Map cloned requests (request-based multipath)
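The open-coded push-back checks are folded into a single macro, and the new comment explains the key trick: a GCC/Clang statement expression expands at each call site, so pr_debug()'s dynamic-debug metadata (function name, line number) identifies the caller rather than one shared helper. A minimal userspace sketch of that pattern, with plain printf() standing in for pr_debug() and report_eio() as a hypothetical stand-in:

#include <stdio.h>

/* A helper function always reports its own name and line... */
static int report_eio_fn(void)
{
	printf("%s:%d: returning EIO\n", __func__, __LINE__);
	return -5; /* -EIO */
}

/* ...whereas a statement-expression macro expands at the call site,
 * so __func__ and __LINE__ name the caller (GCC/Clang extension);
 * the last expression, -5, is the macro's value. */
#define report_eio() \
({ \
	printf("%s:%d: returning EIO\n", __func__, __LINE__); \
	-5; \
})

static int map_request(void)
{
	return report_eio(); /* prints "map_request:<line>: ..." */
}

int main(void)
{
	report_eio_fn(); /* prints "report_eio_fn:<line>: ..." */
	map_request();
	return 0;
}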
@@ -484,11 +466,11 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
struct request **__clone)
{
struct multipath *m = ti->private;
- int r = DM_MAPIO_REQUEUE;
size_t nr_bytes = blk_rq_bytes(rq);
struct pgpath *pgpath;
struct block_device *bdev;
struct dm_mpath_io *mpio = get_mpio(map_context);
+ struct request_queue *q;
struct request *clone;
/* Do we need to select a new pgpath? */
@@ -497,13 +479,14 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
pgpath = choose_pgpath(m, nr_bytes);
if (!pgpath) {
- if (must_push_back_rq(m))
+ if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
return DM_MAPIO_DELAY_REQUEUE;
- return -EIO; /* Failed */
+ return dm_report_EIO(m); /* Failed */
} else if (test_bit(MPATHF_QUEUE_IO, &m->flags) ||
test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
- pg_init_all_paths(m);
- return r;
+ if (pg_init_all_paths(m))
+ return DM_MAPIO_DELAY_REQUEUE;
+ return DM_MAPIO_REQUEUE;
}
memset(mpio, 0, sizeof(*mpio));
@@ -511,13 +494,19 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
mpio->nr_bytes = nr_bytes;
bdev = pgpath->path.dev->bdev;
-
- clone = blk_get_request(bdev_get_queue(bdev),
- rq->cmd_flags | REQ_NOMERGE,
- GFP_ATOMIC);
+ q = bdev_get_queue(bdev);
+ clone = blk_get_request(q, rq->cmd_flags | REQ_NOMERGE, GFP_ATOMIC);
if (IS_ERR(clone)) {
/* EBUSY, ENODEV or EWOULDBLOCK: requeue */
- return r;
+ bool queue_dying = blk_queue_dying(q);
+ DMERR_LIMIT("blk_get_request() returned %ld%s - requeuing",
+ PTR_ERR(clone), queue_dying ? " (path offline)" : "");
+ if (queue_dying) {
+ atomic_inc(&m->pg_init_in_progress);
+ activate_or_offline_path(pgpath);
+ return DM_MAPIO_REQUEUE;
+ }
+ return DM_MAPIO_DELAY_REQUEUE;
}
clone->bio = clone->biotail = NULL;
clone->rq_disk = bdev->bd_disk;
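The rewritten error path distinguishes a dying queue, where retrying against the same path can never succeed, from a transient allocation failure. A sketch of just that decision, with map_result and on_clone_alloc_failure() as illustrative stand-ins for the DM_MAPIO_* codes and the blk_queue_dying() test:

#include <stdbool.h>
#include <stdio.h>

enum map_result { MAP_REQUEUE, MAP_DELAY_REQUEUE };

static enum map_result on_clone_alloc_failure(bool queue_dying)
{
	if (queue_dying) {
		/* The path's queue will never recover: begin offlining
		 * it and requeue immediately so another path is tried. */
		return MAP_REQUEUE;
	}
	/* Transient -EBUSY/-EWOULDBLOCK: back off before retrying. */
	return MAP_DELAY_REQUEUE;
}

int main(void)
{
	printf("dying: %d, transient: %d\n",
	       on_clone_alloc_failure(true), on_clone_alloc_failure(false));
	return 0;
}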
@@ -567,9 +556,9 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
}
if (!pgpath) {
- if (!must_push_back_bio(m))
- return -EIO;
- return DM_MAPIO_REQUEUE;
+ if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
+ return DM_MAPIO_REQUEUE;
+ return dm_report_EIO(m);
}
mpio->pgpath = pgpath;
@@ -640,6 +629,14 @@ static void process_queued_bios(struct work_struct *work)
blk_finish_plug(&plug);
}
+static void assign_bit(bool value, long nr, unsigned long *addr)
+{
+ if (value)
+ set_bit(nr, addr);
+ else
+ clear_bit(nr, addr);
+}
+
/*
* If we run out of usable paths, should we queue I/O or error it?
*/
@@ -649,23 +646,11 @@ static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
unsigned long flags;
spin_lock_irqsave(&m->lock, flags);
-
- if (save_old_value) {
- if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
- set_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
- else
- clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
- } else {
- if (queue_if_no_path)
- set_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
- else
- clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
- }
- if (queue_if_no_path)
- set_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
- else
- clear_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
-
+ assign_bit((save_old_value && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) ||
+ (!save_old_value && queue_if_no_path),
+ MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
+ assign_bit(queue_if_no_path || dm_noflush_suspending(m->ti),
+ MPATHF_QUEUE_IF_NO_PATH, &m->flags);
spin_unlock_irqrestore(&m->lock, flags);
if (!queue_if_no_path) {
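The assign_bit() helper introduced earlier collapses each set/clear ladder into one expression per flag. A minimal userspace sketch of the helper and the rewritten save/restore logic (the _u-suffixed bit ops are stand-ins for the kernel's set_bit()/clear_bit()/test_bit(), and the dm_noflush_suspending() term is omitted here):

#include <stdbool.h>
#include <stdio.h>

#define F_QUEUE  0
#define F_SAVED  1

static void set_bit_u(long nr, unsigned long *addr)   { *addr |=  (1UL << nr); }
static void clear_bit_u(long nr, unsigned long *addr) { *addr &= ~(1UL << nr); }
static bool test_bit_u(long nr, unsigned long *addr)  { return *addr & (1UL << nr); }

/* Same shape as the assign_bit() helper added above. */
static void assign_bit_u(bool value, long nr, unsigned long *addr)
{
	if (value)
		set_bit_u(nr, addr);
	else
		clear_bit_u(nr, addr);
}

int main(void)
{
	unsigned long flags = 0;
	bool queue_if_no_path = true, save_old_value = false;

	/* Mirrors the rewritten queue_if_no_path(): either preserve
	 * the current flag or overwrite both with the new policy. */
	assign_bit_u((save_old_value && test_bit_u(F_QUEUE, &flags)) ||
		     (!save_old_value && queue_if_no_path),
		     F_SAVED, &flags);
	assign_bit_u(queue_if_no_path, F_QUEUE, &flags);

	printf("flags=%#lx\n", flags);
	return 0;
}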
@@ -1438,10 +1423,8 @@ out:
spin_unlock_irqrestore(&m->lock, flags);
}
-static void activate_path(struct work_struct *work)
+static void activate_or_offline_path(struct pgpath *pgpath)
{
- struct pgpath *pgpath =
- container_of(work, struct pgpath, activate_path.work);
struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
if (pgpath->is_active && !blk_queue_dying(q))
@@ -1450,6 +1433,14 @@ static void activate_path(struct work_struct *work)
pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
}
+static void activate_path_work(struct work_struct *work)
+{
+ struct pgpath *pgpath =
+ container_of(work, struct pgpath, activate_path.work);
+
+ activate_or_offline_path(pgpath);
+}
+
static int noretry_error(int error)
{
switch (error) {
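The refactor above splits the old activate_path() into a directly callable core plus a thin workqueue callback that recovers the pgpath via container_of(); that split is what lets the map path call activate_or_offline_path() synchronously. A minimal userspace sketch of the wrapper pattern (work_struct and the other names below are stand-ins, not kernel definitions):

#include <stddef.h>
#include <stdio.h>

struct work_struct { int pending; };

struct pgpath_like {
	const char *name;
	struct work_struct activate_work;
};

/* Userspace equivalent of the kernel's container_of(). */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* The core routine takes the object directly... */
static void activate_or_offline(struct pgpath_like *p)
{
	printf("activating %s\n", p->name);
}

/* ...and the work callback only recovers the object and delegates. */
static void activate_work_fn(struct work_struct *work)
{
	struct pgpath_like *p =
		container_of(work, struct pgpath_like, activate_work);

	activate_or_offline(p);
}

int main(void)
{
	struct pgpath_like p = { .name = "sda", .activate_work = {0} };

	activate_work_fn(&p.activate_work); /* deferred-call path */
	activate_or_offline(&p);            /* new direct-call path */
	return 0;
}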
@@ -1501,12 +1492,9 @@ static int do_end_io(struct multipath *m, struct request *clone,
if (mpio->pgpath)
fail_path(mpio->pgpath);
- if (!atomic_read(&m->nr_valid_paths)) {
- if (!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
- if (!must_push_back_rq(m))
- r = -EIO;
- }
- }
+ if (atomic_read(&m->nr_valid_paths) == 0 &&
+ !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
+ r = dm_report_EIO(m);
return r;
}
@@ -1547,13 +1535,9 @@ static int do_end_io_bio(struct multipath *m, struct bio *clone,
if (mpio->pgpath)
fail_path(mpio->pgpath);
- if (!atomic_read(&m->nr_valid_paths)) {
- if (!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
- if (!must_push_back_bio(m))
- return -EIO;
- return DM_ENDIO_REQUEUE;
- }
- }
+ if (atomic_read(&m->nr_valid_paths) == 0 &&
+ !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
+ return dm_report_EIO(m);
/* Queue for the daemon to resubmit */
dm_bio_restore(get_bio_details_from_bio(clone), clone);
@@ -1619,10 +1603,8 @@ static void multipath_resume(struct dm_target *ti)
unsigned long flags;
spin_lock_irqsave(&m->lock, flags);
- if (test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags))
- set_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
- else
- clear_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
+ assign_bit(test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags),
+ MPATHF_QUEUE_IF_NO_PATH, &m->flags);
spin_unlock_irqrestore(&m->lock, flags);
}
@@ -1682,6 +1664,9 @@ static void multipath_status(struct dm_target *ti, status_type_t type,
case DM_TYPE_MQ_REQUEST_BASED:
DMEMIT("queue_mode mq ");
break;
+ default:
+ WARN_ON_ONCE(true);
+ break;
}
}
}
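Two related hardening changes meet here: queue_mode becomes a real enum dm_queue_mode (first hunk), so the compiler's switch checking can flag unhandled cases, while the new default arm turns an impossible value into a one-time warning instead of silent fall-through. An illustrative userspace sketch (queue_mode_t and WARN_ONCE_STUB are hypothetical stand-ins for the enum and WARN_ON_ONCE()):

#include <stdio.h>

typedef enum { QM_BIO_BASED, QM_REQUEST_BASED, QM_MQ_REQUEST_BASED } queue_mode_t;

#define WARN_ONCE_STUB() \
	do { \
		static int warned; \
		if (!warned) { \
			warned = 1; \
			fprintf(stderr, "%s:%d: unexpected value\n", \
				__func__, __LINE__); \
		} \
	} while (0)

static void emit_status(queue_mode_t mode)
{
	/* With an enum argument, -Wswitch warns if an enumerator is
	 * missing; the default arm catches corrupt values at runtime. */
	switch (mode) {
	case QM_BIO_BASED:
		printf("queue_mode bio\n");
		break;
	case QM_REQUEST_BASED:
		printf("queue_mode rq\n");
		break;
	case QM_MQ_REQUEST_BASED:
		printf("queue_mode mq\n");
		break;
	default:
		WARN_ONCE_STUB(); /* analogous to WARN_ON_ONCE(true) */
		break;
	}
}

int main(void)
{
	emit_status(QM_MQ_REQUEST_BASED);
	emit_status((queue_mode_t)42); /* triggers the default arm */
	return 0;
}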