Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/dm-crypt.c     3
-rw-r--r--  drivers/md/dm-delay.c     8
-rw-r--r--  drivers/md/dm-ioctl.c    24
-rw-r--r--  drivers/md/dm-linear.c    3
-rw-r--r--  drivers/md/dm-log.c       3
-rw-r--r--  drivers/md/dm-mpath.c   111
-rw-r--r--  drivers/md/dm-raid1.c    53
-rw-r--r--  drivers/md/dm-snap.c     34
-rw-r--r--  drivers/md/dm-stripe.c    3
-rw-r--r--  drivers/md/dm-sysfs.c     2
-rw-r--r--  drivers/md/dm-table.c    12
-rw-r--r--  drivers/md/dm-uevent.c    7
-rw-r--r--  drivers/md/dm.c          25
-rw-r--r--  drivers/md/dm.h           4
-rw-r--r--  drivers/md/linear.c      12
-rw-r--r--  drivers/md/md.c           4
-rw-r--r--  drivers/md/multipath.c   20
-rw-r--r--  drivers/md/raid0.c       15
-rw-r--r--  drivers/md/raid1.c       28
-rw-r--r--  drivers/md/raid10.c      28
-rw-r--r--  drivers/md/raid5.c        4
-rw-r--r--  drivers/md/raid5.h        2
22 files changed, 224 insertions, 181 deletions
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index a93637223c8d..3bdbb6115702 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1160,8 +1160,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
cc->start = tmpll;
- if (dm_get_device(ti, argv[3], cc->start, ti->len,
- dm_table_get_mode(ti->table), &cc->dev)) {
+ if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev)) {
ti->error = "Device lookup failed";
goto bad_device;
}
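
This hunk, and the matching ones in dm-delay, dm-linear, dm-log, dm-mpath, dm-raid1, dm-snap and dm-stripe below, move to the reduced dm_get_device() signature introduced in the dm-table.c hunk: the unused start/len arguments are dropped. A minimal sketch of the new calling convention in a target constructor (the target name and context struct are hypothetical; only the dm_get_device() call reflects the real API):

    /* hypothetical target ctr illustrating the new four-argument API */
    static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
    {
            struct example_c *ec = ti->private;

            /* path, mode and result only -- no start/len */
            if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
                              &ec->dev)) {
                    ti->error = "Device lookup failed";
                    return -ENXIO;
            }

            return 0;
    }
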
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index ebe7381f47c8..852052880d7a 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -156,8 +156,8 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad;
}
- if (dm_get_device(ti, argv[0], dc->start_read, ti->len,
- dm_table_get_mode(ti->table), &dc->dev_read)) {
+ if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
+ &dc->dev_read)) {
ti->error = "Device lookup failed";
goto bad;
}
@@ -177,8 +177,8 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad_dev_read;
}
- if (dm_get_device(ti, argv[3], dc->start_write, ti->len,
- dm_table_get_mode(ti->table), &dc->dev_write)) {
+ if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table),
+ &dc->dev_write)) {
ti->error = "Write device lookup failed";
goto bad_dev_read;
}
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 1d669322b27c..d7500e1c26f2 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -285,7 +285,8 @@ retry:
up_write(&_hash_lock);
}
-static int dm_hash_rename(uint32_t cookie, const char *old, const char *new)
+static int dm_hash_rename(uint32_t cookie, uint32_t *flags, const char *old,
+ const char *new)
{
char *new_name, *old_name;
struct hash_cell *hc;
@@ -344,7 +345,8 @@ static int dm_hash_rename(uint32_t cookie, const char *old, const char *new)
dm_table_put(table);
}
- dm_kobject_uevent(hc->md, KOBJ_CHANGE, cookie);
+ if (!dm_kobject_uevent(hc->md, KOBJ_CHANGE, cookie))
+ *flags |= DM_UEVENT_GENERATED_FLAG;
dm_put(hc->md);
up_write(&_hash_lock);
@@ -736,10 +738,10 @@ static int dev_remove(struct dm_ioctl *param, size_t param_size)
__hash_remove(hc);
up_write(&_hash_lock);
- dm_kobject_uevent(md, KOBJ_REMOVE, param->event_nr);
+ if (!dm_kobject_uevent(md, KOBJ_REMOVE, param->event_nr))
+ param->flags |= DM_UEVENT_GENERATED_FLAG;
dm_put(md);
- param->data_size = 0;
return 0;
}
@@ -773,7 +775,9 @@ static int dev_rename(struct dm_ioctl *param, size_t param_size)
return r;
param->data_size = 0;
- return dm_hash_rename(param->event_nr, param->name, new_name);
+
+ return dm_hash_rename(param->event_nr, &param->flags, param->name,
+ new_name);
}
static int dev_set_geometry(struct dm_ioctl *param, size_t param_size)
@@ -897,16 +901,17 @@ static int do_resume(struct dm_ioctl *param)
set_disk_ro(dm_disk(md), 1);
}
- if (dm_suspended_md(md))
+ if (dm_suspended_md(md)) {
r = dm_resume(md);
+ if (!r && !dm_kobject_uevent(md, KOBJ_CHANGE, param->event_nr))
+ param->flags |= DM_UEVENT_GENERATED_FLAG;
+ }
if (old_map)
dm_table_destroy(old_map);
- if (!r) {
- dm_kobject_uevent(md, KOBJ_CHANGE, param->event_nr);
+ if (!r)
r = __dev_status(md, param);
- }
dm_put(md);
return r;
@@ -1476,6 +1481,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
{
/* Always clear this flag */
param->flags &= ~DM_BUFFER_FULL_FLAG;
+ param->flags &= ~DM_UEVENT_GENERATED_FLAG;
/* Ignores parameters */
if (cmd == DM_REMOVE_ALL_CMD ||
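
dm_kobject_uevent() now propagates the kobject_uevent()/kobject_uevent_env() return value (see the dm.c and dm.h hunks below), so the ioctl paths above can tell userspace whether a cookie-carrying uevent was actually generated. The caller-side pattern, relying on kobject_uevent() returning 0 on success:

    /* set the flag only when the event was really emitted, so a
     * userspace waiter keyed on the cookie does not block forever */
    if (!dm_kobject_uevent(md, KOBJ_CHANGE, param->event_nr))
            param->flags |= DM_UEVENT_GENERATED_FLAG;
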
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 82f7d6e6b1ea..9200dbf2391a 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -47,8 +47,7 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
lc->start = tmp;
- if (dm_get_device(ti, argv[0], lc->start, ti->len,
- dm_table_get_mode(ti->table), &lc->dev)) {
+ if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &lc->dev)) {
ti->error = "dm-linear: Device lookup failed";
goto bad;
}
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index 7035582786fb..5a08be0222db 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -543,8 +543,7 @@ static int disk_ctr(struct dm_dirty_log *log, struct dm_target *ti,
return -EINVAL;
}
- r = dm_get_device(ti, argv[0], 0, 0 /* FIXME */,
- FMODE_READ | FMODE_WRITE, &dev);
+ r = dm_get_device(ti, argv[0], FMODE_READ | FMODE_WRITE, &dev);
if (r)
return r;
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index e81345a1d08f..826bce7343b3 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -69,6 +69,7 @@ struct multipath {
struct list_head priority_groups;
unsigned pg_init_required; /* pg_init needs calling? */
unsigned pg_init_in_progress; /* Only one pg_init allowed at once */
+ wait_queue_head_t pg_init_wait; /* Wait for pg_init completion */
unsigned nr_valid_paths; /* Total number of usable paths */
struct pgpath *current_pgpath;
@@ -95,8 +96,6 @@ struct multipath {
mempool_t *mpio_pool;
struct mutex work_mutex;
-
- unsigned suspended; /* Don't create new I/O internally when set. */
};
/*
@@ -202,6 +201,7 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
m->queue_io = 1;
INIT_WORK(&m->process_queued_ios, process_queued_ios);
INIT_WORK(&m->trigger_event, trigger_event);
+ init_waitqueue_head(&m->pg_init_wait);
mutex_init(&m->work_mutex);
m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
if (!m->mpio_pool) {
@@ -235,6 +235,21 @@ static void free_multipath(struct multipath *m)
* Path selection
*-----------------------------------------------*/
+static void __pg_init_all_paths(struct multipath *m)
+{
+ struct pgpath *pgpath;
+
+ m->pg_init_count++;
+ m->pg_init_required = 0;
+ list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
+ /* Skip failed paths */
+ if (!pgpath->is_active)
+ continue;
+ if (queue_work(kmpath_handlerd, &pgpath->activate_path))
+ m->pg_init_in_progress++;
+ }
+}
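
The new helper factors the path-activation loop out of process_queued_ios(); its caller holds m->lock, which is what makes the bare updates of pg_init_count and pg_init_in_progress safe. Condensed caller shape, mirroring the hunk further down:

    spin_lock_irqsave(&m->lock, flags);
    if (m->pg_init_required && !m->pg_init_in_progress && pgpath)
            __pg_init_all_paths(m);         /* lock held across the call */
    spin_unlock_irqrestore(&m->lock, flags);
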
+
static void __switch_pg(struct multipath *m, struct pgpath *pgpath)
{
m->current_pg = pgpath->pg;
@@ -439,7 +454,7 @@ static void process_queued_ios(struct work_struct *work)
{
struct multipath *m =
container_of(work, struct multipath, process_queued_ios);
- struct pgpath *pgpath = NULL, *tmp;
+ struct pgpath *pgpath = NULL;
unsigned must_queue = 1;
unsigned long flags;
@@ -457,14 +472,9 @@ static void process_queued_ios(struct work_struct *work)
(!pgpath && !m->queue_if_no_path))
must_queue = 0;
- if (m->pg_init_required && !m->pg_init_in_progress && pgpath) {
- m->pg_init_count++;
- m->pg_init_required = 0;
- list_for_each_entry(tmp, &pgpath->pg->pgpaths, list) {
- if (queue_work(kmpath_handlerd, &tmp->activate_path))
- m->pg_init_in_progress++;
- }
- }
+ if (m->pg_init_required && !m->pg_init_in_progress && pgpath)
+ __pg_init_all_paths(m);
+
out:
spin_unlock_irqrestore(&m->lock, flags);
if (!must_queue)
@@ -597,8 +607,8 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
if (!p)
return ERR_PTR(-ENOMEM);
- r = dm_get_device(ti, shift(as), ti->begin, ti->len,
- dm_table_get_mode(ti->table), &p->path.dev);
+ r = dm_get_device(ti, shift(as), dm_table_get_mode(ti->table),
+ &p->path.dev);
if (r) {
ti->error = "error getting device";
goto bad;
@@ -890,9 +900,34 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
return r;
}
-static void flush_multipath_work(void)
+static void multipath_wait_for_pg_init_completion(struct multipath *m)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ unsigned long flags;
+
+ add_wait_queue(&m->pg_init_wait, &wait);
+
+ while (1) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+
+ spin_lock_irqsave(&m->lock, flags);
+ if (!m->pg_init_in_progress) {
+ spin_unlock_irqrestore(&m->lock, flags);
+ break;
+ }
+ spin_unlock_irqrestore(&m->lock, flags);
+
+ io_schedule();
+ }
+ set_current_state(TASK_RUNNING);
+
+ remove_wait_queue(&m->pg_init_wait, &wait);
+}
+
+static void flush_multipath_work(struct multipath *m)
{
flush_workqueue(kmpath_handlerd);
+ multipath_wait_for_pg_init_completion(m);
flush_workqueue(kmultipathd);
flush_scheduled_work();
}
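
The wait loop is open-coded so the condition can be sampled under m->lock and so the sleep is charged as I/O wait via io_schedule(); the matching wake_up(&m->pg_init_wait) is added to pg_init_done() below. Ignoring the locking protocol, it behaves like the standard helper:

    /* rough equivalent, minus the m->lock sampling the real loop does */
    wait_event(m->pg_init_wait, !m->pg_init_in_progress);
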
@@ -901,7 +936,7 @@ static void multipath_dtr(struct dm_target *ti)
{
struct multipath *m = ti->private;
- flush_multipath_work();
+ flush_multipath_work(m);
free_multipath(m);
}
@@ -1128,8 +1163,7 @@ static int pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
static void pg_init_done(void *data, int errors)
{
- struct dm_path *path = data;
- struct pgpath *pgpath = path_to_pgpath(path);
+ struct pgpath *pgpath = data;
struct priority_group *pg = pgpath->pg;
struct multipath *m = pg->m;
unsigned long flags;
@@ -1143,8 +1177,8 @@ static void pg_init_done(void *data, int errors)
errors = 0;
break;
}
- DMERR("Cannot failover device because scsi_dh_%s was not "
- "loaded.", m->hw_handler_name);
+ DMERR("Could not failover the device: Handler scsi_dh_%s "
+ "Error %d.", m->hw_handler_name, errors);
/*
* Fail path for now, so we do not ping pong
*/
@@ -1181,14 +1215,24 @@ static void pg_init_done(void *data, int errors)
m->current_pgpath = NULL;
m->current_pg = NULL;
}
- } else if (!m->pg_init_required) {
- m->queue_io = 0;
+ } else if (!m->pg_init_required)
pg->bypassed = 0;
- }
- m->pg_init_in_progress--;
- if (!m->pg_init_in_progress)
- queue_work(kmultipathd, &m->process_queued_ios);
+ if (--m->pg_init_in_progress)
+ /* Activations of other paths are still ongoing */
+ goto out;
+
+ if (!m->pg_init_required)
+ m->queue_io = 0;
+
+ queue_work(kmultipathd, &m->process_queued_ios);
+
+ /*
+ * Wake up any thread waiting to suspend.
+ */
+ wake_up(&m->pg_init_wait);
+
+out:
spin_unlock_irqrestore(&m->lock, flags);
}
@@ -1198,7 +1242,7 @@ static void activate_path(struct work_struct *work)
container_of(work, struct pgpath, activate_path);
scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
- pg_init_done, &pgpath->path);
+ pg_init_done, pgpath);
}
/*
@@ -1276,8 +1320,7 @@ static void multipath_postsuspend(struct dm_target *ti)
struct multipath *m = ti->private;
mutex_lock(&m->work_mutex);
- m->suspended = 1;
- flush_multipath_work();
+ flush_multipath_work(m);
mutex_unlock(&m->work_mutex);
}
@@ -1289,10 +1332,6 @@ static void multipath_resume(struct dm_target *ti)
struct multipath *m = (struct multipath *) ti->private;
unsigned long flags;
- mutex_lock(&m->work_mutex);
- m->suspended = 0;
- mutex_unlock(&m->work_mutex);
-
spin_lock_irqsave(&m->lock, flags);
m->queue_if_no_path = m->saved_queue_if_no_path;
spin_unlock_irqrestore(&m->lock, flags);
@@ -1428,11 +1467,6 @@ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
mutex_lock(&m->work_mutex);
- if (m->suspended) {
- r = -EBUSY;
- goto out;
- }
-
if (dm_suspended(ti)) {
r = -EBUSY;
goto out;
@@ -1471,8 +1505,7 @@ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
goto out;
}
- r = dm_get_device(ti, argv[1], ti->begin, ti->len,
- dm_table_get_mode(ti->table), &dev);
+ r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev);
if (r) {
DMWARN("message: error getting device %s",
argv[1]);
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 6c1046df81f6..ddda531723dc 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -465,9 +465,17 @@ static void map_region(struct dm_io_region *io, struct mirror *m,
static void hold_bio(struct mirror_set *ms, struct bio *bio)
{
/*
- * If device is suspended, complete the bio.
+ * Lock is required to avoid race condition during suspend
+ * process.
*/
+ spin_lock_irq(&ms->lock);
+
if (atomic_read(&ms->suspend)) {
+ spin_unlock_irq(&ms->lock);
+
+ /*
+ * If device is suspended, complete the bio.
+ */
if (dm_noflush_suspending(ms->ti))
bio_endio(bio, DM_ENDIO_REQUEUE);
else
@@ -478,7 +486,6 @@ static void hold_bio(struct mirror_set *ms, struct bio *bio)
/*
* Hold bio until the suspend is complete.
*/
- spin_lock_irq(&ms->lock);
bio_list_add(&ms->holds, bio);
spin_unlock_irq(&ms->lock);
}
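
Testing ms->suspend under ms->lock closes a race with the reworked mirror_presuspend() below: presuspend sets the flag and then drains ms->holds under the same lock, so a bio can no longer observe suspend==0 and still slip onto the hold list after the drain. The check and the insertion now form one critical section:

    spin_lock_irq(&ms->lock);
    if (atomic_read(&ms->suspend)) {
            spin_unlock_irq(&ms->lock);
            /* complete (or requeue, if noflush-suspending) the bio */
    } else {
            bio_list_add(&ms->holds, bio);
            spin_unlock_irq(&ms->lock);
    }
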
@@ -737,9 +744,12 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
dm_rh_delay(ms->rh, bio);
while ((bio = bio_list_pop(&nosync))) {
- if (unlikely(ms->leg_failure) && errors_handled(ms))
- hold_bio(ms, bio);
- else {
+ if (unlikely(ms->leg_failure) && errors_handled(ms)) {
+ spin_lock_irq(&ms->lock);
+ bio_list_add(&ms->failures, bio);
+ spin_unlock_irq(&ms->lock);
+ wakeup_mirrord(ms);
+ } else {
map_bio(get_default_mirror(ms), bio);
generic_make_request(bio);
}
@@ -917,8 +927,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
return -EINVAL;
}
- if (dm_get_device(ti, argv[0], offset, ti->len,
- dm_table_get_mode(ti->table),
+ if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
&ms->mirror[mirror].dev)) {
ti->error = "Device lookup failure";
return -ENXIO;
@@ -1259,6 +1268,20 @@ static void mirror_presuspend(struct dm_target *ti)
atomic_set(&ms->suspend, 1);
/*
+ * Process bios in the hold list to start recovery waiting
+ * for bios in the hold list. After the process, no bio has
+ * a chance to be added in the hold list because ms->suspend
+ * is set.
+ */
+ spin_lock_irq(&ms->lock);
+ holds = ms->holds;
+ bio_list_init(&ms->holds);
+ spin_unlock_irq(&ms->lock);
+
+ while ((bio = bio_list_pop(&holds)))
+ hold_bio(ms, bio);
+
+ /*
* We must finish up all the work that we've
* generated (i.e. recovery work).
*/
@@ -1278,22 +1301,6 @@ static void mirror_presuspend(struct dm_target *ti)
* we know that all of our I/O has been pushed.
*/
flush_workqueue(ms->kmirrord_wq);
-
- /*
- * Now set ms->suspend is set and the workqueue flushed, no more
- * entries can be added to ms->hold list, so process it.
- *
- * Bios can still arrive concurrently with or after this
- * presuspend function, but they cannot join the hold list
- * because ms->suspend is set.
- */
- spin_lock_irq(&ms->lock);
- holds = ms->holds;
- bio_list_init(&ms->holds);
- spin_unlock_irq(&ms->lock);
-
- while ((bio = bio_list_pop(&holds)))
- hold_bio(ms, bio);
}
static void mirror_postsuspend(struct dm_target *ti)
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index ee8eb283650d..54853773510c 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -83,10 +83,10 @@ struct dm_snapshot {
/* Whether or not owning mapped_device is suspended */
int suspended;
- mempool_t *pending_pool;
-
atomic_t pending_exceptions_count;
+ mempool_t *pending_pool;
+
struct dm_exception_table pending;
struct dm_exception_table complete;
@@ -96,6 +96,11 @@ struct dm_snapshot {
*/
spinlock_t pe_lock;
+ /* Chunks with outstanding reads */
+ spinlock_t tracked_chunk_lock;
+ mempool_t *tracked_chunk_pool;
+ struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];
+
/* The on disk metadata handler */
struct dm_exception_store *store;
@@ -105,10 +110,12 @@ struct dm_snapshot {
struct bio_list queued_bios;
struct work_struct queued_bios_work;
- /* Chunks with outstanding reads */
- mempool_t *tracked_chunk_pool;
- spinlock_t tracked_chunk_lock;
- struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];
+ /* Wait for events based on state_bits */
+ unsigned long state_bits;
+
+ /* Range of chunks currently being merged. */
+ chunk_t first_merging_chunk;
+ int num_merging_chunks;
/*
* The merge operation failed if this flag is set.
@@ -125,13 +132,6 @@ struct dm_snapshot {
*/
int merge_failed;
- /* Wait for events based on state_bits */
- unsigned long state_bits;
-
- /* Range of chunks currently being merged. */
- chunk_t first_merging_chunk;
- int num_merging_chunks;
-
/*
* Incoming bios that overlap with chunks being merged must wait
* for them to be committed.
@@ -1081,8 +1081,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
argv++;
argc--;
- r = dm_get_device(ti, cow_path, 0, 0,
- FMODE_READ | FMODE_WRITE, &s->cow);
+ r = dm_get_device(ti, cow_path, FMODE_READ | FMODE_WRITE, &s->cow);
if (r) {
ti->error = "Cannot get COW device";
goto bad_cow;
@@ -1098,7 +1097,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
argv += args_used;
argc -= args_used;
- r = dm_get_device(ti, origin_path, 0, ti->len, origin_mode, &s->origin);
+ r = dm_get_device(ti, origin_path, origin_mode, &s->origin);
if (r) {
ti->error = "Cannot get origin device";
goto bad_origin;
@@ -2100,8 +2099,7 @@ static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
return -EINVAL;
}
- r = dm_get_device(ti, argv[0], 0, ti->len,
- dm_table_get_mode(ti->table), &dev);
+ r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dev);
if (r) {
ti->error = "Cannot get target device";
return r;
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index bd58703ee8f6..e610725db766 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -80,8 +80,7 @@ static int get_stripe(struct dm_target *ti, struct stripe_c *sc,
if (sscanf(argv[1], "%llu", &start) != 1)
return -EINVAL;
- if (dm_get_device(ti, argv[0], start, sc->stripe_width,
- dm_table_get_mode(ti->table),
+ if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
&sc->stripe[stripe].dev))
return -ENXIO;
diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
index f91b40942e07..84d2b91e4efb 100644
--- a/drivers/md/dm-sysfs.c
+++ b/drivers/md/dm-sysfs.c
@@ -75,7 +75,7 @@ static struct attribute *dm_attrs[] = {
NULL,
};
-static struct sysfs_ops dm_sysfs_ops = {
+static const struct sysfs_ops dm_sysfs_ops = {
.show = dm_attr_show,
};
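
Constifying the ops table matches the 2.6.34 driver-core change that made struct kobj_type carry a const struct sysfs_ops pointer. A sketch of the wiring (dm_ktype here is a stand-in for the ktype dm-sysfs.c actually defines):

    static const struct sysfs_ops dm_sysfs_ops = {
            .show = dm_attr_show,
    };

    static struct kobj_type dm_ktype = {
            .sysfs_ops      = &dm_sysfs_ops,        /* const-qualified member */
            .default_attrs  = dm_attrs,
    };
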
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 4b22feb01a0c..9924ea23032d 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -429,8 +429,7 @@ static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
* it's already present.
*/
static int __table_get_device(struct dm_table *t, struct dm_target *ti,
- const char *path, sector_t start, sector_t len,
- fmode_t mode, struct dm_dev **result)
+ const char *path, fmode_t mode, struct dm_dev **result)
{
int r;
dev_t uninitialized_var(dev);
@@ -527,11 +526,10 @@ int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
}
EXPORT_SYMBOL_GPL(dm_set_device_limits);
-int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
- sector_t len, fmode_t mode, struct dm_dev **result)
+int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
+ struct dm_dev **result)
{
- return __table_get_device(ti->table, ti, path,
- start, len, mode, result);
+ return __table_get_device(ti->table, ti, path, mode, result);
}
@@ -1231,8 +1229,6 @@ void dm_table_unplug_all(struct dm_table *t)
struct mapped_device *dm_table_get_md(struct dm_table *t)
{
- dm_get(t->md);
-
return t->md;
}
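
dm_table_get_md() no longer takes a reference on the mapped_device, so callers must stop dropping one; the dm-uevent.c and dm.c hunks below delete the now-stale dm_put() calls accordingly. The new convention from the caller's side:

    /* md is borrowed from the table: valid while the table reference
     * is held, and must NOT be released with dm_put() */
    struct mapped_device *md = dm_table_get_md(t);
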
diff --git a/drivers/md/dm-uevent.c b/drivers/md/dm-uevent.c
index c7c555a8c7b2..6b1e3b61b25e 100644
--- a/drivers/md/dm-uevent.c
+++ b/drivers/md/dm-uevent.c
@@ -187,7 +187,7 @@ void dm_path_uevent(enum dm_uevent_type event_type, struct dm_target *ti,
if (event_type >= ARRAY_SIZE(_dm_uevent_type_names)) {
DMERR("%s: Invalid event_type %d", __func__, event_type);
- goto out;
+ return;
}
event = dm_build_path_uevent(md, ti,
@@ -195,12 +195,9 @@ void dm_path_uevent(enum dm_uevent_type event_type, struct dm_target *ti,
_dm_uevent_type_names[event_type].name,
path, nr_valid_paths);
if (IS_ERR(event))
- goto out;
+ return;
dm_uevent_add(md, &event->elist);
-
-out:
- dm_put(md);
}
EXPORT_SYMBOL_GPL(dm_path_uevent);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index aa4e2aa86d49..d21e1284604f 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -635,8 +635,10 @@ static void dec_pending(struct dm_io *io, int error)
if (!md->barrier_error && io_error != -EOPNOTSUPP)
md->barrier_error = io_error;
end_io_acct(io);
+ free_io(md, io);
} else {
end_io_acct(io);
+ free_io(md, io);
if (io_error != DM_ENDIO_REQUEUE) {
trace_block_bio_complete(md->queue, bio);
@@ -644,8 +646,6 @@ static void dec_pending(struct dm_io *io, int error)
bio_endio(bio, io_error);
}
}
-
- free_io(md, io);
}
}
@@ -2618,18 +2618,19 @@ out:
/*-----------------------------------------------------------------
* Event notification.
*---------------------------------------------------------------*/
-void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
+int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
unsigned cookie)
{
char udev_cookie[DM_COOKIE_LENGTH];
char *envp[] = { udev_cookie, NULL };
if (!cookie)
- kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
+ return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
else {
snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
DM_COOKIE_ENV_VAR_NAME, cookie);
- kobject_uevent_env(&disk_to_dev(md->disk)->kobj, action, envp);
+ return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
+ action, envp);
}
}
@@ -2699,23 +2700,13 @@ int dm_suspended_md(struct mapped_device *md)
int dm_suspended(struct dm_target *ti)
{
- struct mapped_device *md = dm_table_get_md(ti->table);
- int r = dm_suspended_md(md);
-
- dm_put(md);
-
- return r;
+ return dm_suspended_md(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_suspended);
int dm_noflush_suspending(struct dm_target *ti)
{
- struct mapped_device *md = dm_table_get_md(ti->table);
- int r = __noflush_suspending(md);
-
- dm_put(md);
-
- return r;
+ return __noflush_suspending(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 8dadaa5bc396..bad1724d4869 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -125,8 +125,8 @@ void dm_stripe_exit(void);
int dm_open_count(struct mapped_device *md);
int dm_lock_for_deletion(struct mapped_device *md);
-void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
- unsigned cookie);
+int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
+ unsigned cookie);
int dm_io_init(void);
void dm_io_exit(void);
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 00435bd20699..bb2a23159b21 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -172,12 +172,14 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
disk_stack_limits(mddev->gendisk, rdev->bdev,
rdev->data_offset << 9);
/* as we don't honour merge_bvec_fn, we must never risk
- * violating it, so limit ->max_sector to one PAGE, as
- * a one page request is never in violation.
+ * violating it, so limit max_segments to 1 lying within
+ * a single page.
*/
- if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
- queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
- blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
+ if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
+ blk_queue_max_segments(mddev->queue, 1);
+ blk_queue_segment_boundary(mddev->queue,
+ PAGE_CACHE_SIZE - 1);
+ }
conf->array_sectors += rdev->sectors;
cnt++;
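
The old code clamped max_sectors to one page so that requests could never violate a component device's merge_bvec_fn; the replacement gives the same one-page guarantee by allowing a single segment that may not cross a page boundary. The identical substitution recurs in multipath.c, raid0.c, raid1.c and raid10.c below. The pattern in isolation:

    /* one segment, confined to one page: no request can span more than
     * a page, so a stacked merge_bvec_fn can never be violated */
    blk_queue_max_segments(q, 1);
    blk_queue_segment_boundary(q, PAGE_CACHE_SIZE - 1);
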
diff --git a/drivers/md/md.c b/drivers/md/md.c
index a20a71e5efd3..fdc1890b6ac5 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2642,7 +2642,7 @@ static void rdev_free(struct kobject *ko)
mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
kfree(rdev);
}
-static struct sysfs_ops rdev_sysfs_ops = {
+static const struct sysfs_ops rdev_sysfs_ops = {
.show = rdev_attr_show,
.store = rdev_attr_store,
};
@@ -4059,7 +4059,7 @@ static void md_free(struct kobject *ko)
kfree(mddev);
}
-static struct sysfs_ops md_sysfs_ops = {
+static const struct sysfs_ops md_sysfs_ops = {
.show = md_attr_show,
.store = md_attr_store,
};
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 32a662fc55c9..5558ebc705c8 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -301,14 +301,16 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
rdev->data_offset << 9);
/* as we don't honour merge_bvec_fn, we must never risk
- * violating it, so limit ->max_sector to one PAGE, as
- * a one page request is never in violation.
+ * violating it, so limit ->max_segments to one, lying
+ * within a single page.
* (Note: it is very unlikely that a device with
* merge_bvec_fn will be involved in multipath.)
*/
- if (q->merge_bvec_fn &&
- queue_max_sectors(q) > (PAGE_SIZE>>9))
- blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
+ if (q->merge_bvec_fn) {
+ blk_queue_max_segments(mddev->queue, 1);
+ blk_queue_segment_boundary(mddev->queue,
+ PAGE_CACHE_SIZE - 1);
+ }
conf->working_disks++;
mddev->degraded--;
@@ -476,9 +478,11 @@ static int multipath_run (mddev_t *mddev)
/* as we don't honour merge_bvec_fn, we must never risk
* violating it, not that we ever expect a device with
* a merge_bvec_fn to be involved in multipath */
- if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
- queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
- blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
+ if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
+ blk_queue_max_segments(mddev->queue, 1);
+ blk_queue_segment_boundary(mddev->queue,
+ PAGE_CACHE_SIZE - 1);
+ }
if (!test_bit(Faulty, &rdev->flags))
conf->working_disks++;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 77605cdceaf1..377cf2a3c333 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -176,14 +176,15 @@ static int create_strip_zones(mddev_t *mddev)
disk_stack_limits(mddev->gendisk, rdev1->bdev,
rdev1->data_offset << 9);
/* as we don't honour merge_bvec_fn, we must never risk
- * violating it, so limit ->max_sector to one PAGE, as
- * a one page request is never in violation.
+ * violating it, so limit ->max_segments to 1, lying within
+ * a single page.
*/
- if (rdev1->bdev->bd_disk->queue->merge_bvec_fn &&
- queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
- blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
-
+ if (rdev1->bdev->bd_disk->queue->merge_bvec_fn) {
+ blk_queue_max_segments(mddev->queue, 1);
+ blk_queue_segment_boundary(mddev->queue,
+ PAGE_CACHE_SIZE - 1);
+ }
if (!smallest || (rdev1->sectors < smallest->sectors))
smallest = rdev1;
cnt++;
@@ -325,7 +326,7 @@ static int raid0_run(mddev_t *mddev)
}
if (md_check_no_bitmap(mddev))
return -EINVAL;
- blk_queue_max_sectors(mddev->queue, mddev->chunk_sectors);
+ blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
mddev->queue->queue_lock = &mddev->queue->__queue_lock;
ret = create_strip_zones(mddev);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 859bd3ffe435..f741f77eeb2b 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1152,13 +1152,17 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
disk_stack_limits(mddev->gendisk, rdev->bdev,
rdev->data_offset << 9);
- /* as we don't honour merge_bvec_fn, we must never risk
- * violating it, so limit ->max_sector to one PAGE, as
- * a one page request is never in violation.
+ /* as we don't honour merge_bvec_fn, we must
+ * never risk violating it, so limit
+ * ->max_segments to one lying within a single
+ * page, as a one page request is never in
+ * violation.
*/
- if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
- queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
- blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
+ if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
+ blk_queue_max_segments(mddev->queue, 1);
+ blk_queue_segment_boundary(mddev->queue,
+ PAGE_CACHE_SIZE - 1);
+ }
p->head_position = 0;
rdev->raid_disk = mirror;
@@ -2098,12 +2102,14 @@ static int run(mddev_t *mddev)
disk_stack_limits(mddev->gendisk, rdev->bdev,
rdev->data_offset << 9);
/* as we don't honour merge_bvec_fn, we must never risk
- * violating it, so limit ->max_sector to one PAGE, as
- * a one page request is never in violation.
+ * violating it, so limit ->max_segments to 1 lying within
+ * a single page, as a one page request is never in violation.
*/
- if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
- queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
- blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
+ if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
+ blk_queue_max_segments(mddev->queue, 1);
+ blk_queue_segment_boundary(mddev->queue,
+ PAGE_CACHE_SIZE - 1);
+ }
}
mddev->degraded = 0;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index d119b7b75e71..b4ba41ecbd20 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1155,13 +1155,17 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
disk_stack_limits(mddev->gendisk, rdev->bdev,
rdev->data_offset << 9);
- /* as we don't honour merge_bvec_fn, we must never risk
- * violating it, so limit ->max_sector to one PAGE, as
- * a one page request is never in violation.
+ /* as we don't honour merge_bvec_fn, we must
+ * never risk violating it, so limit
+ * ->max_segments to one lying within a single
+ * page, as a one page request is never in
+ * violation.
*/
- if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
- queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
- blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
+ if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
+ blk_queue_max_segments(mddev->queue, 1);
+ blk_queue_segment_boundary(mddev->queue,
+ PAGE_CACHE_SIZE - 1);
+ }
p->head_position = 0;
rdev->raid_disk = mirror;
@@ -2255,12 +2259,14 @@ static int run(mddev_t *mddev)
disk_stack_limits(mddev->gendisk, rdev->bdev,
rdev->data_offset << 9);
/* as we don't honour merge_bvec_fn, we must never risk
- * violating it, so limit ->max_sector to one PAGE, as
- * a one page request is never in violation.
+ * violating it, so limit max_segments to 1 lying
+ * within a single page.
*/
- if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
- queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
- blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
+ if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
+ blk_queue_max_segments(mddev->queue, 1);
+ blk_queue_segment_boundary(mddev->queue,
+ PAGE_CACHE_SIZE - 1);
+ }
disk->head_position = 0;
}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index ceb24afdc147..70ffbd071b2e 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3739,7 +3739,7 @@ static int bio_fits_rdev(struct bio *bi)
if ((bi->bi_size>>9) > queue_max_sectors(q))
return 0;
blk_recount_segments(q, bi);
- if (bi->bi_phys_segments > queue_max_phys_segments(q))
+ if (bi->bi_phys_segments > queue_max_segments(q))
return 0;
if (q->merge_bvec_fn)
@@ -4680,7 +4680,7 @@ static int raid5_alloc_percpu(raid5_conf_t *conf)
{
unsigned long cpu;
struct page *spare_page;
- struct raid5_percpu *allcpus;
+ struct raid5_percpu __percpu *allcpus;
void *scribble;
int err;
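
__percpu is a sparse address-space annotation with no runtime effect: it marks pointers that must only be dereferenced through the per-cpu accessors. Typical usage matching the annotated declaration above:

    /* alloc_percpu() returns a __percpu pointer ... */
    struct raid5_percpu __percpu *allcpus = alloc_percpu(struct raid5_percpu);

    /* ... which per_cpu_ptr() turns into an ordinary pointer for one CPU */
    struct raid5_percpu *p = per_cpu_ptr(allcpus, cpu);
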
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index dd708359b451..0f86f5e36724 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -405,7 +405,7 @@ struct raid5_private_data {
* lists and performing address
* conversions
*/
- } *percpu;
+ } __percpu *percpu;
size_t scribble_len; /* size of scribble region must be
* associated with conf to handle
* cpu hotplug while reshaping