author     Linus Torvalds <torvalds@linux-foundation.org>  2008-04-29 08:18:03 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2008-04-29 08:18:03 -0700
commit     bd5d435a96837c3495e62eef37cbe4cb728b79ae (patch)
tree       82aacaf5a1d220910c4b0a1088d7d2482c0d9ee0 /drivers/md
parent     fee4b19fb3f28d17c0b9f9ea0668db5275697178 (diff)
parent     ac9fafa1243640349aa481adf473db283a695766 (diff)
Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block
* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
  block: Skip I/O merges when disabled
  block: add large command support
  block: replace sizeof(rq->cmd) with BLK_MAX_CDB
  ide: use blk_rq_init() to initialize the request
  block: use blk_rq_init() to initialize the request
  block: rename and export rq_init()
  block: no need to initialize rq->cmd with blk_get_request
  block: no need to initialize rq->cmd in prepare_flush_fn hook
  block/blk-barrier.c:blk_ordered_cur_seq() mustn't be inline
  block/elevator.c:elv_rq_merge_ok() mustn't be inline
  block: make queue flags non-atomic
  block: add dma alignment and padding support to blk_rq_map_kern
  unexport blk_max_pfn
  ps3disk: Remove superfluous cast
  block: make rq_init() do a full memset()
  relay: fix splice problem
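Several of these commits revolve around request initialisation: rq_init() is renamed to blk_rq_init(), exported, and made to memset() the whole request, so callers no longer have to clear fields such as rq->cmd by hand, which is exactly what the drivers/md hunks below delete. As a hedged illustration of the pattern the converted callers follow (not code taken from this merge; the function name is made up), a driver preparing a request for a SCSI START STOP command would now do roughly:

	#include <linux/blkdev.h>
	#include <scsi/scsi.h>

	/*
	 * Illustrative sketch only.  blk_rq_init() wipes and re-initialises
	 * the whole request, so no per-field memsets (rq->cmd, sense_len,
	 * ...) are needed before filling in the command bytes.
	 */
	static void example_setup_start_stop(struct request_queue *q,
					     struct request *rq)
	{
		blk_rq_init(q, rq);

		rq->cmd_type = REQ_TYPE_BLOCK_PC;
		rq->cmd[0]   = START_STOP;	/* spin the device up */
		rq->cmd[4]   = 1;
		rq->cmd_len  = COMMAND_SIZE(rq->cmd[0]);
	}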
Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/dm-emc.c         | 2
-rw-r--r--  drivers/md/dm-mpath-hp-sw.c | 1
-rw-r--r--  drivers/md/dm-mpath-rdac.c  | 1
-rw-r--r--  drivers/md/dm-table.c       | 7
-rw-r--r--  drivers/md/md.c             | 3
5 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/drivers/md/dm-emc.c b/drivers/md/dm-emc.c
index 6b91b9ab1d41..3ea5ad4b7805 100644
--- a/drivers/md/dm-emc.c
+++ b/drivers/md/dm-emc.c
@@ -110,8 +110,6 @@ static struct request *get_failover_req(struct emc_handler *h,
 	memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
 	rq->sense_len = 0;
 
-	memset(&rq->cmd, 0, BLK_MAX_CDB);
-
 	rq->timeout = EMC_FAILOVER_TIMEOUT;
 	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 	rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
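This deletion, and the identical ones in dm-mpath-hp-sw.c and dm-mpath-rdac.c below, is safe because blk_get_request() now hands back a fully zeroed request: the merged commit "block: make rq_init() do a full memset()" moves the initialiser toward roughly the shape sketched below. This is an approximation of the idea, not the merged block-layer code, so a driver-side memset of rq->cmd only re-clears memory that is already zero.

	#include <linux/blkdev.h>
	#include <linux/list.h>
	#include <linux/string.h>

	/*
	 * Approximate sketch of "block: make rq_init() do a full memset()":
	 * wipe the whole request once, then rebuild only the fields that
	 * must not stay zero.  The real initialiser lives in
	 * block/blk-core.c and sets more fields than shown here.
	 */
	static void sketch_rq_init(struct request_queue *q, struct request *rq)
	{
		memset(rq, 0, sizeof(*rq));	/* covers rq->cmd as well */

		INIT_LIST_HEAD(&rq->queuelist);
		rq->q = q;
		rq->ref_count = 1;
	}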
diff --git a/drivers/md/dm-mpath-hp-sw.c b/drivers/md/dm-mpath-hp-sw.c
index 204bf42c9449..b63a0ab37c53 100644
--- a/drivers/md/dm-mpath-hp-sw.c
+++ b/drivers/md/dm-mpath-hp-sw.c
@@ -137,7 +137,6 @@ static struct request *hp_sw_get_request(struct dm_path *path)
 	req->sense = h->sense;
 	memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE);
 
-	memset(&req->cmd, 0, BLK_MAX_CDB);
 	req->cmd[0] = START_STOP;
 	req->cmd[4] = 1;
 	req->cmd_len = COMMAND_SIZE(req->cmd[0]);
diff --git a/drivers/md/dm-mpath-rdac.c b/drivers/md/dm-mpath-rdac.c
index e04eb5c697fb..95e77734880a 100644
--- a/drivers/md/dm-mpath-rdac.c
+++ b/drivers/md/dm-mpath-rdac.c
@@ -284,7 +284,6 @@ static struct request *get_rdac_req(struct rdac_handler *h,
 		return NULL;
 	}
 
-	memset(&rq->cmd, 0, BLK_MAX_CDB);
 	rq->sense = h->sense;
 	memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
 	rq->sense_len = 0;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 51be53344214..73326e7c54bf 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -873,10 +873,13 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
 	q->max_hw_sectors = t->limits.max_hw_sectors;
 	q->seg_boundary_mask = t->limits.seg_boundary_mask;
 	q->bounce_pfn = t->limits.bounce_pfn;
+	/* XXX: the below will probably go bug. must ensure there can be no
+	 * concurrency on queue_flags, and use the unlocked versions...
+	 */
 	if (t->limits.no_cluster)
-		q->queue_flags &= ~(1 << QUEUE_FLAG_CLUSTER);
+		queue_flag_clear(QUEUE_FLAG_CLUSTER, q);
 	else
-		q->queue_flags |= (1 << QUEUE_FLAG_CLUSTER);
+		queue_flag_set(QUEUE_FLAG_CLUSTER, q);
 
 }
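The dm-table.c hunk above and the md.c hunk below come from the merged commit "block: make queue flags non-atomic": open-coded bit twiddling of q->queue_flags is replaced by queue_flag_set()/queue_flag_clear(), which expect the queue lock to be held, plus *_unlocked variants for contexts where no concurrency is possible. A rough sketch of what such helpers look like, as an illustration rather than the exact definitions shipped in include/linux/blkdev.h, is:

	/*
	 * Rough sketch of the queue-flag helpers this merge introduces.
	 * With queue_flags no longer manipulated atomically, the locked
	 * variants are meant to be called under q->queue_lock, and the
	 * caller must guarantee exclusivity when using the *_unlocked
	 * forms.
	 */
	static inline void queue_flag_set_unlocked(unsigned int flag,
						   struct request_queue *q)
	{
		__set_bit(flag, &q->queue_flags);
	}

	static inline void queue_flag_set(unsigned int flag,
					  struct request_queue *q)
	{
		/* caller should hold q->queue_lock */
		__set_bit(flag, &q->queue_flags);
	}

	static inline void queue_flag_clear(unsigned int flag,
					    struct request_queue *q)
	{
		/* caller should hold q->queue_lock */
		__clear_bit(flag, &q->queue_flags);
	}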
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 6fe4a769c854..bb3e4b1cb773 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -282,7 +282,8 @@ static mddev_t * mddev_find(dev_t unit)
 		kfree(new);
 		return NULL;
 	}
-	set_bit(QUEUE_FLAG_CLUSTER, &new->queue->queue_flags);
+	/* Can be unlocked because the queue is new: no concurrency */
+	queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, new->queue);
 
 	blk_queue_make_request(new->queue, md_fail_request);
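Here the queue has just been allocated and is not yet visible to any other context, so the unlocked variant needs no locking, as the added comment notes. A driver changing the flags of a queue that may already be in use would instead take the queue lock around the locked helper; a hedged sketch of that second pattern (illustrative only, the function name is made up) is:

	#include <linux/blkdev.h>
	#include <linux/spinlock.h>

	/*
	 * Illustration only: flipping a flag on a queue that may already be
	 * live.  Contrast with mddev_find() above, where the brand-new queue
	 * lets queue_flag_set_unlocked() be used without the lock.
	 */
	static void example_mark_queue_clustered(struct request_queue *q)
	{
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		queue_flag_set(QUEUE_FLAG_CLUSTER, q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}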