Diffstat (limited to 'block/blk-sysfs.c')
 block/blk-sysfs.c | 45 +++++++++++++++++++++++----------------------
 1 file changed, 23 insertions(+), 22 deletions(-)
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index e03bedf180ab..370d83c18057 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -91,7 +91,7 @@ static ssize_t queue_ra_show(struct request_queue *q, char *page)
unsigned long ra_kb = q->backing_dev_info->ra_pages <<
(PAGE_SHIFT - 10);
- return queue_var_show(ra_kb, (page));
+ return queue_var_show(ra_kb, page);
}
static ssize_t
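These queue_var_show() hunks (this one and the ones below) are a pure cleanup: the extra parentheses around the page argument are redundant and get dropped. For reference, queue_var_show() itself is just a thin sprintf() wrapper; in blk-sysfs.c of this era it reads roughly:

  static ssize_t
  queue_var_show(unsigned long var, char *page)
  {
          return sprintf(page, "%lu\n", var);
  }
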
@@ -112,28 +112,28 @@ static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
int max_sectors_kb = queue_max_sectors(q) >> 1;
- return queue_var_show(max_sectors_kb, (page));
+ return queue_var_show(max_sectors_kb, page);
}
static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
- return queue_var_show(queue_max_segments(q), (page));
+ return queue_var_show(queue_max_segments(q), page);
}
static ssize_t queue_max_discard_segments_show(struct request_queue *q,
char *page)
{
- return queue_var_show(queue_max_discard_segments(q), (page));
+ return queue_var_show(queue_max_discard_segments(q), page);
}
static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
- return queue_var_show(q->limits.max_integrity_segments, (page));
+ return queue_var_show(q->limits.max_integrity_segments, page);
}
static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
- return queue_var_show(queue_max_segment_size(q), (page));
+ return queue_var_show(queue_max_segment_size(q), page);
}
static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
@@ -261,12 +261,12 @@ static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;
- return queue_var_show(max_hw_sectors_kb, (page));
+ return queue_var_show(max_hw_sectors_kb, page);
}
static ssize_t queue_virt_boundary_mask_show(struct request_queue *q, char *page)
{
- return queue_var_show(q->limits.virt_boundary_mask, (page));
+ return queue_var_show(q->limits.virt_boundary_mask, page);
}
#define QUEUE_SYSFS_BIT_FNS(name, flag, neg) \
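QUEUE_SYSFS_BIT_FNS, whose definition starts in the trailing context above, stamps out a show/store handler pair for a single queue flag. A sketch of the show half it generates (an approximation of the macro body in this file, continuation backslashes omitted); the store half parses the value with queue_var_store() and sets or clears QUEUE_FLAG_##flag accordingly:

  static ssize_t
  queue_show_##name(struct request_queue *q, char *page)
  {
          int bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);

          return queue_var_show(neg ? !bit : bit, page);
  }
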
@@ -866,20 +866,6 @@ int blk_register_queue(struct gendisk *disk)
"%s is registering an already registered queue\n",
kobject_name(&dev->kobj));
- /*
- * SCSI probing may synchronously create and destroy a lot of
- * request_queues for non-existent devices. Shutting down a fully
- * functional queue takes measurable wallclock time as RCU grace
- * periods are involved. To avoid excessive latency in these
- * cases, a request_queue starts out in a degraded mode which is
- * faster to shut down and is made fully functional here as
- * request_queues for non-existent devices never get registered.
- */
- if (!blk_queue_init_done(q)) {
- blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
- percpu_ref_switch_to_percpu(&q->q_usage_counter);
- }
-
blk_queue_update_readahead(q);
ret = blk_trace_init_sysfs(dev);
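The "degraded mode" described in the comment being moved is the percpu ref's atomic mode: blk_alloc_queue() creates q_usage_counter with PERCPU_REF_INIT_ATOMIC, along these lines (error label simplified):

  if (percpu_ref_init(&q->q_usage_counter,
                      blk_queue_usage_counter_release,
                      PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
          goto fail;

Killing a ref that is still in atomic mode is immediate, whereas killing one in percpu mode has to wait out an RCU grace period before the per-CPU counts can be collapsed; that grace period is the wallclock cost the comment refers to.
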
@@ -938,6 +924,21 @@ int blk_register_queue(struct gendisk *disk)
ret = 0;
unlock:
mutex_unlock(&q->sysfs_dir_lock);
+
+ /*
+ * SCSI probing may synchronously create and destroy a lot of
+ * request_queues for non-existent devices. Shutting down a fully
+ * functional queue takes measurable wallclock time as RCU grace
+ * periods are involved. To avoid excessive latency in these
+ * cases, a request_queue starts out in a degraded mode which is
+ * faster to shut down and is made fully functional here as
+ * request_queues for non-existent devices never get registered.
+ */
+ if (!blk_queue_init_done(q)) {
+ blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
+ percpu_ref_switch_to_percpu(&q->q_usage_counter);
+ }
+
return ret;
}
EXPORT_SYMBOL_GPL(blk_register_queue);
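
With the block moved here, the switch to percpu mode happens at the very end of blk_register_queue(), after sysfs_dir_lock has been dropped, while queues that are never registered stay in the cheap-to-kill atomic mode throughout. A minimal, self-contained sketch of that lifecycle, with a hypothetical my_release callback and my_ref object that are not part of this patch:

  #include <linux/percpu-refcount.h>

  /* Hypothetical release callback: runs once the last ref is dropped. */
  static void my_release(struct percpu_ref *ref)
  {
  }

  static struct percpu_ref my_ref;

  static int my_init(void)
  {
          /* Start atomic: slower to take refs, but cheap to kill. */
          int ret = percpu_ref_init(&my_ref, my_release,
                                    PERCPU_REF_INIT_ATOMIC, GFP_KERNEL);
          if (ret)
                  return ret;

          /* Once the object is known to be long-lived, go per-CPU. */
          percpu_ref_switch_to_percpu(&my_ref);
          return 0;
  }

  static void my_exit(void)
  {
          /* From percpu mode this implies an RCU grace period;
           * from atomic mode it completes without one. */
          percpu_ref_kill(&my_ref);
  }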