Diffstat (limited to 'block')
-rw-r--r--   block/bfq-iosched.c    8
-rw-r--r--   block/blk-merge.c     14
-rw-r--r--   block/blk-mq.c         2
-rw-r--r--   block/blk-sysfs.c     30
4 files changed, 50 insertions(+), 4 deletions(-)
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 3d64e0406675..b85a4ab8b9db 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -5893,7 +5893,7 @@ static unsigned int bfq_update_depths(struct bfq_data *bfqd,
return min_shallow;
}
-static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index)
+static void bfq_depth_updated(struct blk_mq_hw_ctx *hctx)
{
struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
struct blk_mq_tags *tags = hctx->sched_tags;
@@ -5901,6 +5901,11 @@ static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index)
min_shallow = bfq_update_depths(bfqd, &tags->bitmap_tags);
sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, min_shallow);
+}
+
+static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index)
+{
+ bfq_depth_updated(hctx);
return 0;
}
@@ -6324,6 +6329,7 @@ static struct elevator_type iosched_bfq_mq = {
.requests_merged = bfq_requests_merged,
.request_merged = bfq_request_merged,
.has_work = bfq_has_work,
+ .depth_updated = bfq_depth_updated,
.init_hctx = bfq_init_hctx,
.init_sched = bfq_init_queue,
.exit_sched = bfq_exit_queue,
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 247b17f2a0f6..21e87a714a73 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -474,9 +474,21 @@ static unsigned blk_bvec_map_sg(struct request_queue *q,
while (nbytes > 0) {
unsigned offset = bvec->bv_offset + total;
unsigned len = min(get_max_segment_size(q, offset), nbytes);
+ struct page *page = bvec->bv_page;
+
+ /*
+ * Unfortunately a fair number of drivers barf on scatterlists
+ * that have an offset larger than PAGE_SIZE, despite other
+ * subsystems dealing with that invariant just fine. For now
+ * stick to the legacy format where we never present those from
+ * the block layer, but the code below should be removed once
+ * these offenders (mostly MMC/SD drivers) are fixed.
+ */
+ page += (offset >> PAGE_SHIFT);
+ offset &= ~PAGE_MASK;
*sg = blk_next_sg(sg, sglist);
- sg_set_page(*sg, bvec->bv_page, len, offset);
+ sg_set_page(*sg, page, len, offset);
total += len;
nbytes -= len;
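
Note: a minimal standalone sketch of the offset normalization the two added lines perform, advancing the page by whole pages and keeping the remainder as the in-page offset so it always stays below PAGE_SIZE. This is userspace illustration only; the PAGE_SHIFT value and the page_index stand-in for struct page * are assumptions.

/* Mirror of: page += (offset >> PAGE_SHIFT); offset &= ~PAGE_MASK; */
#include <stdio.h>

#define PAGE_SHIFT 12                        /* assumed 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long page_index = 0;        /* stands in for struct page * */
	unsigned long offset = 9000;         /* an offset larger than one page */

	page_index += offset >> PAGE_SHIFT;  /* 9000 >> 12 = 2 whole pages */
	offset &= ~PAGE_MASK;                /* 9000 & 4095 = 808 bytes left */

	printf("page +%lu, offset %lu\n", page_index, offset);
	return 0;
}
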
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 9516304a38ee..fc60ed7e940e 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -3135,6 +3135,8 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
}
if (ret)
break;
+ if (q->elevator && q->elevator->type->ops.depth_updated)
+ q->elevator->type->ops.depth_updated(hctx);
}
if (!ret)
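
Note: a standalone sketch of the optional-hook pattern this call site uses: the scheduler's depth_updated callback is invoked only when the elevator provides one, so schedulers that do not care about depth changes need no stub. All names below are simplified stand-ins for illustration, not the real blk-mq definitions.

#include <stdio.h>

struct hw_ctx { unsigned int queue_depth; };

struct sched_ops {
	void (*depth_updated)(struct hw_ctx *hctx);   /* optional hook */
};

static void my_depth_updated(struct hw_ctx *hctx)
{
	printf("recompute shallow depths for queue_depth=%u\n",
	       hctx->queue_depth);
}

static void update_nr_requests(struct hw_ctx *hctx, struct sched_ops *ops,
			       unsigned int nr)
{
	hctx->queue_depth = nr;
	if (ops && ops->depth_updated)       /* guard: the hook may be NULL */
		ops->depth_updated(hctx);
}

int main(void)
{
	struct hw_ctx hctx = { .queue_depth = 64 };
	struct sched_ops ops = { .depth_updated = my_depth_updated };

	update_nr_requests(&hctx, &ops, 128);
	return 0;
}
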
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 422327089e0f..a16a02c52a85 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -728,7 +728,7 @@ static struct queue_sysfs_entry throtl_sample_time_entry = {
};
#endif
-static struct attribute *default_attrs[] = {
+static struct attribute *queue_attrs[] = {
&queue_requests_entry.attr,
&queue_ra_entry.attr,
&queue_max_hw_sectors_entry.attr,
@@ -770,6 +770,25 @@ static struct attribute *default_attrs[] = {
NULL,
};
+static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
+ int n)
+{
+ struct request_queue *q =
+ container_of(kobj, struct request_queue, kobj);
+
+ if (attr == &queue_io_timeout_entry.attr &&
+ (!q->mq_ops || !q->mq_ops->timeout))
+ return 0;
+
+ return attr->mode;
+}
+
+static struct attribute_group queue_attr_group = {
+ .attrs = queue_attrs,
+ .is_visible = queue_attr_visible,
+};
+
+
#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)
static ssize_t
@@ -890,7 +909,6 @@ static const struct sysfs_ops queue_sysfs_ops = {
struct kobj_type blk_queue_ktype = {
.sysfs_ops = &queue_sysfs_ops,
- .default_attrs = default_attrs,
.release = blk_release_queue,
};
@@ -939,6 +957,14 @@ int blk_register_queue(struct gendisk *disk)
goto unlock;
}
+ ret = sysfs_create_group(&q->kobj, &queue_attr_group);
+ if (ret) {
+ blk_trace_remove_sysfs(dev);
+ kobject_del(&q->kobj);
+ kobject_put(&dev->kobj);
+ goto unlock;
+ }
+
if (queue_is_mq(q)) {
__blk_mq_register_dev(dev, q);
blk_mq_debugfs_register(q);
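
Note: for context, a minimal kernel-module sketch (an illustration, not part of this patch) of the attribute_group + is_visible pattern blk-sysfs switches to here: all attributes live in one array registered with sysfs_create_group(), and is_visible() suppresses individual entries, the way queue_io_timeout is hidden when the driver has no timeout handler. The module, attribute, and flag names are hypothetical.

#include <linux/module.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>

static bool expose_debug;	/* pretend this depends on hardware support */
module_param(expose_debug, bool, 0444);

static ssize_t debug_show(struct kobject *kobj, struct kobj_attribute *attr,
			  char *buf)
{
	return sprintf(buf, "debug\n");
}

static ssize_t normal_show(struct kobject *kobj, struct kobj_attribute *attr,
			   char *buf)
{
	return sprintf(buf, "normal\n");
}

static struct kobj_attribute debug_attr = __ATTR_RO(debug);
static struct kobj_attribute normal_attr = __ATTR_RO(normal);

static struct attribute *demo_attrs[] = {
	&debug_attr.attr,
	&normal_attr.attr,
	NULL,
};

static umode_t demo_attr_visible(struct kobject *kobj, struct attribute *attr,
				 int n)
{
	/* Hide the "debug" file unless the feature is enabled. */
	if (attr == &debug_attr.attr && !expose_debug)
		return 0;
	return attr->mode;
}

static const struct attribute_group demo_group = {
	.attrs = demo_attrs,
	.is_visible = demo_attr_visible,
};

static struct kobject *demo_kobj;

static int __init demo_init(void)
{
	int ret;

	demo_kobj = kobject_create_and_add("group_visibility_demo", kernel_kobj);
	if (!demo_kobj)
		return -ENOMEM;

	ret = sysfs_create_group(demo_kobj, &demo_group);
	if (ret)
		kobject_put(demo_kobj);
	return ret;
}

static void __exit demo_exit(void)
{
	sysfs_remove_group(demo_kobj, &demo_group);
	kobject_put(demo_kobj);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");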