Diffstat (limited to 'block')
-rw-r--r--  block/blk-core.c    34
-rw-r--r--  block/blk-lib.c     31
-rw-r--r--  block/blk-merge.c   32
-rw-r--r--  block/blk-mq-tag.c   5
-rw-r--r--  block/blk-mq.c      82
-rw-r--r--  block/blk-mq.h       2
-rw-r--r--  block/blk-sysfs.c    1
-rw-r--r--  block/blk.h          1
-rw-r--r--  block/elevator.c     2
9 files changed, 137 insertions(+), 53 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 6ebe33ed5154..89eec7965870 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -577,7 +577,7 @@ void blk_cleanup_queue(struct request_queue *q)
q->queue_lock = &q->__queue_lock;
spin_unlock_irq(lock);
- bdi_destroy(&q->backing_dev_info);
+ bdi_unregister(&q->backing_dev_info);
/* @q is and will stay empty, shutdown and put */
blk_put_queue(q);
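
The bdi teardown is split in two here: blk_cleanup_queue() now only unregisters the bdi, and the final bdi_exit() is deferred to queue release (see the blk-sysfs.c hunk below), since writeback may still hold a queue reference at cleanup time. A minimal standalone userspace analogue of that ordering, with illustrative names rather than the kernel API:

    /* "Unregister" early at cleanup; "exit" only when the last ref drops. */
    #include <stdio.h>

    struct queue_like { int refs; int registered; };

    static void cleanup_queue(struct queue_like *q)
    {
        q->registered = 0;   /* bdi_unregister(): stop new activity */
        /* the final free happens later, at the last reference drop */
    }

    static void put_queue(struct queue_like *q)
    {
        if (--q->refs == 0)
            printf("bdi_exit + free\n");  /* bdi_exit() at final release */
    }

    int main(void)
    {
        struct queue_like q = { 2, 1 };
        cleanup_queue(&q);
        put_queue(&q);  /* writeback still holds a ref */
        put_queue(&q);  /* last ref: now it is safe to tear down the bdi */
        return 0;
    }
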
@@ -1640,6 +1640,30 @@ out:
return ret;
}
+unsigned int blk_plug_queued_count(struct request_queue *q)
+{
+ struct blk_plug *plug;
+ struct request *rq;
+ struct list_head *plug_list;
+ unsigned int ret = 0;
+
+ plug = current->plug;
+ if (!plug)
+ goto out;
+
+ if (q->mq_ops)
+ plug_list = &plug->mq_list;
+ else
+ plug_list = &plug->list;
+
+ list_for_each_entry(rq, plug_list, queuelist) {
+ if (rq->q == q)
+ ret++;
+ }
+out:
+ return ret;
+}
+
void init_request_from_bio(struct request *req, struct bio *bio)
{
req->cmd_type = REQ_TYPE_FS;
@@ -1687,9 +1711,11 @@ static void blk_queue_bio(struct request_queue *q, struct bio *bio)
* Check if we can merge with the plugged list before grabbing
* any locks.
*/
- if (!blk_queue_nomerges(q) &&
- blk_attempt_plug_merge(q, bio, &request_count, NULL))
- return;
+ if (!blk_queue_nomerges(q)) {
+ if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
+ return;
+ } else
+ request_count = blk_plug_queued_count(q);
spin_lock_irq(q->queue_lock);
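
When merging is disabled, blk_attempt_plug_merge() is no longer called, so request_count must be computed another way; the new blk_plug_queued_count() walks the current task's plug list and counts requests bound for this queue. A standalone C analogue of that walk, with illustrative types rather than the kernel list API:

    #include <stdio.h>

    struct request { int q; struct request *next; };

    static unsigned int plug_queued_count(struct request *plug_list, int q)
    {
        unsigned int ret = 0;

        for (struct request *rq = plug_list; rq; rq = rq->next)
            if (rq->q == q)   /* count only requests for this queue */
                ret++;
        return ret;
    }

    int main(void)
    {
        struct request c = { 2, NULL }, b = { 1, &c }, a = { 1, &b };
        printf("%u\n", plug_queued_count(&a, 1)); /* prints 2 */
        return 0;
    }
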
diff --git a/block/blk-lib.c b/block/blk-lib.c
index bd40292e5009..9ebf65379556 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -26,13 +26,6 @@ static void bio_batch_end_io(struct bio *bio)
bio_put(bio);
}
-/*
- * Ensure that max discard sectors doesn't overflow bi_size and hopefully
- * it is of the proper granularity as long as the granularity is a power
- * of two.
- */
-#define MAX_BIO_SECTORS ((1U << 31) >> 9)
-
/**
* blkdev_issue_discard - queue a discard
* @bdev: blockdev to issue discard for
@@ -50,6 +43,8 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
DECLARE_COMPLETION_ONSTACK(wait);
struct request_queue *q = bdev_get_queue(bdev);
int type = REQ_WRITE | REQ_DISCARD;
+ unsigned int granularity;
+ int alignment;
struct bio_batch bb;
struct bio *bio;
int ret = 0;
@@ -61,6 +56,10 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
if (!blk_queue_discard(q))
return -EOPNOTSUPP;
+ /* Zero-sector (unknown) and one-sector granularities are the same. */
+ granularity = max(q->limits.discard_granularity >> 9, 1U);
+ alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
+
if (flags & BLKDEV_DISCARD_SECURE) {
if (!blk_queue_secdiscard(q))
return -EOPNOTSUPP;
@@ -74,7 +73,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
blk_start_plug(&plug);
while (nr_sects) {
unsigned int req_sects;
- sector_t end_sect;
+ sector_t end_sect, tmp;
bio = bio_alloc(gfp_mask, 1);
if (!bio) {
@@ -82,8 +81,22 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
break;
}
- req_sects = min_t(sector_t, nr_sects, MAX_BIO_SECTORS);
+ /* Make sure bi_size doesn't overflow */
+ req_sects = min_t(sector_t, nr_sects, UINT_MAX >> 9);
+
+ /*
+ * If splitting a request, and the next starting sector would be
+ * misaligned, stop the discard at the previous aligned sector.
+ */
end_sect = sector + req_sects;
+ tmp = end_sect;
+ if (req_sects < nr_sects &&
+ sector_div(tmp, granularity) != alignment) {
+ end_sect = end_sect - alignment;
+ sector_div(end_sect, granularity);
+ end_sect = end_sect * granularity + alignment;
+ req_sects = end_sect - sector;
+ }
bio->bi_iter.bi_sector = sector;
bio->bi_end_io = bio_batch_end_io;
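
The rounding above keeps every split boundary on a (granularity, alignment)-aligned sector: if the proposed end sector's offset within a granule is not the device's discard alignment, it is rounded down to the previous aligned sector (sector_div() divides in place and returns the remainder). A standalone worked example with illustrative numbers:

    #include <stdio.h>

    typedef unsigned long long sector_t;

    int main(void)
    {
        sector_t granularity = 8, alignment = 2;  /* in 512-byte sectors */
        sector_t sector = 2, end_sect = 27;       /* proposed split point */

        if (end_sect % granularity != alignment) {
            /* round down to the previous aligned sector: 27 -> 26 */
            end_sect = (end_sect - alignment) / granularity
                       * granularity + alignment;
        }
        /* req_sects = 24, a multiple of the granularity */
        printf("end_sect=%llu req_sects=%llu\n", end_sect, end_sect - sector);
        return 0;
    }
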
diff --git a/block/blk-merge.c b/block/blk-merge.c
index c4e9c37f3e38..de5716d8e525 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -11,13 +11,16 @@
static struct bio *blk_bio_discard_split(struct request_queue *q,
struct bio *bio,
- struct bio_set *bs)
+ struct bio_set *bs,
+ unsigned *nsegs)
{
unsigned int max_discard_sectors, granularity;
int alignment;
sector_t tmp;
unsigned split_sectors;
+ *nsegs = 1;
+
/* Zero-sector (unknown) and one-sector granularities are the same. */
granularity = max(q->limits.discard_granularity >> 9, 1U);
@@ -51,8 +54,11 @@ static struct bio *blk_bio_discard_split(struct request_queue *q,
static struct bio *blk_bio_write_same_split(struct request_queue *q,
struct bio *bio,
- struct bio_set *bs)
+ struct bio_set *bs,
+ unsigned *nsegs)
{
+ *nsegs = 1;
+
if (!q->limits.max_write_same_sectors)
return NULL;
@@ -64,7 +70,8 @@ static struct bio *blk_bio_write_same_split(struct request_queue *q,
static struct bio *blk_bio_segment_split(struct request_queue *q,
struct bio *bio,
- struct bio_set *bs)
+ struct bio_set *bs,
+ unsigned *segs)
{
struct bio_vec bv, bvprv, *bvprvp = NULL;
struct bvec_iter iter;
@@ -106,24 +113,35 @@ new_segment:
sectors += bv.bv_len >> 9;
}
+ *segs = nsegs;
return NULL;
split:
+ *segs = nsegs;
return bio_split(bio, sectors, GFP_NOIO, bs);
}
void blk_queue_split(struct request_queue *q, struct bio **bio,
struct bio_set *bs)
{
- struct bio *split;
+ struct bio *split, *res;
+ unsigned nsegs;
if ((*bio)->bi_rw & REQ_DISCARD)
- split = blk_bio_discard_split(q, *bio, bs);
+ split = blk_bio_discard_split(q, *bio, bs, &nsegs);
else if ((*bio)->bi_rw & REQ_WRITE_SAME)
- split = blk_bio_write_same_split(q, *bio, bs);
+ split = blk_bio_write_same_split(q, *bio, bs, &nsegs);
else
- split = blk_bio_segment_split(q, *bio, q->bio_split);
+ split = blk_bio_segment_split(q, *bio, q->bio_split, &nsegs);
+
+ /* physical segments can be figured out during splitting */
+ res = split ? split : *bio;
+ res->bi_phys_segments = nsegs;
+ bio_set_flag(res, BIO_SEG_VALID);
if (split) {
+ /* there is no chance to merge the split bio */
+ split->bi_rw |= REQ_NOMERGE;
+
bio_chain(split, *bio);
generic_make_request(*bio);
*bio = split;
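
All three split paths now report the physical segment count they had to compute anyway, so blk_queue_split() can stamp bi_phys_segments and set BIO_SEG_VALID up front, sparing a later bvec recount. A standalone sketch of that caching pattern, with illustrative names:

    #include <stdbool.h>
    #include <stdio.h>

    struct bio_like { unsigned phys_segments; bool seg_valid; };

    /* Stand-in for the bvec walk: count '|'-separated segments once. */
    static unsigned split_and_count(const char *data)
    {
        unsigned nsegs = 1;

        for (; *data; data++)
            if (*data == '|')
                nsegs++;
        return nsegs;
    }

    int main(void)
    {
        struct bio_like bio = { 0, false };

        bio.phys_segments = split_and_count("aa|bb|cc"); /* during the split */
        bio.seg_valid = true;                            /* ~ BIO_SEG_VALID */

        if (bio.seg_valid) /* later code can skip the recount */
            printf("cached nsegs=%u\n", bio.phys_segments); /* prints 3 */
        return 0;
    }
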
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index ed96474d75cb..60ac684c8b8c 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -75,6 +75,10 @@ void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
struct blk_mq_bitmap_tags *bt;
int i, wake_index;
+ /*
+ * Make sure all changes prior to this are visible from other CPUs.
+ */
+ smp_mb();
bt = &tags->bitmap_tags;
wake_index = atomic_read(&bt->wake_index);
for (i = 0; i < BT_WAIT_QUEUES; i++) {
@@ -641,6 +645,7 @@ void blk_mq_free_tags(struct blk_mq_tags *tags)
{
bt_free(&tags->bitmap_tags);
bt_free(&tags->breserved_tags);
+ free_cpumask_var(tags->cpumask);
kfree(tags);
}
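
The smp_mb() orders the waker's prior stores (e.g. the freed tag) against its read of the wait-queue state, pairing with a barrier on the sleeping side so that at least one party observes the other. A standalone C11 sketch of that classic pairing, with illustrative names and seq_cst fences standing in for smp_mb():

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int resource_free, waiter_registered;

    static void waker(void)
    {
        atomic_store_explicit(&resource_free, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);   /* smp_mb() */
        if (atomic_load_explicit(&waiter_registered, memory_order_relaxed))
            printf("wake the sleeper\n");
    }

    static void sleeper(void)
    {
        atomic_store_explicit(&waiter_registered, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);   /* pairs with waker */
        if (atomic_load_explicit(&resource_free, memory_order_relaxed))
            printf("no need to sleep\n");
    }

    int main(void) { sleeper(); waker(); return 0; }
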
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 6c240712553a..1c27b3eaef64 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -972,18 +972,25 @@ void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
}
EXPORT_SYMBOL(blk_mq_delay_queue);
-static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
- struct request *rq, bool at_head)
+static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
+ struct blk_mq_ctx *ctx,
+ struct request *rq,
+ bool at_head)
{
- struct blk_mq_ctx *ctx = rq->mq_ctx;
-
trace_block_rq_insert(hctx->queue, rq);
if (at_head)
list_add(&rq->queuelist, &ctx->rq_list);
else
list_add_tail(&rq->queuelist, &ctx->rq_list);
+}
+
+static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
+ struct request *rq, bool at_head)
+{
+ struct blk_mq_ctx *ctx = rq->mq_ctx;
+ __blk_mq_insert_req_list(hctx, ctx, rq, at_head);
blk_mq_hctx_mark_pending(hctx, ctx);
}
@@ -1039,8 +1046,9 @@ static void blk_mq_insert_requests(struct request_queue *q,
rq = list_first_entry(list, struct request, queuelist);
list_del_init(&rq->queuelist);
rq->mq_ctx = ctx;
- __blk_mq_insert_request(hctx, rq, false);
+ __blk_mq_insert_req_list(hctx, ctx, rq, false);
}
+ blk_mq_hctx_mark_pending(hctx, ctx);
spin_unlock(&ctx->lock);
blk_mq_run_hw_queue(hctx, from_schedule);
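
With the list insert split out of __blk_mq_insert_request(), batch insertion can add every request under one ctx lock and mark the hctx pending a single time, instead of once per request. A trivial standalone sketch of that hoisting, with illustrative names:

    #include <stdio.h>

    int main(void)
    {
        int inserted = 0, mark_calls = 0;

        /* insert a whole plug batch under one (conceptual) ctx lock */
        for (int rq = 0; rq < 4; rq++)
            inserted++;   /* __blk_mq_insert_req_list() per request */
        mark_calls++;     /* one blk_mq_hctx_mark_pending() for the batch */

        printf("inserted=%d mark_calls=%d\n", inserted, mark_calls); /* 4, 1 */
        return 0;
    }
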
@@ -1122,7 +1130,7 @@ static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
struct blk_mq_ctx *ctx,
struct request *rq, struct bio *bio)
{
- if (!hctx_allow_merges(hctx)) {
+ if (!hctx_allow_merges(hctx) || !bio_mergeable(bio)) {
blk_mq_bio_to_request(rq, bio);
spin_lock(&ctx->lock);
insert_rq:
@@ -1246,9 +1254,12 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
blk_queue_split(q, &bio, q->bio_split);
- if (!is_flush_fua && !blk_queue_nomerges(q) &&
- blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
- return;
+ if (!is_flush_fua && !blk_queue_nomerges(q)) {
+ if (blk_attempt_plug_merge(q, bio, &request_count,
+ &same_queue_rq))
+ return;
+ } else
+ request_count = blk_plug_queued_count(q);
rq = blk_mq_map_request(q, bio, &data);
if (unlikely(!rq))
@@ -1355,7 +1366,7 @@ static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
plug = current->plug;
if (plug) {
blk_mq_bio_to_request(rq, bio);
- if (list_empty(&plug->mq_list))
+ if (!request_count)
trace_block_plug(q);
else if (request_count >= BLK_MAX_REQUEST_COUNT) {
blk_flush_plug_list(plug, false);
@@ -1662,7 +1673,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
INIT_LIST_HEAD(&hctx->dispatch);
hctx->queue = q;
hctx->queue_num = hctx_idx;
- hctx->flags = set->flags;
+ hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;
blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
blk_mq_hctx_notify, hctx);
@@ -1849,27 +1860,26 @@ static void blk_mq_map_swqueue(struct request_queue *q,
}
}
-static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set)
+static void queue_set_hctx_shared(struct request_queue *q, bool shared)
{
struct blk_mq_hw_ctx *hctx;
- struct request_queue *q;
- bool shared;
int i;
- if (set->tag_list.next == set->tag_list.prev)
- shared = false;
- else
- shared = true;
+ queue_for_each_hw_ctx(q, hctx, i) {
+ if (shared)
+ hctx->flags |= BLK_MQ_F_TAG_SHARED;
+ else
+ hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
+ }
+}
+
+static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set, bool shared)
+{
+ struct request_queue *q;
list_for_each_entry(q, &set->tag_list, tag_set_list) {
blk_mq_freeze_queue(q);
-
- queue_for_each_hw_ctx(q, hctx, i) {
- if (shared)
- hctx->flags |= BLK_MQ_F_TAG_SHARED;
- else
- hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
- }
+ queue_set_hctx_shared(q, shared);
blk_mq_unfreeze_queue(q);
}
}
@@ -1880,7 +1890,12 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)
mutex_lock(&set->tag_list_lock);
list_del_init(&q->tag_set_list);
- blk_mq_update_tag_set_depth(set);
+ if (list_is_singular(&set->tag_list)) {
+ /* just transitioned to unshared */
+ set->flags &= ~BLK_MQ_F_TAG_SHARED;
+ /* update existing queue */
+ blk_mq_update_tag_set_depth(set, false);
+ }
mutex_unlock(&set->tag_list_lock);
}
@@ -1890,8 +1905,17 @@ static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
q->tag_set = set;
mutex_lock(&set->tag_list_lock);
+
+ /* Check to see if we're transitioning to shared (from 1 to 2 queues). */
+ if (!list_empty(&set->tag_list) && !(set->flags & BLK_MQ_F_TAG_SHARED)) {
+ set->flags |= BLK_MQ_F_TAG_SHARED;
+ /* update existing queue */
+ blk_mq_update_tag_set_depth(set, true);
+ }
+ if (set->flags & BLK_MQ_F_TAG_SHARED)
+ queue_set_hctx_shared(q, true);
list_add_tail(&q->tag_set_list, &set->tag_list);
- blk_mq_update_tag_set_depth(set);
+
mutex_unlock(&set->tag_list_lock);
}
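
Together with the del path above, the shared flag now changes only on the 1->2 and 2->1 queue transitions, and existing queues are frozen and flipped when it does. A standalone sketch of that state machine, with illustrative names:

    #include <stdbool.h>
    #include <stdio.h>

    static int nqueues;
    static bool shared;

    static void add_queue(void)
    {
        if (nqueues == 1 && !shared)
            shared = true;   /* 1 -> 2 queues: flip the existing queue too */
        nqueues++;
        printf("add: nqueues=%d shared=%d\n", nqueues, shared);
    }

    static void del_queue(void)
    {
        nqueues--;
        if (nqueues == 1)
            shared = false;  /* just transitioned to unshared */
        printf("del: nqueues=%d shared=%d\n", nqueues, shared);
    }

    int main(void)
    {
        add_queue(); add_queue(); del_queue();
        return 0;
    }
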
@@ -2275,10 +2299,8 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
int i;
for (i = 0; i < set->nr_hw_queues; i++) {
- if (set->tags[i]) {
+ if (set->tags[i])
blk_mq_free_rq_map(set, set->tags[i], i);
- free_cpumask_var(set->tags[i]->cpumask);
- }
}
kfree(set->tags);
diff --git a/block/blk-mq.h b/block/blk-mq.h
index f4fea7964910..b44dce165761 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -29,8 +29,6 @@ void __blk_mq_complete_request(struct request *rq);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
-void blk_mq_clone_flush_request(struct request *flush_rq,
- struct request *orig_rq);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 61fc2633bbea..31849e328b45 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -540,6 +540,7 @@ static void blk_release_queue(struct kobject *kobj)
struct request_queue *q =
container_of(kobj, struct request_queue, kobj);
+ bdi_exit(&q->backing_dev_info);
blkcg_exit_queue(q);
if (q->elevator) {
diff --git a/block/blk.h b/block/blk.h
index 157c93d54dc9..da722eb786df 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -108,6 +108,7 @@ bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
unsigned int *request_count,
struct request **same_queue_rq);
+unsigned int blk_plug_queued_count(struct request_queue *q);
void blk_account_io_start(struct request *req, bool new_io);
void blk_account_io_completion(struct request *req, unsigned int bytes);
diff --git a/block/elevator.c b/block/elevator.c
index 84d63943f2de..c3555c9c672f 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -420,7 +420,7 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
* noxmerges: Only simple one-hit cache try
* merges: All merge tries attempted
*/
- if (blk_queue_nomerges(q))
+ if (blk_queue_nomerges(q) || !bio_mergeable(bio))
return ELEVATOR_NO_MERGE;
/*
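
This pairs with the REQ_NOMERGE flag set on split bios in blk-merge.c above: the elevator (and blk-mq, via the bio_mergeable() check earlier) now refuses such bios at the merge entry point. A standalone sketch of the flag test, with an illustrative flag value:

    #include <stdbool.h>
    #include <stdio.h>

    #define REQ_NOMERGE (1u << 0)

    struct bio_like { unsigned rw; };

    static bool bio_mergeable_like(const struct bio_like *bio)
    {
        return !(bio->rw & REQ_NOMERGE);
    }

    int main(void)
    {
        struct bio_like split = { REQ_NOMERGE }, normal = { 0 };
        printf("split: %d, normal: %d\n",
               bio_mergeable_like(&split), bio_mergeable_like(&normal));
        return 0;
    }
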