Diffstat (limited to 'block')
-rw-r--r--  block/bio.c         4
-rw-r--r--  block/blk-cgroup.c 20
-rw-r--r--  block/blk-core.c    5
3 files changed, 6 insertions(+), 23 deletions(-)
diff --git a/block/bio.c b/block/bio.c
index 9d54aa37ce6c..a5d75f6bf4c7 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -754,10 +754,12 @@ bool __bio_try_merge_page(struct bio *bio, struct page *page,
 	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
 		return false;
 
-	if (bio->bi_vcnt > 0 && !bio_full(bio, len)) {
+	if (bio->bi_vcnt > 0) {
 		struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
 
 		if (page_is_mergeable(bv, page, len, off, same_page)) {
+			if (bio->bi_iter.bi_size > UINT_MAX - len)
+				return false;
 			bv->bv_len += len;
 			bio->bi_iter.bi_size += len;
 			return true;
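
The bio.c hunk above drops the bio_full() test from the merge condition and instead adds an explicit wrap-around guard: bio->bi_iter.bi_size is a 32-bit counter, so the merge is refused whenever adding len would push it past UINT_MAX. Below is a minimal standalone sketch of that overflow-guard pattern, assuming hypothetical names (byte_counter, try_grow); it is not the kernel code itself.

#include <limits.h>
#include <stdbool.h>

/* Stand-in for a 32-bit byte counter such as bio->bi_iter.bi_size. */
struct byte_counter {
	unsigned int size;
};

/*
 * Grow the counter by len only when the addition cannot wrap past
 * UINT_MAX; otherwise report failure, the way the hunk above refuses
 * the page merge.
 */
static bool try_grow(struct byte_counter *c, unsigned int len)
{
	if (c->size > UINT_MAX - len)
		return false;

	c->size += len;
	return true;
}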
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 708dea92dac8..a229b94d5390 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1062,26 +1062,6 @@ err_unlock:
 }
 
 /**
- * blkcg_drain_queue - drain blkcg part of request_queue
- * @q: request_queue to drain
- *
- * Called from blk_drain_queue(). Responsible for draining blkcg part.
- */
-void blkcg_drain_queue(struct request_queue *q)
-{
-	lockdep_assert_held(&q->queue_lock);
-
-	/*
-	 * @q could be exiting and already have destroyed all blkgs as
-	 * indicated by NULL root_blkg. If so, don't confuse policies.
-	 */
-	if (!q->root_blkg)
-		return;
-
-	blk_throtl_drain(q);
-}
-
-/**
  * blkcg_exit_queue - exit and release blkcg part of request_queue
  * @q: request_queue being released
  *
diff --git a/block/blk-core.c b/block/blk-core.c
index e4b27f7e9f51..e0a094fddee5 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1310,7 +1310,7 @@ EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
 
 void blk_account_io_completion(struct request *req, unsigned int bytes)
 {
-	if (blk_do_io_stat(req)) {
+	if (req->part && blk_do_io_stat(req)) {
 		const int sgrp = op_stat_group(req_op(req));
 		struct hd_struct *part;
 
@@ -1328,7 +1328,8 @@ void blk_account_io_done(struct request *req, u64 now)
 	 * normal IO on queueing nor completion. Accounting the
 	 * containing request is enough.
 	 */
-	if (blk_do_io_stat(req) && !(req->rq_flags & RQF_FLUSH_SEQ)) {
+	if (req->part && blk_do_io_stat(req) &&
+	    !(req->rq_flags & RQF_FLUSH_SEQ)) {
 		const int sgrp = op_stat_group(req_op(req));
 		struct hd_struct *part;
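
The blk-core.c hunks guard the I/O accounting helpers so that per-partition statistics are only touched when the request actually has a partition attached (req->part is non-NULL). A small self-contained sketch of the same NULL-guard pattern, using hypothetical stand-in types rather than the kernel's struct request / struct hd_struct API:

#include <stddef.h>

/* Stand-in for per-partition I/O statistics. */
struct part_stats {
	unsigned long sectors;
};

/* Stand-in for a request; part may be NULL, like req->part. */
struct fake_request {
	struct part_stats *part;
};

/* Account completed bytes only when a partition is attached. */
static void account_completion(struct fake_request *req, unsigned int bytes)
{
	if (!req->part)
		return;

	req->part->sectors += bytes >> 9;	/* bytes -> 512-byte sectors */
}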