path: root/block/bio.c
author    Ming Lei <ming.lei@redhat.com>  2019-07-01 15:14:46 +0800
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2019-07-14 08:01:08 +0200
commit    c785529bebceeaf38db8ebf9b50ff3a173fb18c6 (patch)
tree      592117f52a65756df77dcd99ebc14f20dafda815 /block/bio.c
parent    a6d278658745fa72fe810394759f9f786993cd33 (diff)
block: fix .bi_size overflow
commit 79d08f89bb1b5c2c1ff90d9bb95497ab9e8aa7e0 upstream.

'bio->bi_iter.bi_size' is an 'unsigned int', which can hold at most 4G - 1 bytes.

Before 07173c3ec276 ("block: enable multipage bvecs"), one bio could only include a limited number of pages, usually at most 256, so an fs bio was rarely bigger than 1M bytes.

Since multi-page bvecs are supported, in theory well over 1M pages can be added to a single fs bio, especially with hugepages or a big writeback covering many dirty pages. There is then a chance that .bi_size overflows.

Fix this issue by using bio_full() to check whether the added segment may overflow .bi_size.

Cc: Liu Yiding <liuyd.fnst@cn.fujitsu.com>
Cc: kernel test robot <rong.a.chen@intel.com>
Cc: "Darrick J. Wong" <darrick.wong@oracle.com>
Cc: linux-xfs@vger.kernel.org
Cc: linux-fsdevel@vger.kernel.org
Cc: stable@vger.kernel.org
Fixes: 07173c3ec276 ("block: enable multipage bvecs")
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
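The callers changed below now pass the length of the segment being added, so bio_full() can reject an addition that would wrap the 32-bit .bi_size counter. The helper itself is extended in include/linux/bio.h, which is outside the block/bio.c diffstat shown here; a minimal sketch of the two-argument form, assuming the upstream signature, looks like this:

/*
 * Sketch of the extended helper (the actual change lives in
 * include/linux/bio.h, not in this block/bio.c diff).  A bio is treated
 * as full either when it has no free bvec slots left or when adding
 * 'len' more bytes would overflow the unsigned int .bi_size counter.
 */
static inline bool bio_full(struct bio *bio, unsigned len)
{
	if (bio->bi_vcnt >= bio->bi_max_vecs)
		return true;

	if (bio->bi_iter.bi_size > UINT_MAX - len)
		return true;

	return false;
}

In the bio_iov_iter_get_pages() loop at the end of this diff the size of the next segment is not yet known, so 0 is passed and the check there effectively only tests for free bvec slots.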
Diffstat (limited to 'block/bio.c')
-rw-r--r--  block/bio.c | 10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/block/bio.c b/block/bio.c
index ce797d73bb43..67bba12d273b 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -731,7 +731,7 @@ static int __bio_add_pc_page(struct request_queue *q, struct bio *bio,
}
}
- if (bio_full(bio))
+ if (bio_full(bio, len))
return 0;
if (bio->bi_phys_segments >= queue_max_segments(q))
@@ -807,7 +807,7 @@ void __bio_add_page(struct bio *bio, struct page *page,
struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt];
WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
- WARN_ON_ONCE(bio_full(bio));
+ WARN_ON_ONCE(bio_full(bio, len));
bv->bv_page = page;
bv->bv_offset = off;
@@ -834,7 +834,7 @@ int bio_add_page(struct bio *bio, struct page *page,
bool same_page = false;
if (!__bio_try_merge_page(bio, page, len, offset, &same_page)) {
- if (bio_full(bio))
+ if (bio_full(bio, len))
return 0;
__bio_add_page(bio, page, len, offset);
}
@@ -922,7 +922,7 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
if (same_page)
put_page(page);
} else {
- if (WARN_ON_ONCE(bio_full(bio)))
+ if (WARN_ON_ONCE(bio_full(bio, len)))
return -EINVAL;
__bio_add_page(bio, page, len, offset);
}
@@ -966,7 +966,7 @@ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
ret = __bio_iov_bvec_add_pages(bio, iter);
else
ret = __bio_iov_iter_get_pages(bio, iter);
- } while (!ret && iov_iter_count(iter) && !bio_full(bio));
+ } while (!ret && iov_iter_count(iter) && !bio_full(bio, 0));
if (iov_iter_bvec_no_ref(iter))
bio_set_flag(bio, BIO_NO_PAGE_REF);