author	Keith Busch <kbusch@kernel.org>	2022-07-12 08:32:56 -0700
committer	Jens Axboe <axboe@kernel.dk>	2022-08-02 21:08:54 -0600
commit	e97424fd44727b4a5ecb124a49f575fed6086999 (patch)
tree	8cf90aab52270f2e71cb3b961c3c32d51d66ebf1 /block
parent	34cdb8c825f28a5c91c5336a80967533da57da74 (diff)
block: fix leaking page ref on truncated direct io
The size being added to a bio from an iov is aligned to a block size after the pages were gotten. If the new aligned size truncates the last page, its reference was being leaked. Ensure all pages that were not added to the bio have their reference released.

Since this essentially requires doing the same thing that bio_put_pages() does, and there was only one caller of that function, this patch makes the put_page() loop common for everyone.

Fixes: b1a000d3b8ec5 ("block: relax direct io memory alignment")
Reported-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Keith Busch <kbusch@kernel.org>
Link: https://lore.kernel.org/r/20220712153256.2202024-3-kbusch@fb.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
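As a rough illustration of the pattern the fix settles on (pin everything up front, then release whatever the consumer never took ownership of on a single exit path), here is a minimal user-space C sketch. The names get_ref, put_ref, and add_pages are hypothetical stand-ins for get_page(), put_page(), and __bio_iov_iter_get_pages(); this is not the kernel code itself.

#include <stdio.h>

/* Hypothetical stand-in for a page refcount; not kernel code. */
static int refcount[8];

static void get_ref(int i) { refcount[i]++; }	/* like get_page() */
static void put_ref(int i) { refcount[i]--; }	/* like put_page() */

/*
 * Sketch of the cleanup pattern the patch adopts: pin nr_pinned "pages"
 * up front, hand off the first nr_usable of them (ownership of the
 * reference moves with the page), and release every reference that was
 * not handed off on the single exit path - including the error case
 * where the truncated size means nothing is usable.
 */
static int add_pages(int nr_pinned, int nr_usable)
{
	int i = 0;
	int ret = 0;

	for (i = 0; i < nr_pinned; i++)
		get_ref(i);		/* iov_iter_get_pages() analogue */

	if (nr_usable <= 0) {		/* e.g. size aligned down to zero */
		ret = -1;
		i = 0;			/* nothing was handed off */
		goto out;
	}

	for (i = 0; i < nr_usable; i++)
		;			/* "add page i to the bio" */

out:
	/* drop the references the consumer never took ownership of */
	while (i < nr_pinned)
		put_ref(i++);

	return ret;
}

int main(void)
{
	/*
	 * Four pages pinned, but alignment truncated use to three: the
	 * fourth reference must still be dropped (the old code leaked it).
	 */
	add_pages(4, 3);
	for (int i = 0; i < 4; i++)
		printf("page %d refcount delta: %d\n", i, refcount[i]);
	return 0;
}

The point of the single out: label is that both the early-error path and the normal loop exit fall through the same put loop, so a truncated tail page can no longer slip past both.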
Diffstat (limited to 'block')
-rw-r--r--	block/bio.c | 30
1 file changed, 15 insertions(+), 15 deletions(-)
diff --git a/block/bio.c b/block/bio.c
index 4740ba3cb9df..d6eb90d9b20b 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1151,14 +1151,6 @@ void bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter)
 	bio_set_flag(bio, BIO_CLONED);
 }
 
-static void bio_put_pages(struct page **pages, size_t size, size_t off)
-{
-	size_t i, nr = DIV_ROUND_UP(size + (off & ~PAGE_MASK), PAGE_SIZE);
-
-	for (i = 0; i < nr; i++)
-		put_page(pages[i]);
-}
-
 static int bio_iov_add_page(struct bio *bio, struct page *page,
 			    unsigned int len, unsigned int offset)
 {
@@ -1207,7 +1199,7 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
 	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
 	struct page **pages = (struct page **)bv;
 	ssize_t size, left;
-	unsigned len, i;
+	unsigned len, i = 0;
 	size_t offset;
 	int ret = 0;
 
@@ -1228,10 +1220,16 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
 	 */
 	size = iov_iter_get_pages(iter, pages, UINT_MAX - bio->bi_iter.bi_size,
 				  nr_pages, &offset);
-	if (size > 0)
+	if (size > 0) {
+		nr_pages = DIV_ROUND_UP(offset + size, PAGE_SIZE);
 		size = ALIGN_DOWN(size, bdev_logical_block_size(bio->bi_bdev));
-	if (unlikely(size <= 0))
-		return size ? size : -EFAULT;
+	} else
+		nr_pages = 0;
+
+	if (unlikely(size <= 0)) {
+		ret = size ? size : -EFAULT;
+		goto out;
+	}
 
 	for (left = size, i = 0; left > 0; left -= len, i++) {
 		struct page *page = pages[i];
@@ -1240,10 +1238,8 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
 		if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
 			ret = bio_iov_add_zone_append_page(bio, page, len,
 					offset);
-			if (ret) {
-				bio_put_pages(pages + i, left, offset);
+			if (ret)
 				break;
-			}
 		} else
 			bio_iov_add_page(bio, page, len, offset);
 
@@ -1251,6 +1247,10 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
 	}
 
 	iov_iter_advance(iter, size - left);
+out:
+	while (i < nr_pages)
+		put_page(pages[i++]);
+
 	return ret;
 }
 