Diffstat (limited to 'block/bio.c')
-rw-r--r--  block/bio.c  35
1 file changed, 25 insertions(+), 10 deletions(-)
diff --git a/block/bio.c b/block/bio.c
index a1c4d2900c7a..963d1d406b3a 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -33,7 +33,7 @@ static struct biovec_slab {
{ .nr_vecs = 16, .name = "biovec-16" },
{ .nr_vecs = 64, .name = "biovec-64" },
{ .nr_vecs = 128, .name = "biovec-128" },
- { .nr_vecs = BIO_MAX_PAGES, .name = "biovec-max" },
+ { .nr_vecs = BIO_MAX_VECS, .name = "biovec-max" },
};
static struct biovec_slab *biovec_slab(unsigned short nr_vecs)
@@ -46,7 +46,7 @@ static struct biovec_slab *biovec_slab(unsigned short nr_vecs)
return &bvec_slabs[1];
case 65 ... 128:
return &bvec_slabs[2];
- case 129 ... BIO_MAX_PAGES:
+ case 129 ... BIO_MAX_VECS:
return &bvec_slabs[3];
default:
BUG();
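
Taken together, the table and the switch above are a size-class lookup: every vec count up to BIO_MAX_VECS (256) maps to the smallest slab that can hold it, while counts at or below BIO_INLINE_VECS never reach a slab at all because those vecs are embedded in the bio itself. A minimal user-space sketch of the same mapping; slabs[] and slab_for() are illustrative stand-ins, not the kernel API:

#include <stdio.h>

#define BIO_MAX_VECS    256	/* upper bound on bio_vecs per bio */
#define BIO_INLINE_VECS 4	/* vecs embedded in struct bio itself */

/* Stand-in for the kernel's biovec_slab table: capacity and name only. */
static const struct { unsigned int nr_vecs; const char *name; } slabs[] = {
	{ 16,           "biovec-16"  },
	{ 64,           "biovec-64"  },
	{ 128,          "biovec-128" },
	{ BIO_MAX_VECS, "biovec-max" },
};

/* Same mapping as biovec_slab(): the smallest class that fits. */
static const char *slab_for(unsigned int nr_vecs)
{
	if (nr_vecs <= BIO_INLINE_VECS)
		return "inline (never allocated from a slab)";
	if (nr_vecs <= 16)
		return slabs[0].name;
	if (nr_vecs <= 64)
		return slabs[1].name;
	if (nr_vecs <= 128)
		return slabs[2].name;
	if (nr_vecs <= BIO_MAX_VECS)
		return slabs[3].name;
	return "BUG()";	/* the kernel's switch has no case above the cap */
}

int main(void)
{
	unsigned int sizes[] = { 1, 5, 16, 17, 64, 65, 128, 129, 256 };

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%3u vecs -> %s\n", sizes[i], slab_for(sizes[i]));
	return 0;
}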
@@ -151,9 +151,9 @@ out:
void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs)
{
- BIO_BUG_ON(nr_vecs > BIO_MAX_PAGES);
+ BIO_BUG_ON(nr_vecs > BIO_MAX_VECS);
- if (nr_vecs == BIO_MAX_PAGES)
+ if (nr_vecs == BIO_MAX_VECS)
mempool_free(bv, pool);
else if (nr_vecs > BIO_INLINE_VECS)
kmem_cache_free(biovec_slab(nr_vecs)->slab, bv);
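
bvec_free() dispatches three ways: a full-size array always goes back to the mempool, a mid-size array goes back to its size-class slab (biovec_slab() recovers which one from the count), and anything at or below BIO_INLINE_VECS was never separately allocated, so there is nothing to free. A rough sketch of just that dispatch, with the constants matching the kernel's values:

#include <stdio.h>

#define BIO_MAX_VECS    256
#define BIO_INLINE_VECS 4

/* Which path bvec_free() takes for a given vec count. */
static const char *free_path(unsigned int nr_vecs)
{
	if (nr_vecs == BIO_MAX_VECS)
		return "mempool_free";		/* only max-size arrays */
	if (nr_vecs > BIO_INLINE_VECS)
		return "kmem_cache_free";	/* back to the size-class slab */
	return "nothing to free";		/* vecs live inside the bio */
}

int main(void)
{
	unsigned int sizes[] = { 2, 4, 5, 128, 255, 256 };

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%3u vecs: %s\n", sizes[i], free_path(sizes[i]));
	return 0;
}

Note that 255 vecs still frees to the biovec-max kmem_cache; only an exact BIO_MAX_VECS count means the array came from the mempool, which is why bvec_alloc() rounds the count up on its fallback path in the next hunk.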
@@ -186,15 +186,15 @@ struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
/*
* Try a slab allocation first for all smaller allocations. If that
* fails and __GFP_DIRECT_RECLAIM is set retry with the mempool.
- * The mempool is sized to handle up to BIO_MAX_PAGES entries.
+ * The mempool is sized to handle up to BIO_MAX_VECS entries.
*/
- if (*nr_vecs < BIO_MAX_PAGES) {
+ if (*nr_vecs < BIO_MAX_VECS) {
struct bio_vec *bvl;
bvl = kmem_cache_alloc(bvs->slab, bvec_alloc_gfp(gfp_mask));
if (likely(bvl) || !(gfp_mask & __GFP_DIRECT_RECLAIM))
return bvl;
- *nr_vecs = BIO_MAX_PAGES;
+ *nr_vecs = BIO_MAX_VECS;
}
return mempool_alloc(pool, gfp_mask);
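
The comment and code above are the classic try-cheap-then-guaranteed pattern: attempt a slab allocation first, and only a caller that can sleep (__GFP_DIRECT_RECLAIM) falls back to the mempool, whose pre-filled reserve guarantees forward progress. A user-space analogue, with malloc standing in for the slab and one static buffer for the mempool; alloc_vecs(), VEC_SIZE, and mempool_reserve are illustrative names, not kernel API:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define BIO_MAX_VECS 256
#define VEC_SIZE     16	/* roughly sizeof(struct bio_vec) on 64-bit */

/* One pre-reserved max-size buffer plays the mempool; the kernel sizes
 * the real pool so a sleeping caller is guaranteed forward progress. */
static char mempool_reserve[BIO_MAX_VECS * VEC_SIZE];

static void *alloc_vecs(unsigned short *nr_vecs, bool can_sleep)
{
	/* Cheap path first for anything below the maximum. */
	if (*nr_vecs < BIO_MAX_VECS) {
		void *bvl = malloc(*nr_vecs * VEC_SIZE);	/* "slab" try */

		if (bvl || !can_sleep)
			return bvl;	/* got it, or caller cannot wait */
		/*
		 * Fall back to the reserve.  It comes in one size only,
		 * so round the caller's count up to BIO_MAX_VECS; this
		 * is why bvec_free() treats that exact count specially.
		 */
		*nr_vecs = BIO_MAX_VECS;
	}
	return mempool_reserve;	/* kernel: mempool_alloc(pool, gfp_mask) */
}

int main(void)
{
	unsigned short n = 64;
	void *v = alloc_vecs(&n, true);

	printf("got %hu vecs at %p\n", n, v);
	if (v != mempool_reserve)
		free(v);
	return 0;
}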
@@ -949,7 +949,7 @@ void bio_release_pages(struct bio *bio, bool mark_dirty)
}
EXPORT_SYMBOL_GPL(bio_release_pages);
-static int bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter)
+static void __bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter)
{
WARN_ON_ONCE(bio->bi_max_vecs);
@@ -959,11 +959,26 @@ static int bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter)
bio->bi_iter.bi_size = iter->count;
bio_set_flag(bio, BIO_NO_PAGE_REF);
bio_set_flag(bio, BIO_CLONED);
+}
+
+static int bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter)
+{
+ __bio_iov_bvec_set(bio, iter);
iov_iter_advance(iter, iter->count);
return 0;
}
+static int bio_iov_bvec_set_append(struct bio *bio, struct iov_iter *iter)
+{
+ struct request_queue *q = bio->bi_bdev->bd_disk->queue;
+ struct iov_iter i = *iter;
+
+ iov_iter_truncate(&i, queue_max_zone_append_sectors(q) << 9);
+ __bio_iov_bvec_set(bio, &i);
+ iov_iter_advance(iter, i.count);
+ return 0;
+}
+
#define PAGE_PTRS_PER_BVEC (sizeof(struct bio_vec) / sizeof(struct page *))
/**
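
In the new bio_iov_bvec_set_append(), queue_max_zone_append_sectors() reports the per-queue limit in 512-byte sectors, so shifting left by 9 converts it to bytes; iov_iter_truncate() then clamps a local copy of the iterator, and the caller's iterator is advanced only by what this bio actually took, leaving the remainder for the next one. A small model of that truncate-then-advance bookkeeping, treating an iov_iter as nothing but its remaining byte count:

#include <stdio.h>
#include <stddef.h>

/* Model an iov_iter as just its remaining byte count. */
struct iter { size_t count; };

static void iter_truncate(struct iter *i, size_t max_bytes)
{
	if (i->count > max_bytes)
		i->count = max_bytes;
}

int main(void)
{
	size_t max_append_sectors = 128;		/* per-queue limit */
	size_t max_bytes = max_append_sectors << 9;	/* sectors -> bytes */
	struct iter iter = { .count = 1 << 20 };	/* 1 MiB request */
	struct iter i = iter;				/* struct iov_iter i = *iter */

	iter_truncate(&i, max_bytes);			/* clamp this bio */
	iter.count -= i.count;				/* iov_iter_advance(iter, i.count) */

	printf("this bio: %zu bytes, remaining: %zu bytes\n",
	       i.count, iter.count);
	return 0;
}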
@@ -1094,8 +1109,8 @@ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
int ret = 0;
if (iov_iter_is_bvec(iter)) {
- if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND))
- return -EINVAL;
+ if (bio_op(bio) == REQ_OP_ZONE_APPEND)
+ return bio_iov_bvec_set_append(bio, iter);
return bio_iov_bvec_set(bio, iter);
}
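
Before this hunk, a bvec-backed iterator on a REQ_OP_ZONE_APPEND bio was a WARN plus -EINVAL; now it is routed to the truncating variant. Since bio_iov_bvec_set_append() may consume only part of the iterator, it falls to the submitter to turn one large request into several appends by looping until the iterator drains. A sketch of that consequence in the same byte-count model as above; append_take() is a made-up stand-in, not a kernel function:

#include <stdio.h>
#include <stddef.h>

struct iter { size_t count; };

/* Stand-in for building one zone-append bio: take at most max_bytes
 * and advance the caller's iterator by what was taken. */
static size_t append_take(struct iter *iter, size_t max_bytes)
{
	size_t taken = iter->count < max_bytes ? iter->count : max_bytes;

	iter->count -= taken;
	return taken;
}

int main(void)
{
	struct iter iter = { .count = 1 << 20 };	/* 1 MiB to append */
	size_t max_bytes = 128 << 9;			/* 64 KiB per bio */
	int nr_bios = 0;

	while (iter.count)
		printf("bio %d: %zu bytes\n", ++nr_bios,
		       append_take(&iter, max_bytes));
	return 0;
}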