author     Jens Axboe <axboe@kernel.dk>                     2018-08-17 15:45:36 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2018-08-17 16:20:29 -0700
commit     74c8164e1cdb1eb22f1d49d54e515e81821a8ad0 (patch)
tree       f1fbddb5d202d0eacb79859118f77e0f18491566 /fs/mpage.c
parent     357c1206520da7a40e383fe329ce379bda722cd9 (diff)
mpage: mpage_readpages() should submit IO as read-ahead
a_ops->readpages() is only ever used for read-ahead, yet we don't flag
the IO being submitted as such.  Fix that up.  Any file system that uses
mpage_readpages() as its ->readpages() implementation will now get this
right.

Since we're passing in whether the IO is read-ahead or not, we don't
need to pass in the 'gfp' separately, as it is dependent on the IO being
read-ahead.  Kill off that member.

Add some documentation notes on ->readpages() being purely for
read-ahead.

Link: http://lkml.kernel.org/r/20180621010725.17813-3-axboe@kernel.dk
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Chris Mason <clm@fb.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Theodore Ts'o <tytso@mit.edu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
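For context: a filesystem picks up this behaviour simply by pointing its
->readpages() hook at mpage_readpages(); nothing in the caller changes. Below
is a minimal sketch of that wiring, loosely modelled on ext2. The foo_* names
and the get_block helper are illustrative placeholders, not part of this
patch, and the aops table omits the write-side hooks a real filesystem needs.

    #include <linux/fs.h>
    #include <linux/mpage.h>

    /* Stand-in for the filesystem's block-mapping helper (hypothetical). */
    extern int foo_get_block(struct inode *inode, sector_t iblock,
                             struct buffer_head *bh_result, int create);

    /* Synchronous single-page read: not read-ahead, so no REQ_RAHEAD. */
    static int foo_readpage(struct file *file, struct page *page)
    {
            return mpage_readpage(page, foo_get_block);
    }

    /* Batched read-ahead path: after this patch the bios it builds carry REQ_RAHEAD. */
    static int foo_readpages(struct file *file, struct address_space *mapping,
                             struct list_head *pages, unsigned nr_pages)
    {
            return mpage_readpages(mapping, pages, nr_pages, foo_get_block);
    }

    static const struct address_space_operations foo_aops = {
            .readpage       = foo_readpage,
            .readpages      = foo_readpages,
            /* write-side ops omitted for brevity */
    };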
Diffstat (limited to 'fs/mpage.c')
-rw-r--r--  fs/mpage.c  29
1 file changed, 19 insertions(+), 10 deletions(-)
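The new is_readahead flag in the hunks below selects between two existing gfp
helpers instead of carrying a gfp_t member around. For reference, their
definitions in include/linux/pagemap.h around this kernel version are
approximately the following (paraphrased from memory; check your tree):

    /* Approximate v4.18-era definitions, include/linux/pagemap.h. */
    static inline gfp_t readahead_gfp_mask(struct address_space *x)
    {
            /* Read-ahead allocations may fail fast and silently. */
            return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
    }

    static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
                                               gfp_t gfp_mask)
    {
            /* Restrict the caller's mask to what the mapping permits. */
            return mapping_gfp_mask(mapping) & gfp_mask;
    }

This is why dropping args->gfp loses no information: the mask is fully
determined by whether or not the IO is read-ahead.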
diff --git a/fs/mpage.c b/fs/mpage.c
index 6dc90e456abf..c820dc9bebab 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -137,11 +137,11 @@ struct mpage_readpage_args {
struct bio *bio;
struct page *page;
unsigned int nr_pages;
+ bool is_readahead;
sector_t last_block_in_bio;
struct buffer_head map_bh;
unsigned long first_logical_block;
get_block_t *get_block;
- gfp_t gfp;
};

/*
@@ -170,8 +170,18 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
struct block_device *bdev = NULL;
int length;
int fully_mapped = 1;
+ int op_flags;
unsigned nblocks;
unsigned relative_block;
+ gfp_t gfp;
+
+ if (args->is_readahead) {
+ op_flags = REQ_RAHEAD;
+ gfp = readahead_gfp_mask(page->mapping);
+ } else {
+ op_flags = 0;
+ gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
+ }

if (page_has_buffers(page))
goto confused;
@@ -284,7 +294,7 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
* This page will go to BIO. Do we need to send this BIO off first?
*/
if (args->bio && (args->last_block_in_bio != blocks[0] - 1))
- args->bio = mpage_bio_submit(REQ_OP_READ, 0, args->bio);
+ args->bio = mpage_bio_submit(REQ_OP_READ, op_flags, args->bio);

alloc_new:
if (args->bio == NULL) {
@@ -296,14 +306,14 @@ alloc_new:
args->bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
min_t(int, args->nr_pages,
BIO_MAX_PAGES),
- args->gfp);
+ gfp);
if (args->bio == NULL)
goto confused;
}

length = first_hole << blkbits;
if (bio_add_page(args->bio, page, length, 0) < length) {
- args->bio = mpage_bio_submit(REQ_OP_READ, 0, args->bio);
+ args->bio = mpage_bio_submit(REQ_OP_READ, op_flags, args->bio);
goto alloc_new;
}
@@ -311,7 +321,7 @@ alloc_new:
nblocks = map_bh->b_size >> blkbits;
if ((buffer_boundary(map_bh) && relative_block == nblocks) ||
(first_hole != blocks_per_page))
- args->bio = mpage_bio_submit(REQ_OP_READ, 0, args->bio);
+ args->bio = mpage_bio_submit(REQ_OP_READ, op_flags, args->bio);
else
args->last_block_in_bio = blocks[blocks_per_page - 1];
out:
@@ -319,7 +329,7 @@ out:
confused:
if (args->bio)
- args->bio = mpage_bio_submit(REQ_OP_READ, 0, args->bio);
+ args->bio = mpage_bio_submit(REQ_OP_READ, op_flags, args->bio);
if (!PageUptodate(page))
block_read_full_page(page, args->get_block);
else
@@ -377,7 +387,7 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
{
struct mpage_readpage_args args = {
.get_block = get_block,
- .gfp = readahead_gfp_mask(mapping),
+ .is_readahead = true,
};
unsigned page_idx;
@@ -388,7 +398,7 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
list_del(&page->lru);
if (!add_to_page_cache_lru(page, mapping,
page->index,
- args.gfp)) {
+ readahead_gfp_mask(mapping))) {
args.page = page;
args.nr_pages = nr_pages - page_idx;
args.bio = do_mpage_readpage(&args);
@@ -397,7 +407,7 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
}
BUG_ON(!list_empty(pages));
if (args.bio)
- mpage_bio_submit(REQ_OP_READ, 0, args.bio);
+ mpage_bio_submit(REQ_OP_READ, REQ_RAHEAD, args.bio);
return 0;
}
EXPORT_SYMBOL(mpage_readpages);
@@ -411,7 +421,6 @@ int mpage_readpage(struct page *page, get_block_t get_block)
.page = page,
.nr_pages = 1,
.get_block = get_block,
- .gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL),
};

args.bio = do_mpage_readpage(&args);
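Once the bios are tagged, lower layers can tell read-ahead reads apart by
testing the bio's op flags. The helper below is a hypothetical illustration of
that check, not an existing kernel function:

    #include <linux/bio.h>
    #include <linux/blk_types.h>

    /* Hypothetical helper: true if this bio was submitted as read-ahead. */
    static inline bool bio_is_readahead(const struct bio *bio)
    {
            return (bio->bi_opf & REQ_RAHEAD) != 0;
    }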