author    Matthew Wilcox (Oracle) <willy@infradead.org>  2021-04-07 21:18:55 +0100
committer David Howells <dhowells@redhat.com>  2021-04-23 09:25:00 +0100
commit    fcd9ae4f7f3b5fbd549285bab0478a339113620e (patch)
tree      4a188d6754431b22be1913cf13841ca4d808bc87 /mm/readahead.c
parent    73e10ded33a1cfc0c72404aaedc493e9813b6239 (diff)
mm/filemap: Pass the file_ra_state in the ractl
For readahead_expand(), we need to modify the file ra_state, so pass it
down by adding it to the ractl.  We have to do this because it's not always
the same as f_ra in the struct file that is already being passed.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: David Howells <dhowells@redhat.com>
Tested-by: Jeff Layton <jlayton@kernel.org>
Tested-by: Dave Wysochanski <dwysocha@redhat.com>
Tested-By: Marc Dionne <marc.dionne@auristor.com>
Link: https://lore.kernel.org/r/20210407201857.3582797-2-willy@infradead.org/
Link: https://lore.kernel.org/r/161789067431.6155.8063840447229665720.stgit@warthog.procyon.org.uk/ # v6
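The hunks below read the ra state through ractl->ra, which implies a
companion change in include/linux/pagemap.h that falls outside this
diffstat-limited view: struct readahead_control gains a file_ra_state
pointer and DEFINE_READAHEAD() initializes it. The following is a sketch of
that header change, reconstructed from how the code below uses ractl->ra
rather than quoted from the patch; member names of the private fields are
assumed from contemporary kernels:

	struct readahead_control {
		struct file *file;
		struct address_space *mapping;
		struct file_ra_state *ra;	/* new: ra state travels with the ractl */
	/* private: use the readahead_* accessors instead */
		pgoff_t _index;
		unsigned int _nr_pages;
		unsigned int _batch_count;
	};

	#define DEFINE_READAHEAD(ractl, f, r, m, i)			\
		struct readahead_control ractl = {			\
			.file = f,					\
			.mapping = m,					\
			.ra = r,					\
			._index = i,					\
		}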
Diffstat (limited to 'mm/readahead.c')
-rw-r--r--	mm/readahead.c	22
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/mm/readahead.c b/mm/readahead.c
index c5b0457415be..2088569a947e 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -272,9 +272,10 @@ void do_page_cache_ra(struct readahead_control *ractl,
* memory at once.
*/
void force_page_cache_ra(struct readahead_control *ractl,
- struct file_ra_state *ra, unsigned long nr_to_read)
+ unsigned long nr_to_read)
{
struct address_space *mapping = ractl->mapping;
+ struct file_ra_state *ra = ractl->ra;
struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
unsigned long max_pages, index;
@@ -433,10 +434,10 @@ static int try_context_readahead(struct address_space *mapping,
* A minimal readahead algorithm for trivial sequential/random reads.
*/
static void ondemand_readahead(struct readahead_control *ractl,
- struct file_ra_state *ra, bool hit_readahead_marker,
- unsigned long req_size)
+ bool hit_readahead_marker, unsigned long req_size)
{
struct backing_dev_info *bdi = inode_to_bdi(ractl->mapping->host);
+ struct file_ra_state *ra = ractl->ra;
unsigned long max_pages = ra->ra_pages;
unsigned long add_pages;
unsigned long index = readahead_index(ractl);
@@ -550,7 +551,7 @@ readit:
}
void page_cache_sync_ra(struct readahead_control *ractl,
- struct file_ra_state *ra, unsigned long req_count)
+ unsigned long req_count)
{
bool do_forced_ra = ractl->file && (ractl->file->f_mode & FMODE_RANDOM);
@@ -560,7 +561,7 @@ void page_cache_sync_ra(struct readahead_control *ractl,
* read-ahead will do the right thing and limit the read to just the
* requested range, which we'll set to 1 page for this case.
*/
- if (!ra->ra_pages || blk_cgroup_congested()) {
+ if (!ractl->ra->ra_pages || blk_cgroup_congested()) {
if (!ractl->file)
return;
req_count = 1;
@@ -569,21 +570,20 @@ void page_cache_sync_ra(struct readahead_control *ractl,
/* be dumb */
if (do_forced_ra) {
- force_page_cache_ra(ractl, ra, req_count);
+ force_page_cache_ra(ractl, req_count);
return;
}
/* do read-ahead */
- ondemand_readahead(ractl, ra, false, req_count);
+ ondemand_readahead(ractl, false, req_count);
}
EXPORT_SYMBOL_GPL(page_cache_sync_ra);
void page_cache_async_ra(struct readahead_control *ractl,
- struct file_ra_state *ra, struct page *page,
- unsigned long req_count)
+ struct page *page, unsigned long req_count)
{
/* no read-ahead */
- if (!ra->ra_pages)
+ if (!ractl->ra->ra_pages)
return;
/*
@@ -604,7 +604,7 @@ void page_cache_async_ra(struct readahead_control *ractl,
return;
/* do read-ahead */
- ondemand_readahead(ractl, ra, true, req_count);
+ ondemand_readahead(ractl, true, req_count);
}
EXPORT_SYMBOL_GPL(page_cache_async_ra);
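For context, this is how a caller-side wrapper ends up looking once the ra
state rides in the ractl: the file_ra_state is captured when the ractl is
built, and the *_ra() functions above no longer take it as a separate
argument. A sketch modelled on the page_cache_sync_readahead() inline from
pagemap.h; the exact shape is an assumption, as that file is not part of
this diff:

	static inline
	void page_cache_sync_readahead(struct address_space *mapping,
			struct file_ra_state *ra, struct file *file,
			pgoff_t index, unsigned long req_count)
	{
		/* ra is stashed in the ractl here ... */
		DEFINE_READAHEAD(ractl, file, ra, mapping, index);
		/* ... so the callee fetches it via ractl->ra */
		page_cache_sync_ra(&ractl, req_count);
	}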