author    Jens Axboe <axboe@kernel.dk>  2020-10-17 09:25:52 -0600
committer Jens Axboe <axboe@kernel.dk>  2020-10-17 13:49:08 -0600
commit    324bcf54c449c7b5b7024c9fa4549fbaaae1935d (patch)
tree      03a41400fc0a67de4ef4f69987e39f39a53c7ba4 /mm/readahead.c
parent    13bd691421bc191a402d2e0d3da5f248d170a632 (diff)
mm: use limited read-ahead to satisfy read
For the case where read-ahead is disabled on the file, or if the cgroup
is congested, ensure that we can at least do 1 page of read-ahead to
make progress on the read in an async fashion. This could potentially be
larger, but it's not needed in terms of functionality, so let's err on
the side of caution as larger counts of pages may run into reclaim
issues (particularly if we're congested).

This makes sure we're not hitting the potentially sync ->readpage() path
for IO that is marked IOCB_WAITQ, which could cause us to block. It also
means we'll use the same path for IO, regardless of whether or not
read-ahead happens to be disabled on the lower level device.

Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Reported-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reported-by: Hao_Xu <haoxu@linux.alibaba.com>
[axboe: updated for new ractl API]
Signed-off-by: Jens Axboe <axboe@kernel.dk>
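IOCB_WAITQ is the flag io_uring sets for async buffered reads, so a
buffered read submitted through io_uring is one way to exercise the path
this patch changes. Below is a minimal userspace sketch using liburing;
the file name and buffer size are illustrative, and it assumes
read-ahead has been disabled on the underlying device (e.g. its
read_ahead_kb sysfs knob set to 0):

/*
 * Hedged sketch: a buffered read submitted through io_uring. On recent
 * kernels such reads can be retried async via IOCB_WAITQ; with this
 * patch they take the (limited) read-ahead path even when read-ahead is
 * disabled on the device, instead of a potentially sync ->readpage().
 * "testfile" and the buffer size are illustrative.
 */
#include <fcntl.h>
#include <stdio.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	char buf[4096];
	int fd, ret;

	fd = open("testfile", O_RDONLY);	/* buffered, not O_DIRECT */
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read(sqe, fd, buf, sizeof(buf), 0);
	io_uring_submit(&ring);

	ret = io_uring_wait_cqe(&ring, &cqe);
	if (ret == 0) {
		printf("read returned %d\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}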
Diffstat (limited to 'mm/readahead.c')
 mm/readahead.c | 20 ++++++++++++++------
 1 file changed, 14 insertions(+), 6 deletions(-)
diff --git a/mm/readahead.c b/mm/readahead.c
index c6ffb76827da..c5b0457415be 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -552,15 +552,23 @@ readit:
 void page_cache_sync_ra(struct readahead_control *ractl,
 		struct file_ra_state *ra, unsigned long req_count)
 {
-	/* no read-ahead */
-	if (!ra->ra_pages)
-		return;
+	bool do_forced_ra = ractl->file && (ractl->file->f_mode & FMODE_RANDOM);
 
-	if (blk_cgroup_congested())
-		return;
+	/*
+	 * Even if read-ahead is disabled, issue this request as read-ahead
+	 * as we'll need it to satisfy the requested range. The forced
+	 * read-ahead will do the right thing and limit the read to just the
+	 * requested range, which we'll set to 1 page for this case.
+	 */
+	if (!ra->ra_pages || blk_cgroup_congested()) {
+		if (!ractl->file)
+			return;
+		req_count = 1;
+		do_forced_ra = true;
+	}
 
 	/* be dumb */
-	if (ractl->file && (ractl->file->f_mode & FMODE_RANDOM)) {
+	if (do_forced_ra) {
 		force_page_cache_ra(ractl, ra, req_count);
 		return;
 	}
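The pre-existing forced path shown above is also what FMODE_RANDOM
callers hit, and userspace sets that flag with posix_fadvise(). A
minimal sketch of triggering it directly (file name illustrative):

/*
 * Hedged sketch: POSIX_FADV_RANDOM makes the kernel set FMODE_RANDOM on
 * the struct file, so subsequent buffered reads go through
 * force_page_cache_ra(), limited to the requested range. "testfile" is
 * illustrative.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	int fd = open("testfile", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Hint random access: kernel sets FMODE_RANDOM for this file */
	posix_fadvise(fd, 0, 0, POSIX_FADV_RANDOM);

	/* This read is now serviced via forced (limited) read-ahead */
	if (read(fd, buf, sizeof(buf)) < 0)
		perror("read");
	close(fd);
	return 0;
}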