author		Fengguang Wu <wfg@mail.ustc.edu.cn>	2007-10-16 01:24:33 -0700
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-16 09:42:52 -0700
commit		f4e6b498d6e06742d72706ef50593a9c4dd72214 (patch)
tree		74a573302b2ea086c0d21907175be604f110f5b1 /mm
parent		0bb7ba6b9c358c12084a3cbc6ac08c8d1e973937 (diff)
readahead: combine file_ra_state.prev_index/prev_offset into prev_pos
Combine the file_ra_state members

	unsigned long prev_index
	unsigned int prev_offset

into

	loff_t prev_pos

It is more consistent and better supports huge files.
Thanks to Peter for the nice proposal!
[akpm@linux-foundation.org: fix shift overflow]
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Fengguang Wu <wfg@mail.ustc.edu.cn>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
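
The new prev_pos folds the page index and the in-page byte offset into a single 64-bit byte position, so the two old fields fall back out with a shift and a mask. Below is a minimal userspace sketch of that round trip, assuming 4KB pages (PAGE_CACHE_SHIFT == 12, the common configuration of that era) and using int64_t as a stand-in for the kernel's loff_t; it is an illustration, not kernel code:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_CACHE_SHIFT 12                     /* assumed: 4KB pages */
    #define PAGE_CACHE_SIZE  (1UL << PAGE_CACHE_SHIFT)

    int main(void)
    {
            unsigned long prev_index = 0x123456;    /* page index (~4.6GB offset) */
            unsigned int prev_offset = 0x9ab;       /* byte offset within that page */

            /* Pack: widen to 64 bits before shifting, mirroring the
             * patch's assign-then-shift sequence. */
            int64_t prev_pos = ((int64_t)prev_index << PAGE_CACHE_SHIFT) | prev_offset;

            /* Unpack: the two original fields fall back out. */
            unsigned long index = prev_pos >> PAGE_CACHE_SHIFT;
            unsigned int offset = prev_pos & (PAGE_CACHE_SIZE - 1);

            printf("pos=%#llx index=%#lx offset=%#x\n",
                   (long long)prev_pos, index, offset);
            return 0;
    }

A single field also keeps the page and sub-page components from drifting out of sync, since they are always updated together.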
Diffstat (limited to 'mm')
-rw-r--r--	mm/filemap.c	13 +++++++------
-rw-r--r--	mm/readahead.c	15 ++++++++-------
2 files changed, 15 insertions(+), 13 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 5dc18d76e703..bbcca456d8a6 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -879,8 +879,8 @@ void do_generic_mapping_read(struct address_space *mapping,
 	cached_page = NULL;
 	index = *ppos >> PAGE_CACHE_SHIFT;
 	next_index = index;
-	prev_index = ra.prev_index;
-	prev_offset = ra.prev_offset;
+	prev_index = ra.prev_pos >> PAGE_CACHE_SHIFT;
+	prev_offset = ra.prev_pos & (PAGE_CACHE_SIZE-1);
 	last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
 	offset = *ppos & ~PAGE_CACHE_MASK;
 
@@ -966,7 +966,6 @@ page_ok:
 	index += offset >> PAGE_CACHE_SHIFT;
 	offset &= ~PAGE_CACHE_MASK;
 	prev_offset = offset;
-	ra.prev_offset = offset;
 
 	page_cache_release(page);
 	if (ret == nr && desc->count)
@@ -1056,9 +1055,11 @@ no_cached_page:
 
 out:
 	*_ra = ra;
-	_ra->prev_index = prev_index;
+	_ra->prev_pos = prev_index;
+	_ra->prev_pos <<= PAGE_CACHE_SHIFT;
+	_ra->prev_pos |= prev_offset;
 
-	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
+	*ppos = ((loff_t)index << PAGE_CACHE_SHIFT) + offset;
 	if (cached_page)
 		page_cache_release(cached_page);
 	if (filp)
@@ -1396,7 +1397,7 @@ retry_find:
 	 * Found the page and have a reference on it.
 	 */
 	mark_page_accessed(page);
-	ra->prev_index = page->index;
+	ra->prev_pos = (loff_t)page->index << PAGE_CACHE_SHIFT;
 
 	vmf->page = page;
 	return ret | VM_FAULT_LOCKED;
diff --git a/mm/readahead.c b/mm/readahead.c
index d2504877b269..4a58befbde4a 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -46,7 +46,7 @@ void
 file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
 {
 	ra->ra_pages = mapping->backing_dev_info->ra_pages;
-	ra->prev_index = -1;
+	ra->prev_pos = -1;
 }
 EXPORT_SYMBOL_GPL(file_ra_state_init);
@@ -327,7 +327,7 @@ static unsigned long get_next_ra_size(struct file_ra_state *ra,
  * indicator. The flag won't be set on already cached pages, to avoid the
  * readahead-for-nothing fuss, saving pointless page cache lookups.
  *
- * prev_index tracks the last visited page in the _previous_ read request.
+ * prev_pos tracks the last visited byte in the _previous_ read request.
  * It should be maintained by the caller, and will be used for detecting
  * small random reads. Note that the readahead algorithm checks loosely
  * for sequential patterns. Hence interleaved reads might be served as
@@ -351,11 +351,9 @@ ondemand_readahead(struct address_space *mapping,
 		   bool hit_readahead_marker, pgoff_t offset,
 		   unsigned long req_size)
 {
-	int	max;	/* max readahead pages */
-	int	sequential;
-
-	max = ra->ra_pages;
-	sequential = (offset - ra->prev_index <= 1UL) || (req_size > max);
+	int	max = ra->ra_pages;	/* max readahead pages */
+	pgoff_t prev_offset;
+	int	sequential;
 
 	/*
 	 * It's the expected callback offset, assume sequential access.
@@ -369,6 +367,9 @@ ondemand_readahead(struct address_space *mapping,
 		goto readit;
 	}
 
+	prev_offset = ra->prev_pos >> PAGE_CACHE_SHIFT;
+	sequential = offset - prev_offset <= 1UL || req_size > max;
+
 	/*
 	 * Standalone, small read.
 	 * Read as is, and do not pollute the readahead state.
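
The split assignment in the filemap.c hunk (assign prev_index, then shift, then OR in prev_offset) is the "[akpm@linux-foundation.org: fix shift overflow]" fix from the changelog: pgoff_t is only 32 bits wide on 32-bit machines, so shifting it before widening to loff_t would silently truncate any position at or past 4GB. A small standalone sketch of the hazard, again assuming PAGE_CACHE_SHIFT == 12 and using uint32_t to emulate a 32-bit pgoff_t:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_CACHE_SHIFT 12             /* assumed: 4KB pages */

    int main(void)
    {
            /* Emulate a 32-bit pgoff_t: the page index of a 20GB offset. */
            uint32_t prev_index = 5 * 1024 * 1024;  /* 5M pages x 4KB = 20GB */

            /* Buggy: the shift is evaluated in 32 bits; for this value
             * it wraps all the way to zero. */
            int64_t bad = prev_index << PAGE_CACHE_SHIFT;

            /* Fixed, as in the patch: widen first, then shift in 64 bits. */
            int64_t good = prev_index;
            good <<= PAGE_CACHE_SHIFT;

            printf("bad=%lld good=%lld\n", (long long)bad, (long long)good);
            return 0;
    }

The filemap_fault hunk avoids the same trap by casting page->index to loff_t before the shift.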