author	OGAWA Hirofumi <hirofumi@mail.parknet.co.jp>	2006-06-23 02:03:26 -0700
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-06-23 07:42:49 -0700
commit	111ebb6e6f7bd7de6d722c5848e95621f43700d9 (patch)
tree	bb00b13001db9be201e9b6d31468a79f4d1240bf /fs/mpage.c
parent	4c91c3648c620003cb7b21b8858f36cd6132e168 (diff)
[PATCH] writeback: fix range handling
When a writeback_control's `start' and `end' fields are used to indicate a one-byte range starting at file offset zero, the required values of .start=0, .end=0 give the ->writepages() implementation no way of telling that it is being asked to perform a range request, because (start == 0 && end == 0) is currently overloaded to mean "this is not a write-a-range request".

To make this sane, the patch changes how writeback_control describes a range. A caller of ->writepages() now always sets the range, either range_start/range_end or range_cyclic. If range_cyclic is true, ->writepages() treats the range as cyclic; otherwise it just uses range_start and range_end.

This patch does the following:

- Add LLONG_MAX, LLONG_MIN and ULLONG_MAX to include/linux/kernel.h.
  A range_end of -1 is usually fine (the type is long long), but code such as

	range_end += val;		/* range_end becomes "val - 1" */
	u64val = range_end >> bits;	/* u64val becomes ~(0ULL) or similar */

  is wrong. So this adds LLONG_MAX to avoid such nasty surprises, and uses LLONG_MAX for range_end.

- Make all callers of ->writepages() set range_start/range_end or range_cyclic.

- Fix the updates of ->writeback_index, which were already a bit odd: if a sweep starts at index 0 and is terminated by the nr_to_write check, recording that last index reduces the chance of ever scanning the end of the file. ->writeback_index is now updated only if range_cyclic is true or the whole file was scanned.

Signed-off-by: OGAWA Hirofumi <hirofumi@mail.parknet.co.jp>
Cc: Nathan Scott <nathans@sgi.com>
Cc: Anton Altaparmakov <aia21@cantab.net>
Cc: Steven French <sfrench@us.ibm.com>
Cc: "Vladimir V. Saveliev" <vs@namesys.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
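The arithmetic pitfall described in the commit message is easy to reproduce. The userspace snippet below is only an illustration of that point, not code from the patch; the PAGE_CACHE_SHIFT value and the variable names are assumed for demonstration.

	/*
	 * Illustration of why -1 is a fragile "no upper bound" for range_end
	 * (a signed long long) and why the patch switches to LLONG_MAX.
	 */
	#include <limits.h>	/* LLONG_MAX; the patch adds it to include/linux/kernel.h */
	#include <stdio.h>

	#define PAGE_CACHE_SHIFT 12	/* assumed 4KB page size, for demonstration only */

	int main(void)
	{
		long long range_end = -1;	/* old convention: "write everything" */

		/* Shifting the value as an unsigned 64-bit quantity yields a huge
		 * bogus page index rather than "no limit". */
		unsigned long long u64val = (unsigned long long)range_end >> PAGE_CACHE_SHIFT;
		printf("end page index from -1:        %llu\n", u64val);

		/* And range_end += val silently wraps around to val - 1. */
		long long val = 4096;
		printf("range_end += val gives:        %lld\n", range_end + val);

		/* With the new convention the same arithmetic stays sane. */
		range_end = LLONG_MAX;
		printf("end page index from LLONG_MAX: %lld\n", range_end >> PAGE_CACHE_SHIFT);
		return 0;
	}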
Diffstat (limited to 'fs/mpage.c')
-rw-r--r--	fs/mpage.c	22
1 file changed, 10 insertions(+), 12 deletions(-)
diff --git a/fs/mpage.c b/fs/mpage.c
index 9bf2eb30e6f4..1e4598247d0b 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -707,9 +707,9 @@ mpage_writepages(struct address_space *mapping,
struct pagevec pvec;
int nr_pages;
pgoff_t index;
- pgoff_t end = -1; /* Inclusive */
+ pgoff_t end; /* Inclusive */
int scanned = 0;
- int is_range = 0;
+ int range_whole = 0;
if (wbc->nonblocking && bdi_write_congested(bdi)) {
wbc->encountered_congestion = 1;
@@ -721,16 +721,14 @@ mpage_writepages(struct address_space *mapping,
writepage = mapping->a_ops->writepage;
pagevec_init(&pvec, 0);
- if (wbc->sync_mode == WB_SYNC_NONE) {
+ if (wbc->range_cyclic) {
index = mapping->writeback_index; /* Start from prev offset */
+ end = -1;
} else {
- index = 0; /* whole-file sweep */
- scanned = 1;
- }
- if (wbc->start || wbc->end) {
- index = wbc->start >> PAGE_CACHE_SHIFT;
- end = wbc->end >> PAGE_CACHE_SHIFT;
- is_range = 1;
+ index = wbc->range_start >> PAGE_CACHE_SHIFT;
+ end = wbc->range_end >> PAGE_CACHE_SHIFT;
+ if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
+ range_whole = 1;
scanned = 1;
}
retry:
@@ -759,7 +757,7 @@ retry:
continue;
}
- if (unlikely(is_range) && page->index > end) {
+ if (!wbc->range_cyclic && page->index > end) {
done = 1;
unlock_page(page);
continue;
@@ -810,7 +808,7 @@ retry:
index = 0;
goto retry;
}
- if (!is_range)
+ if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
mapping->writeback_index = index;
if (bio)
mpage_bio_submit(WRITE, bio);
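
For context, under the new contract every caller of ->writepages() fills in the range fields up front. The sketch below shows the two shapes such a caller can take after this patch; it is a simplified, hedged illustration (the function names are made up, and it is not a copy of any in-tree caller).

	#include <linux/fs.h>
	#include <linux/writeback.h>
	#include <linux/kernel.h>	/* LLONG_MAX, added by this patch */

	/* A data-integrity sync asks for an explicit whole-file range, so
	 * mpage_writepages() sets range_whole and may update writeback_index. */
	static int example_sync_whole_file(struct address_space *mapping)
	{
		struct writeback_control wbc = {
			.sync_mode	= WB_SYNC_ALL,
			.nr_to_write	= LONG_MAX,
			.range_start	= 0,
			.range_end	= LLONG_MAX,	/* "whole file", per this patch */
		};

		return mapping->a_ops->writepages(mapping, &wbc);
	}

	/* Background writeback instead asks for a cyclic sweep that resumes
	 * from mapping->writeback_index. */
	static int example_background_writeback(struct address_space *mapping)
	{
		struct writeback_control wbc = {
			.sync_mode	= WB_SYNC_NONE,
			.nr_to_write	= 1024,
			.range_cyclic	= 1,
		};

		return mapping->a_ops->writepages(mapping, &wbc);
	}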