author    Jens Axboe <axboe@suse.de>                  2006-05-03 10:35:26 +0200
committer Jens Axboe <axboe@nelson.home.kernel.dk>    2006-05-04 06:55:12 +0200
commit    1432873af7ae29d4bb3c56114c05b539d078ca62 (patch)
tree      cf4f72608d2e10f7ff786b9d60067963f1ab4ca9 /fs/splice.c
parent    bfc4ee39fdbb2deb8864785d5e5bc5cdd3b31a69 (diff)
[PATCH] splice: LRU fixups
Nick says that the current construct isn't safe. This goes back to the original, but sets PIPE_BUF_FLAG_LRU on user pages as well, as they all seem to be on the LRU in the first place.

Signed-off-by: Jens Axboe <axboe@suse.de>
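For context only (this is not part of the commit): the code path being fixed is the one a user-space program reaches by gifting page-aligned memory into a pipe with vmsplice(SPLICE_F_GIFT) and then splicing it to a file with SPLICE_F_MOVE, which is when a stolen user page must end up on the LRU. The sketch below is a minimal illustration of that path; the output file name and buffer size are arbitrary, and the kernel is free to copy instead of move.

/*
 * Illustrative only: exercises the SPLICE_F_GIFT + SPLICE_F_MOVE path
 * touched by this patch. Moving (stealing) the page is an optimization
 * the kernel may decline; the data is simply copied in that case.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	long page_size = sysconf(_SC_PAGESIZE);
	int pipefd[2];
	void *buf;

	if (pipe(pipefd) < 0)
		return 1;

	/* SPLICE_F_GIFT wants page-aligned, page-sized segments. */
	if (posix_memalign(&buf, page_size, page_size))
		return 1;
	memset(buf, 'x', page_size);

	struct iovec iov = { .iov_base = buf, .iov_len = page_size };

	/* Gift the page to the pipe; the kernel may now steal it. */
	if (vmsplice(pipefd[1], &iov, 1, SPLICE_F_GIFT) < 0) {
		perror("vmsplice");
		return 1;
	}

	int fd = open("splice-out.tmp", O_WRONLY | O_CREAT | O_TRUNC, 0644);
	if (fd < 0)
		return 1;

	/* Ask for the page to be moved into the file's page cache. */
	if (splice(pipefd[0], NULL, fd, NULL, page_size, SPLICE_F_MOVE) < 0) {
		perror("splice");
		return 1;
	}

	close(fd);
	return 0;
}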
Diffstat (limited to 'fs/splice.c')
-rw-r--r--  fs/splice.c | 33
1 file changed, 11 insertions(+), 22 deletions(-)
diff --git a/fs/splice.c b/fs/splice.c
index 27f5e3738a7b..0b202425b0b5 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -78,6 +78,7 @@ static int page_cache_pipe_buf_steal(struct pipe_inode_info *info,
 		return 1;
 	}
 
+	buf->flags |= PIPE_BUF_FLAG_LRU;
 	return 0;
 }
 
@@ -85,6 +86,7 @@ static void page_cache_pipe_buf_release(struct pipe_inode_info *info,
 				    struct pipe_buffer *buf)
 {
 	page_cache_release(buf->page);
+	buf->flags &= ~PIPE_BUF_FLAG_LRU;
 }
 
 static int page_cache_pipe_buf_pin(struct pipe_inode_info *info,
@@ -141,6 +143,7 @@ static int user_page_pipe_buf_steal(struct pipe_inode_info *pipe,
 	if (!(buf->flags & PIPE_BUF_FLAG_GIFT))
 		return 1;
 
+	buf->flags |= PIPE_BUF_FLAG_LRU;
 	return generic_pipe_buf_steal(pipe, buf);
 }
 
@@ -566,37 +569,23 @@ static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
 	 */
 	if ((sd->flags & SPLICE_F_MOVE) && this_len == PAGE_CACHE_SIZE) {
 		/*
-		 * If steal succeeds, buf->page is now pruned from the vm
-		 * side (page cache) and we can reuse it. The page will also
-		 * be locked on successful return.
+		 * If steal succeeds, buf->page is now pruned from the
+		 * pagecache and we can reuse it. The page will also be
+		 * locked on successful return.
 		 */
 		if (buf->ops->steal(info, buf))
 			goto find_page;
 
 		page = buf->page;
-		page_cache_get(page);
-
-		/*
-		 * page must be on the LRU for adding to the pagecache.
-		 * Check this without grabbing the zone lock, if it isn't
-		 * the do grab the zone lock, recheck, and add if necessary.
-		 */
-		if (!PageLRU(page)) {
-			struct zone *zone = page_zone(page);
-
-			spin_lock_irq(&zone->lru_lock);
-			if (!PageLRU(page)) {
-				SetPageLRU(page);
-				add_page_to_inactive_list(zone, page);
-			}
-			spin_unlock_irq(&zone->lru_lock);
-		}
-
 		if (add_to_page_cache(page, mapping, index, gfp_mask)) {
-			page_cache_release(page);
 			unlock_page(page);
 			goto find_page;
 		}
+
+		page_cache_get(page);
+
+		if (!(buf->flags & PIPE_BUF_FLAG_LRU))
+			lru_cache_add(page);
 	} else {
 find_page:
 		page = find_lock_page(mapping, index);
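For readability, the move branch of pipe_to_file() after this patch reads roughly as below (a paraphrase assembled from the hunk above, not a literal copy of the post-patch file): the stolen page is inserted into the page cache, a reference is taken, and lru_cache_add() is called only when the steal hook did not already mark the buffer as being on the LRU.

	if ((sd->flags & SPLICE_F_MOVE) && this_len == PAGE_CACHE_SIZE) {
		/* Steal the page from the pipe; it comes back locked. */
		if (buf->ops->steal(info, buf))
			goto find_page;

		page = buf->page;
		if (add_to_page_cache(page, mapping, index, gfp_mask)) {
			unlock_page(page);
			goto find_page;
		}

		page_cache_get(page);

		/* The steal paths set PIPE_BUF_FLAG_LRU for pages already on the LRU. */
		if (!(buf->flags & PIPE_BUF_FLAG_LRU))
			lru_cache_add(page);
	} else {
find_page:
		page = find_lock_page(mapping, index);
	}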