author	Mel Gorman <mgorman@suse.de>	2013-07-03 15:02:32 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-07-03 16:07:31 -0700
commit	a0b8cab3b9b2efadabdcff264c450ca515e2619c (patch)
tree	31bcd78c027cafbc9daf892fe7b6db07c48297a5 /mm/swap.c
parent	059285a25f30c13ed4f5d91cecd6094b9b20bb7b (diff)
mm: remove lru parameter from __pagevec_lru_add and remove parts of pagevec API
Now that the LRU to add a page to is decided at LRU-add time, remove the
misleading lru parameter from __pagevec_lru_add. A consequence of this is
that the pagevec_lru_add_file, pagevec_lru_add_anon and similar helpers
are misleading as the caller no longer has direct control over what LRU
the page is added to. Unused helpers are removed by this patch and
existing users of pagevec_lru_add_file() are converted to use
lru_cache_add_file() directly and use the per-cpu pagevecs instead of
creating their own pagevec.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Jan Kara <jack@suse.cz>
Reviewed-by: Rik van Riel <riel@redhat.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Alexey Lyahkov <alexey.lyashkov@gmail.com>
Cc: Andrew Perepechko <anserper@ya.ru>
Cc: Robin Dong <sanbai@taobao.com>
Cc: Theodore Tso <tytso@mit.edu>
Cc: Hugh Dickins <hughd@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Bernd Schubert <bernd.schubert@fastmail.fm>
Cc: David Howells <dhowells@redhat.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
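The caller-side conversion described in the last sentence happens outside
mm/swap.c, so it is not visible in the diffstat below. As a rough sketch of
what each converted caller looks like (the surrounding code is hypothetical,
but pagevec_init(), page_cache_get(), pagevec_add(), pagevec_lru_add_file()
and lru_cache_add_file() are the kernel APIs of this era):

	/* Before: the caller batches pages on its own on-stack pagevec. */
	struct pagevec lru_pvec;

	pagevec_init(&lru_pvec, 0);
	...
	page_cache_get(page);		/* reference is consumed by the pagevec */
	if (!pagevec_add(&lru_pvec, page))
		pagevec_lru_add_file(&lru_pvec);
	...
	pagevec_lru_add_file(&lru_pvec);	/* flush whatever is left over */

	/*
	 * After: one call per page. The page is batched on the per-cpu
	 * lru_add_pvec instead, and __lru_cache_add() takes its own page
	 * reference, so the caller's explicit page_cache_get() goes away.
	 */
	lru_cache_add_file(page);

The per-cpu batching also means a converted caller no longer has to remember
a final flush of a partially filled pagevec; lru_add_drain() covers that.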
Diffstat (limited to 'mm/swap.c')
-rw-r--r--	mm/swap.c	12
1 file changed, 4 insertions(+), 8 deletions(-)
diff --git a/mm/swap.c b/mm/swap.c
index c53d161fc76d..6a9d0c43924a 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -505,7 +505,7 @@ void __lru_cache_add(struct page *page, enum lru_list lru)
 
 	page_cache_get(page);
 	if (!pagevec_space(pvec))
-		__pagevec_lru_add(pvec, lru);
+		__pagevec_lru_add(pvec);
 	pagevec_add(pvec, page);
 	put_cpu_var(lru_add_pvec);
 }
@@ -628,7 +628,7 @@ void lru_add_drain_cpu(int cpu)
 	struct pagevec *pvec = &per_cpu(lru_add_pvec, cpu);
 
 	if (pagevec_count(pvec))
-		__pagevec_lru_add(pvec, NR_LRU_LISTS);
+		__pagevec_lru_add(pvec);
 
 	pvec = &per_cpu(lru_rotate_pvecs, cpu);
 	if (pagevec_count(pvec)) {
@@ -832,12 +832,10 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
 static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
 				 void *arg)
 {
-	enum lru_list requested_lru = (enum lru_list)arg;
 	int file = page_is_file_cache(page);
 	int active = PageActive(page);
 	enum lru_list lru = page_lru(page);
 
-	WARN_ON_ONCE(requested_lru < NR_LRU_LISTS && requested_lru != lru);
 	VM_BUG_ON(PageUnevictable(page));
 	VM_BUG_ON(PageLRU(page));
 
@@ -851,11 +849,9 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
  * Add the passed pages to the LRU, then drop the caller's refcount
  * on them. Reinitialises the caller's pagevec.
  */
-void __pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
+void __pagevec_lru_add(struct pagevec *pvec)
 {
-	VM_BUG_ON(is_unevictable_lru(lru));
-
-	pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, (void *)lru);
+	pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, NULL);
 }
 EXPORT_SYMBOL(__pagevec_lru_add);
 
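What makes the dropped lru argument redundant is visible in the third hunk:
__pagevec_lru_add_fn() derives the destination list purely from the page's
own state via page_lru(). A simplified sketch of that helper follows; it is
an approximation of the include/linux/mm_inline.h version of this era, which
goes through page_lru_base_type() rather than testing page_is_file_cache()
inline:

	static __always_inline enum lru_list page_lru(struct page *page)
	{
		enum lru_list lru;

		if (PageUnevictable(page))
			return LRU_UNEVICTABLE;

		/* The base list is picked by anon vs. file-backed... */
		lru = page_is_file_cache(page) ? LRU_INACTIVE_FILE
					       : LRU_INACTIVE_ANON;
		/* ...and PageActive() bumps it to the active variant. */
		if (PageActive(page))
			lru += LRU_ACTIVE;
		return lru;
	}

Since everything the old lru parameter could encode is recoverable from the
page flags at add time, passing it down only created an opportunity for the
caller and the page state to disagree, which is exactly the mismatch the
removed WARN_ON_ONCE() was policing.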