author     Lee Schermerhorn <Lee.Schermerhorn@hp.com>      2009-12-14 17:59:54 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-12-15 08:53:23 -0800
commit     418b27ef50e7e9b0c2fbd88db804bf065e5eb1a6 (patch)
tree       24a4354bfa3bda6c763b859c235669c939c97ce4 /mm
parent     4eb2b1dcd598f8489130405c81c60c289896d92a (diff)
mm: remove unevictable_migrate_page function
unevictable_migrate_page() in mm/internal.h is a relic of the since-removed
UNEVICTABLE_LRU Kconfig option. This patch removes the function and
open-codes the test in migrate_page_copy().
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
Acked-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/internal.h | 12
-rw-r--r--  mm/migrate.c  |  4
2 files changed, 2 insertions, 14 deletions
diff --git a/mm/internal.h b/mm/internal.h
index a4b927cdca09..4fe67a162cb4 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -63,18 +63,6 @@ static inline unsigned long page_order(struct page *page)
 	return page_private(page);
 }
 
-/*
- * unevictable_migrate_page() called only from migrate_page_copy() to
- * migrate unevictable flag to new page.
- * Note that the old page has been isolated from the LRU lists at this
- * point so we don't need to worry about LRU statistics.
- */
-static inline void unevictable_migrate_page(struct page *new, struct page *old)
-{
-	if (TestClearPageUnevictable(old))
-		SetPageUnevictable(new);
-}
-
 #ifdef CONFIG_MMU
 extern long mlock_vma_pages_range(struct vm_area_struct *vma,
 			unsigned long start, unsigned long end);
diff --git a/mm/migrate.c b/mm/migrate.c
index 2a0ea3ef509e..efddbf0926b2 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -288,8 +288,8 @@ static void migrate_page_copy(struct page *newpage, struct page *page)
 	if (TestClearPageActive(page)) {
 		VM_BUG_ON(PageUnevictable(page));
 		SetPageActive(newpage);
-	} else
-		unevictable_migrate_page(newpage, page);
+	} else if (TestClearPageUnevictable(page))
+		SetPageUnevictable(newpage);
 	if (PageChecked(page))
 		SetPageChecked(newpage);
 	if (PageMappedToDisk(page))
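For readers outside the kernel tree, the open-coded replacement is easy to model in plain C. The sketch below is not kernel code: struct page, the bit helpers, and copy_lru_flags() are simplified stand-ins invented here, standing in for the kernel's atomic TestClearPageActive()/TestClearPageUnevictable()/SetPage*() operations on page->flags. It only illustrates the logic the patch leaves behind in migrate_page_copy(): test-and-clear the flag on the old page and, if it was set, set it on the new page, where PG_active and PG_unevictable are mutually exclusive (the VM_BUG_ON in the real code asserts this), so at most one branch transfers a flag.

```c
#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's page flags and helpers. */
enum { PG_active = 0, PG_unevictable = 1 };

struct page {
	unsigned long flags;
};

/* Return whether the bit was set, and clear it (non-atomic model of
 * the kernel's TestClearPage* helpers). */
static bool test_clear_bit(int nr, unsigned long *flags)
{
	bool was_set = *flags & (1UL << nr);

	*flags &= ~(1UL << nr);
	return was_set;
}

static void set_flag_bit(int nr, unsigned long *flags)
{
	*flags |= 1UL << nr;
}

/* Model of what migrate_page_copy() now does for these two flags:
 * transfer PG_active, or failing that PG_unevictable, from the old
 * page to the new one. */
static void copy_lru_flags(struct page *newpage, struct page *page)
{
	if (test_clear_bit(PG_active, &page->flags))
		set_flag_bit(PG_active, &newpage->flags);
	else if (test_clear_bit(PG_unevictable, &page->flags))
		set_flag_bit(PG_unevictable, &newpage->flags);
}

int main(void)
{
	struct page old = { .flags = 1UL << PG_unevictable };
	struct page new = { .flags = 0 };

	copy_lru_flags(&new, &old);
	/* Prints old=0 new=2: the unevictable bit moved to the new page. */
	printf("old=%lx new=%lx\n", old.flags, new.flags);
	return 0;
}
```

With the helper gone, the two-line else-if in migrate_page_copy() reads the same as the rest of the flag-copy sequence around it, which is the whole point of open-coding it.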