author     Hugh Dickins <hugh@veritas.com>                       2008-02-04 22:28:41 -0800
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2008-02-05 09:44:14 -0800
commit     46017e954826ac59e91df76341a3f76b45467847 (patch)
tree       711a35e3936118665d0eac2afeef8758b4f4e95f /mm
parent     c4cc6d07b2f465fbf5efd99bbe772a49c515f3f2 (diff)
swapin_readahead: move and rearrange args
swapin_readahead has never sat well in mm/memory.c: move it to mm/swap_state.c beside its kindred read_swap_cache_async.  Why were its args in a different order?  Rearrange them.  And since it was always followed by a read_swap_cache_async of the target page, fold that in and return struct page*.

Then CONFIG_SWAP=n no longer needs valid_swaphandles and read_swap_cache_async stubs.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
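In caller terms, the patch collapses the old two-step pattern into a single call that both queues the readahead and returns the target page; a minimal sketch of the before/after, using only the calls that appear in the hunks below:

    /* before: queue the readahead, then look up the target page separately */
    swapin_readahead(entry, address, vma);
    page = read_swap_cache_async(entry, vma, address);

    /* after: one call queues the readahead and returns the target page (or NULL) */
    page = swapin_readahead(entry, vma, address);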
Diffstat (limited to 'mm')
-rw-r--r--  mm/memory.c      45
-rw-r--r--  mm/shmem.c        6
-rw-r--r--  mm/swap_state.c  47
3 files changed, 50 insertions, 48 deletions
diff --git a/mm/memory.c b/mm/memory.c
index 1d803c2d0184..ccc9403d5352 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1980,48 +1980,6 @@ int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
return 0;
}
-/**
- * swapin_readahead - swap in pages in hope we need them soon
- * @entry: swap entry of this memory
- * @addr: address to start
- * @vma: user vma this addresses belong to
- *
- * Primitive swap readahead code. We simply read an aligned block of
- * (1 << page_cluster) entries in the swap area. This method is chosen
- * because it doesn't cost us any seek time. We also make sure to queue
- * the 'original' request together with the readahead ones...
- *
- * This has been extended to use the NUMA policies from the mm triggering
- * the readahead.
- *
- * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
- */
-void swapin_readahead(swp_entry_t entry, unsigned long addr,struct vm_area_struct *vma)
-{
- int nr_pages;
- struct page *page;
- unsigned long offset;
- unsigned long end_offset;
-
- /*
- * Get starting offset for readaround, and number of pages to read.
- * Adjust starting address by readbehind (for NUMA interleave case)?
- * No, it's very unlikely that swap layout would follow vma layout,
- * more likely that neighbouring swap pages came from the same node:
- * so use the same "addr" to choose the same node for each swap read.
- */
- nr_pages = valid_swaphandles(entry, &offset);
- for (end_offset = offset + nr_pages; offset < end_offset; offset++) {
- /* Ok, do the async read-ahead now */
- page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
- vma, addr);
- if (!page)
- break;
- page_cache_release(page);
- }
- lru_add_drain(); /* Push any new pages onto the LRU now */
-}
-
/*
* We enter with non-exclusive mmap_sem (to exclude vma changes,
* but allow concurrent faults), and pte mapped but not yet locked.
@@ -2049,8 +2007,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
page = lookup_swap_cache(entry);
if (!page) {
grab_swap_token(); /* Contend for token _before_ read-in */
- swapin_readahead(entry, address, vma);
- page = read_swap_cache_async(entry, vma, address);
+ page = swapin_readahead(entry, vma, address);
if (!page) {
/*
* Back out if somebody else faulted in this pte
diff --git a/mm/shmem.c b/mm/shmem.c
index 88c6685f16b7..3a22a8f79331 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1036,8 +1036,7 @@ static struct page *shmem_swapin(struct shmem_inode_info *info,
pvma.vm_pgoff = idx;
pvma.vm_ops = NULL;
pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
- swapin_readahead(entry, 0, &pvma);
- page = read_swap_cache_async(entry, &pvma, 0);
+ page = swapin_readahead(entry, &pvma, 0);
mpol_free(pvma.vm_policy);
return page;
}
@@ -1067,8 +1066,7 @@ static inline int shmem_parse_mpol(char *value, int *policy,
static inline struct page *
shmem_swapin(struct shmem_inode_info *info,swp_entry_t entry,unsigned long idx)
{
- swapin_readahead(entry, 0, NULL);
- return read_swap_cache_async(entry, NULL, 0);
+ return swapin_readahead(entry, NULL, 0);
}
static inline struct page *
diff --git a/mm/swap_state.c b/mm/swap_state.c
index b52635601dfe..668a80422630 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -10,6 +10,7 @@
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
+#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
@@ -368,3 +369,49 @@ struct page *read_swap_cache_async(swp_entry_t entry,
page_cache_release(new_page);
return found_page;
}
+
+/**
+ * swapin_readahead - swap in pages in hope we need them soon
+ * @entry: swap entry of this memory
+ * @vma: user vma this address belongs to
+ * @addr: target address for mempolicy
+ *
+ * Returns the struct page for entry and addr, after queueing swapin.
+ *
+ * Primitive swap readahead code. We simply read an aligned block of
+ * (1 << page_cluster) entries in the swap area. This method is chosen
+ * because it doesn't cost us any seek time. We also make sure to queue
+ * the 'original' request together with the readahead ones...
+ *
+ * This has been extended to use the NUMA policies from the mm triggering
+ * the readahead.
+ *
+ * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
+ */
+struct page *swapin_readahead(swp_entry_t entry,
+ struct vm_area_struct *vma, unsigned long addr)
+{
+ int nr_pages;
+ struct page *page;
+ unsigned long offset;
+ unsigned long end_offset;
+
+ /*
+ * Get starting offset for readaround, and number of pages to read.
+ * Adjust starting address by readbehind (for NUMA interleave case)?
+ * No, it's very unlikely that swap layout would follow vma layout,
+ * more likely that neighbouring swap pages came from the same node:
+ * so use the same "addr" to choose the same node for each swap read.
+ */
+ nr_pages = valid_swaphandles(entry, &offset);
+ for (end_offset = offset + nr_pages; offset < end_offset; offset++) {
+ /* Ok, do the async read-ahead now */
+ page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
+ vma, addr);
+ if (!page)
+ break;
+ page_cache_release(page);
+ }
+ lru_add_drain(); /* Push any new pages onto the LRU now */
+ return read_swap_cache_async(entry, vma, addr);
+}