author    Steven Whitehouse <swhiteho@redhat.com>  2006-02-27 13:27:34 -0500
committer Steven Whitehouse <swhiteho@redhat.com>  2006-02-27 13:27:34 -0500
commit    f3b270a47882b958e9e3c5bd86894e3a7072899a (patch)
tree      de3bdfd5d67e8310257b93ac3d8d703599b9d929 /mm
parent    116ad29d9839610d2811a1962cac7f3f2a9f9295 (diff)
parent    b9a33cebac70d6f67a769ce8d4078fee2b254ada (diff)
Merge branch 'master'
Diffstat (limited to 'mm')
-rw-r--r--  mm/mempolicy.c | 22 ++++++++++++++++++----
-rw-r--r--  mm/vmscan.c    | 10 +++++++---
2 files changed, 25 insertions(+), 7 deletions(-)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 880831bd3003..67af4cea1e23 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -552,7 +552,7 @@ static void migrate_page_add(struct page *page, struct list_head *pagelist,
 	 */
 	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
 		if (isolate_lru_page(page))
-			list_add(&page->lru, pagelist);
+			list_add_tail(&page->lru, pagelist);
 	}
 }
 
@@ -569,6 +569,7 @@ static int migrate_pages_to(struct list_head *pagelist,
 	LIST_HEAD(moved);
 	LIST_HEAD(failed);
 	int err = 0;
+	unsigned long offset = 0;
 	int nr_pages;
 	struct page *page;
 	struct list_head *p;
@@ -576,8 +577,21 @@ static int migrate_pages_to(struct list_head *pagelist,
 redo:
 	nr_pages = 0;
 	list_for_each(p, pagelist) {
-		if (vma)
-			page = alloc_page_vma(GFP_HIGHUSER, vma, vma->vm_start);
+		if (vma) {
+			/*
+			 * The address passed to alloc_page_vma is used to
+			 * generate the proper interleave behavior. We fake
+			 * the address here by an increasing offset in order
+			 * to get the proper distribution of pages.
+			 *
+			 * No decision has been made as to which page
+			 * a certain old page is moved to so we cannot
+			 * specify the correct address.
+			 */
+			page = alloc_page_vma(GFP_HIGHUSER, vma,
+					offset + vma->vm_start);
+			offset += PAGE_SIZE;
+		}
 		else
 			page = alloc_pages_node(dest, GFP_HIGHUSER, 0);
 
@@ -585,7 +599,7 @@ redo:
 			err = -ENOMEM;
 			goto out;
 		}
-		list_add(&page->lru, &newlist);
+		list_add_tail(&page->lru, &newlist);
 		nr_pages++;
 		if (nr_pages > MIGRATE_CHUNK_SIZE)
 			break;
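
Both list_add() to list_add_tail() conversions above matter for the same reason: list_add() prepends, so pages would come back off the list in the reverse of the order they were isolated, while the faked offset only ever increases. Appending keeps the old and new lists in the same order, so the interleave distribution the new comment describes actually lines up. Below is a small standalone sketch, not kernel code, of how an MPOL_INTERLEAVE policy turns an address into a node (modelled loosely on interleave_nid()/offset_il_node() in mm/mempolicy.c; the node count and addresses are made up for illustration):

/*
 * Toy model of MPOL_INTERLEAVE address -> node selection.  The page
 * offset within the VMA is reduced modulo the number of allowed nodes,
 * so an address that advances by PAGE_SIZE per allocation walks the
 * nodes round-robin -- which is what "offset + vma->vm_start" above
 * relies on.  All values here are illustrative, not the kernel's.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

static int interleave_node(unsigned long vm_start, unsigned long vm_pgoff,
			   unsigned long addr, int nr_nodes)
{
	unsigned long off = vm_pgoff + ((addr - vm_start) >> PAGE_SHIFT);

	return (int)(off % nr_nodes);
}

int main(void)
{
	unsigned long vm_start = 0x08048000UL;	/* pretend VMA base */
	unsigned long offset;

	/* Same shape as the loop above: offset grows by PAGE_SIZE. */
	for (offset = 0; offset < 8 * PAGE_SIZE; offset += PAGE_SIZE)
		printf("page %2lu -> node %d\n", offset >> PAGE_SHIFT,
		       interleave_node(vm_start, 0, vm_start + offset, 4));
	return 0;
}

Against four nodes this prints pages 0..7 mapping to nodes 0 1 2 3 0 1 2 3; if the pages came off the list in reverse order while offset still climbed forward, the two sequences would fall out of step.
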
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 1838c15ca4fd..b0af7593d01e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1908,7 +1908,12 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 	sc.swap_cluster_max = SWAP_CLUSTER_MAX;
 
 	cond_resched();
-	p->flags |= PF_MEMALLOC;
+	/*
+	 * We need to be able to allocate from the reserves for RECLAIM_SWAP
+	 * and we also need to be able to write out pages for RECLAIM_WRITE
+	 * and RECLAIM_SWAP.
+	 */
+	p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
 	reclaim_state.reclaimed_slab = 0;
 	p->reclaim_state = &reclaim_state;
 
@@ -1932,11 +1937,10 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 		 * a long time.
 		 */
 		shrink_slab(sc.nr_scanned, gfp_mask, order);
-		sc.nr_reclaimed = 1;	/* Avoid getting the off node timeout */
 	}
 
 	p->reclaim_state = NULL;
-	current->flags &= ~PF_MEMALLOC;
+	current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
 
 	if (sc.nr_reclaimed == 0)
 		zone->last_unsuccessful_zone_reclaim = jiffies;
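
In the vmscan.c hunks, PF_MEMALLOC lets zone_reclaim() allocate from the emergency reserves and, per the new comment, PF_SWAPWRITE is what allows it to write pages out for RECLAIM_WRITE and RECLAIM_SWAP; the exit path must therefore clear both bits together. A minimal userspace sketch of that set-and-clear-together flag pattern (the PF_* bit values here are illustrative stand-ins, not the kernel's constants from include/linux/sched.h):

#include <assert.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's PF_* task flag bits. */
#define PF_MEMALLOC	0x0800	/* may dip into emergency reserves */
#define PF_SWAPWRITE	0x1000	/* may write pages to swap */

static unsigned int task_flags;	/* stands in for current->flags */

static void reclaim_enter(void)
{
	/* Grant both capabilities for the duration of reclaim. */
	task_flags |= PF_MEMALLOC | PF_SWAPWRITE;
}

static void reclaim_exit(void)
{
	/* Drop exactly the bits set on entry, in a single step. */
	task_flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
}

int main(void)
{
	reclaim_enter();
	assert(task_flags & PF_MEMALLOC);
	assert(task_flags & PF_SWAPWRITE);

	reclaim_exit();
	assert(task_flags == 0);
	printf("flags balanced across enter/exit\n");
	return 0;
}

The unconditional clear assumes neither bit was already set on entry; code that cannot assume that would save the previous flags and restore them on exit instead.
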