From 304dbdb7a4fbb7f40a6ad5c5836fdd456c233c63 Mon Sep 17 00:00:00 2001
From: Lee Schermerhorn
Date: Sat, 22 Apr 2006 02:35:48 -0700
Subject: [PATCH] add migratepage address space op to shmem

Basic problem: pages of a shared memory segment can only be migrated
once.

In 2.6.16 through 2.6.17-rc1, shared memory mappings do not have a
migratepage address space op.  Therefore, migrate_pages() falls back to
default processing.  In this path, it will try to pageout() dirty
pages.  Once a shared memory page has been migrated it becomes dirty,
so migrate_pages() will try to page it out.  However, because the page
count is 3 [cache + current + pte], pageout() will return PAGE_KEEP
because is_page_cache_freeable() returns false.  This will abort all
subsequent migrations.

This patch adds a migratepage address space op to shared memory
segments to avoid taking the default path.  We use the migrate_page()
function because it knows how to migrate dirty pages.  This allows
shared memory segment pages to migrate on request, subject to other
conditions such as the number of ptes referencing the page
[page_mapcount(page)].

I think this is safe.  If we're migrating a shared memory page, then we
found the page via a page table, so it must be in memory.

This can be verified with memtoy and the shmem-mbind-test script, both
available at:

	http://free.linux.hp.com/~lts/Tools/

Signed-off-by: Lee Schermerhorn
Acked-by: Christoph Lameter
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/shmem.c | 3 +++
 1 file changed, 3 insertions(+)
(limited to 'mm')

diff --git a/mm/shmem.c b/mm/shmem.c
index 37eaf42ed2c6..4c5e68e4e9ae 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -46,6 +46,8 @@
 #include <linux/mempolicy.h>
 #include <linux/namei.h>
 #include <linux/ctype.h>
+#include <linux/migrate.h>
+
 #include <asm/uaccess.h>
 #include <asm/div64.h>
 #include <asm/pgtable.h>
@@ -2173,6 +2175,7 @@ static struct address_space_operations shmem_aops = {
 	.prepare_write	= shmem_prepare_write,
 	.commit_write	= simple_commit_write,
 #endif
+	.migratepage	= migrate_page,
 };
 
 static struct file_operations shmem_file_operations = {
--
cgit v1.2.3
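For illustration only (this demo is not part of the patch): the behaviour
fixed above can be exercised from userspace by repeatedly migrating a
shared memory segment with mbind(2) and MPOL_MF_MOVE.  A minimal sketch,
assuming libnuma's <numaif.h> and a machine with at least two memory
nodes; the file name, segment size, and node numbers are arbitrary.
Build with "gcc shmem-migrate.c -lnuma".

	/* shmem-migrate.c: bounce a SysV shm segment between two NUMA nodes */
	#include <stdio.h>
	#include <string.h>
	#include <sys/ipc.h>
	#include <sys/shm.h>
	#include <numaif.h>

	#define SEG_SIZE	(4UL << 20)	/* 4 MB */

	int main(void)
	{
		unsigned long nodemask;
		char *addr;
		int i, shmid;

		shmid = shmget(IPC_PRIVATE, SEG_SIZE, IPC_CREAT | 0600);
		if (shmid < 0) {
			perror("shmget");
			return 1;
		}
		addr = shmat(shmid, NULL, 0);
		if (addr == (void *)-1) {
			perror("shmat");
			return 1;
		}
		memset(addr, 0xaa, SEG_SIZE);	/* fault pages in; now dirty */

		/*
		 * Alternate the binding between node 0 and node 1, asking
		 * the kernel to move the already-resident pages each time.
		 * Before this patch, only the first move could succeed.
		 */
		for (i = 0; i < 4; i++) {
			nodemask = 1UL << (i & 1);
			if (mbind(addr, SEG_SIZE, MPOL_BIND, &nodemask,
				  8 * sizeof(nodemask), MPOL_MF_MOVE) != 0)
				perror("mbind");
		}

		shmdt(addr);
		shmctl(shmid, IPC_RMID, NULL);
		return 0;
	}

On an unpatched kernel the first pass moves the pages and every later
pass quietly fails to move the now-dirty pages; with shmem_aops wired to
migrate_page(), each pass succeeds.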
From 83d722f7e198b034699b1500d98729beff930efd Mon Sep 17 00:00:00 2001
From: Chandra Seetharaman
Date: Mon, 24 Apr 2006 19:35:21 -0700
Subject: [PATCH] Remove __devinit and __cpuinit from notifier_call definitions

A few of the notifier_chain_register() callers use __init in the
definition of notifier_call.  This is incorrect: the notifiers are not
unregistered when initialization completes, so the callback definitions
must remain available after the init sections have been discarded.

This patch fixes all such usages so that the notifier_call functions
are _not_ placed in an __init section.

Signed-off-by: Chandra Seetharaman
Signed-off-by: Linus Torvalds
---
 mm/page_alloc.c | 2 +-
 mm/slab.c       | 2 +-
 mm/vmscan.c     | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)
(limited to 'mm')

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 123c60586740..ea77c999047e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1962,7 +1962,7 @@ static inline void free_zone_pagesets(int cpu)
 	}
 }
 
-static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
+static int pageset_cpuup_callback(struct notifier_block *nfb,
 		unsigned long action,
 		void *hcpu)
 {
diff --git a/mm/slab.c b/mm/slab.c
index e6ef9bd52335..af5c5237e11a 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1036,7 +1036,7 @@ static inline void free_alien_cache(struct array_cache **ac_ptr)
 
 #endif
 
-static int __devinit cpuup_callback(struct notifier_block *nfb,
+static int cpuup_callback(struct notifier_block *nfb,
 				    unsigned long action, void *hcpu)
 {
 	long cpu = (long)hcpu;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index acdf001d6941..4649a63a8cb6 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1328,7 +1328,7 @@ repeat:
    not required for correctness.  So if the last cpu in a node goes
    away, we get changed to run anywhere: as the first one comes back,
    restore their cpu bindings. */
-static int __devinit cpu_callback(struct notifier_block *nfb,
+static int cpu_callback(struct notifier_block *nfb,
 				  unsigned long action, void *hcpu)
 {
 	pg_data_t *pgdat;
--
cgit v1.2.3
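For context, a schematic sketch of the pattern being fixed, written
against the 2.6.16-era CPU hotplug notifier API; the names are
hypothetical and this is not code from the patch.  The callback is
registered once at boot and never unregistered, so it can be invoked
long after the init sections have been freed; only the registration
function may legitimately live in an init section.

	#include <linux/kernel.h>
	#include <linux/init.h>
	#include <linux/notifier.h>
	#include <linux/cpu.h>

	/*
	 * WRONG (pre-patch): marking this "__cpuinit" or "__devinit" can
	 * place it in a section that is discarded after boot, while the
	 * notifier chain still holds a pointer to it.
	 * RIGHT (post-patch): no init-section annotation on the callback.
	 */
	static int my_cpu_callback(struct notifier_block *nfb,
				   unsigned long action, void *hcpu)
	{
		long cpu = (long)hcpu;

		switch (action) {
		case CPU_UP_PREPARE:
			printk(KERN_DEBUG "setting up cpu %ld\n", cpu);
			break;
		case CPU_DEAD:
			printk(KERN_DEBUG "tearing down cpu %ld\n", cpu);
			break;
		}
		return NOTIFY_OK;
	}

	static struct notifier_block my_cpu_notifier = {
		.notifier_call = my_cpu_callback,
	};

	/*
	 * __init is fine on the *registration* function: it runs exactly
	 * once, and the notifier_block and callback it leaves behind
	 * persist for the life of the system.
	 */
	static int __init my_subsys_init(void)
	{
		register_cpu_notifier(&my_cpu_notifier);  /* never unregistered */
		return 0;
	}
	__initcall(my_subsys_init);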