From 6a04de6dbe1772d98fddf5099738d6f508e86e21 Mon Sep 17 00:00:00 2001
From: "Wu, Bryan"
Date: Wed, 11 Apr 2007 23:28:47 -0700
Subject: [PATCH] nommu: fix bug ip_conntrack does not work on nommu

num_physpages is not exported out in mm/nommu.c, so the ip_conntrack
module link will fail.

Signed-off-by: Bryan Wu
Acked-By: David Howells
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/nommu.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'mm')

diff --git a/mm/nommu.c b/mm/nommu.c
index cbbc13774819..1f60194d9b9b 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -45,6 +45,7 @@ int heap_stack_gap = 0;
 
 EXPORT_SYMBOL(mem_map);
 EXPORT_SYMBOL(__vm_enough_memory);
+EXPORT_SYMBOL(num_physpages);
 
 /* list of shareable VMAs */
 struct rb_root nommu_vma_tree = RB_ROOT;
--
cgit v1.2.3


From 650a7c974f1b91de9732c0f720e792837f8abfd6 Mon Sep 17 00:00:00 2001
From: David Rientjes
Date: Mon, 23 Apr 2007 21:36:13 -0700
Subject: oom: kill all threads that share mm with killed task

oom_kill_task() calls __oom_kill_task() to OOM kill a selected task.
When finding other threads that share an mm with that task, we need to
kill those individual threads and not the same one.

(Bug introduced by f2a2a7108aa0039ba7a5fe7a0d2ecef2219a7584)

Acked-by: William Irwin
Acked-by: Christoph Lameter
Cc: Nick Piggin
Cc: Andrew Morton
Cc: Andi Kleen
Signed-off-by: David Rientjes
Signed-off-by: Linus Torvalds
---
 mm/oom_kill.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'mm')

diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 2f3916986abf..af981b645a69 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -333,7 +333,7 @@ static int oom_kill_task(struct task_struct *p)
 	 */
 	do_each_thread(g, q) {
 		if (q->mm == mm && q->tgid != p->tgid)
-			force_sig(SIGKILL, p);
+			force_sig(SIGKILL, q);
 	} while_each_thread(g, q);
 
 	return 0;
--
cgit v1.2.3


From 3d124cbba316737af8f3a6959edb95bbd130a4d8 Mon Sep 17 00:00:00 2001
From: Hugh Dickins
Date: Mon, 23 Apr 2007 14:41:02 -0700
Subject: fix OOM killing processes wrongly thought MPOL_BIND

I only have CONFIG_NUMA=y for build testing: surprised when trying a
memhog to see lots of other processes killed with "No available memory
(MPOL_BIND)".  memhog is killed correctly once we initialize nodemask
in constrained_alloc().

Signed-off-by: Hugh Dickins
Acked-by: Christoph Lameter
Acked-by: William Irwin
Acked-by: KAMEZAWA Hiroyuki
Cc:
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/oom_kill.c | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'mm')

diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index af981b645a69..3791edfffeeb 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -176,6 +176,8 @@ static inline int constrained_alloc(struct zonelist *zonelist, gfp_t gfp_mask)
 	struct zone **z;
 	nodemask_t nodes;
 	int node;
+
+	nodes_clear(nodes);
 
 	/* node has memory ? */
 	for_each_online_node(node)
		if (NODE_DATA(node)->node_present_pages)
--
cgit v1.2.3
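An aside on the nodemask fix above: nodes is a stack variable, so its bits
are garbage until cleared. constrained_alloc() only sets bits for nodes that
have memory, then uses the resulting mask to decide whether the allocation
is node-restricted; stray pre-set bits make an ordinary allocation look like
an MPOL_BIND one. A minimal userspace sketch of the same pattern follows --
the nodemask_t here is a toy one-word bitmap, not the kernel type, and
nodes_with_memory() is an illustrative stand-in, not a kernel function:

#include <stdio.h>

/* Toy stand-in for the kernel's nodemask_t: one word of node bits. */
typedef struct { unsigned long bits; } nodemask_t;

static void nodes_clear(nodemask_t *m)        { m->bits = 0; }
static void node_set(int node, nodemask_t *m) { m->bits |= 1UL << node; }

/* Shaped like constrained_alloc(): collect the set of nodes that have
 * memory, so the caller can compare the allocation's zonelist against
 * it. Without the initial clear, whatever happened to be on the stack
 * leaks into that comparison. */
static unsigned long nodes_with_memory(const int *online, int n)
{
	nodemask_t nodes;	/* stack garbage until cleared */
	int i;

	nodes_clear(&nodes);	/* the one-line fix from the patch */
	for (i = 0; i < n; i++)
		node_set(online[i], &nodes);
	return nodes.bits;
}

int main(void)
{
	int online[] = { 0, 1 };

	/* Expect 0x3: only nodes 0 and 1. Drop the nodes_clear() and
	 * the mask may contain extra bits, which is how unconstrained
	 * allocations came to die with "No available memory (MPOL_BIND)". */
	printf("mask = %#lx\n", nodes_with_memory(online, 2));
	return 0;
}
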
From 0e8c7d0fd5b4999675c7d5cd95d0eb7106b756b3 Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Mon, 23 Apr 2007 14:41:09 -0700
Subject: page migration: fix NR_FILE_PAGES accounting

NR_FILE_PAGES must be accounted for depending on the zone that the page
belongs to.  If we replace the page in the radix tree then we may have
to shift the count to another zone.

Suggested-by: Ethan Solomita
Eventually-typed-in-by: Christoph Lameter
Cc: Martin Bligh
Cc:
Signed-off-by: Christoph Lameter
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/migrate.c | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)

(limited to 'mm')

diff --git a/mm/migrate.c b/mm/migrate.c
index 7a66ca25dc8a..a91ca00abebe 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -297,7 +297,7 @@ static int migrate_page_move_mapping(struct address_space *mapping,
 	void **pslot;
 
 	if (!mapping) {
-		/* Anonymous page */
+		/* Anonymous page without mapping */
 		if (page_count(page) != 1)
 			return -EAGAIN;
 		return 0;
@@ -333,6 +333,19 @@ static int migrate_page_move_mapping(struct address_space *mapping,
 	 */
 	__put_page(page);
 
+	/*
+	 * If moved to a different zone then also account
+	 * the page for that zone. Other VM counters will be
+	 * taken care of when we establish references to the
+	 * new page and drop references to the old page.
+	 *
+	 * Note that anonymous pages are accounted for
+	 * via NR_FILE_PAGES and NR_ANON_PAGES if they
+	 * are mapped to swap space.
+	 */
+	__dec_zone_page_state(page, NR_FILE_PAGES);
+	__inc_zone_page_state(newpage, NR_FILE_PAGES);
+
 	write_unlock_irq(&mapping->tree_lock);
 
 	return 0;
--
cgit v1.2.3


From 6c210482ae4a9a5bb9377ad250feaacec3faa3cd Mon Sep 17 00:00:00 2001
From: Martin Schwidefsky
Date: Fri, 27 Apr 2007 16:01:57 +0200
Subject: [S390] split page_test_and_clear_dirty.

The page_test_and_clear_dirty primitive really consists of two
operations, page_test_dirty and page_clear_dirty.  The combination of
the two is not atomic, so it makes more sense to have two separate
operations instead of one.  In addition to the improved readability of
the s390 version of SetPageUptodate, this now avoids the
page_test_dirty operation, an insert-storage-key-extended (iske)
instruction, which is expensive.

Signed-off-by: Martin Schwidefsky
---
 mm/rmap.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

(limited to 'mm')

diff --git a/mm/rmap.c b/mm/rmap.c
index b82146e6dfc9..59da5b734c80 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -498,8 +498,10 @@ int page_mkclean(struct page *page)
 		struct address_space *mapping = page_mapping(page);
 		if (mapping)
 			ret = page_mkclean_file(mapping, page);
-		if (page_test_and_clear_dirty(page))
+		if (page_test_dirty(page)) {
+			page_clear_dirty(page);
 			ret = 1;
+		}
 	}
 
 	return ret;
@@ -605,8 +607,10 @@ void page_remove_rmap(struct page *page, struct vm_area_struct *vma)
 	 * Leaving it set also helps swapoff to reinstate ptes
 	 * faster for those pages still in swapcache.
 	 */
-	if (page_test_and_clear_dirty(page))
+	if (page_test_dirty(page)) {
+		page_clear_dirty(page);
 		set_page_dirty(page);
+	}
 	__dec_zone_page_state(page,
 		PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED);
 }
--
cgit v1.2.3
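The point of the split above, seen outside the kernel: a combined
test-and-clear forces both the read and the write even when a caller needs
only one of them, while split primitives let the cheap half run alone. A
standalone C sketch of the pattern follows -- the storage key is faked and
these helpers are illustrative stand-ins, not the real s390 implementations:

#include <stdbool.h>
#include <stdio.h>

/* Fake per-page storage key. On s390 reading it is an expensive iske
 * instruction, which is exactly what the split lets callers avoid. */
static unsigned char storage_key;
#define KEY_DIRTY 0x02

static bool page_test_dirty(void)  { return storage_key & KEY_DIRTY; }
static void page_clear_dirty(void) { storage_key &= ~KEY_DIRTY; }

/* The old combined primitive: always reads and always writes. */
static bool page_test_and_clear_dirty(void)
{
	bool dirty = page_test_dirty();	/* expensive read */
	page_clear_dirty();		/* write, needed or not */
	return dirty;
}

int main(void)
{
	storage_key = KEY_DIRTY;
	printf("combined: was dirty = %d\n", page_test_and_clear_dirty());

	storage_key = KEY_DIRTY;
	/* New split style, as in the rmap.c hunks above. The pair is
	 * not atomic -- the old name only suggested it was -- and a
	 * caller like SetPageUptodate that only needs the clear can
	 * now skip the expensive test entirely. */
	if (page_test_dirty()) {
		page_clear_dirty();
		printf("split: page was dirty, now clean\n");
	}
	return 0;
}
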
From 07db59bd6b0f279c31044cba6787344f63be87ea Mon Sep 17 00:00:00 2001
From: Linus Torvalds
Date: Fri, 27 Apr 2007 09:10:47 -0700
Subject: Change default dirty-writeback limits

Do this really early in the 2.6.22-rc series, so that we'll get
feedback.  And don't change by half measures.  Just cut the default
dirty limit to a quarter of what it was, and see if anybody even
notices.

Signed-off-by: Linus Torvalds
---
 mm/page-writeback.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'mm')

diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index f469e3cd08e8..a794945fd194 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -67,12 +67,12 @@ static inline long sync_writeback_pages(void)
 /*
  * Start background writeback (via pdflush) at this percentage
  */
-int dirty_background_ratio = 10;
+int dirty_background_ratio = 5;
 
 /*
  * The generator of dirty data starts writeback at this percentage
  */
-int vm_dirty_ratio = 40;
+int vm_dirty_ratio = 10;
 
 /*
  * The interval between `kupdate'-style writebacks, in jiffies
--
cgit v1.2.3
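For a sense of what those two percentages control: the writeback code turns
them into page-count thresholds against dirtyable memory, roughly as in the
sketch below. This is a simplification of the arithmetic in
get_dirty_limits() in mm/page-writeback.c -- the real code also adjusts for
highmem and for tasks flagged as heavy dirtiers -- and both knobs remain
tunable at runtime through /proc/sys/vm/dirty_background_ratio and
/proc/sys/vm/dirty_ratio.

#include <stdio.h>

/* The two knobs changed above, at their new defaults. */
static int dirty_background_ratio = 5;	/* pdflush starts writing back */
static int vm_dirty_ratio = 10;		/* dirtiers get throttled */

int main(void)
{
	/* Say 1 GiB of dirtyable memory in 4 KiB pages. */
	unsigned long available_memory = (1UL << 30) / 4096;
	unsigned long background, dirty;

	/* Simplified form of the thresholds get_dirty_limits()
	 * derives from the ratios. */
	background = (dirty_background_ratio * available_memory) / 100;
	dirty = (vm_dirty_ratio * available_memory) / 100;

	printf("background writeback above %lu pages\n", background);
	printf("synchronous throttling above %lu pages\n", dirty);
	return 0;
}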