author	Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>	2013-01-04 15:35:08 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-01-04 16:11:46 -0800
commit	a458431e176ddb27e8ef8b98c2a681b217337393 (patch)
tree	466ec91a25ebbe30870d12486071bb08a8c7cd5a /mm
parent	358e419f826b552c9d795bcd3820597217692461 (diff)
mm: fix zone_watermark_ok_safe() accounting of isolated pages
Commit 702d1a6e0766 ("memory-hotplug: fix kswapd looping forever problem") added an isolated-pageblocks counter (nr_pageblock_isolate in struct zone) and used it to adjust the free pages counter in zone_watermark_ok_safe(), preventing kswapd from looping forever.

Later, commit 2139cbe627b8 ("cma: fix counting of isolated pages") fixed the accounting of isolated pages in the global free pages counter. That made the earlier zone_watermark_ok_safe() fix unnecessary and potentially harmful, because isolated pages may now be accounted twice, making the free pages counter incorrect.

This patch removes the special isolated-pageblocks counter altogether, which fixes the free pages check in zone_watermark_ok_safe().

Reported-by: Tomasz Stanislawski <t.stanislaws@samsung.com>
Signed-off-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Aaditya Kumar <aaditya.kumar.30@gmail.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Michal Nazarewicz <mina86@mina86.com>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
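To illustrate the double accounting described above, here is a minimal, self-contained userspace sketch (hypothetical page counts, not kernel code): once NR_FREE_PAGES already excludes free pages sitting in isolated pageblocks, subtracting them a second time in zone_watermark_ok_safe() can make a zone look below its watermark when it is not.

/* Hypothetical illustration of the double accounting; not kernel code. */
#include <stdio.h>

int main(void)
{
	long watermark         = 900;  /* zone watermark to satisfy             */
	long non_isolated_free = 1000; /* truly usable free pages               */
	long isolated_free     = 200;  /* free pages in MIGRATE_ISOLATE blocks  */

	/*
	 * After commit 2139cbe627b8, NR_FREE_PAGES already excludes the
	 * isolated free pages.
	 */
	long nr_free_pages = non_isolated_free;

	/* The old zone_watermark_ok_safe() subtracted them a second time. */
	long after_double_subtraction = nr_free_pages - isolated_free;

	printf("correct check:     %s\n",
	       nr_free_pages >= watermark ? "above watermark" : "below watermark");
	printf("double-subtracted: %s\n",
	       after_double_subtraction >= watermark ? "above watermark"
						     : "below watermark");
	return 0;
}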
Diffstat (limited to 'mm')
-rw-r--r--	mm/page_alloc.c	27
-rw-r--r--	mm/page_isolation.c	26
2 files changed, 2 insertions(+), 51 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4ba5e37127fc..bc6cc0e913bd 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -221,11 +221,6 @@ EXPORT_SYMBOL(nr_online_nodes);
int page_group_by_mobility_disabled __read_mostly;
-/*
- * NOTE:
- * Don't use set_pageblock_migratetype(page, MIGRATE_ISOLATE) directly.
- * Instead, use {un}set_pageblock_isolate.
- */
void set_pageblock_migratetype(struct page *page, int migratetype)
{
@@ -1655,20 +1650,6 @@ static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
return true;
}
-#ifdef CONFIG_MEMORY_ISOLATION
-static inline unsigned long nr_zone_isolate_freepages(struct zone *zone)
-{
- if (unlikely(zone->nr_pageblock_isolate))
- return zone->nr_pageblock_isolate * pageblock_nr_pages;
- return 0;
-}
-#else
-static inline unsigned long nr_zone_isolate_freepages(struct zone *zone)
-{
- return 0;
-}
-#endif
-
bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
int classzone_idx, int alloc_flags)
{
@@ -1684,14 +1665,6 @@ bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
- /*
- * If the zone has MIGRATE_ISOLATE type free pages, we should consider
- * it. nr_zone_isolate_freepages is never accurate so kswapd might not
- * sleep although it could do so. But this is more desirable for memory
- * hotplug than sleeping which can cause a livelock in the direct
- * reclaim path.
- */
- free_pages -= nr_zone_isolate_freepages(z);
return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
free_pages);
}
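For reference, a sketch of how zone_watermark_ok_safe() reads once the subtraction above is removed, reconstructed from the hunk's context lines. The opening read of free_pages is not shown in the hunk; it is assumed here to come from zone_page_state(z, NR_FREE_PAGES).

bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
			    int classzone_idx, int alloc_flags)
{
	/* Assumed initial read; not part of the hunk above. */
	long free_pages = zone_page_state(z, NR_FREE_PAGES);

	/* Fall back to the drift-safe snapshot when close to the mark. */
	if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
		free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);

	return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
				   free_pages);
}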
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 9d2264ea4606..383bdbb98b04 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -8,28 +8,6 @@
#include <linux/memory.h>
#include "internal.h"
-/* called while holding zone->lock */
-static void set_pageblock_isolate(struct page *page)
-{
- if (get_pageblock_migratetype(page) == MIGRATE_ISOLATE)
- return;
-
- set_pageblock_migratetype(page, MIGRATE_ISOLATE);
- page_zone(page)->nr_pageblock_isolate++;
-}
-
-/* called while holding zone->lock */
-static void restore_pageblock_isolate(struct page *page, int migratetype)
-{
- struct zone *zone = page_zone(page);
- if (WARN_ON(get_pageblock_migratetype(page) != MIGRATE_ISOLATE))
- return;
-
- BUG_ON(zone->nr_pageblock_isolate <= 0);
- set_pageblock_migratetype(page, migratetype);
- zone->nr_pageblock_isolate--;
-}
-
int set_migratetype_isolate(struct page *page, bool skip_hwpoisoned_pages)
{
struct zone *zone;
@@ -80,7 +58,7 @@ out:
unsigned long nr_pages;
int migratetype = get_pageblock_migratetype(page);
- set_pageblock_isolate(page);
+ set_pageblock_migratetype(page, MIGRATE_ISOLATE);
nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE);
__mod_zone_freepage_state(zone, -nr_pages, migratetype);
@@ -103,7 +81,7 @@ void unset_migratetype_isolate(struct page *page, unsigned migratetype)
goto out;
nr_pages = move_freepages_block(zone, page, migratetype);
__mod_zone_freepage_state(zone, nr_pages, migratetype);
- restore_pageblock_isolate(page, migratetype);
+ set_pageblock_migratetype(page, migratetype);
out:
spin_unlock_irqrestore(&zone->lock, flags);
}