author    Minchan Kim <minchan@kernel.org>    2012-10-08 16:33:48 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2012-10-09 16:23:00 +0900
commit    e46a28790e594c0876d1a84270926abf75460f61 (patch)
tree      febfaa6c20dab69490308190729f1d898e4df930
parent    7a71932d5676b7410ab64d149bad8bde6b0d8632 (diff)
CMA: migrate mlocked pages
Presently CMA cannot migrate mlocked pages, so it ends up failing to
allocate contiguous memory space.

This patch makes mlocked pages be migrated out.  Of course, it can affect
realtime processes, but in the CMA use case, a contiguous memory allocation
failing is far worse than the access latency to an mlocked page being
variable while CMA is running.  If someone wants to make the system
realtime, he shouldn't enable CMA, because stalls can still happen at
random times.

[akpm@linux-foundation.org: tweak comment text, per Mel]
Signed-off-by: Minchan Kim <minchan@kernel.org>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: Michal Nazarewicz <mina86@mina86.com>
Cc: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  include/linux/mmzone.h | 2 ++
-rw-r--r--  mm/compaction.c        | 8 ++++++--
-rw-r--r--  mm/internal.h          | 2 +-
-rw-r--r--  mm/page_alloc.c        | 2 +-
-rw-r--r--  mm/vmscan.c            | 4 ++--
5 files changed, 12 insertions(+), 6 deletions(-)
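
[Editor's note: the gate this patch relaxes lives in __isolate_lru_page().
The following is a minimal userspace sketch of the same flag logic, runnable
outside the kernel; the flag values match include/linux/mmzone.h, while
struct page, PageUnevictable() and the return codes are simplified
stand-ins, not the kernel's definitions.]

/* Editor's sketch, not part of the patch: models the ISOLATE_UNEVICTABLE
 * gate in userspace.  Flag values as in include/linux/mmzone.h; struct
 * page and PageUnevictable() are simplified stand-ins. */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned int isolate_mode_t;          /* kernel: __bitwise__ typedef */

#define ISOLATE_CLEAN         ((isolate_mode_t)0x1)
#define ISOLATE_UNMAPPED      ((isolate_mode_t)0x2)
#define ISOLATE_ASYNC_MIGRATE ((isolate_mode_t)0x4)
#define ISOLATE_UNEVICTABLE   ((isolate_mode_t)0x8)  /* added by this patch */

struct page { bool unevictable; };            /* toy stand-in */

static bool PageUnevictable(const struct page *page)
{
        return page->unevictable;
}

/* Mirrors the mm/vmscan.c hunk below: unevictable (e.g. mlocked) pages
 * are skipped unless the caller set ISOLATE_UNEVICTABLE in the mode. */
static int can_isolate(const struct page *page, isolate_mode_t mode)
{
        if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE))
                return -1;    /* skip: page stays on its LRU list */
        return 0;             /* ok: page may be isolated for migration */
}

int main(void)
{
        struct page mlocked = { .unevictable = true };

        /* Compaction leaves the flag clear, so mlocked pages stay put. */
        printf("compaction: %d\n", can_isolate(&mlocked, ISOLATE_ASYNC_MIGRATE));
        /* CMA sets the flag, so mlocked pages can be migrated out. */
        printf("CMA:        %d\n", can_isolate(&mlocked, ISOLATE_UNEVICTABLE));
        return 0;
}

Compaction prints -1 (the page is left alone) while CMA prints 0 (the page
may be isolated and migrated), which is exactly the behavioral change made
in mm/vmscan.c below.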
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index a5578871d033..50aaca81f63d 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -218,6 +218,8 @@ struct lruvec {
 #define ISOLATE_UNMAPPED	((__force isolate_mode_t)0x2)
 /* Isolate for asynchronous migration */
 #define ISOLATE_ASYNC_MIGRATE	((__force isolate_mode_t)0x4)
+/* Isolate unevictable pages */
+#define ISOLATE_UNEVICTABLE	((__force isolate_mode_t)0x8)
 
 /* LRU Isolation modes. */
 typedef unsigned __bitwise__ isolate_mode_t;
diff --git a/mm/compaction.c b/mm/compaction.c
index d8187f9cabbf..2c4ce17651d8 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -461,6 +461,7 @@ static bool too_many_isolated(struct zone *zone)
  * @cc:		Compaction control structure.
  * @low_pfn:	The first PFN of the range.
  * @end_pfn:	The one-past-the-last PFN of the range.
+ * @unevictable: true if unevictable pages may be isolated
  *
  * Isolate all pages that can be migrated from the range specified by
  * [low_pfn, end_pfn).  Returns zero if there is a fatal signal
@@ -476,7 +477,7 @@ static bool too_many_isolated(struct zone *zone)
  */
 unsigned long
 isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
-		unsigned long low_pfn, unsigned long end_pfn)
+		unsigned long low_pfn, unsigned long end_pfn, bool unevictable)
 {
 	unsigned long last_pageblock_nr = 0, pageblock_nr;
 	unsigned long nr_scanned = 0, nr_isolated = 0;
@@ -602,6 +603,9 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 		if (!cc->sync)
 			mode |= ISOLATE_ASYNC_MIGRATE;
 
+		if (unevictable)
+			mode |= ISOLATE_UNEVICTABLE;
+
 		lruvec = mem_cgroup_page_lruvec(page, zone);
 
 		/* Try isolate the page */
@@ -807,7 +811,7 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
 	}
 
 	/* Perform the isolation */
-	low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn);
+	low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn, false);
 	if (!low_pfn || cc->contended)
 		return ISOLATE_ABORT;
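
[Editor's note: per the kernel-doc above, the scanner returns the PFN where
it stopped, or zero on a fatal signal, and the new bool is folded into the
per-page isolation mode next to the cc->sync test.  A hedged sketch of just
that mode assembly follows; isolate_mode_for_scan() is a hypothetical
helper, since in the patch the two tests appear inline.]

/* Editor's sketch: how the scanner builds its isolation mode. */
#include <assert.h>
#include <stdbool.h>

#define ISOLATE_ASYNC_MIGRATE 0x4   /* values from include/linux/mmzone.h */
#define ISOLATE_UNEVICTABLE   0x8

/* Hypothetical helper; the patch performs these two tests inline. */
static unsigned int isolate_mode_for_scan(bool sync, bool unevictable)
{
        unsigned int mode = 0;

        if (!sync)                  /* async migration, as before */
                mode |= ISOLATE_ASYNC_MIGRATE;
        if (unevictable)            /* new: only CMA passes true */
                mode |= ISOLATE_UNEVICTABLE;
        return mode;
}

int main(void)
{
        /* Compaction's call site passes false: mode unchanged. */
        assert(isolate_mode_for_scan(true, false) == 0);
        /* CMA's call site passes true: unevictable pages become eligible. */
        assert(isolate_mode_for_scan(true, true) == ISOLATE_UNEVICTABLE);
        return 0;
}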
diff --git a/mm/internal.h b/mm/internal.h
index 4dc93e2fe69e..f5f295fe11e1 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -138,7 +138,7 @@ isolate_freepages_range(struct compact_control *cc,
 			unsigned long start_pfn, unsigned long end_pfn);
 unsigned long
 isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
-	unsigned long low_pfn, unsigned long end_pfn);
+	unsigned long low_pfn, unsigned long end_pfn, bool unevictable);
 
 #endif
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5485f0ef4ec3..fd86c47de86f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5690,7 +5690,7 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
 		if (list_empty(&cc->migratepages)) {
 			cc->nr_migratepages = 0;
 			pfn = isolate_migratepages_range(cc->zone, cc,
-							 pfn, end);
+							 pfn, end, true);
 			if (!pfn) {
 				ret = -EINTR;
 				break;
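
[Editor's note: __alloc_contig_migrate_range() is the only caller that
passes true.  Below is a simplified, hedged sketch of the surrounding
retry loop; list_empty_demo(), isolate_range_demo() and
alloc_contig_migrate_demo() are stand-ins for the kernel code around this
hunk, not real kernel APIs.]

/* Editor's sketch of the CMA retry loop shape after this patch. */
#include <errno.h>
#include <stdbool.h>

/* Stand-in for list_empty(&cc->migratepages). */
static bool list_empty_demo(void) { return true; }

/* Stand-in scanner: returns the next pfn, or 0 on a fatal signal. */
static unsigned long isolate_range_demo(unsigned long pfn, unsigned long end,
                                        bool unevictable)
{
        (void)unevictable;
        return end;             /* pretend the whole range was isolated */
}

/* Simplified shape of __alloc_contig_migrate_range() after this patch. */
static int alloc_contig_migrate_demo(unsigned long pfn, unsigned long end)
{
        int ret = 0;

        while (pfn < end) {
                if (list_empty_demo()) {
                        /* 'true': CMA may also isolate mlocked pages now. */
                        pfn = isolate_range_demo(pfn, end, true);
                        if (!pfn) {
                                ret = -EINTR;   /* fatal signal: give up */
                                break;
                        }
                }
                /* ... migrate the isolated pages, then loop ... */
        }
        return ret;
}

int main(void)
{
        return alloc_contig_migrate_demo(0, 512) ? 1 : 0;
}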
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 8b627309dd44..2624edcfb420 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1009,8 +1009,8 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode)
 	if (!PageLRU(page))
 		return ret;
 
-	/* Do not give back unevictable pages for compaction */
-	if (PageUnevictable(page))
+	/* Compaction should not handle unevictable pages but CMA can do so */
+	if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE))
 		return ret;
 
 	ret = -EBUSY;