author	Mel Gorman <mgorman@techsingularity.net>	2019-03-05 15:45:21 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-03-05 21:07:17 -0800
commit	cb810ad294d3c3a454e51b12fbb483bbb7096b98 (patch)
tree	d6551444a5e5bc0a6e069c4af0d8bf3b64518cc8 /mm/compaction.c
parent	8854c55f54bcc104e3adae42abe16948286ec75c (diff)
mm, compaction: rework compact_should_abort as compact_check_resched
With incremental changes, compact_should_abort no longer makes any
documented sense. Rename to compact_check_resched and update the
associated comments. There is no benefit other than reducing redundant
code and making the intent slightly clearer. It could potentially be
merged with earlier patches but it just makes the review slightly
harder.

Link: http://lkml.kernel.org/r/20190118175136.31341-17-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Dan Carpenter <dan.carpenter@oracle.com>
Cc: David Rientjes <rientjes@google.com>
Cc: YueHaibing <yuehaibing@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
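To make the behavioural change concrete, here is a minimal userspace
sketch of the helper this patch introduces. The need_resched() and
cond_resched() stubs, the trimmed-down struct compact_control, and
main() are stand-ins for illustration only; compact_check_resched()
itself mirrors the hunk in the diff below.

/* Userspace sketch; need_resched()/cond_resched() are stand-in stubs. */
#include <stdbool.h>
#include <stdio.h>

enum migrate_mode { MIGRATE_ASYNC, MIGRATE_SYNC_LIGHT, MIGRATE_SYNC };

/* Trimmed-down stand-in for the kernel's struct compact_control. */
struct compact_control {
	enum migrate_mode mode;
	bool contended;
};

static bool need_resched(void) { return true; } /* stub: pretend a resched is pending */
static void cond_resched(void) { }              /* stub: would yield the CPU here */

/*
 * Unlike the old compact_should_abort(), this returns nothing: it only
 * records contention for async compaction and yields, so callers invoke
 * it unconditionally instead of breaking out of their scan loops.
 */
static inline void compact_check_resched(struct compact_control *cc)
{
	if (need_resched()) {
		if (cc->mode == MIGRATE_ASYNC)
			cc->contended = true;
		cond_resched();
	}
}

int main(void)
{
	struct compact_control cc = { .mode = MIGRATE_ASYNC, .contended = false };

	compact_check_resched(&cc);
	printf("contended: %s\n", cc.contended ? "true" : "false");
	return 0;
}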
Diffstat (limited to 'mm/compaction.c')
-rw-r--r--	mm/compaction.c	61
1 file changed, 23 insertions(+), 38 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index 78ae182aaf34..68e3c214bcbd 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -405,6 +405,21 @@ static bool compact_lock_irqsave(spinlock_t *lock, unsigned long *flags,
}
/*
+ * Aside from avoiding lock contention, compaction also periodically checks
+ * need_resched() and records async compaction as contended if necessary.
+ */
+static inline void compact_check_resched(struct compact_control *cc)
+{
+ /* async compaction aborts if contended */
+ if (need_resched()) {
+ if (cc->mode == MIGRATE_ASYNC)
+ cc->contended = true;
+
+ cond_resched();
+ }
+}
+
+/*
* Compaction requires the taking of some coarse locks that are potentially
* very heavily contended. The lock should be periodically unlocked to avoid
* having disabled IRQs for a long time, even when there is nobody waiting on
@@ -432,33 +447,7 @@ static bool compact_unlock_should_abort(spinlock_t *lock,
return true;
}
- if (need_resched()) {
- if (cc->mode == MIGRATE_ASYNC)
- cc->contended = true;
- cond_resched();
- }
-
- return false;
-}
-
-/*
- * Aside from avoiding lock contention, compaction also periodically checks
- * need_resched() and either schedules in sync compaction or aborts async
- * compaction. This is similar to what compact_unlock_should_abort() does, but
- * is used where no lock is concerned.
- *
- * Returns false when no scheduling was needed, or sync compaction scheduled.
- * Returns true when async compaction should abort.
- */
-static inline bool compact_should_abort(struct compact_control *cc)
-{
- /* async compaction aborts if contended */
- if (need_resched()) {
- if (cc->mode == MIGRATE_ASYNC)
- cc->contended = true;
-
- cond_resched();
- }
+ compact_check_resched(cc);
return false;
}
@@ -747,8 +736,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
return 0;
}
- if (compact_should_abort(cc))
- return 0;
+ compact_check_resched(cc);
if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) {
skip_on_failure = true;
@@ -1379,12 +1367,10 @@ static void isolate_freepages(struct compact_control *cc)
isolate_start_pfn = block_start_pfn) {
/*
* This can iterate a massively long zone without finding any
- * suitable migration targets, so periodically check if we need
- * to schedule, or even abort async compaction.
+ * suitable migration targets, so periodically check resched.
*/
- if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
- && compact_should_abort(cc))
- break;
+ if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)))
+ compact_check_resched(cc);
page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
zone);
@@ -1677,11 +1663,10 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
/*
* This can potentially iterate a massively long zone with
* many pageblocks unsuitable, so periodically check if we
- * need to schedule, or even abort async compaction.
+ * need to schedule.
*/
- if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
- && compact_should_abort(cc))
- break;
+ if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)))
+ compact_check_resched(cc);
page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
zone);
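For reference, a sketch of the caller-side pattern the two scanners
converge on after this patch, reusing the stubs and types from the
sketch above. The constants carry stand-in values and the loop body is
illustrative, not the real scanner; the point is that the throttled
check can no longer abort the scan, only record contention and
reschedule.

#define SWAP_CLUSTER_MAX	32UL	/* stand-in value for illustration */
#define pageblock_nr_pages	512UL	/* stand-in value for illustration */

/* Illustrative loop in the shape of isolate_freepages()/isolate_migratepages(). */
static void scan_zone(struct compact_control *cc,
		      unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		/*
		 * Throttle the resched check to once every
		 * SWAP_CLUSTER_MAX * pageblock_nr_pages PFNs. Before this
		 * patch, the equivalent compact_should_abort() call could
		 * break out of the loop for async compaction.
		 */
		if (!(pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)))
			compact_check_resched(cc);

		/* ... process the pageblock starting at pfn ... */
	}
}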