author     Liangcai Fan <liangcaifan19@gmail.com>          2021-11-05 13:41:36 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2021-11-06 13:30:39 -0700
commit     bd3400ea173fb611cdf2030d03620185ff6c0b0e (patch)
tree       06641ec051ba94a768253dcc282bb0531b665217 /mm
parent     8531fc6f52f5fc201f43d8c36c2606e25748b1c4 (diff)
mm: khugepaged: recalculate min_free_kbytes after stopping khugepaged
When transparent huge pages are initialized, min_free_kbytes is recalculated
to the higher value that khugepaged expects.  So when transparent huge pages
get disabled, min_free_kbytes should be recalculated as well, instead of being
left at the higher value khugepaged set.

Link: https://lkml.kernel.org/r/1633937809-16558-1-git-send-email-liangcaifan19@gmail.com
Signed-off-by: Liangcai Fan <liangcaifan19@gmail.com>
Signed-off-by: Chunyan Zhang <zhang.lyra@gmail.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
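For orientation, the sketch below models the control flow this patch introduces.
It is a simplified userspace stand-in, not the real kernel code: the function
names set_recommended_min_free_kbytes(), calculate_min_free_kbytes(),
setup_per_zone_wmarks() and start_stop_khugepaged() come from the patch, while
khugepaged_on, the numeric values and main() are illustrative assumptions, and
the real heuristics, zones, locking and error handling are omitted.

/* Toy model of the patched flow; values and stubs are illustrative only. */
#include <stdbool.h>
#include <stdio.h>

static bool khugepaged_on;	/* stand-in for khugepaged_enabled() */
static long min_free_kbytes;

/* page_alloc.c side: the default heuristic, now split out of
 * init_per_zone_wmark_min() so it can be reused (toy constant here
 * instead of sqrt(lowmem_kbytes * 16)). */
static void calculate_min_free_kbytes(void)
{
	min_free_kbytes = 3484;		/* pretend default value */
}

static void setup_per_zone_wmarks(void)
{
	printf("watermarks set from min_free_kbytes=%ld\n", min_free_kbytes);
}

/* khugepaged.c side, following the patched structure. */
static void set_recommended_min_free_kbytes(void)
{
	if (!khugepaged_on) {
		/* THP disabled: fall back to the default instead of
		 * keeping the higher khugepaged value. */
		calculate_min_free_kbytes();
		goto update_wmarks;
	}

	min_free_kbytes = 11240;	/* pretend khugepaged recommendation */

update_wmarks:
	setup_per_zone_wmarks();
}

/* Recalculates on both the start and the stop path, as in the patch. */
static void start_stop_khugepaged(bool enable)
{
	khugepaged_on = enable;
	set_recommended_min_free_kbytes();
}

int main(void)
{
	start_stop_khugepaged(true);	/* raised for THP */
	start_stop_khugepaged(false);	/* restored to the default */
	return 0;
}

Running the toy program prints the higher value while "khugepaged" is enabled
and the default again after it is stopped, which is the behaviour the patch
restores in the kernel.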
Diffstat (limited to 'mm')
-rw-r--r--  mm/khugepaged.c  10
-rw-r--r--  mm/page_alloc.c   7
2 files changed, 14 insertions, 3 deletions
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 8a8b3aa92937..629961966854 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -2299,6 +2299,11 @@ static void set_recommended_min_free_kbytes(void)
 	int nr_zones = 0;
 	unsigned long recommended_min;
 
+	if (!khugepaged_enabled()) {
+		calculate_min_free_kbytes();
+		goto update_wmarks;
+	}
+
 	for_each_populated_zone(zone) {
 		/*
 		 * We don't need to worry about fragmentation of
@@ -2334,6 +2339,8 @@ static void set_recommended_min_free_kbytes(void)
 		min_free_kbytes = recommended_min;
 	}
+
+update_wmarks:
 	setup_per_zone_wmarks();
 }
@@ -2355,12 +2362,11 @@ int start_stop_khugepaged(void)
 		if (!list_empty(&khugepaged_scan.mm_head))
 			wake_up_interruptible(&khugepaged_wait);
-
-		set_recommended_min_free_kbytes();
 	} else if (khugepaged_thread) {
 		kthread_stop(khugepaged_thread);
 		khugepaged_thread = NULL;
 	}
+	set_recommended_min_free_kbytes();
 fail:
 	mutex_unlock(&khugepaged_mutex);
 	return err;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a7035467bf6d..09a0f1c5d5d2 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -8469,7 +8469,7 @@ void setup_per_zone_wmarks(void)
  * 8192MB:	11584k
  * 16384MB:	16384k
  */
-int __meminit init_per_zone_wmark_min(void)
+void calculate_min_free_kbytes(void)
 {
 	unsigned long lowmem_kbytes;
 	int new_min_free_kbytes;
@@ -8483,6 +8483,11 @@ int __meminit init_per_zone_wmark_min(void)
 		pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
 				new_min_free_kbytes, user_min_free_kbytes);
 	}
+}
+
+int __meminit init_per_zone_wmark_min(void)
+{
+	calculate_min_free_kbytes();
 	setup_per_zone_wmarks();
 	refresh_zone_stat_thresholds();
 	setup_per_zone_lowmem_reserve();