author    Andrea Arcangeli <aarcange@redhat.com>    2011-01-13 15:47:10 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>    2011-01-13 17:32:46 -0800
commit    878aee7d6b5504e01b9caffce080e792b6b8d090 (patch)
tree      c4a01a78885c25b6b3b1e0c74af7cb83c98a07c5    /mm/huge_memory.c
parent    8ee53820edfd1f3b6554c593f337148dd3d7fc91 (diff)
thp: freeze khugepaged and ksmd
It's unclear why scheduler-friendly kernel threads can't simply be taken off the CPU by the scheduler itself, but it's safer to stop them during suspend because they can trigger memory allocation; if kswapd freezes itself to avoid generating I/O, these threads have to freeze too.

Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
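For reference, a minimal sketch of the freezable-kthread pattern that the diff below wires into khugepaged: opt in with set_freezable(), call try_to_freeze() between work batches, and sleep with wait_event_freezable() instead of wait_event_interruptible() so the thread can be parked during suspend/hibernation. The thread function example_thread(), the example_wait wait queue, and the thread name are hypothetical and not part of this patch.

#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(example_wait);

static int example_thread(void *unused)
{
	set_freezable();	/* opt in to the system freezer, as khugepaged() now does */

	while (!kthread_should_stop()) {
		/* ... one batch of work that may allocate memory ... */

		try_to_freeze();	/* enter the refrigerator if a freeze is in progress */
		if (kthread_should_stop())
			break;

		/* sleep until woken; remains freezable while waiting, unlike wait_event_interruptible() */
		wait_event_freezable(example_wait, kthread_should_stop());
	}
	return 0;
}

/* usage: kthread_run(example_thread, NULL, "example_freezable"); */

In the patch itself the wait condition is khugepaged_wait_event(), which also accounts for pending scan work, but the freeze/stop structure is the same.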
Diffstat (limited to 'mm/huge_memory.c')
-rw-r--r--    mm/huge_memory.c    14
1 file changed, 12 insertions(+), 2 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 39d7df40c067..45b6d53bcfbc 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -15,6 +15,7 @@
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
+#include <linux/freezer.h>
#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"
@@ -2085,6 +2086,9 @@ static void khugepaged_do_scan(struct page **hpage)
break;
#endif
+ if (unlikely(kthread_should_stop() || freezing(current)))
+ break;
+
spin_lock(&khugepaged_mm_lock);
if (!khugepaged_scan.mm_slot)
pass_through_head++;
@@ -2147,6 +2151,9 @@ static void khugepaged_loop(void)
if (hpage)
put_page(hpage);
#endif
+ try_to_freeze();
+ if (unlikely(kthread_should_stop()))
+ break;
if (khugepaged_has_work()) {
DEFINE_WAIT(wait);
if (!khugepaged_scan_sleep_millisecs)
@@ -2157,8 +2164,8 @@ static void khugepaged_loop(void)
khugepaged_scan_sleep_millisecs));
remove_wait_queue(&khugepaged_wait, &wait);
} else if (khugepaged_enabled())
- wait_event_interruptible(khugepaged_wait,
- khugepaged_wait_event());
+ wait_event_freezable(khugepaged_wait,
+ khugepaged_wait_event());
}
}
@@ -2166,6 +2173,7 @@ static int khugepaged(void *none)
{
struct mm_slot *mm_slot;
+ set_freezable();
set_user_nice(current, 19);
/* serialize with start_khugepaged() */
@@ -2180,6 +2188,8 @@ static int khugepaged(void *none)
mutex_lock(&khugepaged_mutex);
if (!khugepaged_enabled())
break;
+ if (unlikely(kthread_should_stop()))
+ break;
}
spin_lock(&khugepaged_mm_lock);