| field | value | date |
|---|---|---|
| author | Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com> | 2012-10-08 16:29:41 -0700 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-10-09 16:22:25 +0900 |
| commit | 911891afe1c3104adf0f802189909868239ebbfd (patch) | |
| tree | 8583f33ded3ef14a85fe7cd2244c605ece673f37 /mm/huge_memory.c | |
| parent | 637e3a27ec2c84f7ecd083fa6943da2f19eb5e9f (diff) | |
thp: move khugepaged_mutex out of khugepaged
Currently, khugepaged_mutex is used in a convoluted, hard-to-follow way; in
fact it exists only to serialize start_khugepaged() and khugepaged(), for
these reasons:
- khugepaged_thread is shared between them
- the thp disable path (echo never > transparent_hugepage/enabled) is
  non-blocking, so khugepaged_thread has to be protected in order to observe
  a stable running state
Both can be avoided by:
- using the lock only to serialize thread creation and cancellation
- making the thp disable path block until the thread has exited
With that, khugepaged_thread is fully controlled by start_khugepaged(), and
khugepaged() itself no longer needs the lock.
Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
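To see the shape of the change in isolation, here is a minimal sketch, written as a toy kernel module, of the scheme the patch moves to: the mutex is taken only by the code that creates and stops the thread, the thread itself never takes it, and disabling blocks in kthread_stop() until the worker has exited. All demo_* identifiers and the module wrapper are invented for illustration; only the kthread, mutex and waitqueue APIs are the ones the patch itself relies on.

```c
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/wait.h>

static DEFINE_MUTEX(demo_mutex);		/* stands in for khugepaged_mutex */
static struct task_struct *demo_thread;		/* stands in for khugepaged_thread */
static DECLARE_WAIT_QUEUE_HEAD(demo_wait);	/* stands in for khugepaged_wait */

static int demo_worker(void *unused)
{
	/* The worker never touches demo_mutex; it only watches for a stop request. */
	while (!kthread_should_stop())
		wait_event_interruptible(demo_wait, kthread_should_stop());
	return 0;
}

/* Called with demo_mutex held, mirroring the reworked start_khugepaged(). */
static int demo_set_enabled(bool enabled)
{
	int err = 0;

	if (enabled) {
		if (!demo_thread)
			demo_thread = kthread_run(demo_worker, NULL, "demo_worker");
		if (IS_ERR(demo_thread)) {
			err = PTR_ERR(demo_thread);
			demo_thread = NULL;
		}
	} else if (demo_thread) {
		wake_up_interruptible(&demo_wait);	/* nudge a sleeping worker */
		kthread_stop(demo_thread);		/* blocks until the worker has exited */
		demo_thread = NULL;
	}
	return err;
}

static int __init demo_init(void)
{
	int err;

	/* The lock serializes only thread creation/cancellation, as in enabled_store(). */
	mutex_lock(&demo_mutex);
	err = demo_set_enabled(true);
	mutex_unlock(&demo_mutex);
	return err;
}

static void __exit demo_exit(void)
{
	mutex_lock(&demo_mutex);
	demo_set_enabled(false);
	mutex_unlock(&demo_mutex);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
```

The property doing the work here is that kthread_stop() both requests the exit and waits for it, which is what lets the disable path become blocking and removes the worker's need to re-check an enabled flag under the mutex.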
Diffstat (limited to 'mm/huge_memory.c')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | mm/huge_memory.c | 36 |

1 file changed, 13 insertions(+), 23 deletions(-)
```diff
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 9833d8ecf38f..0931b2b19c52 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -139,9 +139,6 @@ static int start_khugepaged(void)
 {
 	int err = 0;
 	if (khugepaged_enabled()) {
-		int wakeup;
-
-		mutex_lock(&khugepaged_mutex);
 		if (!khugepaged_thread)
 			khugepaged_thread = kthread_run(khugepaged, NULL,
 							"khugepaged");
@@ -151,15 +148,17 @@ static int start_khugepaged(void)
 			err = PTR_ERR(khugepaged_thread);
 			khugepaged_thread = NULL;
 		}
-		wakeup = !list_empty(&khugepaged_scan.mm_head);
-		mutex_unlock(&khugepaged_mutex);
-		if (wakeup)
+
+		if (!list_empty(&khugepaged_scan.mm_head))
 			wake_up_interruptible(&khugepaged_wait);
 
 		set_recommended_min_free_kbytes();
-	} else
+	} else if (khugepaged_thread) {
 		/* wakeup to exit */
 		wake_up_interruptible(&khugepaged_wait);
+		kthread_stop(khugepaged_thread);
+		khugepaged_thread = NULL;
+	}
 
 	return err;
 }
@@ -221,7 +220,12 @@ static ssize_t enabled_store(struct kobject *kobj,
 			 TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
 
 	if (ret > 0) {
-		int err = start_khugepaged();
+		int err;
+
+		mutex_lock(&khugepaged_mutex);
+		err = start_khugepaged();
+		mutex_unlock(&khugepaged_mutex);
+
 		if (err)
 			ret = err;
 	}
@@ -2329,20 +2333,10 @@ static int khugepaged(void *none)
 	set_freezable();
 	set_user_nice(current, 19);
 
-	/* serialize with start_khugepaged() */
-	mutex_lock(&khugepaged_mutex);
-
-	for (;;) {
-		mutex_unlock(&khugepaged_mutex);
+	while (!kthread_should_stop()) {
 		VM_BUG_ON(khugepaged_thread != current);
 		khugepaged_loop();
 		VM_BUG_ON(khugepaged_thread != current);
-
-		mutex_lock(&khugepaged_mutex);
-		if (!khugepaged_enabled())
-			break;
-		if (unlikely(kthread_should_stop()))
-			break;
 	}
 
 	spin_lock(&khugepaged_mm_lock);
@@ -2351,10 +2345,6 @@ static int khugepaged(void *none)
 	if (mm_slot)
 		collect_mm_slot(mm_slot);
 	spin_unlock(&khugepaged_mm_lock);
-
-	khugepaged_thread = NULL;
-	mutex_unlock(&khugepaged_mutex);
-
 	return 0;
 }
```
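The last two hunks turn the worker loop from a mutex-guarded for (;;) into a plain while (!kthread_should_stop()) loop. As a stand-alone illustration of that worker-side idiom, here is a small sketch with invented scan_* names and a placeholder unit of work; it only works because the controlling side, like the reworked start_khugepaged() above, wakes the sleeper and then waits in kthread_stop().

```c
#include <linux/kthread.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(scan_wait);	/* invented; plays the role of khugepaged_wait */
static bool scan_work_pending;			/* invented; "is there anything to scan?" */

static void scan_one_pass(void)
{
	/* placeholder for the real work (khugepaged_loop() in the patch) */
}

static int scan_thread_fn(void *unused)
{
	while (!kthread_should_stop()) {
		if (scan_work_pending) {
			scan_work_pending = false;
			scan_one_pass();
		}
		/* sleep until new work arrives or kthread_stop() is called */
		wait_event_interruptible(scan_wait,
				scan_work_pending || kthread_should_stop());
	}
	return 0;	/* this value is returned to the kthread_stop() caller */
}
```

Checking kthread_should_stop() inside the wait condition is what makes the handoff race-free: kthread_stop() sets the stop flag before waking the task, so a worker sleeping on the waitqueue re-evaluates the condition and exits without needing any shared lock.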