author		Linus Torvalds <torvalds@linux-foundation.org>		2015-02-11 18:23:28 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>		2015-02-11 18:23:28 -0800
commit		59d53737a8640482995fea13c6e2c0fd016115d6 (patch)
tree		3423eb92315865d76cb8d488513bfef6ab9251d0 /kernel
parent		d3f180ea1a44aecba1b0dab2a253428e77f906bf (diff)
parent		8138a67a5557ffea3a21dfd6f037842d4e748513 (diff)
Merge branch 'akpm' (patches from Andrew)
Merge second set of updates from Andrew Morton:
 "More of MM"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (83 commits)
  mm/nommu.c: fix arithmetic overflow in __vm_enough_memory()
  mm/mmap.c: fix arithmetic overflow in __vm_enough_memory()
  vmstat: Reduce time interval to stat update on idle cpu
  mm/page_owner.c: remove unnecessary stack_trace field
  Documentation/filesystems/proc.txt: describe /proc/<pid>/map_files
  mm: incorporate read-only pages into transparent huge pages
  vmstat: do not use deferrable delayed work for vmstat_update
  mm: more aggressive page stealing for UNMOVABLE allocations
  mm: always steal split buddies in fallback allocations
  mm: when stealing freepages, also take pages created by splitting buddy page
  mincore: apply page table walker on do_mincore()
  mm: /proc/pid/clear_refs: avoid split_huge_page()
  mm: pagewalk: fix misbehavior of walk_page_range for vma(VM_PFNMAP)
  mempolicy: apply page table walker on queue_pages_range()
  arch/powerpc/mm/subpage-prot.c: use walk->vma and walk_page_vma()
  memcg: cleanup preparation for page table walk
  numa_maps: remove numa_maps->vma
  numa_maps: fix typo in gather_hugetbl_stats
  pagemap: use walk->vma instead of calling find_vma()
  clear_refs: remove clear_refs_private->vma and introduce clear_refs_test_walk()
  ...
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/exit.c		3
-rw-r--r--	kernel/fork.c		11
-rw-r--r--	kernel/power/process.c	75
3 files changed, 35 insertions, 54 deletions
diff --git a/kernel/exit.c b/kernel/exit.c
index 6806c55475ee..feff10bbb307 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -435,7 +435,8 @@ static void exit_mm(struct task_struct *tsk)
task_unlock(tsk);
mm_update_next_owner(mm);
mmput(mm);
- clear_thread_flag(TIF_MEMDIE);
+ if (test_thread_flag(TIF_MEMDIE))
+ unmark_oom_victim();
}
static struct task_struct *find_alive_thread(struct task_struct *p)
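Note: the exit.c hunk above replaces the bare clear_thread_flag(TIF_MEMDIE) with unmark_oom_victim(), so an exiting OOM victim updates the OOM killer's bookkeeping rather than only dropping its own flag. A conceptual sketch of the mark/unmark pairing follows; the real helpers live in mm/oom_kill.c and coordinate with oom_killer_disable(), so the counter and waitqueue names here (oom_victims, oom_victims_wait) are illustrative assumptions, not the exact upstream code.

#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/wait.h>

/* Conceptual sketch only; names and bodies are illustrative. */
static atomic_t oom_victims = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(oom_victims_wait);

void mark_oom_victim(struct task_struct *tsk)
{
	/* count each task once, even if it is selected repeatedly */
	if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE))
		return;
	atomic_inc(&oom_victims);
}

void unmark_oom_victim(void)
{
	if (!test_and_clear_thread_flag(TIF_MEMDIE))
		return;
	/* last victim gone: let a pending oom_killer_disable() proceed */
	if (atomic_dec_and_test(&oom_victims))
		wake_up_all(&oom_victims_wait);
}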
diff --git a/kernel/fork.c b/kernel/fork.c
index b379d9abddc7..66e19c251581 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -555,6 +555,9 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
INIT_LIST_HEAD(&mm->mmlist);
mm->core_state = NULL;
atomic_long_set(&mm->nr_ptes, 0);
+#ifndef __PAGETABLE_PMD_FOLDED
+ atomic_long_set(&mm->nr_pmds, 0);
+#endif
mm->map_count = 0;
mm->locked_vm = 0;
mm->pinned_vm = 0;
@@ -603,6 +606,14 @@ static void check_mm(struct mm_struct *mm)
printk(KERN_ALERT "BUG: Bad rss-counter state "
"mm:%p idx:%d val:%ld\n", mm, i, x);
}
+
+ if (atomic_long_read(&mm->nr_ptes))
+ pr_alert("BUG: non-zero nr_ptes on freeing mm: %ld\n",
+ atomic_long_read(&mm->nr_ptes));
+ if (mm_nr_pmds(mm))
+ pr_alert("BUG: non-zero nr_pmds on freeing mm: %ld\n",
+ mm_nr_pmds(mm));
+
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
VM_BUG_ON_MM(mm->pmd_huge_pte, mm);
#endif
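Note: the fork.c hunks pair a new per-mm PMD page counter (nr_pmds, initialised next to nr_ptes) with a leak check in check_mm() when the mm is freed. The check reads the counter through mm_nr_pmds(), which collapses to zero on architectures where the PMD level is folded. A sketch of how such an accessor fits together is below; the real helpers live in include/linux/mm.h, so treat the exact bodies as an approximation.

#ifndef __PAGETABLE_PMD_FOLDED
static inline unsigned long mm_nr_pmds(struct mm_struct *mm)
{
	return atomic_long_read(&mm->nr_pmds);
}

static inline void mm_inc_nr_pmds(struct mm_struct *mm)
{
	atomic_long_inc(&mm->nr_pmds);
}

static inline void mm_dec_nr_pmds(struct mm_struct *mm)
{
	atomic_long_dec(&mm->nr_pmds);
}
#else
/* PMD folded into PUD: nothing to account, accessor reads as zero */
static inline unsigned long mm_nr_pmds(struct mm_struct *mm)
{
	return 0;
}

static inline void mm_inc_nr_pmds(struct mm_struct *mm) {}
static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}
#endif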
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 5a6ec8678b9a..564f786df470 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -84,8 +84,8 @@ static int try_to_freeze_tasks(bool user_only)
elapsed_msecs = elapsed_msecs64;
if (todo) {
- printk("\n");
- printk(KERN_ERR "Freezing of tasks %s after %d.%03d seconds "
+ pr_cont("\n");
+ pr_err("Freezing of tasks %s after %d.%03d seconds "
"(%d tasks refusing to freeze, wq_busy=%d):\n",
wakeup ? "aborted" : "failed",
elapsed_msecs / 1000, elapsed_msecs % 1000,
@@ -101,37 +101,13 @@ static int try_to_freeze_tasks(bool user_only)
read_unlock(&tasklist_lock);
}
} else {
- printk("(elapsed %d.%03d seconds) ", elapsed_msecs / 1000,
+ pr_cont("(elapsed %d.%03d seconds) ", elapsed_msecs / 1000,
elapsed_msecs % 1000);
}
return todo ? -EBUSY : 0;
}
-static bool __check_frozen_processes(void)
-{
- struct task_struct *g, *p;
-
- for_each_process_thread(g, p)
- if (p != current && !freezer_should_skip(p) && !frozen(p))
- return false;
-
- return true;
-}
-
-/*
- * Returns true if all freezable tasks (except for current) are frozen already
- */
-static bool check_frozen_processes(void)
-{
- bool ret;
-
- read_lock(&tasklist_lock);
- ret = __check_frozen_processes();
- read_unlock(&tasklist_lock);
- return ret;
-}
-
/**
* freeze_processes - Signal user space processes to enter the refrigerator.
* The current thread will not be frozen. The same process that calls
@@ -142,7 +118,6 @@ static bool check_frozen_processes(void)
int freeze_processes(void)
{
int error;
- int oom_kills_saved;
error = __usermodehelper_disable(UMH_FREEZING);
if (error)
@@ -155,31 +130,24 @@ int freeze_processes(void)
atomic_inc(&system_freezing_cnt);
pm_wakeup_clear();
- printk("Freezing user space processes ... ");
+ pr_info("Freezing user space processes ... ");
pm_freezing = true;
- oom_kills_saved = oom_kills_count();
error = try_to_freeze_tasks(true);
if (!error) {
__usermodehelper_set_disable_depth(UMH_DISABLED);
- oom_killer_disable();
-
- /*
- * There might have been an OOM kill while we were
- * freezing tasks and the killed task might be still
- * on the way out so we have to double check for race.
- */
- if (oom_kills_count() != oom_kills_saved &&
- !check_frozen_processes()) {
- __usermodehelper_set_disable_depth(UMH_ENABLED);
- printk("OOM in progress.");
- error = -EBUSY;
- } else {
- printk("done.");
- }
+ pr_cont("done.");
}
- printk("\n");
+ pr_cont("\n");
BUG_ON(in_atomic());
+ /*
+ * Now that the whole userspace is frozen we need to disable
+ * the OOM killer to disallow any further interference with
+ * killable tasks.
+ */
+ if (!error && !oom_killer_disable())
+ error = -EBUSY;
+
if (error)
thaw_processes();
return error;
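Note: with this hunk the racy oom_kills_count()/check_frozen_processes() dance is gone. freeze_processes() first freezes userspace, then asks oom_killer_disable() to shut the OOM killer off, and treats a false return as -EBUSY so the suspend attempt is aborted and tasks are thawed. Roughly, the disable call now blocks until every task already marked as an OOM victim has released its mm; the sketch below conveys that contract using the same illustrative bookkeeping as the exit.c note above, not the actual mm/oom_kill.c implementation.

/* Sketch of the synchronous disable contract; illustrative only. */
static bool oom_killer_disabled;

bool oom_killer_disable(void)
{
	/* the caller itself being an OOM victim would deadlock the wait */
	if (test_thread_flag(TIF_MEMDIE))
		return false;

	oom_killer_disabled = true;
	/* wait for already-selected victims to finish exit_mm() */
	wait_event(oom_victims_wait, !atomic_read(&oom_victims));
	return true;
}

void oom_killer_enable(void)
{
	oom_killer_disabled = false;
}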
@@ -197,13 +165,14 @@ int freeze_kernel_threads(void)
{
int error;
- printk("Freezing remaining freezable tasks ... ");
+ pr_info("Freezing remaining freezable tasks ... ");
+
pm_nosig_freezing = true;
error = try_to_freeze_tasks(false);
if (!error)
- printk("done.");
+ pr_cont("done.");
- printk("\n");
+ pr_cont("\n");
BUG_ON(in_atomic());
if (error)
@@ -224,7 +193,7 @@ void thaw_processes(void)
oom_killer_enable();
- printk("Restarting tasks ... ");
+ pr_info("Restarting tasks ... ");
__usermodehelper_set_disable_depth(UMH_FREEZING);
thaw_workqueues();
@@ -243,7 +212,7 @@ void thaw_processes(void)
usermodehelper_enable();
schedule();
- printk("done.\n");
+ pr_cont("done.\n");
trace_suspend_resume(TPS("thaw_processes"), 0, false);
}
@@ -252,7 +221,7 @@ void thaw_kernel_threads(void)
struct task_struct *g, *p;
pm_nosig_freezing = false;
- printk("Restarting kernel threads ... ");
+ pr_info("Restarting kernel threads ... ");
thaw_workqueues();
@@ -264,5 +233,5 @@ void thaw_kernel_threads(void)
read_unlock(&tasklist_lock);
schedule();
- printk("done.\n");
+ pr_cont("done.\n");
}
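Note: the remaining process.c hunks are a mechanical printk() to pr_info()/pr_err()/pr_cont() conversion. The opening message picks the log level, and pr_cont() appends the "done."/elapsed-time suffix to the same console line instead of relying on an implicit continuation. The pattern, in isolation:

pr_info("Restarting tasks ... ");	/* starts the line at KERN_INFO */
/* ... work that may log nothing in between ... */
pr_cont("done.\n");			/* appends to the same line */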