Diffstat (limited to 'kernel')
-rw-r--r--   kernel/Kconfig.preempt          3
-rw-r--r--   kernel/auditsc.c                1
-rw-r--r--   kernel/capability.c             4
-rw-r--r--   kernel/cpuset.c                43
-rw-r--r--   kernel/exit.c                 116
-rw-r--r--   kernel/fork.c                  18
-rw-r--r--   kernel/futex.c                  3
-rw-r--r--   kernel/irq/chip.c               3
-rw-r--r--   kernel/irq/manage.c            31
-rw-r--r--   kernel/kexec.c                113
-rw-r--r--   kernel/ksysfs.c                10
-rw-r--r--   kernel/module.c               130
-rw-r--r--   kernel/nsproxy.c                3
-rw-r--r--   kernel/params.c                17
-rw-r--r--   kernel/posix-timers.c           3
-rw-r--r--   kernel/printk.c                52
-rw-r--r--   kernel/profile.c                2
-rw-r--r--   kernel/rcupdate.c               1
-rw-r--r--   kernel/rcutorture.c            10
-rw-r--r--   kernel/rtmutex-debug.c          7
-rw-r--r--   kernel/sched.c                 26
-rw-r--r--   kernel/signal.c                24
-rw-r--r--   kernel/softlockup.c            54
-rw-r--r--   kernel/sys_ni.c                 4
-rw-r--r--   kernel/sysctl.c                44
-rw-r--r--   kernel/taskstats.c              1
-rw-r--r--   kernel/time.c                   1
-rw-r--r--   kernel/time/tick-broadcast.c   17
-rw-r--r--   kernel/time/tick-sched.c        2
-rw-r--r--   kernel/time/timekeeping.c       5
-rw-r--r--   kernel/user.c                  12
31 files changed, 500 insertions, 260 deletions
diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
index 6b066632e40c..c64ce9c14207 100644
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -63,6 +63,3 @@ config PREEMPT_BKL
Say Y here if you are building a kernel for a desktop system.
Say N if you are unsure.
-config PREEMPT_NOTIFIERS
- bool
-
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 0ae703c157ba..938e60a61882 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -45,7 +45,6 @@
#include <linux/init.h>
#include <asm/types.h>
#include <asm/atomic.h>
-#include <asm/types.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/mm.h>
diff --git a/kernel/capability.c b/kernel/capability.c
index c8d3c7762034..4e350a36ed6a 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -17,9 +17,6 @@
unsigned securebits = SECUREBITS_DEFAULT; /* systemwide security settings */
kernel_cap_t cap_bset = CAP_INIT_EFF_SET;
-EXPORT_SYMBOL(securebits);
-EXPORT_SYMBOL(cap_bset);
-
/*
* This lock protects task->cap_* for all tasks including current.
* Locking rule: acquire this prior to tasklist_lock.
@@ -244,7 +241,6 @@ int __capable(struct task_struct *t, int cap)
}
return 0;
}
-EXPORT_SYMBOL(__capable);
int capable(int cap)
{
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 0864f4097930..2eb2e50db0d6 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2506,41 +2506,20 @@ int cpuset_mem_spread_node(void)
EXPORT_SYMBOL_GPL(cpuset_mem_spread_node);
/**
- * cpuset_excl_nodes_overlap - Do we overlap @p's mem_exclusive ancestors?
- * @p: pointer to task_struct of some other task.
- *
- * Description: Return true if the nearest mem_exclusive ancestor
- * cpusets of tasks @p and current overlap. Used by oom killer to
- * determine if task @p's memory usage might impact the memory
- * available to the current task.
- *
- * Call while holding callback_mutex.
+ * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
+ * @tsk1: pointer to task_struct of some task.
+ * @tsk2: pointer to task_struct of some other task.
+ *
+ * Description: Return true if @tsk1's mems_allowed intersects the
+ * mems_allowed of @tsk2. Used by the OOM killer to determine if
+ * one task's memory usage might impact the memory available
+ * to the other.
**/
-int cpuset_excl_nodes_overlap(const struct task_struct *p)
+int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
+ const struct task_struct *tsk2)
{
- const struct cpuset *cs1, *cs2; /* my and p's cpuset ancestors */
- int overlap = 1; /* do cpusets overlap? */
-
- task_lock(current);
- if (current->flags & PF_EXITING) {
- task_unlock(current);
- goto done;
- }
- cs1 = nearest_exclusive_ancestor(current->cpuset);
- task_unlock(current);
-
- task_lock((struct task_struct *)p);
- if (p->flags & PF_EXITING) {
- task_unlock((struct task_struct *)p);
- goto done;
- }
- cs2 = nearest_exclusive_ancestor(p->cpuset);
- task_unlock((struct task_struct *)p);
-
- overlap = nodes_intersects(cs1->mems_allowed, cs2->mems_allowed);
-done:
- return overlap;
+ return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed);
}
/*
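
The replacement helper drops all of the old locking and ancestor-walking: each task's own mems_allowed nodemask already reflects its cpuset, so a single nodes_intersects() on the two tasks suffices. A minimal sketch of an OOM-killer-style caller (the function name and surrounding policy are illustrative assumptions, not part of this patch):

/* Hypothetical caller: decide whether killing @victim could free
 * memory usable by current. No task_lock() on either task is needed
 * any more -- we only read the two tasks' own mems_allowed masks. */
static int victim_shares_memory_nodes(const struct task_struct *victim)
{
	return cpuset_mems_allowed_intersects(current, victim);
}
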
diff --git a/kernel/exit.c b/kernel/exit.c
index 7f7959de4a87..2c704c86edb3 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -44,7 +44,6 @@
#include <linux/resource.h>
#include <linux/blkdev.h>
#include <linux/task_io_accounting_ops.h>
-#include <linux/freezer.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
@@ -93,10 +92,9 @@ static void __exit_signal(struct task_struct *tsk)
* If there is any task waiting for the group exit
* then notify it:
*/
- if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
+ if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count)
wake_up_process(sig->group_exit_task);
- sig->group_exit_task = NULL;
- }
+
if (tsk == sig->curr_target)
sig->curr_target = next_thread(tsk);
/*
@@ -593,17 +591,6 @@ static void exit_mm(struct task_struct * tsk)
mmput(mm);
}
-static inline void
-choose_new_parent(struct task_struct *p, struct task_struct *reaper)
-{
- /*
- * Make sure we're not reparenting to ourselves and that
- * the parent is not a zombie.
- */
- BUG_ON(p == reaper || reaper->exit_state);
- p->real_parent = reaper;
-}
-
static void
reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
{
@@ -711,7 +698,7 @@ forget_original_parent(struct task_struct *father, struct list_head *to_release)
if (father == p->real_parent) {
/* reparent with a reaper: the real father is us */
- choose_new_parent(p, reaper);
+ p->real_parent = reaper;
reparent_thread(p, father, 0);
} else {
/* reparent ptraced task to its real parent */
@@ -732,7 +719,7 @@ forget_original_parent(struct task_struct *father, struct list_head *to_release)
}
list_for_each_safe(_p, _n, &father->ptrace_children) {
p = list_entry(_p, struct task_struct, ptrace_list);
- choose_new_parent(p, reaper);
+ p->real_parent = reaper;
reparent_thread(p, father, 1);
}
}
@@ -759,13 +746,11 @@ static void exit_notify(struct task_struct *tsk)
* Now we'll wake all the threads in the group just to make
* sure someone gets all the pending signals.
*/
- read_lock(&tasklist_lock);
spin_lock_irq(&tsk->sighand->siglock);
for (t = next_thread(tsk); t != tsk; t = next_thread(t))
if (!signal_pending(t) && !(t->flags & PF_EXITING))
recalc_sigpending_and_wake(t);
spin_unlock_irq(&tsk->sighand->siglock);
- read_unlock(&tasklist_lock);
}
write_lock_irq(&tasklist_lock);
@@ -793,9 +778,8 @@ static void exit_notify(struct task_struct *tsk)
* and we were the only connection outside, so our pgrp
* is about to become orphaned.
*/
-
t = tsk->real_parent;
-
+
pgrp = task_pgrp(tsk);
if ((task_pgrp(t) != pgrp) &&
(task_session(t) == task_session(tsk)) &&
@@ -842,6 +826,11 @@ static void exit_notify(struct task_struct *tsk)
state = EXIT_DEAD;
tsk->exit_state = state;
+ if (thread_group_leader(tsk) &&
+ tsk->signal->notify_count < 0 &&
+ tsk->signal->group_exit_task)
+ wake_up_process(tsk->signal->group_exit_task);
+
write_unlock_irq(&tasklist_lock);
list_for_each_safe(_p, _n, &ptrace_dead) {
@@ -883,6 +872,14 @@ static void check_stack_usage(void)
static inline void check_stack_usage(void) {}
#endif
+static inline void exit_child_reaper(struct task_struct *tsk)
+{
+ if (likely(tsk->group_leader != child_reaper(tsk)))
+ return;
+
+ panic("Attempted to kill init!");
+}
+
fastcall NORET_TYPE void do_exit(long code)
{
struct task_struct *tsk = current;
@@ -896,13 +893,6 @@ fastcall NORET_TYPE void do_exit(long code)
panic("Aiee, killing interrupt handler!");
if (unlikely(!tsk->pid))
panic("Attempted to kill the idle task!");
- if (unlikely(tsk == child_reaper(tsk))) {
- if (tsk->nsproxy->pid_ns != &init_pid_ns)
- tsk->nsproxy->pid_ns->child_reaper = init_pid_ns.child_reaper;
- else
- panic("Attempted to kill init!");
- }
-
if (unlikely(current->ptrace & PT_TRACE_EXIT)) {
current->ptrace_message = code;
@@ -932,13 +922,13 @@ fastcall NORET_TYPE void do_exit(long code)
schedule();
}
+ tsk->flags |= PF_EXITING;
/*
* tsk->flags are checked in the futex code to protect against
* an exiting task cleaning up the robust pi futexes.
*/
- spin_lock_irq(&tsk->pi_lock);
- tsk->flags |= PF_EXITING;
- spin_unlock_irq(&tsk->pi_lock);
+ smp_mb();
+ spin_unlock_wait(&tsk->pi_lock);
if (unlikely(in_atomic()))
printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
@@ -952,16 +942,19 @@ fastcall NORET_TYPE void do_exit(long code)
}
group_dead = atomic_dec_and_test(&tsk->signal->live);
if (group_dead) {
+ exit_child_reaper(tsk);
hrtimer_cancel(&tsk->signal->real_timer);
exit_itimers(tsk->signal);
}
acct_collect(code, group_dead);
+#ifdef CONFIG_FUTEX
if (unlikely(tsk->robust_list))
exit_robust_list(tsk);
-#if defined(CONFIG_FUTEX) && defined(CONFIG_COMPAT)
+#ifdef CONFIG_COMPAT
if (unlikely(tsk->compat_robust_list))
compat_exit_robust_list(tsk);
#endif
+#endif
if (group_dead)
tty_audit_exit();
if (unlikely(tsk->audit_context))
@@ -996,6 +989,7 @@ fastcall NORET_TYPE void do_exit(long code)
mpol_free(tsk->mempolicy);
tsk->mempolicy = NULL;
#endif
+#ifdef CONFIG_FUTEX
/*
* This must happen late, after the PID is not
* hashed anymore:
@@ -1004,6 +998,7 @@ fastcall NORET_TYPE void do_exit(long code)
exit_pi_state_list(tsk);
if (unlikely(current->pi_state_cache))
kfree(current->pi_state_cache);
+#endif
/*
* Make sure we are holding no locks:
*/
@@ -1168,8 +1163,7 @@ static int wait_task_zombie(struct task_struct *p, int noreap,
int __user *stat_addr, struct rusage __user *ru)
{
unsigned long state;
- int retval;
- int status;
+ int retval, status, traced;
if (unlikely(noreap)) {
pid_t pid = p->pid;
@@ -1203,15 +1197,11 @@ static int wait_task_zombie(struct task_struct *p, int noreap,
BUG_ON(state != EXIT_DEAD);
return 0;
}
- if (unlikely(p->exit_signal == -1 && p->ptrace == 0)) {
- /*
- * This can only happen in a race with a ptraced thread
- * dying on another processor.
- */
- return 0;
- }
- if (likely(p->real_parent == p->parent) && likely(p->signal)) {
+ /* traced means p->ptrace, but not vice versa */
+ traced = (p->real_parent != p->parent);
+
+ if (likely(!traced)) {
struct signal_struct *psig;
struct signal_struct *sig;
@@ -1298,35 +1288,30 @@ static int wait_task_zombie(struct task_struct *p, int noreap,
retval = put_user(p->pid, &infop->si_pid);
if (!retval && infop)
retval = put_user(p->uid, &infop->si_uid);
- if (retval) {
- // TODO: is this safe?
- p->exit_state = EXIT_ZOMBIE;
- return retval;
- }
- retval = p->pid;
- if (p->real_parent != p->parent) {
+ if (!retval)
+ retval = p->pid;
+
+ if (traced) {
write_lock_irq(&tasklist_lock);
- /* Double-check with lock held. */
- if (p->real_parent != p->parent) {
- __ptrace_unlink(p);
- // TODO: is this safe?
- p->exit_state = EXIT_ZOMBIE;
- /*
- * If this is not a detached task, notify the parent.
- * If it's still not detached after that, don't release
- * it now.
- */
+ /* We dropped tasklist, ptracer could die and untrace */
+ ptrace_unlink(p);
+ /*
+ * If this is not a detached task, notify the parent.
+ * If it's still not detached after that, don't release
+ * it now.
+ */
+ if (p->exit_signal != -1) {
+ do_notify_parent(p, p->exit_signal);
if (p->exit_signal != -1) {
- do_notify_parent(p, p->exit_signal);
- if (p->exit_signal != -1)
- p = NULL;
+ p->exit_state = EXIT_ZOMBIE;
+ p = NULL;
}
}
write_unlock_irq(&tasklist_lock);
}
if (p != NULL)
release_task(p);
- BUG_ON(!retval);
+
return retval;
}
@@ -1345,7 +1330,7 @@ static int wait_task_stopped(struct task_struct *p, int delayed_group_leader,
if (!p->exit_code)
return 0;
if (delayed_group_leader && !(p->ptrace & PT_PTRACED) &&
- p->signal && p->signal->group_stop_count > 0)
+ p->signal->group_stop_count > 0)
/*
* A group stop is in progress and this is the group leader.
* We won't report until all threads have stopped.
@@ -1459,9 +1444,6 @@ static int wait_task_continued(struct task_struct *p, int noreap,
pid_t pid;
uid_t uid;
- if (unlikely(!p->signal))
- return 0;
-
if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
return 0;
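
The PF_EXITING change above replaces taking pi_lock with a plain store, a full barrier, and spin_unlock_wait(). A sketch of the ordering this relies on; the right-hand column paraphrases the pi-state attach path in kernel/futex.c of this era and should be treated as an assumption:

/*
 *   exit path (do_exit)                futex pi-state attach
 *   -------------------                ---------------------
 *   tsk->flags |= PF_EXITING;          spin_lock_irq(&p->pi_lock);
 *   smp_mb();                          if (p->flags & PF_EXITING)
 *   spin_unlock_wait(&tsk->pi_lock);           fail, do not attach;
 *                                      ... attach pi_state ...
 *                                      spin_unlock_irq(&p->pi_lock);
 *
 * spin_unlock_wait() blocks until any attacher currently inside the
 * pi_lock critical section drops the lock, so the exiting task cannot
 * race past an attacher that loaded tsk->flags before the store.
 */
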
diff --git a/kernel/fork.c b/kernel/fork.c
index 3fc3c1383912..490495a39c7e 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -107,6 +107,7 @@ static struct kmem_cache *mm_cachep;
void free_task(struct task_struct *tsk)
{
+ prop_local_destroy_single(&tsk->dirties);
free_thread_info(tsk->stack);
rt_mutex_debug_task_free(tsk);
free_task_struct(tsk);
@@ -163,6 +164,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
{
struct task_struct *tsk;
struct thread_info *ti;
+ int err;
prepare_to_copy(orig);
@@ -178,6 +180,14 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
*tsk = *orig;
tsk->stack = ti;
+
+ err = prop_local_init_single(&tsk->dirties);
+ if (err) {
+ free_thread_info(ti);
+ free_task_struct(tsk);
+ return NULL;
+ }
+
setup_thread_stack(tsk, orig);
#ifdef CONFIG_CC_STACKPROTECTOR
@@ -1069,7 +1079,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
do_posix_clock_monotonic_gettime(&p->start_time);
p->real_start_time = p->start_time;
monotonic_to_bootbased(&p->real_start_time);
+#ifdef CONFIG_SECURITY
p->security = NULL;
+#endif
p->io_context = NULL;
p->io_wait = NULL;
p->audit_context = NULL;
@@ -1146,13 +1158,14 @@ static struct task_struct *copy_process(unsigned long clone_flags,
* Clear TID on mm_release()?
*/
p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL;
+#ifdef CONFIG_FUTEX
p->robust_list = NULL;
#ifdef CONFIG_COMPAT
p->compat_robust_list = NULL;
#endif
INIT_LIST_HEAD(&p->pi_state_list);
p->pi_state_cache = NULL;
-
+#endif
/*
* sigaltstack should be cleared when sharing the same VM
*/
@@ -1435,8 +1448,7 @@ long do_fork(unsigned long clone_flags,
#define ARCH_MIN_MMSTRUCT_ALIGN 0
#endif
-static void sighand_ctor(void *data, struct kmem_cache *cachep,
- unsigned long flags)
+static void sighand_ctor(struct kmem_cache *cachep, void *data)
{
struct sighand_struct *sighand = data;
diff --git a/kernel/futex.c b/kernel/futex.c
index fcc94e7b4086..d725676d84f3 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -52,6 +52,7 @@
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/module.h>
+#include <linux/magic.h>
#include <asm/futex.h>
#include "rtmutex_common.h"
@@ -2080,7 +2081,7 @@ static int futexfs_get_sb(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data,
struct vfsmount *mnt)
{
- return get_sb_pseudo(fs_type, "futex", NULL, 0xBAD1DEA, mnt);
+ return get_sb_pseudo(fs_type, "futex", NULL, FUTEXFS_SUPER_MAGIC, mnt);
}
static struct file_system_type futex_fs_type = {
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index f1a73f0b54e7..9b5dff6b3f6a 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -503,7 +503,6 @@ out_unlock:
spin_unlock(&desc->lock);
}
-#ifdef CONFIG_SMP
/**
* handle_percpu_IRQ - Per CPU local irq handler
* @irq: the interrupt number
@@ -529,8 +528,6 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
desc->chip->eoi(irq);
}
-#endif /* CONFIG_SMP */
-
void
__set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
const char *name)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 7230d914eaa2..80eab7a04205 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -405,7 +405,6 @@ void free_irq(unsigned int irq, void *dev_id)
struct irq_desc *desc;
struct irqaction **p;
unsigned long flags;
- irqreturn_t (*handler)(int, void *) = NULL;
WARN_ON(in_interrupt());
if (irq >= NR_IRQS)
@@ -445,8 +444,21 @@ void free_irq(unsigned int irq, void *dev_id)
/* Make sure it's not being used on another CPU */
synchronize_irq(irq);
- if (action->flags & IRQF_SHARED)
- handler = action->handler;
+#ifdef CONFIG_DEBUG_SHIRQ
+ /*
+ * It's a shared IRQ -- the driver ought to be
+ * prepared for it to happen even while it is
+ * being freed, so let's make sure.... We do
+ * this after actually deregistering it, to
+ * make sure that a 'real' IRQ doesn't run in
+ * parallel with our fake
+ */
+ if (action->flags & IRQF_SHARED) {
+ local_irq_save(flags);
+ action->handler(irq, dev_id);
+ local_irq_restore(flags);
+ }
+#endif
kfree(action);
return;
}
@@ -454,19 +466,6 @@ void free_irq(unsigned int irq, void *dev_id)
spin_unlock_irqrestore(&desc->lock, flags);
return;
}
-#ifdef CONFIG_DEBUG_SHIRQ
- if (handler) {
- /*
- * It's a shared IRQ -- the driver ought to be prepared for it
- * to happen even now it's being freed, so let's make sure....
- * We do this after actually deregistering it, to make sure that
- * a 'real' IRQ doesn't run in parallel with our fake
- */
- local_irq_save(flags);
- handler(irq, dev_id);
- local_irq_restore(flags);
- }
-#endif
}
EXPORT_SYMBOL(free_irq);
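
With CONFIG_DEBUG_SHIRQ the fake invocation now happens right after the action is unlinked, so every shared handler must tolerate being called with no device interrupt pending. A defensive handler sketch (the device, register, and flag names are hypothetical):

static irqreturn_t mydev_interrupt(int irq, void *dev_id)
{
	struct mydev *dev = dev_id;

	/* Must survive the DEBUG_SHIRQ fake call: verify the device
	 * actually raised an interrupt before touching any state. */
	if (!(readl(dev->mmio + MYDEV_IRQ_STATUS) & MYDEV_IRQ_PENDING))
		return IRQ_NONE;

	/* ... acknowledge and handle the real event ... */
	return IRQ_HANDLED;
}
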
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 25db14b89e82..7885269b0da2 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -17,21 +17,30 @@
#include <linux/highmem.h>
#include <linux/syscalls.h>
#include <linux/reboot.h>
-#include <linux/syscalls.h>
#include <linux/ioport.h>
#include <linux/hardirq.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
+#include <linux/utsrelease.h>
+#include <linux/utsname.h>
+#include <linux/numa.h>
#include <asm/page.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/semaphore.h>
+#include <asm/sections.h>
/* Per cpu memory for storing cpu states in case of system crash. */
note_buf_t* crash_notes;
+/* vmcoreinfo stuff */
+unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
+u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
+size_t vmcoreinfo_size;
+size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);
+
/* Location of the reserved area for the crash kernel */
struct resource crashk_res = {
.name = "Crash kernel",
@@ -1061,6 +1070,7 @@ void crash_kexec(struct pt_regs *regs)
if (kexec_crash_image) {
struct pt_regs fixed_regs;
crash_setup_regs(&fixed_regs, regs);
+ crash_save_vmcoreinfo();
machine_crash_shutdown(&fixed_regs);
machine_kexec(kexec_crash_image);
}
@@ -1135,3 +1145,104 @@ static int __init crash_notes_memory_init(void)
return 0;
}
module_init(crash_notes_memory_init)
+
+void crash_save_vmcoreinfo(void)
+{
+ u32 *buf;
+
+ if (!vmcoreinfo_size)
+ return;
+
+ vmcoreinfo_append_str("CRASHTIME=%ld", get_seconds());
+
+ buf = (u32 *)vmcoreinfo_note;
+
+ buf = append_elf_note(buf, VMCOREINFO_NOTE_NAME, 0, vmcoreinfo_data,
+ vmcoreinfo_size);
+
+ final_note(buf);
+}
+
+void vmcoreinfo_append_str(const char *fmt, ...)
+{
+ va_list args;
+ char buf[0x50];
+ int r;
+
+ va_start(args, fmt);
+ r = vsnprintf(buf, sizeof(buf), fmt, args);
+ va_end(args);
+
+ if (r + vmcoreinfo_size > vmcoreinfo_max_size)
+ r = vmcoreinfo_max_size - vmcoreinfo_size;
+
+ memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r);
+
+ vmcoreinfo_size += r;
+}
+
+/*
+ * provide an empty default implementation here -- architecture
+ * code may override this
+ */
+void __attribute__ ((weak)) arch_crash_save_vmcoreinfo(void)
+{}
+
+unsigned long __attribute__ ((weak)) paddr_vmcoreinfo_note(void)
+{
+ return __pa((unsigned long)(char *)&vmcoreinfo_note);
+}
+
+static int __init crash_save_vmcoreinfo_init(void)
+{
+ vmcoreinfo_append_str("OSRELEASE=%s\n", init_uts_ns.name.release);
+ vmcoreinfo_append_str("PAGESIZE=%ld\n", PAGE_SIZE);
+
+ VMCOREINFO_SYMBOL(init_uts_ns);
+ VMCOREINFO_SYMBOL(node_online_map);
+ VMCOREINFO_SYMBOL(swapper_pg_dir);
+ VMCOREINFO_SYMBOL(_stext);
+
+#ifndef CONFIG_NEED_MULTIPLE_NODES
+ VMCOREINFO_SYMBOL(mem_map);
+ VMCOREINFO_SYMBOL(contig_page_data);
+#endif
+#ifdef CONFIG_SPARSEMEM
+ VMCOREINFO_SYMBOL(mem_section);
+ VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
+ VMCOREINFO_SIZE(mem_section);
+ VMCOREINFO_OFFSET(mem_section, section_mem_map);
+#endif
+ VMCOREINFO_SIZE(page);
+ VMCOREINFO_SIZE(pglist_data);
+ VMCOREINFO_SIZE(zone);
+ VMCOREINFO_SIZE(free_area);
+ VMCOREINFO_SIZE(list_head);
+ VMCOREINFO_TYPEDEF_SIZE(nodemask_t);
+ VMCOREINFO_OFFSET(page, flags);
+ VMCOREINFO_OFFSET(page, _count);
+ VMCOREINFO_OFFSET(page, mapping);
+ VMCOREINFO_OFFSET(page, lru);
+ VMCOREINFO_OFFSET(pglist_data, node_zones);
+ VMCOREINFO_OFFSET(pglist_data, nr_zones);
+#ifdef CONFIG_FLAT_NODE_MEM_MAP
+ VMCOREINFO_OFFSET(pglist_data, node_mem_map);
+#endif
+ VMCOREINFO_OFFSET(pglist_data, node_start_pfn);
+ VMCOREINFO_OFFSET(pglist_data, node_spanned_pages);
+ VMCOREINFO_OFFSET(pglist_data, node_id);
+ VMCOREINFO_OFFSET(zone, free_area);
+ VMCOREINFO_OFFSET(zone, vm_stat);
+ VMCOREINFO_OFFSET(zone, spanned_pages);
+ VMCOREINFO_OFFSET(free_area, free_list);
+ VMCOREINFO_OFFSET(list_head, next);
+ VMCOREINFO_OFFSET(list_head, prev);
+ VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
+ VMCOREINFO_NUMBER(NR_FREE_PAGES);
+
+ arch_crash_save_vmcoreinfo();
+
+ return 0;
+}
+
+module_init(crash_save_vmcoreinfo_init)
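
The VMCOREINFO_* macros used above are thin printf-style wrappers around vmcoreinfo_append_str(); roughly as follows (a sketch reconstructed from the matching include/linux/kexec.h additions, so treat the exact expansions as assumptions):

#define VMCOREINFO_SYMBOL(name) \
	vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, (unsigned long)&name)
#define VMCOREINFO_SIZE(name) \
	vmcoreinfo_append_str("SIZE(%s)=%lu\n", #name, \
			      (unsigned long)sizeof(struct name))
#define VMCOREINFO_OFFSET(name, field) \
	vmcoreinfo_append_str("OFFSET(%s.%s)=%lu\n", #name, #field, \
			      (unsigned long)offsetof(struct name, field))

Each call appends one "KEY=value" line to vmcoreinfo_data, which crash_save_vmcoreinfo() then wraps into an ELF note for dump tools to parse.
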
diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
index 6046939d0804..65daa5373ca6 100644
--- a/kernel/ksysfs.c
+++ b/kernel/ksysfs.c
@@ -61,6 +61,15 @@ static ssize_t kexec_crash_loaded_show(struct kset *kset, char *page)
return sprintf(page, "%d\n", !!kexec_crash_image);
}
KERNEL_ATTR_RO(kexec_crash_loaded);
+
+static ssize_t vmcoreinfo_show(struct kset *kset, char *page)
+{
+ return sprintf(page, "%lx %x\n",
+ paddr_vmcoreinfo_note(),
+ (unsigned int)vmcoreinfo_max_size);
+}
+KERNEL_ATTR_RO(vmcoreinfo);
+
#endif /* CONFIG_KEXEC */
/*
@@ -96,6 +105,7 @@ static struct attribute * kernel_attrs[] = {
#ifdef CONFIG_KEXEC
&kexec_loaded_attr.attr,
&kexec_crash_loaded_attr.attr,
+ &vmcoreinfo_attr.attr,
#endif
NULL
};
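
The new attribute prints the physical address and maximum size of the vmcoreinfo note in "%lx %x" form. A userspace sketch that parses it (illustrative only; kexec-tools is the intended consumer):

#include <stdio.h>

int main(void)
{
	unsigned long long paddr;
	unsigned int size;
	FILE *f = fopen("/sys/kernel/vmcoreinfo", "r");

	if (!f || fscanf(f, "%llx %x", &paddr, &size) != 2)
		return 1;
	fclose(f);
	printf("vmcoreinfo note at 0x%llx, %u bytes max\n", paddr, size);
	return 0;
}
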
diff --git a/kernel/module.c b/kernel/module.c
index db0ead0363e2..a389b423c279 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -20,6 +20,7 @@
#include <linux/moduleloader.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
+#include <linux/sysfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
@@ -692,8 +693,7 @@ sys_delete_module(const char __user *name_user, unsigned int flags)
}
/* If it has an init func, it must have an exit func to unload */
- if ((mod->init != NULL && mod->exit == NULL)
- || mod->unsafe) {
+ if (mod->init && !mod->exit) {
forced = try_force_unload(flags);
if (!forced) {
/* This module can't be removed */
@@ -741,11 +741,6 @@ static void print_unload_info(struct seq_file *m, struct module *mod)
seq_printf(m, "%s,", use->module_which_uses->name);
}
- if (mod->unsafe) {
- printed_something = 1;
- seq_printf(m, "[unsafe],");
- }
-
if (mod->init != NULL && mod->exit == NULL) {
printed_something = 1;
seq_printf(m, "[permanent],");
@@ -1053,6 +1048,100 @@ static void remove_sect_attrs(struct module *mod)
}
}
+/*
+ * /sys/module/foo/notes/.section.name gives contents of SHT_NOTE sections.
+ */
+
+struct module_notes_attrs {
+ struct kobject *dir;
+ unsigned int notes;
+ struct bin_attribute attrs[0];
+};
+
+static ssize_t module_notes_read(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t count)
+{
+ /*
+ * The caller checked the pos and count against our size.
+ */
+ memcpy(buf, bin_attr->private + pos, count);
+ return count;
+}
+
+static void free_notes_attrs(struct module_notes_attrs *notes_attrs,
+ unsigned int i)
+{
+ if (notes_attrs->dir) {
+ while (i-- > 0)
+ sysfs_remove_bin_file(notes_attrs->dir,
+ &notes_attrs->attrs[i]);
+ kobject_del(notes_attrs->dir);
+ }
+ kfree(notes_attrs);
+}
+
+static void add_notes_attrs(struct module *mod, unsigned int nsect,
+ char *secstrings, Elf_Shdr *sechdrs)
+{
+ unsigned int notes, loaded, i;
+ struct module_notes_attrs *notes_attrs;
+ struct bin_attribute *nattr;
+
+ /* Count notes sections and allocate structures. */
+ notes = 0;
+ for (i = 0; i < nsect; i++)
+ if ((sechdrs[i].sh_flags & SHF_ALLOC) &&
+ (sechdrs[i].sh_type == SHT_NOTE))
+ ++notes;
+
+ if (notes == 0)
+ return;
+
+ notes_attrs = kzalloc(sizeof(*notes_attrs)
+ + notes * sizeof(notes_attrs->attrs[0]),
+ GFP_KERNEL);
+ if (notes_attrs == NULL)
+ return;
+
+ notes_attrs->notes = notes;
+ nattr = &notes_attrs->attrs[0];
+ for (loaded = i = 0; i < nsect; ++i) {
+ if (!(sechdrs[i].sh_flags & SHF_ALLOC))
+ continue;
+ if (sechdrs[i].sh_type == SHT_NOTE) {
+ nattr->attr.name = mod->sect_attrs->attrs[loaded].name;
+ nattr->attr.mode = S_IRUGO;
+ nattr->size = sechdrs[i].sh_size;
+ nattr->private = (void *) sechdrs[i].sh_addr;
+ nattr->read = module_notes_read;
+ ++nattr;
+ }
+ ++loaded;
+ }
+
+ notes_attrs->dir = kobject_add_dir(&mod->mkobj.kobj, "notes");
+ if (!notes_attrs->dir)
+ goto out;
+
+ for (i = 0; i < notes; ++i)
+ if (sysfs_create_bin_file(notes_attrs->dir,
+ &notes_attrs->attrs[i]))
+ goto out;
+
+ mod->notes_attrs = notes_attrs;
+ return;
+
+ out:
+ free_notes_attrs(notes_attrs, i);
+}
+
+static void remove_notes_attrs(struct module *mod)
+{
+ if (mod->notes_attrs)
+ free_notes_attrs(mod->notes_attrs, mod->notes_attrs->notes);
+}
+
#else
static inline void add_sect_attrs(struct module *mod, unsigned int nsect,
@@ -1063,6 +1152,15 @@ static inline void add_sect_attrs(struct module *mod, unsigned int nsect,
static inline void remove_sect_attrs(struct module *mod)
{
}
+
+static inline void add_notes_attrs(struct module *mod, unsigned int nsect,
+ char *sectstrings, Elf_Shdr *sechdrs)
+{
+}
+
+static inline void remove_notes_attrs(struct module *mod)
+{
+}
#endif /* CONFIG_KALLSYMS */
#ifdef CONFIG_SYSFS
@@ -1197,6 +1295,7 @@ static void free_module(struct module *mod)
{
/* Delete from various lists */
stop_machine_run(__unlink_module, mod, NR_CPUS);
+ remove_notes_attrs(mod);
remove_sect_attrs(mod);
mod_kobject_remove(mod);
@@ -1782,7 +1881,8 @@ static struct module *load_module(void __user *umod,
module_unload_init(mod);
/* Initialize kobject, so we can reference it. */
- if (mod_sysfs_init(mod) != 0)
+ err = mod_sysfs_init(mod);
+ if (err)
goto cleanup;
/* Set up license info based on the info section */
@@ -1924,6 +2024,7 @@ static struct module *load_module(void __user *umod,
if (err < 0)
goto arch_cleanup;
add_sect_attrs(mod, hdr->e_shnum, secstrings, sechdrs);
+ add_notes_attrs(mod, hdr->e_shnum, secstrings, sechdrs);
/* Size of section 0 is 0, so this works well if no unwind info. */
mod->unwind_info = unwind_add_table(mod,
@@ -2011,15 +2112,10 @@ sys_init_module(void __user *umod,
buggy refcounters. */
mod->state = MODULE_STATE_GOING;
synchronize_sched();
- if (mod->unsafe)
- printk(KERN_ERR "%s: module is now stuck!\n",
- mod->name);
- else {
- module_put(mod);
- mutex_lock(&module_mutex);
- free_module(mod);
- mutex_unlock(&module_mutex);
- }
+ module_put(mod);
+ mutex_lock(&module_mutex);
+ free_module(mod);
+ mutex_unlock(&module_mutex);
return ret;
}
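
add_notes_attrs() exports each allocated SHT_NOTE section of a loaded module as a read-only binary sysfs file named after the section. A userspace sketch that hex-dumps one such note (the module and section names are examples; the actual names depend on how the module was built):

#include <stdio.h>

int main(void)
{
	/* e.g. a GNU build-id note emitted by the linker */
	FILE *f = fopen("/sys/module/usbcore/notes/.note.gnu.build-id", "rb");
	unsigned char byte;

	if (!f)
		return 1;
	while (fread(&byte, 1, 1, f) == 1)
		printf("%02x", byte);
	fclose(f);
	printf("\n");
	return 0;
}
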
diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c
index f1decd21a534..049e7c0ac566 100644
--- a/kernel/nsproxy.c
+++ b/kernel/nsproxy.c
@@ -203,8 +203,7 @@ int unshare_nsproxy_namespaces(unsigned long unshare_flags,
static int __init nsproxy_cache_init(void)
{
- nsproxy_cachep = kmem_cache_create("nsproxy", sizeof(struct nsproxy),
- 0, SLAB_PANIC, NULL);
+ nsproxy_cachep = KMEM_CACHE(nsproxy, SLAB_PANIC);
return 0;
}
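
KMEM_CACHE() is shorthand that also picks up the structure's natural alignment; the macro (paraphrased from <linux/slab.h> of this era, so treat the expansion as an assumption) makes the new line equivalent to:

nsproxy_cachep = kmem_cache_create("nsproxy", sizeof(struct nsproxy),
				   __alignof__(struct nsproxy),
				   SLAB_PANIC, NULL);
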
diff --git a/kernel/params.c b/kernel/params.c
index 4e57732fcfb4..1d6aca288cdc 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -252,8 +252,9 @@ int param_get_bool(char *buffer, struct kernel_param *kp)
int param_set_invbool(const char *val, struct kernel_param *kp)
{
int boolval, ret;
- struct kernel_param dummy = { .arg = &boolval };
+ struct kernel_param dummy;
+ dummy.arg = &boolval;
ret = param_set_bool(val, &dummy);
if (ret == 0)
*(int *)kp->arg = !boolval;
@@ -262,11 +263,7 @@ int param_set_invbool(const char *val, struct kernel_param *kp)
int param_get_invbool(char *buffer, struct kernel_param *kp)
{
- int val;
- struct kernel_param dummy = { .arg = &val };
-
- val = !*(int *)kp->arg;
- return param_get_bool(buffer, &dummy);
+ return sprintf(buffer, "%c", (*(int *)kp->arg) ? 'N' : 'Y');
}
/* We break the rule and mangle the string. */
@@ -325,7 +322,7 @@ static int param_array(const char *name,
int param_array_set(const char *val, struct kernel_param *kp)
{
- struct kparam_array *arr = kp->arg;
+ const struct kparam_array *arr = kp->arr;
unsigned int temp_num;
return param_array(kp->name, val, 1, arr->max, arr->elem,
@@ -335,7 +332,7 @@ int param_array_set(const char *val, struct kernel_param *kp)
int param_array_get(char *buffer, struct kernel_param *kp)
{
int i, off, ret;
- struct kparam_array *arr = kp->arg;
+ const struct kparam_array *arr = kp->arr;
struct kernel_param p;
p = *kp;
@@ -354,7 +351,7 @@ int param_array_get(char *buffer, struct kernel_param *kp)
int param_set_copystring(const char *val, struct kernel_param *kp)
{
- struct kparam_string *kps = kp->arg;
+ const struct kparam_string *kps = kp->str;
if (!val) {
printk(KERN_ERR "%s: missing param set value\n", kp->name);
@@ -371,7 +368,7 @@ int param_set_copystring(const char *val, struct kernel_param *kp)
int param_get_string(char *buffer, struct kernel_param *kp)
{
- struct kparam_string *kps = kp->arg;
+ const struct kparam_string *kps = kp->str;
return strlcpy(buffer, kps->string, kps->maxlen);
}
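
param_get_invbool now prints directly from the inverted storage: 'Y' when the underlying int is zero, 'N' otherwise, mirroring param_set_invbool, which stores the negation. A hedged usage sketch (all names hypothetical):

/* Module code stores the inverted sense ... */
static int no_widget;		/* 1 = widget disabled */
module_param_named(widget, no_widget, invbool, 0644);
/* ... so writing "Y" to /sys/module/<mod>/parameters/widget sets
 * no_widget = 0, and reading it back prints 'Y' while it stays 0. */
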
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 57efe0400bc2..d71ed09fe1dd 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -241,7 +241,8 @@ static __init int init_posix_timers(void)
register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
posix_timers_cache = kmem_cache_create("posix_timers_cache",
- sizeof (struct k_itimer), 0, 0, NULL);
+ sizeof (struct k_itimer), 0, SLAB_PANIC,
+ NULL);
idr_init(&posix_timers_id);
return 0;
}
diff --git a/kernel/printk.c b/kernel/printk.c
index b2b5c3a22a36..52493474f0ab 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -220,6 +220,58 @@ static inline void boot_delay_msec(void)
#endif
/*
+ * Return the number of unread characters in the log buffer.
+ */
+int log_buf_get_len(void)
+{
+ return logged_chars;
+}
+
+/*
+ * Copy a range of characters from the log buffer.
+ */
+int log_buf_copy(char *dest, int idx, int len)
+{
+ int ret, max;
+ bool took_lock = false;
+
+ if (!oops_in_progress) {
+ spin_lock_irq(&logbuf_lock);
+ took_lock = true;
+ }
+
+ max = log_buf_get_len();
+ if (idx < 0 || idx >= max) {
+ ret = -1;
+ } else {
+ if (len > max)
+ len = max;
+ ret = len;
+ idx += (log_end - max);
+ while (len-- > 0)
+ dest[len] = LOG_BUF(idx + len);
+ }
+
+ if (took_lock)
+ spin_unlock_irq(&logbuf_lock);
+
+ return ret;
+}
+
+/*
+ * Extract a single character from the log buffer.
+ */
+int log_buf_read(int idx)
+{
+ char ret;
+
+ if (log_buf_copy(&ret, idx, 1) == 1)
+ return ret;
+ else
+ return -1;
+}
+
+/*
* Commands to do_syslog:
*
* 0 -- Close the log. Currently a NOP.
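
A sketch of how kernel code might consume the new helpers, draining the tail of the ring buffer. The caller and buffer size are hypothetical; note that log_buf_copy() returns -1 for an out-of-range index, otherwise the count copied:

static void dump_log_tail(void)
{
	char buf[128];
	int len = log_buf_get_len();
	int want = min_t(int, len, (int)sizeof(buf));
	int copied = log_buf_copy(buf, len - want, want);

	if (copied > 0)
		printk(KERN_DEBUG "log tail: %.*s\n", copied, buf);
}
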
diff --git a/kernel/profile.c b/kernel/profile.c
index 6f69bf792d96..631b75c25d7e 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -37,7 +37,7 @@ struct profile_hit {
#define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
/* Oprofile timer tick hook */
-int (*timer_hook)(struct pt_regs *) __read_mostly;
+static int (*timer_hook)(struct pt_regs *) __read_mostly;
static atomic_t *prof_buffer;
static unsigned long prof_len, prof_shift;
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 130214f3d229..a66d4d1615f7 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -45,7 +45,6 @@
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
-#include <linux/rcupdate.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index ddff33247785..c3e165c2318f 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -35,14 +35,12 @@
#include <linux/sched.h>
#include <asm/atomic.h>
#include <linux/bitops.h>
-#include <linux/module.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
-#include <linux/random.h>
#include <linux/delay.h>
#include <linux/byteorder/swabb.h>
#include <linux/stat.h>
@@ -166,16 +164,14 @@ struct rcu_random_state {
/*
* Crude but fast random-number generator. Uses a linear congruential
- * generator, with occasional help from get_random_bytes().
+ * generator, with occasional help from cpu_clock().
*/
static unsigned long
rcu_random(struct rcu_random_state *rrsp)
{
- long refresh;
-
if (--rrsp->rrs_count < 0) {
- get_random_bytes(&refresh, sizeof(refresh));
- rrsp->rrs_state += refresh;
+ rrsp->rrs_state +=
+ (unsigned long)cpu_clock(raw_smp_processor_id());
rrsp->rrs_count = RCU_RANDOM_REFRESH;
}
rrsp->rrs_state = rrsp->rrs_state * RCU_RANDOM_MULT + RCU_RANDOM_ADD;
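
Extracted for clarity, the generator is a plain linear congruential step, now reseeded from cpu_clock() instead of get_random_bytes() so it can run from any context without touching the entropy pool. Standalone sketch (the constants are copied from rcutorture.c of this era; treat them as assumptions here):

#define RCU_RANDOM_MULT	39916801	/* prime */
#define RCU_RANDOM_ADD	479001701	/* prime */

static unsigned long rcu_lcg_step(unsigned long state)
{
	return state * RCU_RANDOM_MULT + RCU_RANDOM_ADD;
}
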
diff --git a/kernel/rtmutex-debug.c b/kernel/rtmutex-debug.c
index 5aedbee014df..6b0703db152d 100644
--- a/kernel/rtmutex-debug.c
+++ b/kernel/rtmutex-debug.c
@@ -82,12 +82,7 @@ do { \
* into the tracing code when doing error printk or
* executing a BUG():
*/
-int rt_trace_on = 1;
-
-void deadlock_trace_off(void)
-{
- rt_trace_on = 0;
-}
+static int rt_trace_on = 1;
static void printk_task(struct task_struct *p)
{
diff --git a/kernel/sched.c b/kernel/sched.c
index 0ec9521a8e70..92721d1534b8 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5075,6 +5075,17 @@ wait_to_die:
}
#ifdef CONFIG_HOTPLUG_CPU
+
+static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu)
+{
+ int ret;
+
+ local_irq_disable();
+ ret = __migrate_task(p, src_cpu, dest_cpu);
+ local_irq_enable();
+ return ret;
+}
+
/*
* Figure out where task on dead CPU should go, use force if necessary.
* NOTE: interrupts should be disabled by the caller
@@ -5113,7 +5124,7 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
"longer affine to cpu%d\n",
p->pid, p->comm, dead_cpu);
}
- } while (!__migrate_task(p, dead_cpu, dest_cpu));
+ } while (!__migrate_task_irq(p, dead_cpu, dest_cpu));
}
/*
@@ -5141,7 +5152,7 @@ static void migrate_live_tasks(int src_cpu)
{
struct task_struct *p, *t;
- write_lock_irq(&tasklist_lock);
+ read_lock(&tasklist_lock);
do_each_thread(t, p) {
if (p == current)
@@ -5151,7 +5162,7 @@ static void migrate_live_tasks(int src_cpu)
move_task_off_dead_cpu(src_cpu, p);
} while_each_thread(t, p);
- write_unlock_irq(&tasklist_lock);
+ read_unlock(&tasklist_lock);
}
/*
@@ -5229,11 +5240,10 @@ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
* Drop lock around migration; if someone else moves it,
* that's OK. No task can be added to this CPU, so iteration is
* fine.
- * NOTE: interrupts should be left disabled --dev@
*/
- spin_unlock(&rq->lock);
+ spin_unlock_irq(&rq->lock);
move_task_off_dead_cpu(dead_cpu, p);
- spin_lock(&rq->lock);
+ spin_lock_irq(&rq->lock);
put_task_struct(p);
}
@@ -5471,14 +5481,14 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
kthread_stop(rq->migration_thread);
rq->migration_thread = NULL;
/* Idle task back to normal (off runqueue, low prio) */
- rq = task_rq_lock(rq->idle, &flags);
+ spin_lock_irq(&rq->lock);
update_rq_clock(rq);
deactivate_task(rq, rq->idle, 0);
rq->idle->static_prio = MAX_PRIO;
__setscheduler(rq, rq->idle, SCHED_NORMAL, 0);
rq->idle->sched_class = &idle_sched_class;
migrate_dead_tasks(cpu);
- task_rq_unlock(rq, &flags);
+ spin_unlock_irq(&rq->lock);
migrate_nr_uninterruptible(rq);
BUG_ON(rq->nr_running != 0);
diff --git a/kernel/signal.c b/kernel/signal.c
index 792952381092..2124ffadcfde 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -909,8 +909,7 @@ __group_complete_signal(int sig, struct task_struct *p)
do {
sigaddset(&t->pending.signal, SIGKILL);
signal_wake_up(t, 1);
- t = next_thread(t);
- } while (t != p);
+ } while_each_thread(p, t);
return;
}
@@ -928,13 +927,11 @@ __group_complete_signal(int sig, struct task_struct *p)
rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
p->signal->group_stop_count = 0;
p->signal->group_exit_task = t;
- t = p;
+ p = t;
do {
p->signal->group_stop_count++;
- signal_wake_up(t, 0);
- t = next_thread(t);
- } while (t != p);
- wake_up_process(p->signal->group_exit_task);
+ signal_wake_up(t, t == p);
+ } while_each_thread(p, t);
return;
}
@@ -985,9 +982,6 @@ void zap_other_threads(struct task_struct *p)
p->signal->flags = SIGNAL_GROUP_EXIT;
p->signal->group_stop_count = 0;
- if (thread_group_empty(p))
- return;
-
for (t = next_thread(p); t != p; t = next_thread(t)) {
/*
* Don't bother with already dead threads
@@ -2300,15 +2294,6 @@ int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
k = &current->sighand->action[sig-1];
spin_lock_irq(&current->sighand->siglock);
- if (signal_pending(current)) {
- /*
- * If there might be a fatal signal pending on multiple
- * threads, make sure we take it before changing the action.
- */
- spin_unlock_irq(&current->sighand->siglock);
- return -ERESTARTNOINTR;
- }
-
if (oact)
*oact = *k;
@@ -2335,7 +2320,6 @@ int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
rm_from_queue_full(&mask, &t->signal->shared_pending);
do {
rm_from_queue_full(&mask, &t->pending);
- recalc_sigpending_and_wake(t);
t = next_thread(t);
} while (t != current);
}
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index 708d4882c0c3..edeeef3a6a32 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -15,13 +15,16 @@
#include <linux/notifier.h>
#include <linux/module.h>
+#include <asm/irq_regs.h>
+
static DEFINE_SPINLOCK(print_lock);
static DEFINE_PER_CPU(unsigned long, touch_timestamp);
static DEFINE_PER_CPU(unsigned long, print_timestamp);
static DEFINE_PER_CPU(struct task_struct *, watchdog_task);
-static int did_panic = 0;
+static int did_panic;
+int softlockup_thresh = 10;
static int
softlock_panic(struct notifier_block *this, unsigned long event, void *ptr)
@@ -40,14 +43,16 @@ static struct notifier_block panic_block = {
* resolution, and we don't need to waste time with a big divide when
* 2^30ns == 1.074s.
*/
-static unsigned long get_timestamp(void)
+static unsigned long get_timestamp(int this_cpu)
{
- return sched_clock() >> 30; /* 2^30 ~= 10^9 */
+ return cpu_clock(this_cpu) >> 30; /* 2^30 ~= 10^9 */
}
void touch_softlockup_watchdog(void)
{
- __raw_get_cpu_var(touch_timestamp) = get_timestamp();
+ int this_cpu = raw_smp_processor_id();
+
+ __raw_get_cpu_var(touch_timestamp) = get_timestamp(this_cpu);
}
EXPORT_SYMBOL(touch_softlockup_watchdog);
@@ -70,6 +75,7 @@ void softlockup_tick(void)
int this_cpu = smp_processor_id();
unsigned long touch_timestamp = per_cpu(touch_timestamp, this_cpu);
unsigned long print_timestamp;
+ struct pt_regs *regs = get_irq_regs();
unsigned long now;
if (touch_timestamp == 0) {
@@ -80,10 +86,11 @@ void softlockup_tick(void)
print_timestamp = per_cpu(print_timestamp, this_cpu);
/* report at most once a second */
- if (print_timestamp < (touch_timestamp + 1) ||
- did_panic ||
- !per_cpu(watchdog_task, this_cpu))
+ if ((print_timestamp >= touch_timestamp &&
+ print_timestamp < (touch_timestamp + 1)) ||
+ did_panic || !per_cpu(watchdog_task, this_cpu)) {
return;
+ }
/* do not print during early bootup: */
if (unlikely(system_state != SYSTEM_RUNNING)) {
@@ -91,28 +98,33 @@ void softlockup_tick(void)
return;
}
- now = get_timestamp();
+ now = get_timestamp(this_cpu);
/* Wake up the high-prio watchdog task every second: */
if (now > (touch_timestamp + 1))
wake_up_process(per_cpu(watchdog_task, this_cpu));
/* Warn about unreasonable 10+ seconds delays: */
- if (now > (touch_timestamp + 10)) {
- per_cpu(print_timestamp, this_cpu) = touch_timestamp;
+ if (now <= (touch_timestamp + softlockup_thresh))
+ return;
- spin_lock(&print_lock);
- printk(KERN_ERR "BUG: soft lockup detected on CPU#%d!\n",
- this_cpu);
+ per_cpu(print_timestamp, this_cpu) = touch_timestamp;
+
+ spin_lock(&print_lock);
+ printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %lus! [%s:%d]\n",
+ this_cpu, now - touch_timestamp,
+ current->comm, current->pid);
+ if (regs)
+ show_regs(regs);
+ else
dump_stack();
- spin_unlock(&print_lock);
- }
+ spin_unlock(&print_lock);
}
/*
* The watchdog thread - runs every second and touches the timestamp.
*/
-static int watchdog(void * __bind_cpu)
+static int watchdog(void *__bind_cpu)
{
struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
@@ -150,13 +162,13 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
BUG_ON(per_cpu(watchdog_task, hotcpu));
p = kthread_create(watchdog, hcpu, "watchdog/%d", hotcpu);
if (IS_ERR(p)) {
- printk("watchdog for %i failed\n", hotcpu);
+ printk(KERN_ERR "watchdog for %i failed\n", hotcpu);
return NOTIFY_BAD;
}
- per_cpu(touch_timestamp, hotcpu) = 0;
- per_cpu(watchdog_task, hotcpu) = p;
+ per_cpu(touch_timestamp, hotcpu) = 0;
+ per_cpu(watchdog_task, hotcpu) = p;
kthread_bind(p, hotcpu);
- break;
+ break;
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
wake_up_process(per_cpu(watchdog_task, hotcpu));
@@ -176,7 +188,7 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
kthread_stop(p);
break;
#endif /* CONFIG_HOTPLUG_CPU */
- }
+ }
return NOTIFY_OK;
}
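
get_timestamp()'s ">> 30" converts a nanosecond clock to roughly one-second units without a division: 2^30 ns = 1.073741824 s, so each unit runs about 7% long, which is harmless for watchdog thresholds. A worked sketch:

/* 25,000,000,000 ns >> 30 = 23 units (~24.7 true seconds), so with
 * softlockup_thresh = 10 a CPU must be stuck a little over ten real
 * seconds before the warning fires. */
static unsigned long ns_to_watchdog_units(unsigned long long ns)
{
	return (unsigned long)(ns >> 30);
}
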
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index b0ec498a18d9..52c7a151e298 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -4,6 +4,10 @@
#include <asm/unistd.h>
+/* we can't #include <linux/syscalls.h> here,
+ but tell gcc to not warn with -Wmissing-prototypes */
+asmlinkage long sys_ni_syscall(void);
+
/*
* Non-implemented system calls get redirected here.
*/
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 96efbb859997..dde3d53e8adc 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -63,6 +63,7 @@ extern int print_fatal_signals;
extern int sysctl_overcommit_memory;
extern int sysctl_overcommit_ratio;
extern int sysctl_panic_on_oom;
+extern int sysctl_oom_kill_allocating_task;
extern int max_threads;
extern int core_uses_pid;
extern int suid_dumpable;
@@ -79,6 +80,19 @@ extern int maps_protect;
extern int sysctl_stat_interval;
extern int audit_argv_kb;
+/* Constants used for minimum and maximum */
+#ifdef CONFIG_DETECT_SOFTLOCKUP
+static int one = 1;
+static int sixty = 60;
+#endif
+
+#ifdef CONFIG_MMU
+static int two = 2;
+#endif
+
+static int zero;
+static int one_hundred = 100;
+
/* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */
static int maxolduid = 65535;
static int minolduid;
@@ -710,6 +724,19 @@ static ctl_table kern_table[] = {
.proc_handler = &proc_dointvec,
},
#endif
+#ifdef CONFIG_DETECT_SOFTLOCKUP
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "softlockup_thresh",
+ .data = &softlockup_thresh,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_minmax,
+ .strategy = &sysctl_intvec,
+ .extra1 = &one,
+ .extra2 = &sixty,
+ },
+#endif
#ifdef CONFIG_COMPAT
{
.ctl_name = KERN_COMPAT_LOG,
@@ -756,13 +783,6 @@ static ctl_table kern_table[] = {
{ .ctl_name = 0 }
};
-/* Constants for minimum and maximum testing in vm_table.
- We use these as one-element integer vectors. */
-static int zero;
-static int two = 2;
-static int one_hundred = 100;
-
-
static ctl_table vm_table[] = {
{
.ctl_name = VM_OVERCOMMIT_MEMORY,
@@ -781,6 +801,14 @@ static ctl_table vm_table[] = {
.proc_handler = &proc_dointvec,
},
{
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "oom_kill_allocating_task",
+ .data = &sysctl_oom_kill_allocating_task,
+ .maxlen = sizeof(sysctl_oom_kill_allocating_task),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
+ {
.ctl_name = VM_OVERCOMMIT_RATIO,
.procname = "overcommit_ratio",
.data = &sysctl_overcommit_ratio,
@@ -813,7 +841,7 @@ static ctl_table vm_table[] = {
.data = &vm_dirty_ratio,
.maxlen = sizeof(vm_dirty_ratio),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
+ .proc_handler = &dirty_ratio_handler,
.strategy = &sysctl_intvec,
.extra1 = &zero,
.extra2 = &one_hundred,
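
For softlockup_thresh above, proc_dointvec_minmax plus the sysctl_intvec strategy keep accepted writes within the [one, sixty] range, i.e. 1..60 seconds. A hedged sketch of another entry built the same way (the knob name and variable are hypothetical):

static int my_knob;

static ctl_table my_table[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "my_knob",
		.data		= &my_knob,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero,		/* floor */
		.extra2		= &one_hundred,		/* ceiling */
	},
	{ .ctl_name = 0 }
};
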
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index 059431ed67db..7d4d7f9c1bb2 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -20,7 +20,6 @@
#include <linux/taskstats_kern.h>
#include <linux/tsacct_kern.h>
#include <linux/delayacct.h>
-#include <linux/tsacct_kern.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <net/genetlink.h>
diff --git a/kernel/time.c b/kernel/time.c
index 1afcc78dc3b1..2d5b6a682138 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -34,7 +34,6 @@
#include <linux/syscalls.h>
#include <linux/security.h>
#include <linux/fs.h>
-#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index fc3fc79b3d59..fab9dd8bbd6b 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -274,21 +274,12 @@ out:
*/
void tick_broadcast_on_off(unsigned long reason, int *oncpu)
{
- int cpu = get_cpu();
-
- if (!cpu_isset(*oncpu, cpu_online_map)) {
+ if (!cpu_isset(*oncpu, cpu_online_map))
printk(KERN_ERR "tick-braodcast: ignoring broadcast for "
"offline CPU #%d\n", *oncpu);
- } else {
-
- if (cpu == *oncpu)
- tick_do_broadcast_on_off(&reason);
- else
- smp_call_function_single(*oncpu,
- tick_do_broadcast_on_off,
- &reason, 1, 1);
- }
- put_cpu();
+ else
+ smp_call_function_single(*oncpu, tick_do_broadcast_on_off,
+ &reason, 1, 1);
}
/*
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 8c3fef1db09c..ce89ffb474d0 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -570,7 +570,7 @@ void tick_setup_sched_timer(void)
/* Get the next period (per cpu) */
ts->sched_timer.expires = tick_init_jiffy_update();
offset = ktime_to_ns(tick_period) >> 1;
- do_div(offset, NR_CPUS);
+ do_div(offset, num_possible_cpus());
offset *= smp_processor_id();
ts->sched_timer.expires = ktime_add_ns(ts->sched_timer.expires, offset);
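
Worked example of the staggering fix, with assumed numbers (HZ=250, so tick_period = 4,000,000 ns, and 4 possible CPUs): offset starts at 2,000,000 ns; do_div() by num_possible_cpus() = 4 gives a 500,000 ns slot, so CPU 2's sched timer fires 1 ms into the period. Dividing by NR_CPUS (say 255) instead would squeeze all four CPUs into the first ~24 us of the tick, largely defeating the staggering.
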
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 7e8983aecf83..e5e466b27598 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -24,9 +24,7 @@
* This read-write spinlock protects us from races in SMP while
* playing with xtime and avenrun.
*/
-__attribute__((weak)) __cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
-
-EXPORT_SYMBOL(xtime_lock);
+__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
/*
@@ -47,7 +45,6 @@ EXPORT_SYMBOL(xtime_lock);
struct timespec xtime __attribute__ ((aligned (16)));
struct timespec wall_to_monotonic __attribute__ ((aligned (16)));
static unsigned long total_sleep_time; /* seconds */
-EXPORT_SYMBOL(xtime);
static struct timespec xtime_cache __attribute__ ((aligned (16)));
static inline void update_xtime_cache(u64 nsec)
diff --git a/kernel/user.c b/kernel/user.c
index 7e8215d87b40..e91331c457e2 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -44,7 +44,6 @@ struct user_struct root_user = {
.processes = ATOMIC_INIT(1),
.files = ATOMIC_INIT(0),
.sigpending = ATOMIC_INIT(0),
- .mq_bytes = 0,
.locked_shm = 0,
#ifdef CONFIG_KEYS
.uid_keyring = &root_user_keyring,
@@ -58,19 +57,17 @@ struct user_struct root_user = {
/*
* These routines must be called with the uidhash spinlock held!
*/
-static inline void uid_hash_insert(struct user_struct *up,
- struct hlist_head *hashent)
+static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
hlist_add_head(&up->uidhash_node, hashent);
}
-static inline void uid_hash_remove(struct user_struct *up)
+static void uid_hash_remove(struct user_struct *up)
{
hlist_del_init(&up->uidhash_node);
}
-static inline struct user_struct *uid_hash_find(uid_t uid,
- struct hlist_head *hashent)
+static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
{
struct user_struct *user;
struct hlist_node *h;
@@ -350,8 +347,9 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
atomic_set(&new->inotify_watches, 0);
atomic_set(&new->inotify_devs, 0);
#endif
-
+#ifdef CONFIG_POSIX_MQUEUE
new->mq_bytes = 0;
+#endif
new->locked_shm = 0;
if (alloc_uid_keyring(new, current) < 0) {