Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Makefile            |   3
-rw-r--r--  kernel/async.c             | 321
-rw-r--r--  kernel/capability.c        |   2
-rw-r--r--  kernel/cgroup.c            |  33
-rw-r--r--  kernel/compat.c            |   5
-rw-r--r--  kernel/cpu.c               |   6
-rw-r--r--  kernel/cpuset.c            |  34
-rw-r--r--  kernel/dma-coherent.c      |  42
-rw-r--r--  kernel/exit.c              |  21
-rw-r--r--  kernel/fork.c              |  17
-rw-r--r--  kernel/futex.c             |  72
-rw-r--r--  kernel/hrtimer.c           | 142
-rw-r--r--  kernel/irq/autoprobe.c     |   5
-rw-r--r--  kernel/kmod.c              |   4
-rw-r--r--  kernel/kprobes.c           | 281
-rw-r--r--  kernel/ksysfs.c            |   4
-rw-r--r--  kernel/module.c            |  35
-rw-r--r--  kernel/panic.c             |   2
-rw-r--r--  kernel/pid.c               |   2
-rw-r--r--  kernel/power/main.c        |   6
-rw-r--r--  kernel/printk.c            |   2
-rw-r--r--  kernel/profile.c           |   1
-rw-r--r--  kernel/rcupdate.c          |  11
-rw-r--r--  kernel/rcupreempt.c        |  11
-rw-r--r--  kernel/rcutorture.c        |  18
-rw-r--r--  kernel/rcutree.c           |  13
-rw-r--r--  kernel/resource.c          |  61
-rw-r--r--  kernel/sched.c             |  23
-rw-r--r--  kernel/sched_clock.c       |   5
-rw-r--r--  kernel/sched_cpupri.c      |   2
-rw-r--r--  kernel/sched_fair.c        |  30
-rw-r--r--  kernel/signal.c            |   3
-rw-r--r--  kernel/sys.c               |   4
-rw-r--r--  kernel/sysctl.c            |  27
-rw-r--r--  kernel/test_kprobes.c      | 210
-rw-r--r--  kernel/time.c              |   4
-rw-r--r--  kernel/time/jiffies.c      |   2
-rw-r--r--  kernel/time/timekeeping.c  |   7
-rw-r--r--  kernel/tsacct.c            |   4
39 files changed, 1085 insertions, 390 deletions
diff --git a/kernel/Makefile b/kernel/Makefile
index e1c5bf3365c0..2921d90ce32f 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -9,7 +9,8 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o \
rcupdate.o extable.o params.o posix-timers.o \
kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
- notifier.o ksysfs.o pm_qos_params.o sched_clock.o cred.o
+ notifier.o ksysfs.o pm_qos_params.o sched_clock.o cred.o \
+ async.o
ifdef CONFIG_FUNCTION_TRACER
# Do not trace debug files and internal ftrace files
diff --git a/kernel/async.c b/kernel/async.c
new file mode 100644
index 000000000000..97373380c9e7
--- /dev/null
+++ b/kernel/async.c
@@ -0,0 +1,321 @@
+/*
+ * async.c: Asynchronous function calls for boot performance
+ *
+ * (C) Copyright 2009 Intel Corporation
+ * Author: Arjan van de Ven <arjan@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+
+/*
+
+Goals and Theory of Operation
+
+The primary goal of this feature is to reduce the kernel boot time, by
+running various independent hardware delays and discovery operations
+decoupled from each other and not strictly serialized.
+
+More specifically, the asynchronous function call concept allows
+certain operations (primarily during system boot) to happen
+asynchronously, out of order, while these operations still
+have their externally visible parts happen sequentially and in-order
+(not unlike how out-of-order CPUs retire their instructions in order).
+
+Key to the asynchronous function call implementation is the concept of
+a "sequence cookie" (which, although it has an abstracted type, can be
+thought of as a monotonically incrementing number).
+
+The async core will assign each scheduled event such a sequence cookie and
+pass this to the called functions.
+
+The asynchronously called function should, before doing a globally visible
+operation such as registering device numbers, call the
+async_synchronize_cookie() function and pass in its own cookie. The
+async_synchronize_cookie() function will make sure that all asynchronous
+operations that were scheduled prior to the operation corresponding with the
+cookie have completed.
+
+Subsystem/driver initialization code that schedules asynchronous probe
+functions, but which shares global resources with other drivers/subsystems
+that do not use the asynchronous call feature, needs to do a full
+synchronization with the async_synchronize_full() function before returning
+from its init function. This is to maintain strict ordering between the
+asynchronous and synchronous parts of the kernel.
+
+*/
+
+#include <linux/async.h>
+#include <linux/module.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/kthread.h>
+#include <asm/atomic.h>
+
+static async_cookie_t next_cookie = 1;
+
+#define MAX_THREADS 256
+#define MAX_WORK 32768
+
+static LIST_HEAD(async_pending);
+static LIST_HEAD(async_running);
+static DEFINE_SPINLOCK(async_lock);
+
+struct async_entry {
+ struct list_head list;
+ async_cookie_t cookie;
+ async_func_ptr *func;
+ void *data;
+ struct list_head *running;
+};
+
+static DECLARE_WAIT_QUEUE_HEAD(async_done);
+static DECLARE_WAIT_QUEUE_HEAD(async_new);
+
+static atomic_t entry_count;
+static atomic_t thread_count;
+
+extern int initcall_debug;
+
+
+/*
+ * MUST be called with the lock held!
+ */
+static async_cookie_t __lowest_in_progress(struct list_head *running)
+{
+ struct async_entry *entry;
+ if (!list_empty(&async_pending)) {
+ entry = list_first_entry(&async_pending,
+ struct async_entry, list);
+ return entry->cookie;
+ } else if (!list_empty(running)) {
+ entry = list_first_entry(running,
+ struct async_entry, list);
+ return entry->cookie;
+ } else {
+ /* nothing in progress... next_cookie is "infinity" */
+ return next_cookie;
+ }
+
+}
+/*
+ * pick the first pending entry and run it
+ */
+static void run_one_entry(void)
+{
+ unsigned long flags;
+ struct async_entry *entry;
+ ktime_t calltime, delta, rettime;
+
+ /* 1) pick one task from the pending queue */
+
+ spin_lock_irqsave(&async_lock, flags);
+ if (list_empty(&async_pending))
+ goto out;
+ entry = list_first_entry(&async_pending, struct async_entry, list);
+
+ /* 2) move it to the running queue */
+ list_del(&entry->list);
+ list_add_tail(&entry->list, &async_running);
+ spin_unlock_irqrestore(&async_lock, flags);
+
+ /* 3) run it (and print duration) */
+ if (initcall_debug && system_state == SYSTEM_BOOTING) {
+ printk("calling %lli_%pF @ %i\n", entry->cookie, entry->func, task_pid_nr(current));
+ calltime = ktime_get();
+ }
+ entry->func(entry->data, entry->cookie);
+ if (initcall_debug && system_state == SYSTEM_BOOTING) {
+ rettime = ktime_get();
+ delta = ktime_sub(rettime, calltime);
+ printk("initcall %lli_%pF returned 0 after %lld usecs\n", entry->cookie,
+ entry->func, ktime_to_ns(delta) >> 10);
+ }
+
+ /* 4) remove it from the running queue */
+ spin_lock_irqsave(&async_lock, flags);
+ list_del(&entry->list);
+
+ /* 5) free the entry */
+ kfree(entry);
+ atomic_dec(&entry_count);
+
+ spin_unlock_irqrestore(&async_lock, flags);
+
+ /* 6) wake up any waiters. */
+ wake_up(&async_done);
+ return;
+
+out:
+ spin_unlock_irqrestore(&async_lock, flags);
+}
+
+
+static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct list_head *running)
+{
+ struct async_entry *entry;
+ unsigned long flags;
+ async_cookie_t newcookie;
+
+
+ /* allow irq-off callers */
+ entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);
+
+ /*
+ * If we're out of memory or if there's too much work
+ * pending already, we execute synchronously.
+ */
+ if (!entry || atomic_read(&entry_count) > MAX_WORK) {
+ kfree(entry);
+ spin_lock_irqsave(&async_lock, flags);
+ newcookie = next_cookie++;
+ spin_unlock_irqrestore(&async_lock, flags);
+
+ /* low on memory.. run synchronously */
+ ptr(data, newcookie);
+ return newcookie;
+ }
+ entry->func = ptr;
+ entry->data = data;
+ entry->running = running;
+
+ spin_lock_irqsave(&async_lock, flags);
+ newcookie = entry->cookie = next_cookie++;
+ list_add_tail(&entry->list, &async_pending);
+ atomic_inc(&entry_count);
+ spin_unlock_irqrestore(&async_lock, flags);
+ wake_up(&async_new);
+ return newcookie;
+}
+
+async_cookie_t async_schedule(async_func_ptr *ptr, void *data)
+{
+ return __async_schedule(ptr, data, &async_pending);
+}
+EXPORT_SYMBOL_GPL(async_schedule);
+
+async_cookie_t async_schedule_special(async_func_ptr *ptr, void *data, struct list_head *running)
+{
+ return __async_schedule(ptr, data, running);
+}
+EXPORT_SYMBOL_GPL(async_schedule_special);
+
+void async_synchronize_full(void)
+{
+ async_synchronize_cookie(next_cookie);
+}
+EXPORT_SYMBOL_GPL(async_synchronize_full);
+
+void async_synchronize_full_special(struct list_head *list)
+{
+ async_synchronize_cookie_special(next_cookie, list);
+}
+EXPORT_SYMBOL_GPL(async_synchronize_full_special);
+
+void async_synchronize_cookie_special(async_cookie_t cookie, struct list_head *running)
+{
+ ktime_t starttime, delta, endtime;
+
+ if (initcall_debug && system_state == SYSTEM_BOOTING) {
+ printk("async_waiting @ %i\n", task_pid_nr(current));
+ starttime = ktime_get();
+ }
+
+ wait_event(async_done, __lowest_in_progress(running) >= cookie);
+
+ if (initcall_debug && system_state == SYSTEM_BOOTING) {
+ endtime = ktime_get();
+ delta = ktime_sub(endtime, starttime);
+
+ printk("async_continuing @ %i after %lli usec\n",
+ task_pid_nr(current), ktime_to_ns(delta) >> 10);
+ }
+}
+EXPORT_SYMBOL_GPL(async_synchronize_cookie_special);
+
+void async_synchronize_cookie(async_cookie_t cookie)
+{
+ async_synchronize_cookie_special(cookie, &async_running);
+}
+EXPORT_SYMBOL_GPL(async_synchronize_cookie);
+
+
+static int async_thread(void *unused)
+{
+ DECLARE_WAITQUEUE(wq, current);
+ add_wait_queue(&async_new, &wq);
+
+ while (!kthread_should_stop()) {
+ int ret = HZ;
+ set_current_state(TASK_INTERRUPTIBLE);
+ /*
+ * check the list head without lock.. false positives
+ * are dealt with inside run_one_entry() while holding
+ * the lock.
+ */
+ rmb();
+ if (!list_empty(&async_pending))
+ run_one_entry();
+ else
+ ret = schedule_timeout(HZ);
+
+ if (ret == 0) {
+ /*
+ * we timed out; this means we as a thread are redundant.
+ * we sign off and die, but to avoid any races there
+ * is a last-straw check to see if work snuck in.
+ */
+ atomic_dec(&thread_count);
+ wmb(); /* manager must see our departure first */
+ if (list_empty(&async_pending))
+ break;
+ /*
+ * woops work came in between us timing out and us
+ * signing off; we need to stay alive and keep working.
+ */
+ atomic_inc(&thread_count);
+ }
+ }
+ remove_wait_queue(&async_new, &wq);
+
+ return 0;
+}
+
+static int async_manager_thread(void *unused)
+{
+ DECLARE_WAITQUEUE(wq, current);
+ add_wait_queue(&async_new, &wq);
+
+ while (!kthread_should_stop()) {
+ int tc, ec;
+
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ tc = atomic_read(&thread_count);
+ rmb();
+ ec = atomic_read(&entry_count);
+
+ while (tc < ec && tc < MAX_THREADS) {
+ kthread_run(async_thread, NULL, "async/%i", tc);
+ atomic_inc(&thread_count);
+ tc++;
+ }
+
+ schedule();
+ }
+ remove_wait_queue(&async_new, &wq);
+
+ return 0;
+}
+
+static int __init async_init(void)
+{
+ kthread_run(async_manager_thread, NULL, "async/mgr");
+ return 0;
+}
+
+core_initcall(async_init);
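
For reference, a minimal usage sketch of the API introduced above (not part of
this patch; the demo module and its names are hypothetical, and msleep() merely
stands in for hardware delays). Several probes are scheduled and may run out of
order, but each one waits on its own cookie before its externally visible step,
as the theory-of-operation comment describes:

#include <linux/async.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>

static void async_demo_probe(void *data, async_cookie_t cookie)
{
	/* Slow, independent work; may run out of order with other probes. */
	msleep(100);	/* stand-in for hardware delays and discovery */

	/*
	 * Before the externally visible part (a real driver would e.g.
	 * register device numbers here), wait for all async work that was
	 * scheduled before this call.
	 */
	async_synchronize_cookie(cookie);
	printk(KERN_INFO "async demo: probe %ld finished in order\n",
	       (long)data);
}

static int __init async_demo_init(void)
{
	long i;

	for (i = 0; i < 4; i++)
		async_schedule(async_demo_probe, (void *)i);

	/*
	 * Needed only if this init code shares global resources with
	 * drivers that do not use the async machinery; shown here for
	 * completeness.
	 */
	async_synchronize_full();
	return 0;
}
module_init(async_demo_init);
MODULE_LICENSE("GPL");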
diff --git a/kernel/capability.c b/kernel/capability.c
index c598d9d5be4f..688926e496be 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -306,7 +306,7 @@ int capable(int cap)
BUG();
}
- if (has_capability(current, cap)) {
+ if (security_capable(cap) == 0) {
current->flags |= PF_SUPERPRIV;
return 1;
}
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 87bb0258fd27..f221446aa02d 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -116,7 +116,6 @@ static int root_count;
* be called.
*/
static int need_forkexit_callback __read_mostly;
-static int need_mm_owner_callback __read_mostly;
/* convenient tests for these bits */
inline int cgroup_is_removed(const struct cgroup *cgrp)
@@ -2539,7 +2538,6 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss)
init_css_set.subsys[ss->subsys_id] = dummytop->subsys[ss->subsys_id];
need_forkexit_callback |= ss->fork || ss->exit;
- need_mm_owner_callback |= !!ss->mm_owner_changed;
/* At system boot, before all subsystems have been
* registered, no tasks have been forked, so we don't
@@ -2789,37 +2787,6 @@ void cgroup_fork_callbacks(struct task_struct *child)
}
}
-#ifdef CONFIG_MM_OWNER
-/**
- * cgroup_mm_owner_callbacks - run callbacks when the mm->owner changes
- * @p: the new owner
- *
- * Called on every change to mm->owner. mm_init_owner() does not
- * invoke this routine, since it assigns the mm->owner the first time
- * and does not change it.
- *
- * The callbacks are invoked with mmap_sem held in read mode.
- */
-void cgroup_mm_owner_callbacks(struct task_struct *old, struct task_struct *new)
-{
- struct cgroup *oldcgrp, *newcgrp = NULL;
-
- if (need_mm_owner_callback) {
- int i;
- for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
- struct cgroup_subsys *ss = subsys[i];
- oldcgrp = task_cgroup(old, ss->subsys_id);
- if (new)
- newcgrp = task_cgroup(new, ss->subsys_id);
- if (oldcgrp == newcgrp)
- continue;
- if (ss->mm_owner_changed)
- ss->mm_owner_changed(ss, oldcgrp, newcgrp, new);
- }
- }
-}
-#endif /* CONFIG_MM_OWNER */
-
/**
* cgroup_post_fork - called on a new task after adding it to the task list
* @child: the task in question
diff --git a/kernel/compat.c b/kernel/compat.c
index d52e2ec1deb5..42d56544460f 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -24,6 +24,7 @@
#include <linux/migrate.h>
#include <linux/posix-timers.h>
#include <linux/times.h>
+#include <linux/ptrace.h>
#include <asm/uaccess.h>
@@ -229,6 +230,7 @@ asmlinkage long compat_sys_times(struct compat_tms __user *tbuf)
if (copy_to_user(tbuf, &tmp, sizeof(tmp)))
return -EFAULT;
}
+ force_successful_syscall_return();
return compat_jiffies_to_clock_t(jiffies);
}
@@ -894,8 +896,9 @@ asmlinkage long compat_sys_time(compat_time_t __user * tloc)
if (tloc) {
if (put_user(i,tloc))
- i = -EFAULT;
+ return -EFAULT;
}
+ force_successful_syscall_return();
return i;
}
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 30e74dd6d01b..79e40f00dcb8 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -379,8 +379,11 @@ static cpumask_var_t frozen_cpus;
int disable_nonboot_cpus(void)
{
- int cpu, first_cpu, error = 0;
+ int cpu, first_cpu, error;
+ error = stop_machine_create();
+ if (error)
+ return error;
cpu_maps_update_begin();
first_cpu = cpumask_first(cpu_online_mask);
/* We take down all of the non-boot CPUs in one shot to avoid races
@@ -409,6 +412,7 @@ int disable_nonboot_cpus(void)
printk(KERN_ERR "Non-boot CPUs are not disabled\n");
}
cpu_maps_update_done();
+ stop_machine_destroy();
return error;
}
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 39c1a4c1c5a9..345ace5117de 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -240,6 +240,17 @@ static struct cpuset top_cpuset = {
static DEFINE_MUTEX(callback_mutex);
/*
+ * cpuset_buffer_lock protects both the cpuset_name and cpuset_nodelist
+ * buffers. They are statically allocated to prevent using excess stack
+ * when calling cpuset_print_task_mems_allowed().
+ */
+#define CPUSET_NAME_LEN (128)
+#define CPUSET_NODELIST_LEN (256)
+static char cpuset_name[CPUSET_NAME_LEN];
+static char cpuset_nodelist[CPUSET_NODELIST_LEN];
+static DEFINE_SPINLOCK(cpuset_buffer_lock);
+
+/*
* This is ugly, but preserves the userspace API for existing cpuset
* users. If someone tries to mount the "cpuset" filesystem, we
* silently switch it to mount "cgroup" instead
@@ -2356,6 +2367,29 @@ int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed);
}
+/**
+ * cpuset_print_task_mems_allowed - prints task's cpuset and mems_allowed
+ * @tsk: pointer to task_struct of some task.
+ *
+ * Description: Prints @tsk's name, cpuset name, and cached copy of its
+ * mems_allowed to the kernel log. Must hold task_lock(tsk) to allow
+ * dereferencing task_cs(tsk).
+ */
+void cpuset_print_task_mems_allowed(struct task_struct *tsk)
+{
+ struct dentry *dentry;
+
+ dentry = task_cs(tsk)->css.cgroup->dentry;
+ spin_lock(&cpuset_buffer_lock);
+ snprintf(cpuset_name, CPUSET_NAME_LEN,
+ dentry ? (const char *)dentry->d_name.name : "/");
+ nodelist_scnprintf(cpuset_nodelist, CPUSET_NODELIST_LEN,
+ tsk->mems_allowed);
+ printk(KERN_INFO "%s cpuset=%s mems_allowed=%s\n",
+ tsk->comm, cpuset_name, cpuset_nodelist);
+ spin_unlock(&cpuset_buffer_lock);
+}
+
/*
* Collection of memory_pressure is suppressed unless
* this flag is enabled by writing "1" to the special
diff --git a/kernel/dma-coherent.c b/kernel/dma-coherent.c
index f013a0c2e111..038707404b76 100644
--- a/kernel/dma-coherent.c
+++ b/kernel/dma-coherent.c
@@ -109,20 +109,40 @@ EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
int dma_alloc_from_coherent(struct device *dev, ssize_t size,
dma_addr_t *dma_handle, void **ret)
{
- struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
+ struct dma_coherent_mem *mem;
int order = get_order(size);
+ int pageno;
- if (mem) {
- int page = bitmap_find_free_region(mem->bitmap, mem->size,
- order);
- if (page >= 0) {
- *dma_handle = mem->device_base + (page << PAGE_SHIFT);
- *ret = mem->virt_base + (page << PAGE_SHIFT);
- memset(*ret, 0, size);
- } else if (mem->flags & DMA_MEMORY_EXCLUSIVE)
- *ret = NULL;
+ if (!dev)
+ return 0;
+ mem = dev->dma_mem;
+ if (!mem)
+ return 0;
+ if (unlikely(size > mem->size))
+ return 0;
+
+ pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
+ if (pageno >= 0) {
+ /*
+ * Memory was found in the per-device arena.
+ */
+ *dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
+ *ret = mem->virt_base + (pageno << PAGE_SHIFT);
+ memset(*ret, 0, size);
+ } else if (mem->flags & DMA_MEMORY_EXCLUSIVE) {
+ /*
+ * The per-device arena is exhausted and we are not
+ * permitted to fall back to generic memory.
+ */
+ *ret = NULL;
+ } else {
+ /*
+ * The per-device arena is exhausted and we are
+ * permitted to fall back to generic memory.
+ */
+ return 0;
}
- return (mem != NULL);
+ return 1;
}
EXPORT_SYMBOL(dma_alloc_from_coherent);
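
A sketch of how a caller is expected to consume the clarified return convention
above: a non-zero return means the per-device arena answered the request
(possibly with *ret == NULL when DMA_MEMORY_EXCLUSIVE forbids a fallback),
while zero means the caller should allocate from generic memory. The wrapper
below is hypothetical and deliberately simplified, not lifted from any
particular architecture:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <asm/io.h>

void *example_dma_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	/* Non-zero: the per-device arena handled the request, even if it
	 * handed back NULL because falling back is not permitted. */
	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
		return ret;

	/* Zero: no usable arena, fall back to generic memory. A real
	 * implementation would also honour DMA masks, cache attributes
	 * and so on; this only shows the shape of the fallback. */
	ret = (void *)__get_free_pages(gfp, get_order(size));
	if (ret) {
		memset(ret, 0, size);
		*dma_handle = virt_to_phys(ret);
	}
	return ret;
}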
diff --git a/kernel/exit.c b/kernel/exit.c
index c9e5a1c14e08..c7740fa3252c 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -642,35 +642,31 @@ retry:
/*
* We found no owner yet mm_users > 1: this implies that we are
* most likely racing with swapoff (try_to_unuse()) or /proc or
- * ptrace or page migration (get_task_mm()). Mark owner as NULL,
- * so that subsystems can understand the callback and take action.
+ * ptrace or page migration (get_task_mm()). Mark owner as NULL.
*/
- down_write(&mm->mmap_sem);
- cgroup_mm_owner_callbacks(mm->owner, NULL);
mm->owner = NULL;
- up_write(&mm->mmap_sem);
return;
assign_new_owner:
BUG_ON(c == p);
get_task_struct(c);
- read_unlock(&tasklist_lock);
- down_write(&mm->mmap_sem);
/*
* The task_lock protects c->mm from changing.
* We always want mm->owner->mm == mm
*/
task_lock(c);
+ /*
+ * Delay read_unlock() till we have the task_lock()
+ * to ensure that c does not slip away underneath us
+ */
+ read_unlock(&tasklist_lock);
if (c->mm != mm) {
task_unlock(c);
- up_write(&mm->mmap_sem);
put_task_struct(c);
goto retry;
}
- cgroup_mm_owner_callbacks(mm->owner, c);
mm->owner = c;
task_unlock(c);
- up_write(&mm->mmap_sem);
put_task_struct(c);
}
#endif /* CONFIG_MM_OWNER */
@@ -1055,10 +1051,7 @@ NORET_TYPE void do_exit(long code)
preempt_count());
acct_update_integrals(tsk);
- if (tsk->mm) {
- update_hiwater_rss(tsk->mm);
- update_hiwater_vm(tsk->mm);
- }
+
group_dead = atomic_dec_and_test(&tsk->signal->live);
if (group_dead) {
hrtimer_cancel(&tsk->signal->real_timer);
diff --git a/kernel/fork.c b/kernel/fork.c
index 43cbf30669e6..7b8f2a78be3d 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -400,6 +400,18 @@ __cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);
#define allocate_mm() (kmem_cache_alloc(mm_cachep, GFP_KERNEL))
#define free_mm(mm) (kmem_cache_free(mm_cachep, (mm)))
+static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT;
+
+static int __init coredump_filter_setup(char *s)
+{
+ default_dump_filter =
+ (simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) &
+ MMF_DUMP_FILTER_MASK;
+ return 1;
+}
+
+__setup("coredump_filter=", coredump_filter_setup);
+
#include <linux/init_task.h>
static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
@@ -408,8 +420,7 @@ static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
atomic_set(&mm->mm_count, 1);
init_rwsem(&mm->mmap_sem);
INIT_LIST_HEAD(&mm->mmlist);
- mm->flags = (current->mm) ? current->mm->flags
- : MMF_DUMP_FILTER_DEFAULT;
+ mm->flags = (current->mm) ? current->mm->flags : default_dump_filter;
mm->core_state = NULL;
mm->nr_ptes = 0;
set_mm_counter(mm, file_rss, 0);
@@ -758,7 +769,7 @@ static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
{
struct sighand_struct *sig;
- if (clone_flags & (CLONE_SIGHAND | CLONE_THREAD)) {
+ if (clone_flags & CLONE_SIGHAND) {
atomic_inc(&current->sighand->count);
return 0;
}
diff --git a/kernel/futex.c b/kernel/futex.c
index 7c6cbabe52b3..002aa189eb09 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -170,8 +170,11 @@ static void get_futex_key_refs(union futex_key *key)
*/
static void drop_futex_key_refs(union futex_key *key)
{
- if (!key->both.ptr)
+ if (!key->both.ptr) {
+ /* If we're here then we tried to put a key we failed to get */
+ WARN_ON_ONCE(1);
return;
+ }
switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
case FUT_OFF_INODE:
@@ -730,8 +733,8 @@ static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset)
}
spin_unlock(&hb->lock);
-out:
put_futex_key(fshared, &key);
+out:
return ret;
}
@@ -755,7 +758,7 @@ retryfull:
goto out;
ret = get_futex_key(uaddr2, fshared, &key2);
if (unlikely(ret != 0))
- goto out;
+ goto out_put_key1;
hb1 = hash_futex(&key1);
hb2 = hash_futex(&key2);
@@ -777,12 +780,12 @@ retry:
* but we might get them from range checking
*/
ret = op_ret;
- goto out;
+ goto out_put_keys;
#endif
if (unlikely(op_ret != -EFAULT)) {
ret = op_ret;
- goto out;
+ goto out_put_keys;
}
/*
@@ -796,7 +799,7 @@ retry:
ret = futex_handle_fault((unsigned long)uaddr2,
attempt);
if (ret)
- goto out;
+ goto out_put_keys;
goto retry;
}
@@ -834,10 +837,11 @@ retry:
spin_unlock(&hb1->lock);
if (hb1 != hb2)
spin_unlock(&hb2->lock);
-out:
+out_put_keys:
put_futex_key(fshared, &key2);
+out_put_key1:
put_futex_key(fshared, &key1);
-
+out:
return ret;
}
@@ -854,13 +858,13 @@ static int futex_requeue(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
struct futex_q *this, *next;
int ret, drop_count = 0;
- retry:
+retry:
ret = get_futex_key(uaddr1, fshared, &key1);
if (unlikely(ret != 0))
goto out;
ret = get_futex_key(uaddr2, fshared, &key2);
if (unlikely(ret != 0))
- goto out;
+ goto out_put_key1;
hb1 = hash_futex(&key1);
hb2 = hash_futex(&key2);
@@ -882,7 +886,7 @@ static int futex_requeue(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
if (!ret)
goto retry;
- return ret;
+ goto out_put_keys;
}
if (curval != *cmpval) {
ret = -EAGAIN;
@@ -927,9 +931,11 @@ out_unlock:
while (--drop_count >= 0)
drop_futex_key_refs(&key1);
-out:
+out_put_keys:
put_futex_key(fshared, &key2);
+out_put_key1:
put_futex_key(fshared, &key1);
+out:
return ret;
}
@@ -990,7 +996,7 @@ static int unqueue_me(struct futex_q *q)
int ret = 0;
/* In the common case we don't take the spinlock, which is nice. */
- retry:
+retry:
lock_ptr = q->lock_ptr;
barrier();
if (lock_ptr != NULL) {
@@ -1172,11 +1178,11 @@ static int futex_wait(u32 __user *uaddr, int fshared,
q.pi_state = NULL;
q.bitset = bitset;
- retry:
+retry:
q.key = FUTEX_KEY_INIT;
ret = get_futex_key(uaddr, fshared, &q.key);
if (unlikely(ret != 0))
- goto out_release_sem;
+ goto out;
hb = queue_lock(&q);
@@ -1204,6 +1210,7 @@ static int futex_wait(u32 __user *uaddr, int fshared,
if (unlikely(ret)) {
queue_unlock(&q, hb);
+ put_futex_key(fshared, &q.key);
ret = get_user(uval, uaddr);
@@ -1213,7 +1220,7 @@ static int futex_wait(u32 __user *uaddr, int fshared,
}
ret = -EWOULDBLOCK;
if (uval != val)
- goto out_unlock_release_sem;
+ goto out_unlock_put_key;
/* Only actually queue if *uaddr contained val. */
queue_me(&q, hb);
@@ -1305,11 +1312,11 @@ static int futex_wait(u32 __user *uaddr, int fshared,
return -ERESTART_RESTARTBLOCK;
}
- out_unlock_release_sem:
+out_unlock_put_key:
queue_unlock(&q, hb);
-
- out_release_sem:
put_futex_key(fshared, &q.key);
+
+out:
return ret;
}
@@ -1358,16 +1365,16 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared,
}
q.pi_state = NULL;
- retry:
+retry:
q.key = FUTEX_KEY_INIT;
ret = get_futex_key(uaddr, fshared, &q.key);
if (unlikely(ret != 0))
- goto out_release_sem;
+ goto out;
- retry_unlocked:
+retry_unlocked:
hb = queue_lock(&q);
- retry_locked:
+retry_locked:
ret = lock_taken = 0;
/*
@@ -1388,14 +1395,14 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared,
*/
if (unlikely((curval & FUTEX_TID_MASK) == task_pid_vnr(current))) {
ret = -EDEADLK;
- goto out_unlock_release_sem;
+ goto out_unlock_put_key;
}
/*
* Surprise - we got the lock. Just return to userspace:
*/
if (unlikely(!curval))
- goto out_unlock_release_sem;
+ goto out_unlock_put_key;
uval = curval;
@@ -1431,7 +1438,7 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared,
* We took the lock due to owner died take over.
*/
if (unlikely(lock_taken))
- goto out_unlock_release_sem;
+ goto out_unlock_put_key;
/*
* We dont have the lock. Look up the PI state (or create it if
@@ -1470,7 +1477,7 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared,
goto retry_locked;
}
default:
- goto out_unlock_release_sem;
+ goto out_unlock_put_key;
}
}
@@ -1561,16 +1568,17 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared,
destroy_hrtimer_on_stack(&to->timer);
return ret != -EINTR ? ret : -ERESTARTNOINTR;
- out_unlock_release_sem:
+out_unlock_put_key:
queue_unlock(&q, hb);
- out_release_sem:
+out_put_key:
put_futex_key(fshared, &q.key);
+out:
if (to)
destroy_hrtimer_on_stack(&to->timer);
return ret;
- uaddr_faulted:
+uaddr_faulted:
/*
* We have to r/w *(int __user *)uaddr, and we have to modify it
* atomically. Therefore, if we continue to fault after get_user()
@@ -1583,7 +1591,7 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared,
if (attempt++) {
ret = futex_handle_fault((unsigned long)uaddr, attempt);
if (ret)
- goto out_release_sem;
+ goto out_put_key;
goto retry_unlocked;
}
@@ -1675,9 +1683,9 @@ retry_unlocked:
out_unlock:
spin_unlock(&hb->lock);
-out:
put_futex_key(fshared, &key);
+out:
return ret;
pi_faulted:
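
The relabelled exits above follow the usual one-label-per-held-resource idiom,
so each successful get_futex_key() is paired with exactly one put_futex_key()
no matter where the function bails out. A generic sketch of that shape, with
acquire_key(), release_key() and do_work() as made-up stand-ins rather than
futex code:

/* Illustrative stubs only; they model get/put_futex_key() and the work. */
static int acquire_key(int *key) { *key = 1; return 0; }
static void release_key(int *key) { *key = 0; }
static int do_work(int *k1, int *k2) { return (*k1 && *k2) ? 0 : -1; }

static int two_key_operation(void)
{
	int key1, key2, ret;

	ret = acquire_key(&key1);
	if (ret)
		goto out;		/* nothing held yet */

	ret = acquire_key(&key2);
	if (ret)
		goto out_put_key1;	/* only key1 is held */

	ret = do_work(&key1, &key2);
	/* success and failure both unwind through the labels below */

	release_key(&key2);
out_put_key1:
	release_key(&key1);
out:
	return ret;
}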
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index eb2bfefa6dcc..1455b7651b6b 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -634,7 +634,6 @@ static inline void hrtimer_init_timer_hres(struct hrtimer *timer)
{
}
-static void __run_hrtimer(struct hrtimer *timer);
/*
* When High resolution timers are active, try to reprogram. Note, that in case
@@ -646,13 +645,9 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
struct hrtimer_clock_base *base)
{
if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
- /*
- * XXX: recursion check?
- * hrtimer_forward() should round up with timer granularity
- * so that we never get into inf recursion here,
- * it doesn't do that though
- */
- __run_hrtimer(timer);
+ spin_unlock(&base->cpu_base->lock);
+ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+ spin_lock(&base->cpu_base->lock);
return 1;
}
return 0;
@@ -705,11 +700,6 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
}
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
static inline void hrtimer_init_timer_hres(struct hrtimer *timer) { }
-static inline int hrtimer_reprogram(struct hrtimer *timer,
- struct hrtimer_clock_base *base)
-{
- return 0;
-}
#endif /* CONFIG_HIGH_RES_TIMERS */
@@ -780,9 +770,11 @@ EXPORT_SYMBOL_GPL(hrtimer_forward);
*
* The timer is inserted in expiry order. Insertion into the
* red black tree is O(log(n)). Must hold the base lock.
+ *
+ * Returns 1 when the new timer is the leftmost timer in the tree.
*/
-static void enqueue_hrtimer(struct hrtimer *timer,
- struct hrtimer_clock_base *base, int reprogram)
+static int enqueue_hrtimer(struct hrtimer *timer,
+ struct hrtimer_clock_base *base)
{
struct rb_node **link = &base->active.rb_node;
struct rb_node *parent = NULL;
@@ -814,20 +806,8 @@ static void enqueue_hrtimer(struct hrtimer *timer,
* Insert the timer to the rbtree and check whether it
* replaces the first pending timer
*/
- if (leftmost) {
- /*
- * Reprogram the clock event device. When the timer is already
- * expired hrtimer_enqueue_reprogram has either called the
- * callback or added it to the pending list and raised the
- * softirq.
- *
- * This is a NOP for !HIGHRES
- */
- if (reprogram && hrtimer_enqueue_reprogram(timer, base))
- return;
-
+ if (leftmost)
base->first = &timer->node;
- }
rb_link_node(&timer->node, parent, link);
rb_insert_color(&timer->node, &base->active);
@@ -836,6 +816,8 @@ static void enqueue_hrtimer(struct hrtimer *timer,
* state of a possibly running callback.
*/
timer->state |= HRTIMER_STATE_ENQUEUED;
+
+ return leftmost;
}
/*
@@ -912,7 +894,7 @@ hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long delta_n
{
struct hrtimer_clock_base *base, *new_base;
unsigned long flags;
- int ret;
+ int ret, leftmost;
base = lock_hrtimer_base(timer, &flags);
@@ -940,12 +922,16 @@ hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long delta_n
timer_stats_hrtimer_set_start_info(timer);
+ leftmost = enqueue_hrtimer(timer, new_base);
+
/*
* Only allow reprogramming if the new base is on this CPU.
* (it might still be on another CPU if the timer was pending)
+ *
+ * XXX send_remote_softirq() ?
*/
- enqueue_hrtimer(timer, new_base,
- new_base->cpu_base == &__get_cpu_var(hrtimer_bases));
+ if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases))
+ hrtimer_enqueue_reprogram(timer, new_base);
unlock_hrtimer_base(timer, &flags);
@@ -1157,13 +1143,13 @@ static void __run_hrtimer(struct hrtimer *timer)
spin_lock(&cpu_base->lock);
/*
- * Note: We clear the CALLBACK bit after enqueue_hrtimer to avoid
- * reprogramming of the event hardware. This happens at the end of this
- * function anyway.
+ * Note: We clear the CALLBACK bit after enqueue_hrtimer and
+ * we do not reprogram the event hardware. This happens either in
+ * hrtimer_start_range_ns() or in hrtimer_interrupt()
*/
if (restart != HRTIMER_NORESTART) {
BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
- enqueue_hrtimer(timer, base, 0);
+ enqueue_hrtimer(timer, base);
}
timer->state &= ~HRTIMER_STATE_CALLBACK;
}
@@ -1243,6 +1229,22 @@ void hrtimer_interrupt(struct clock_event_device *dev)
}
}
+/*
+ * local version of hrtimer_peek_ahead_timers() called with interrupts
+ * disabled.
+ */
+static void __hrtimer_peek_ahead_timers(void)
+{
+ struct tick_device *td;
+
+ if (!hrtimer_hres_active())
+ return;
+
+ td = &__get_cpu_var(tick_cpu_device);
+ if (td && td->evtdev)
+ hrtimer_interrupt(td->evtdev);
+}
+
/**
* hrtimer_peek_ahead_timers -- run soft-expired timers now
*
@@ -1254,20 +1256,23 @@ void hrtimer_interrupt(struct clock_event_device *dev)
*/
void hrtimer_peek_ahead_timers(void)
{
- struct tick_device *td;
unsigned long flags;
- if (!hrtimer_hres_active())
- return;
-
local_irq_save(flags);
- td = &__get_cpu_var(tick_cpu_device);
- if (td && td->evtdev)
- hrtimer_interrupt(td->evtdev);
+ __hrtimer_peek_ahead_timers();
local_irq_restore(flags);
}
-#endif /* CONFIG_HIGH_RES_TIMERS */
+static void run_hrtimer_softirq(struct softirq_action *h)
+{
+ hrtimer_peek_ahead_timers();
+}
+
+#else /* CONFIG_HIGH_RES_TIMERS */
+
+static inline void __hrtimer_peek_ahead_timers(void) { }
+
+#endif /* !CONFIG_HIGH_RES_TIMERS */
/*
* Called from timer softirq every jiffy, expire hrtimers:
@@ -1513,39 +1518,36 @@ static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
__remove_hrtimer(timer, old_base, HRTIMER_STATE_MIGRATE, 0);
timer->base = new_base;
/*
- * Enqueue the timers on the new cpu, but do not reprogram
- * the timer as that would enable a deadlock between
- * hrtimer_enqueue_reprogramm() running the timer and us still
- * holding a nested base lock.
- *
- * Instead we tickle the hrtimer interrupt after the migration
- * is done, which will run all expired timers and re-programm
- * the timer device.
+ * Enqueue the timers on the new cpu. This does not
+ * reprogram the event device in case the timer
+ * expires before the earliest on this CPU, but we run
+ * hrtimer_interrupt after we migrated everything to
+ * sort out already expired timers and reprogram the
+ * event device.
*/
- enqueue_hrtimer(timer, new_base, 0);
+ enqueue_hrtimer(timer, new_base);
/* Clear the migration state bit */
timer->state &= ~HRTIMER_STATE_MIGRATE;
}
}
-static int migrate_hrtimers(int scpu)
+static void migrate_hrtimers(int scpu)
{
struct hrtimer_cpu_base *old_base, *new_base;
- int dcpu, i;
+ int i;
BUG_ON(cpu_online(scpu));
- old_base = &per_cpu(hrtimer_bases, scpu);
- new_base = &get_cpu_var(hrtimer_bases);
-
- dcpu = smp_processor_id();
-
tick_cancel_sched_timer(scpu);
+
+ local_irq_disable();
+ old_base = &per_cpu(hrtimer_bases, scpu);
+ new_base = &__get_cpu_var(hrtimer_bases);
/*
* The caller is globally serialized and nobody else
* takes two locks at once, deadlock is not possible.
*/
- spin_lock_irq(&new_base->lock);
+ spin_lock(&new_base->lock);
spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
@@ -1554,15 +1556,11 @@ static int migrate_hrtimers(int scpu)
}
spin_unlock(&old_base->lock);
- spin_unlock_irq(&new_base->lock);
- put_cpu_var(hrtimer_bases);
+ spin_unlock(&new_base->lock);
- return dcpu;
-}
-
-static void tickle_timers(void *arg)
-{
- hrtimer_peek_ahead_timers();
+ /* Check if we got expired work to do */
+ __hrtimer_peek_ahead_timers();
+ local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */
@@ -1583,11 +1581,8 @@ static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
case CPU_DEAD:
case CPU_DEAD_FROZEN:
{
- int dcpu;
-
clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &scpu);
- dcpu = migrate_hrtimers(scpu);
- smp_call_function_single(dcpu, tickle_timers, NULL, 0);
+ migrate_hrtimers(scpu);
break;
}
#endif
@@ -1608,6 +1603,9 @@ void __init hrtimers_init(void)
hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
(void *)(long)smp_processor_id());
register_cpu_notifier(&hrtimers_nb);
+#ifdef CONFIG_HIGH_RES_TIMERS
+ open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
+#endif
}
/**
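
A hedged sketch of the user-visible hrtimer API whose enqueue path changes
above (the module and its names are hypothetical): hrtimer_start() now
enqueues first and only afterwards, if the timer became the leftmost one on
this CPU, reprograms the event device via hrtimer_enqueue_reprogram():

#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/module.h>

static struct hrtimer demo_timer;

static enum hrtimer_restart demo_timer_fn(struct hrtimer *t)
{
	printk(KERN_INFO "hrtimer demo fired\n");
	return HRTIMER_NORESTART;	/* one-shot: not re-enqueued */
}

static int __init hrtimer_demo_init(void)
{
	hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	demo_timer.function = demo_timer_fn;
	/* 100 ms from now; enqueue_hrtimer() reports whether this became
	 * the leftmost timer and the event device must be reprogrammed. */
	hrtimer_start(&demo_timer, ktime_set(0, 100 * NSEC_PER_MSEC),
		      HRTIMER_MODE_REL);
	return 0;
}

static void __exit hrtimer_demo_exit(void)
{
	hrtimer_cancel(&demo_timer);
}

module_init(hrtimer_demo_init);
module_exit(hrtimer_demo_exit);
MODULE_LICENSE("GPL");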
diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c
index cc0f7321b8ce..1de9700f416e 100644
--- a/kernel/irq/autoprobe.c
+++ b/kernel/irq/autoprobe.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
+#include <linux/async.h>
#include "internals.h"
@@ -34,6 +35,10 @@ unsigned long probe_irq_on(void)
unsigned int status;
int i;
+ /*
+ * quiesce the kernel, or at least the asynchronous portion
+ */
+ async_synchronize_full();
mutex_lock(&probing_active);
/*
* something may have generated an irq long ago and we want to
diff --git a/kernel/kmod.c b/kernel/kmod.c
index b46dbb908669..a27a5f64443d 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -51,8 +51,8 @@ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
/**
* request_module - try to load a kernel module
- * @fmt: printf style format string for the name of the module
- * @varargs: arguements as specified in the format string
+ * @fmt: printf style format string for the name of the module
+ * @...: arguments as specified in the format string
*
* Load a module using the user mode module loader. The function returns
* zero on success or a negative errno code on failure. Note that a
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 9f8a3f25259a..1b9cbdc0127a 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -69,7 +69,7 @@ static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
/* NOTE: change this value only with kprobe_mutex held */
static bool kprobe_enabled;
-DEFINE_MUTEX(kprobe_mutex); /* Protects kprobe_table */
+static DEFINE_MUTEX(kprobe_mutex); /* Protects kprobe_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
static struct {
spinlock_t lock ____cacheline_aligned_in_smp;
@@ -115,6 +115,7 @@ enum kprobe_slot_state {
SLOT_USED = 2,
};
+static DEFINE_MUTEX(kprobe_insn_mutex); /* Protects kprobe_insn_pages */
static struct hlist_head kprobe_insn_pages;
static int kprobe_garbage_slots;
static int collect_garbage_slots(void);
@@ -144,10 +145,10 @@ loop_end:
}
/**
- * get_insn_slot() - Find a slot on an executable page for an instruction.
+ * __get_insn_slot() - Find a slot on an executable page for an instruction.
* We allocate an executable page if there's no room on existing ones.
*/
-kprobe_opcode_t __kprobes *get_insn_slot(void)
+static kprobe_opcode_t __kprobes *__get_insn_slot(void)
{
struct kprobe_insn_page *kip;
struct hlist_node *pos;
@@ -196,6 +197,15 @@ kprobe_opcode_t __kprobes *get_insn_slot(void)
return kip->insns;
}
+kprobe_opcode_t __kprobes *get_insn_slot(void)
+{
+ kprobe_opcode_t *ret;
+ mutex_lock(&kprobe_insn_mutex);
+ ret = __get_insn_slot();
+ mutex_unlock(&kprobe_insn_mutex);
+ return ret;
+}
+
/* Return 1 if all garbages are collected, otherwise 0. */
static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
@@ -226,9 +236,13 @@ static int __kprobes collect_garbage_slots(void)
{
struct kprobe_insn_page *kip;
struct hlist_node *pos, *next;
+ int safety;
/* Ensure no-one is preempted on the garbage slots */
- if (check_safety() != 0)
+ mutex_unlock(&kprobe_insn_mutex);
+ safety = check_safety();
+ mutex_lock(&kprobe_insn_mutex);
+ if (safety != 0)
return -EAGAIN;
hlist_for_each_entry_safe(kip, pos, next, &kprobe_insn_pages, hlist) {
@@ -251,6 +265,7 @@ void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty)
struct kprobe_insn_page *kip;
struct hlist_node *pos;
+ mutex_lock(&kprobe_insn_mutex);
hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) {
if (kip->insns <= slot &&
slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
@@ -267,6 +282,8 @@ void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty)
if (dirty && ++kprobe_garbage_slots > INSNS_PER_PAGE)
collect_garbage_slots();
+
+ mutex_unlock(&kprobe_insn_mutex);
}
#endif
@@ -310,7 +327,7 @@ static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
struct kprobe *kp;
list_for_each_entry_rcu(kp, &p->list, list) {
- if (kp->pre_handler) {
+ if (kp->pre_handler && !kprobe_gone(kp)) {
set_kprobe_instance(kp);
if (kp->pre_handler(kp, regs))
return 1;
@@ -326,7 +343,7 @@ static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
struct kprobe *kp;
list_for_each_entry_rcu(kp, &p->list, list) {
- if (kp->post_handler) {
+ if (kp->post_handler && !kprobe_gone(kp)) {
set_kprobe_instance(kp);
kp->post_handler(kp, regs, flags);
reset_kprobe_instance();
@@ -393,7 +410,7 @@ void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
hlist_add_head(&ri->hlist, head);
}
-void kretprobe_hash_lock(struct task_struct *tsk,
+void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
struct hlist_head **head, unsigned long *flags)
{
unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
@@ -404,13 +421,15 @@ void kretprobe_hash_lock(struct task_struct *tsk,
spin_lock_irqsave(hlist_lock, *flags);
}
-static void kretprobe_table_lock(unsigned long hash, unsigned long *flags)
+static void __kprobes kretprobe_table_lock(unsigned long hash,
+ unsigned long *flags)
{
spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
spin_lock_irqsave(hlist_lock, *flags);
}
-void kretprobe_hash_unlock(struct task_struct *tsk, unsigned long *flags)
+void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
+ unsigned long *flags)
{
unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
spinlock_t *hlist_lock;
@@ -419,7 +438,7 @@ void kretprobe_hash_unlock(struct task_struct *tsk, unsigned long *flags)
spin_unlock_irqrestore(hlist_lock, *flags);
}
-void kretprobe_table_unlock(unsigned long hash, unsigned long *flags)
+void __kprobes kretprobe_table_unlock(unsigned long hash, unsigned long *flags)
{
spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
spin_unlock_irqrestore(hlist_lock, *flags);
@@ -526,9 +545,10 @@ static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
ap->addr = p->addr;
ap->pre_handler = aggr_pre_handler;
ap->fault_handler = aggr_fault_handler;
- if (p->post_handler)
+ /* We don't care about a kprobe which has gone. */
+ if (p->post_handler && !kprobe_gone(p))
ap->post_handler = aggr_post_handler;
- if (p->break_handler)
+ if (p->break_handler && !kprobe_gone(p))
ap->break_handler = aggr_break_handler;
INIT_LIST_HEAD(&ap->list);
@@ -547,17 +567,41 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
int ret = 0;
struct kprobe *ap;
+ if (kprobe_gone(old_p)) {
+ /*
+ * Attempting to insert a new probe at the same location that
+ * had a probe in the module vaddr area which was already
+ * freed. So, the instruction slot has already been
+ * released. We need a new slot for the new probe.
+ */
+ ret = arch_prepare_kprobe(old_p);
+ if (ret)
+ return ret;
+ }
if (old_p->pre_handler == aggr_pre_handler) {
copy_kprobe(old_p, p);
ret = add_new_kprobe(old_p, p);
+ ap = old_p;
} else {
ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
- if (!ap)
+ if (!ap) {
+ if (kprobe_gone(old_p))
+ arch_remove_kprobe(old_p);
return -ENOMEM;
+ }
add_aggr_kprobe(ap, old_p);
copy_kprobe(ap, p);
ret = add_new_kprobe(ap, p);
}
+ if (kprobe_gone(old_p)) {
+ /*
+ * If the old_p has gone, its breakpoint has been disarmed.
+ * We have to arm it again after preparing real kprobes.
+ */
+ ap->flags &= ~KPROBE_FLAG_GONE;
+ if (kprobe_enabled)
+ arch_arm_kprobe(ap);
+ }
return ret;
}
@@ -600,8 +644,7 @@ static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p)
return (kprobe_opcode_t *)(((char *)addr) + p->offset);
}
-static int __kprobes __register_kprobe(struct kprobe *p,
- unsigned long called_from)
+int __kprobes register_kprobe(struct kprobe *p)
{
int ret = 0;
struct kprobe *old_p;
@@ -620,28 +663,30 @@ static int __kprobes __register_kprobe(struct kprobe *p,
return -EINVAL;
}
- p->mod_refcounted = 0;
-
+ p->flags = 0;
/*
* Check if are we probing a module.
*/
probed_mod = __module_text_address((unsigned long) p->addr);
if (probed_mod) {
- struct module *calling_mod;
- calling_mod = __module_text_address(called_from);
/*
- * We must allow modules to probe themself and in this case
- * avoid incrementing the module refcount, so as to allow
- * unloading of self probing modules.
+ * We must hold a refcount of the probed module while updating
+ * its code to prohibit unexpected unloading.
*/
- if (calling_mod && calling_mod != probed_mod) {
- if (unlikely(!try_module_get(probed_mod))) {
- preempt_enable();
- return -EINVAL;
- }
- p->mod_refcounted = 1;
- } else
- probed_mod = NULL;
+ if (unlikely(!try_module_get(probed_mod))) {
+ preempt_enable();
+ return -EINVAL;
+ }
+ /*
+ * If the module freed .init.text, we can't insert
+ * kprobes in there.
+ */
+ if (within_module_init((unsigned long)p->addr, probed_mod) &&
+ probed_mod->state != MODULE_STATE_COMING) {
+ module_put(probed_mod);
+ preempt_enable();
+ return -EINVAL;
+ }
}
preempt_enable();
@@ -668,8 +713,9 @@ static int __kprobes __register_kprobe(struct kprobe *p,
out:
mutex_unlock(&kprobe_mutex);
- if (ret && probed_mod)
+ if (probed_mod)
module_put(probed_mod);
+
return ret;
}
@@ -697,16 +743,16 @@ valid_p:
list_is_singular(&old_p->list))) {
/*
* Only probe on the hash list. Disarm only if kprobes are
- * enabled - otherwise, the breakpoint would already have
- * been removed. We save on flushing icache.
+ * enabled and not gone - otherwise, the breakpoint would
+ * already have been removed. We save on flushing icache.
*/
- if (kprobe_enabled)
+ if (kprobe_enabled && !kprobe_gone(old_p))
arch_disarm_kprobe(p);
hlist_del_rcu(&old_p->hlist);
} else {
- if (p->break_handler)
+ if (p->break_handler && !kprobe_gone(p))
old_p->break_handler = NULL;
- if (p->post_handler) {
+ if (p->post_handler && !kprobe_gone(p)) {
list_for_each_entry_rcu(list_p, &old_p->list, list) {
if ((list_p != p) && (list_p->post_handler))
goto noclean;
@@ -721,39 +767,27 @@ noclean:
static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
{
- struct module *mod;
struct kprobe *old_p;
- if (p->mod_refcounted) {
- /*
- * Since we've already incremented refcount,
- * we don't need to disable preemption.
- */
- mod = module_text_address((unsigned long)p->addr);
- if (mod)
- module_put(mod);
- }
-
- if (list_empty(&p->list) || list_is_singular(&p->list)) {
- if (!list_empty(&p->list)) {
- /* "p" is the last child of an aggr_kprobe */
- old_p = list_entry(p->list.next, struct kprobe, list);
- list_del(&p->list);
- kfree(old_p);
- }
+ if (list_empty(&p->list))
arch_remove_kprobe(p);
+ else if (list_is_singular(&p->list)) {
+ /* "p" is the last child of an aggr_kprobe */
+ old_p = list_entry(p->list.next, struct kprobe, list);
+ list_del(&p->list);
+ arch_remove_kprobe(old_p);
+ kfree(old_p);
}
}
-static int __register_kprobes(struct kprobe **kps, int num,
- unsigned long called_from)
+int __kprobes register_kprobes(struct kprobe **kps, int num)
{
int i, ret = 0;
if (num <= 0)
return -EINVAL;
for (i = 0; i < num; i++) {
- ret = __register_kprobe(kps[i], called_from);
+ ret = register_kprobe(kps[i]);
if (ret < 0) {
if (i > 0)
unregister_kprobes(kps, i);
@@ -763,26 +797,11 @@ static int __register_kprobes(struct kprobe **kps, int num,
return ret;
}
-/*
- * Registration and unregistration functions for kprobe.
- */
-int __kprobes register_kprobe(struct kprobe *p)
-{
- return __register_kprobes(&p, 1,
- (unsigned long)__builtin_return_address(0));
-}
-
void __kprobes unregister_kprobe(struct kprobe *p)
{
unregister_kprobes(&p, 1);
}
-int __kprobes register_kprobes(struct kprobe **kps, int num)
-{
- return __register_kprobes(kps, num,
- (unsigned long)__builtin_return_address(0));
-}
-
void __kprobes unregister_kprobes(struct kprobe **kps, int num)
{
int i;
@@ -811,8 +830,7 @@ unsigned long __weak arch_deref_entry_point(void *entry)
return (unsigned long)entry;
}
-static int __register_jprobes(struct jprobe **jps, int num,
- unsigned long called_from)
+int __kprobes register_jprobes(struct jprobe **jps, int num)
{
struct jprobe *jp;
int ret = 0, i;
@@ -830,7 +848,7 @@ static int __register_jprobes(struct jprobe **jps, int num,
/* Todo: Verify probepoint is a function entry point */
jp->kp.pre_handler = setjmp_pre_handler;
jp->kp.break_handler = longjmp_break_handler;
- ret = __register_kprobe(&jp->kp, called_from);
+ ret = register_kprobe(&jp->kp);
}
if (ret < 0) {
if (i > 0)
@@ -843,8 +861,7 @@ static int __register_jprobes(struct jprobe **jps, int num,
int __kprobes register_jprobe(struct jprobe *jp)
{
- return __register_jprobes(&jp, 1,
- (unsigned long)__builtin_return_address(0));
+ return register_jprobes(&jp, 1);
}
void __kprobes unregister_jprobe(struct jprobe *jp)
@@ -852,12 +869,6 @@ void __kprobes unregister_jprobe(struct jprobe *jp)
unregister_jprobes(&jp, 1);
}
-int __kprobes register_jprobes(struct jprobe **jps, int num)
-{
- return __register_jprobes(jps, num,
- (unsigned long)__builtin_return_address(0));
-}
-
void __kprobes unregister_jprobes(struct jprobe **jps, int num)
{
int i;
@@ -920,8 +931,7 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
return 0;
}
-static int __kprobes __register_kretprobe(struct kretprobe *rp,
- unsigned long called_from)
+int __kprobes register_kretprobe(struct kretprobe *rp)
{
int ret = 0;
struct kretprobe_instance *inst;
@@ -967,21 +977,20 @@ static int __kprobes __register_kretprobe(struct kretprobe *rp,
rp->nmissed = 0;
/* Establish function entry probe point */
- ret = __register_kprobe(&rp->kp, called_from);
+ ret = register_kprobe(&rp->kp);
if (ret != 0)
free_rp_inst(rp);
return ret;
}
-static int __register_kretprobes(struct kretprobe **rps, int num,
- unsigned long called_from)
+int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
int ret = 0, i;
if (num <= 0)
return -EINVAL;
for (i = 0; i < num; i++) {
- ret = __register_kretprobe(rps[i], called_from);
+ ret = register_kretprobe(rps[i]);
if (ret < 0) {
if (i > 0)
unregister_kretprobes(rps, i);
@@ -991,23 +1000,11 @@ static int __register_kretprobes(struct kretprobe **rps, int num,
return ret;
}
-int __kprobes register_kretprobe(struct kretprobe *rp)
-{
- return __register_kretprobes(&rp, 1,
- (unsigned long)__builtin_return_address(0));
-}
-
void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
unregister_kretprobes(&rp, 1);
}
-int __kprobes register_kretprobes(struct kretprobe **rps, int num)
-{
- return __register_kretprobes(rps, num,
- (unsigned long)__builtin_return_address(0));
-}
-
void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
int i;
@@ -1055,6 +1052,72 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
#endif /* CONFIG_KRETPROBES */
+/* Set the kprobe gone and remove its instruction buffer. */
+static void __kprobes kill_kprobe(struct kprobe *p)
+{
+ struct kprobe *kp;
+ p->flags |= KPROBE_FLAG_GONE;
+ if (p->pre_handler == aggr_pre_handler) {
+ /*
+ * If this is an aggr_kprobe, we have to list all the
+ * chained probes and mark them GONE.
+ */
+ list_for_each_entry_rcu(kp, &p->list, list)
+ kp->flags |= KPROBE_FLAG_GONE;
+ p->post_handler = NULL;
+ p->break_handler = NULL;
+ }
+ /*
+ * Here, we can remove insn_slot safely, because no thread calls
+ * the original probed function (which will be freed soon) any more.
+ */
+ arch_remove_kprobe(p);
+}
+
+/* Module notifier call back, checking kprobes on the module */
+static int __kprobes kprobes_module_callback(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ struct module *mod = data;
+ struct hlist_head *head;
+ struct hlist_node *node;
+ struct kprobe *p;
+ unsigned int i;
+ int checkcore = (val == MODULE_STATE_GOING);
+
+ if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
+ return NOTIFY_DONE;
+
+ /*
+ * When MODULE_STATE_GOING is notified, both the module's .text and
+ * .init.text sections will be freed. When MODULE_STATE_LIVE is
+ * notified, only the .init.text section will be freed. We need to
+ * disable kprobes which have been inserted in the sections.
+ */
+ mutex_lock(&kprobe_mutex);
+ for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
+ head = &kprobe_table[i];
+ hlist_for_each_entry_rcu(p, node, head, hlist)
+ if (within_module_init((unsigned long)p->addr, mod) ||
+ (checkcore &&
+ within_module_core((unsigned long)p->addr, mod))) {
+ /*
+ * The vaddr this probe is installed at will soon
+ * be vfreed but not synced to disk. Hence,
+ * disarming the breakpoint isn't needed.
+ */
+ kill_kprobe(p);
+ }
+ }
+ mutex_unlock(&kprobe_mutex);
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block kprobe_module_nb = {
+ .notifier_call = kprobes_module_callback,
+ .priority = 0
+};
+
static int __init init_kprobes(void)
{
int i, err = 0;
@@ -1111,6 +1174,9 @@ static int __init init_kprobes(void)
err = arch_init_kprobes();
if (!err)
err = register_die_notifier(&kprobe_exceptions_nb);
+ if (!err)
+ err = register_module_notifier(&kprobe_module_nb);
+
kprobes_initialized = (err == 0);
if (!err)
@@ -1131,10 +1197,12 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
else
kprobe_type = "k";
if (sym)
- seq_printf(pi, "%p %s %s+0x%x %s\n", p->addr, kprobe_type,
- sym, offset, (modname ? modname : " "));
+ seq_printf(pi, "%p %s %s+0x%x %s %s\n", p->addr, kprobe_type,
+ sym, offset, (modname ? modname : " "),
+ (kprobe_gone(p) ? "[GONE]" : ""));
else
- seq_printf(pi, "%p %s %p\n", p->addr, kprobe_type, p->addr);
+ seq_printf(pi, "%p %s %p %s\n", p->addr, kprobe_type, p->addr,
+ (kprobe_gone(p) ? "[GONE]" : ""));
}
static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
@@ -1215,7 +1283,8 @@ static void __kprobes enable_all_kprobes(void)
for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
head = &kprobe_table[i];
hlist_for_each_entry_rcu(p, node, head, hlist)
- arch_arm_kprobe(p);
+ if (!kprobe_gone(p))
+ arch_arm_kprobe(p);
}
kprobe_enabled = true;
@@ -1244,7 +1313,7 @@ static void __kprobes disable_all_kprobes(void)
for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
head = &kprobe_table[i];
hlist_for_each_entry_rcu(p, node, head, hlist) {
- if (!arch_trampoline_kprobe(p))
+ if (!arch_trampoline_kprobe(p) && !kprobe_gone(p))
arch_disarm_kprobe(p);
}
}
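
For context, a minimal sketch of the registration interface reshaped above,
modelled on the in-tree kprobe samples; the probed symbol and the printout are
illustrative only. With this patch, register_kprobe() no longer needs the
caller's return address, a probed module is pinned only while the probe is
being inserted, and probes whose module goes away are marked [GONE] in the
debugfs listing rather than left dangling:

#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/module.h>

static int kp_demo_pre(struct kprobe *p, struct pt_regs *regs)
{
	printk(KERN_INFO "kprobe hit at %p\n", p->addr);
	return 0;	/* let the original instruction execute */
}

static struct kprobe kp_demo = {
	.symbol_name	= "do_fork",	/* illustrative probe point */
	.pre_handler	= kp_demo_pre,
};

static int __init kp_demo_init(void)
{
	return register_kprobe(&kp_demo);
}

static void __exit kp_demo_exit(void)
{
	unregister_kprobe(&kp_demo);
}

module_init(kp_demo_init);
module_exit(kp_demo_exit);
MODULE_LICENSE("GPL");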
diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
index 08dd8ed86c77..528dd78e7e7e 100644
--- a/kernel/ksysfs.c
+++ b/kernel/ksysfs.c
@@ -24,7 +24,7 @@ static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
static struct kobj_attribute _name##_attr = \
__ATTR(_name, 0644, _name##_show, _name##_store)
-#if defined(CONFIG_HOTPLUG) && defined(CONFIG_NET)
+#if defined(CONFIG_HOTPLUG)
/* current uevent sequence number */
static ssize_t uevent_seqnum_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
@@ -137,7 +137,7 @@ struct kobject *kernel_kobj;
EXPORT_SYMBOL_GPL(kernel_kobj);
static struct attribute * kernel_attrs[] = {
-#if defined(CONFIG_HOTPLUG) && defined(CONFIG_NET)
+#if defined(CONFIG_HOTPLUG)
&uevent_seqnum_attr.attr,
&uevent_helper_attr.attr,
#endif
diff --git a/kernel/module.c b/kernel/module.c
index f47cce910f25..c9332c90d5a0 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -43,7 +43,6 @@
#include <linux/device.h>
#include <linux/string.h>
#include <linux/mutex.h>
-#include <linux/unwind.h>
#include <linux/rculist.h>
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
@@ -51,6 +50,7 @@
#include <asm/sections.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
+#include <linux/async.h>
#if 0
#define DEBUGP printk
@@ -817,6 +817,7 @@ sys_delete_module(const char __user *name_user, unsigned int flags)
mod->exit();
blocking_notifier_call_chain(&module_notify_list,
MODULE_STATE_GOING, mod);
+ async_synchronize_full();
mutex_lock(&module_mutex);
/* Store the name of the last unloaded module for diagnostic purposes */
strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module));
@@ -1449,8 +1450,6 @@ static void free_module(struct module *mod)
remove_sect_attrs(mod);
mod_kobject_remove(mod);
- unwind_remove_table(mod->unwind_info, 0);
-
/* Arch-specific cleanup. */
module_arch_cleanup(mod);
@@ -1867,7 +1866,6 @@ static noinline struct module *load_module(void __user *umod,
unsigned int symindex = 0;
unsigned int strindex = 0;
unsigned int modindex, versindex, infoindex, pcpuindex;
- unsigned int unwindex = 0;
unsigned int num_kp, num_mcount;
struct kernel_param *kp;
struct module *mod;
@@ -1957,9 +1955,6 @@ static noinline struct module *load_module(void __user *umod,
versindex = find_sec(hdr, sechdrs, secstrings, "__versions");
infoindex = find_sec(hdr, sechdrs, secstrings, ".modinfo");
pcpuindex = find_pcpusec(hdr, sechdrs, secstrings);
-#ifdef ARCH_UNWIND_SECTION_NAME
- unwindex = find_sec(hdr, sechdrs, secstrings, ARCH_UNWIND_SECTION_NAME);
-#endif
/* Don't keep modinfo and version sections. */
sechdrs[infoindex].sh_flags &= ~(unsigned long)SHF_ALLOC;
@@ -1969,8 +1964,6 @@ static noinline struct module *load_module(void __user *umod,
sechdrs[symindex].sh_flags |= SHF_ALLOC;
sechdrs[strindex].sh_flags |= SHF_ALLOC;
#endif
- if (unwindex)
- sechdrs[unwindex].sh_flags |= SHF_ALLOC;
/* Check module struct version now, before we try to use module. */
if (!check_modstruct_version(sechdrs, versindex, mod)) {
@@ -2267,11 +2260,6 @@ static noinline struct module *load_module(void __user *umod,
add_sect_attrs(mod, hdr->e_shnum, secstrings, sechdrs);
add_notes_attrs(mod, hdr->e_shnum, secstrings, sechdrs);
- /* Size of section 0 is 0, so this works well if no unwind info. */
- mod->unwind_info = unwind_add_table(mod,
- (void *)sechdrs[unwindex].sh_addr,
- sechdrs[unwindex].sh_size);
-
/* Get rid of temporary copy */
vfree(hdr);
@@ -2366,11 +2354,12 @@ sys_init_module(void __user *umod,
/* Now it's a first class citizen! Wake up anyone waiting for it. */
mod->state = MODULE_STATE_LIVE;
wake_up(&module_wq);
+ blocking_notifier_call_chain(&module_notify_list,
+ MODULE_STATE_LIVE, mod);
mutex_lock(&module_mutex);
/* Drop initial reference. */
module_put(mod);
- unwind_remove_table(mod->unwind_info, 1);
module_free(mod, mod->module_init);
mod->module_init = NULL;
mod->init_size = 0;
@@ -2405,7 +2394,7 @@ static const char *get_ksymbol(struct module *mod,
unsigned long nextval;
/* At worse, next value is at end of module */
- if (within(addr, mod->module_init, mod->init_size))
+ if (within_module_init(addr, mod))
nextval = (unsigned long)mod->module_init+mod->init_text_size;
else
nextval = (unsigned long)mod->module_core+mod->core_text_size;
@@ -2453,8 +2442,8 @@ const char *module_address_lookup(unsigned long addr,
preempt_disable();
list_for_each_entry_rcu(mod, &modules, list) {
- if (within(addr, mod->module_init, mod->init_size)
- || within(addr, mod->module_core, mod->core_size)) {
+ if (within_module_init(addr, mod) ||
+ within_module_core(addr, mod)) {
if (modname)
*modname = mod->name;
ret = get_ksymbol(mod, addr, size, offset);
@@ -2476,8 +2465,8 @@ int lookup_module_symbol_name(unsigned long addr, char *symname)
preempt_disable();
list_for_each_entry_rcu(mod, &modules, list) {
- if (within(addr, mod->module_init, mod->init_size) ||
- within(addr, mod->module_core, mod->core_size)) {
+ if (within_module_init(addr, mod) ||
+ within_module_core(addr, mod)) {
const char *sym;
sym = get_ksymbol(mod, addr, NULL, NULL);
@@ -2500,8 +2489,8 @@ int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size,
preempt_disable();
list_for_each_entry_rcu(mod, &modules, list) {
- if (within(addr, mod->module_init, mod->init_size) ||
- within(addr, mod->module_core, mod->core_size)) {
+ if (within_module_init(addr, mod) ||
+ within_module_core(addr, mod)) {
const char *sym;
sym = get_ksymbol(mod, addr, size, offset);
@@ -2720,7 +2709,7 @@ int is_module_address(unsigned long addr)
preempt_disable();
list_for_each_entry_rcu(mod, &modules, list) {
- if (within(addr, mod->module_core, mod->core_size)) {
+ if (within_module_core(addr, mod)) {
preempt_enable();
return 1;
}
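The within() open-codings replaced above correspond to two small helpers; they are not shown in this diff, but are presumably defined in include/linux/module.h roughly as follows (sketch only):

static inline int within_module_core(unsigned long addr, struct module *mod)
{
	return (unsigned long)mod->module_core <= addr &&
	       addr < (unsigned long)mod->module_core + mod->core_size;
}

static inline int within_module_init(unsigned long addr, struct module *mod)
{
	return (unsigned long)mod->module_init <= addr &&
	       addr < (unsigned long)mod->module_init + mod->init_size;
}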
diff --git a/kernel/panic.c b/kernel/panic.c
index 13f06349a786..2a2ff36ff44d 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -299,6 +299,8 @@ static int init_oops_id(void)
{
if (!oops_id)
get_random_bytes(&oops_id, sizeof(oops_id));
+ else
+ oops_id++;
return 0;
}
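With this change, init_oops_id() hands out a distinct id for every oops after the first instead of reusing the initial random value. For context, its caller in panic.c is the end-of-trace marker, which presumably looks roughly like this (not part of the hunk above):

static void print_oops_end_marker(void)
{
	init_oops_id();
	printk(KERN_WARNING "---[ end trace %016llx ]---\n",
	       (unsigned long long)oops_id);
}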
diff --git a/kernel/pid.c b/kernel/pid.c
index 064e76afa507..af9224cdd6c0 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -475,7 +475,7 @@ pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
EXPORT_SYMBOL(task_session_nr_ns);
/*
- * Used by proc to find the first pid that is greater then or equal to nr.
+ * Used by proc to find the first pid that is greater than or equal to nr.
*
* If there is a pid at nr this function is exactly the same as find_pid_ns.
*/
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 613f16941b85..239988873971 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -615,7 +615,7 @@ static void __init test_wakealarm(struct rtc_device *rtc, suspend_state_t state)
/* this may fail if the RTC hasn't been initialized */
status = rtc_read_time(rtc, &alm.time);
if (status < 0) {
- printk(err_readtime, rtc->dev.bus_id, status);
+ printk(err_readtime, dev_name(&rtc->dev), status);
return;
}
rtc_tm_to_time(&alm.time, &now);
@@ -626,7 +626,7 @@ static void __init test_wakealarm(struct rtc_device *rtc, suspend_state_t state)
status = rtc_set_alarm(rtc, &alm);
if (status < 0) {
- printk(err_wakealarm, rtc->dev.bus_id, status);
+ printk(err_wakealarm, dev_name(&rtc->dev), status);
return;
}
@@ -660,7 +660,7 @@ static int __init has_wakealarm(struct device *dev, void *name_ptr)
if (!device_may_wakeup(candidate->dev.parent))
return 0;
- *(char **)name_ptr = dev->bus_id;
+ *(const char **)name_ptr = dev_name(dev);
return 1;
}
diff --git a/kernel/printk.c b/kernel/printk.c
index e651ab05655f..7015733793e8 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -619,7 +619,7 @@ static int acquire_console_semaphore_for_printk(unsigned int cpu)
static const char recursion_bug_msg [] =
KERN_CRIT "BUG: recent printk recursion!\n";
static int recursion_bug;
- static int new_text_line = 1;
+static int new_text_line = 1;
static char printk_buf[1024];
asmlinkage int vprintk(const char *fmt, va_list args)
diff --git a/kernel/profile.c b/kernel/profile.c
index d18e2d2654f2..784933acf5b8 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -445,7 +445,6 @@ void profile_tick(int type)
#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <asm/uaccess.h>
-#include <asm/ptrace.h>
static int prof_cpu_mask_read_proc(char *page, char **start, off_t off,
int count, int *eof, void *data)
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index ad63af8b2521..d92a76a881aa 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -77,8 +77,15 @@ void wakeme_after_rcu(struct rcu_head *head)
* sections are delimited by rcu_read_lock() and rcu_read_unlock(),
* and may be nested.
*/
-void synchronize_rcu(void); /* Makes kernel-doc tools happy */
-synchronize_rcu_xxx(synchronize_rcu, call_rcu)
+void synchronize_rcu(void)
+{
+ struct rcu_synchronize rcu;
+ init_completion(&rcu.completion);
+ /* Will wake me after RCU finished. */
+ call_rcu(&rcu.head, wakeme_after_rcu);
+ /* Wait for it. */
+ wait_for_completion(&rcu.completion);
+}
EXPORT_SYMBOL_GPL(synchronize_rcu);
static void rcu_barrier_callback(struct rcu_head *notused)
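This hunk, and the rcupreempt.c hunk below, simply open-code what the removed synchronize_rcu_xxx() generator macro used to expand to, so that kernel-doc and debuggers see a real function body. From memory, the macro in include/linux/rcupdate.h was shaped roughly like this (a sketch, not part of this diff):

#define synchronize_rcu_xxx(name, func) \
void name(void) \
{ \
	struct rcu_synchronize rcu; \
	\
	init_completion(&rcu.completion); \
	/* Will wake me after RCU finished. */ \
	func(&rcu.head, wakeme_after_rcu); \
	/* Wait for it. */ \
	wait_for_completion(&rcu.completion); \
}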
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
index f9dc8f3720f6..33cfc50781f9 100644
--- a/kernel/rcupreempt.c
+++ b/kernel/rcupreempt.c
@@ -1177,7 +1177,16 @@ EXPORT_SYMBOL_GPL(call_rcu_sched);
* in -rt this does -not- necessarily result in all currently executing
* interrupt -handlers- having completed.
*/
-synchronize_rcu_xxx(__synchronize_sched, call_rcu_sched)
+void __synchronize_sched(void)
+{
+ struct rcu_synchronize rcu;
+
+ init_completion(&rcu.completion);
+ /* Will wake me after RCU finished. */
+ call_rcu_sched(&rcu.head, wakeme_after_rcu);
+ /* Wait for it. */
+ wait_for_completion(&rcu.completion);
+}
EXPORT_SYMBOL_GPL(__synchronize_sched);
/*
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 3245b40952c6..1cff28db56b6 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -136,7 +136,7 @@ static int stutter_pause_test = 0;
#endif
int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT;
-#define FULLSTOP_SIGNALED 1 /* Bail due to signal. */
+#define FULLSTOP_SHUTDOWN 1 /* Bail due to system shutdown/panic. */
#define FULLSTOP_CLEANUP 2 /* Orderly shutdown. */
static int fullstop; /* stop generating callbacks at test end. */
DEFINE_MUTEX(fullstop_mutex); /* protect fullstop transitions and */
@@ -151,12 +151,10 @@ rcutorture_shutdown_notify(struct notifier_block *unused1,
{
if (fullstop)
return NOTIFY_DONE;
- if (signal_pending(current)) {
- mutex_lock(&fullstop_mutex);
- if (!ACCESS_ONCE(fullstop))
- fullstop = FULLSTOP_SIGNALED;
- mutex_unlock(&fullstop_mutex);
- }
+ mutex_lock(&fullstop_mutex);
+ if (!fullstop)
+ fullstop = FULLSTOP_SHUTDOWN;
+ mutex_unlock(&fullstop_mutex);
return NOTIFY_DONE;
}
@@ -624,7 +622,7 @@ rcu_torture_writer(void *arg)
rcu_stutter_wait();
} while (!kthread_should_stop() && !fullstop);
VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping");
- while (!kthread_should_stop() && fullstop != FULLSTOP_SIGNALED)
+ while (!kthread_should_stop() && fullstop != FULLSTOP_SHUTDOWN)
schedule_timeout_uninterruptible(1);
return 0;
}
@@ -649,7 +647,7 @@ rcu_torture_fakewriter(void *arg)
} while (!kthread_should_stop() && !fullstop);
VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping");
- while (!kthread_should_stop() && fullstop != FULLSTOP_SIGNALED)
+ while (!kthread_should_stop() && fullstop != FULLSTOP_SHUTDOWN)
schedule_timeout_uninterruptible(1);
return 0;
}
@@ -759,7 +757,7 @@ rcu_torture_reader(void *arg)
VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
if (irqreader && cur_ops->irqcapable)
del_timer_sync(&t);
- while (!kthread_should_stop() && fullstop != FULLSTOP_SIGNALED)
+ while (!kthread_should_stop() && fullstop != FULLSTOP_SHUTDOWN)
schedule_timeout_uninterruptible(1);
return 0;
}
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index a342b032112c..f2d8638e6c60 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -79,7 +79,10 @@ struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state);
DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
#ifdef CONFIG_NO_HZ
-DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks);
+DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
+ .dynticks_nesting = 1,
+ .dynticks = 1,
+};
#endif /* #ifdef CONFIG_NO_HZ */
static int blimit = 10; /* Maximum callbacks per softirq. */
@@ -572,6 +575,7 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
/* Special-case the common single-level case. */
if (NUM_RCU_NODES == 1) {
rnp->qsmask = rnp->qsmaskinit;
+ rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */
spin_unlock_irqrestore(&rnp->lock, flags);
return;
}
@@ -1379,13 +1383,6 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
static void __cpuinit rcu_online_cpu(int cpu)
{
-#ifdef CONFIG_NO_HZ
- struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
-
- rdtp->dynticks_nesting = 1;
- rdtp->dynticks |= 1; /* need consecutive #s even for hotplug. */
- rdtp->dynticks_nmi = (rdtp->dynticks_nmi + 1) & ~0x1;
-#endif /* #ifdef CONFIG_NO_HZ */
rcu_init_percpu_data(cpu, &rcu_state);
rcu_init_percpu_data(cpu, &rcu_bh_state);
open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
diff --git a/kernel/resource.c b/kernel/resource.c
index e633106b12f6..ca6a1536b205 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -623,7 +623,7 @@ resource_size_t resource_alignment(struct resource *res)
*/
struct resource * __request_region(struct resource *parent,
resource_size_t start, resource_size_t n,
- const char *name)
+ const char *name, int flags)
{
struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL);
@@ -634,6 +634,7 @@ struct resource * __request_region(struct resource *parent,
res->start = start;
res->end = start + n - 1;
res->flags = IORESOURCE_BUSY;
+ res->flags |= flags;
write_lock(&resource_lock);
@@ -679,7 +680,7 @@ int __check_region(struct resource *parent, resource_size_t start,
{
struct resource * res;
- res = __request_region(parent, start, n, "check-region");
+ res = __request_region(parent, start, n, "check-region", 0);
if (!res)
return -EBUSY;
@@ -776,7 +777,7 @@ struct resource * __devm_request_region(struct device *dev,
dr->start = start;
dr->n = n;
- res = __request_region(parent, start, n, name);
+ res = __request_region(parent, start, n, name, 0);
if (res)
devres_add(dev, dr);
else
@@ -876,3 +877,57 @@ int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
return err;
}
+
+#ifdef CONFIG_STRICT_DEVMEM
+static int strict_iomem_checks = 1;
+#else
+static int strict_iomem_checks;
+#endif
+
+/*
+ * Check whether an address falls within a busy, IORESOURCE_EXCLUSIVE
+ * resource in the iomem tree.
+ * Returns 1 if it does, 0 otherwise.
+ */
+int iomem_is_exclusive(u64 addr)
+{
+ struct resource *p = &iomem_resource;
+ int err = 0;
+ loff_t l;
+ int size = PAGE_SIZE;
+
+ if (!strict_iomem_checks)
+ return 0;
+
+ addr = addr & PAGE_MASK;
+
+ read_lock(&resource_lock);
+ for (p = p->child; p ; p = r_next(NULL, p, &l)) {
+ /*
+ * We can probably skip the resources without
+ * IORESOURCE_IO attribute?
+ */
+ if (p->start >= addr + size)
+ break;
+ if (p->end < addr)
+ continue;
+ if (p->flags & IORESOURCE_BUSY &&
+ p->flags & IORESOURCE_EXCLUSIVE) {
+ err = 1;
+ break;
+ }
+ }
+ read_unlock(&resource_lock);
+
+ return err;
+}
+
+static int __init strict_iomem(char *str)
+{
+ if (strstr(str, "relaxed"))
+ strict_iomem_checks = 0;
+ if (strstr(str, "strict"))
+ strict_iomem_checks = 1;
+ return 1;
+}
+
+__setup("iomem=", strict_iomem);
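The new flags argument to __request_region() lets a driver mark a busy region IORESOURCE_EXCLUSIVE, and iomem_is_exclusive() lets user-space accessors honour that. A hypothetical /dev/mem-style consumer (all names below are assumptions, not taken from this diff) could vet a mapping request page by page:

static int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	while (cursor < to) {
		if (iomem_is_exclusive(cursor))
			return 0;	/* overlaps a busy, exclusive resource */
		cursor += PAGE_SIZE;
	}
	return 1;
}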
diff --git a/kernel/sched.c b/kernel/sched.c
index 545c6fccd1dc..deb5ac8c12f3 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3728,8 +3728,13 @@ redo:
}
double_unlock_balance(this_rq, busiest);
+ /*
+ * Should not call ttwu while holding a rq->lock
+ */
+ spin_unlock(&this_rq->lock);
if (active_balance)
wake_up_process(busiest->migration_thread);
+ spin_lock(&this_rq->lock);
} else
sd->nr_balance_failed = 0;
@@ -6957,7 +6962,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
spin_unlock_irqrestore(&rq->lock, flags);
}
-static int init_rootdomain(struct root_domain *rd, bool bootmem)
+static int __init_refok init_rootdomain(struct root_domain *rd, bool bootmem)
{
memset(rd, 0, sizeof(*rd));
@@ -6970,7 +6975,7 @@ static int init_rootdomain(struct root_domain *rd, bool bootmem)
}
if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
- goto free_rd;
+ goto out;
if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
goto free_span;
if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
@@ -6986,8 +6991,7 @@ free_online:
free_cpumask_var(rd->online);
free_span:
free_cpumask_var(rd->span);
-free_rd:
- kfree(rd);
+out:
return -ENOMEM;
}
@@ -7987,7 +7991,7 @@ match2:
}
#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
-int arch_reinit_sched_domains(void)
+static void arch_reinit_sched_domains(void)
{
get_online_cpus();
@@ -7996,13 +8000,10 @@ int arch_reinit_sched_domains(void)
rebuild_sched_domains();
put_online_cpus();
-
- return 0;
}
static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
{
- int ret;
unsigned int level = 0;
if (sscanf(buf, "%u", &level) != 1)
@@ -8023,9 +8024,9 @@ static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
else
sched_mc_power_savings = level;
- ret = arch_reinit_sched_domains();
+ arch_reinit_sched_domains();
- return ret ? ret : count;
+ return count;
}
#ifdef CONFIG_SCHED_MC
@@ -8060,7 +8061,7 @@ static SYSDEV_CLASS_ATTR(sched_smt_power_savings, 0644,
sched_smt_power_savings_store);
#endif
-int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
+int __init sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
{
int err = 0;
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
index e8ab096ddfe3..a0b0852414cc 100644
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -124,7 +124,7 @@ static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)
clock = scd->tick_gtod + delta;
min_clock = wrap_max(scd->tick_gtod, scd->clock);
- max_clock = scd->tick_gtod + TICK_NSEC;
+ max_clock = wrap_max(scd->clock, scd->tick_gtod + TICK_NSEC);
clock = wrap_max(clock, min_clock);
clock = wrap_min(clock, max_clock);
@@ -227,6 +227,9 @@ EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);
*/
void sched_clock_idle_wakeup_event(u64 delta_ns)
{
+ if (timekeeping_suspended)
+ return;
+
sched_clock_tick();
touch_softlockup_watchdog();
}
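The timekeeping_suspended flag tested here is made non-static by the kernel/time/timekeeping.c hunk near the end of this diff; it is presumably exposed to sched_clock.c through a header declaration along the lines of:

extern int timekeeping_suspended;	/* assumed to live in include/linux/time.h */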
diff --git a/kernel/sched_cpupri.c b/kernel/sched_cpupri.c
index 018b7be1db2e..1e00bfacf9b8 100644
--- a/kernel/sched_cpupri.c
+++ b/kernel/sched_cpupri.c
@@ -151,7 +151,7 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
*
* Returns: -ENOMEM if memory fails.
*/
-int cpupri_init(struct cpupri *cp, bool bootmem)
+int __init_refok cpupri_init(struct cpupri *cp, bool bootmem)
{
int i;
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 56c0efe902a7..e0c0b4bc3f08 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -386,20 +386,6 @@ int sched_nr_latency_handler(struct ctl_table *table, int write,
#endif
/*
- * delta *= P[w / rw]
- */
-static inline unsigned long
-calc_delta_weight(unsigned long delta, struct sched_entity *se)
-{
- for_each_sched_entity(se) {
- delta = calc_delta_mine(delta,
- se->load.weight, &cfs_rq_of(se)->load);
- }
-
- return delta;
-}
-
-/*
* delta /= w
*/
static inline unsigned long
@@ -440,12 +426,20 @@ static u64 __sched_period(unsigned long nr_running)
*/
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
- unsigned long nr_running = cfs_rq->nr_running;
+ u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
- if (unlikely(!se->on_rq))
- nr_running++;
+ for_each_sched_entity(se) {
+ struct load_weight *load = &cfs_rq->load;
- return calc_delta_weight(__sched_period(nr_running), se);
+ if (unlikely(!se->on_rq)) {
+ struct load_weight lw = cfs_rq->load;
+
+ update_load_add(&lw, se->load.weight);
+ load = &lw;
+ }
+ slice = calc_delta_mine(slice, se->load.weight, load);
+ }
+ return slice;
}
/*
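The reworked sched_slice() starts from period = __sched_period(nr_running + !se->on_rq) and then scales it with calc_delta_mine(slice, se->load.weight, load) while walking up the entity hierarchy, where load is the runqueue load taken as if the entity were already enqueued when it is not yet on the queue; this replaces the removed calc_delta_weight() helper. A rough numeric example with assumed weights: for a single-level queue, a 20 ms period and an entity of weight 1024 on a queue whose would-be total weight is 3072 yields slice ≈ 20 ms * 1024 / 3072 ≈ 6.7 ms.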
diff --git a/kernel/signal.c b/kernel/signal.c
index 8e95855ff3cf..3152ac3b62e2 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -858,7 +858,8 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
q->info.si_signo = sig;
q->info.si_errno = 0;
q->info.si_code = SI_USER;
- q->info.si_pid = task_pid_vnr(current);
+ q->info.si_pid = task_tgid_nr_ns(current,
+ task_active_pid_ns(t));
q->info.si_uid = current_uid();
break;
case (unsigned long) SEND_SIG_PRIV:
diff --git a/kernel/sys.c b/kernel/sys.c
index d356d79e84ac..763c3c17ded3 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -33,6 +33,7 @@
#include <linux/task_io_accounting_ops.h>
#include <linux/seccomp.h>
#include <linux/cpu.h>
+#include <linux/ptrace.h>
#include <linux/compat.h>
#include <linux/syscalls.h>
@@ -927,6 +928,7 @@ asmlinkage long sys_times(struct tms __user * tbuf)
if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
return -EFAULT;
}
+ force_successful_syscall_return();
return (long) jiffies_64_to_clock_t(get_jiffies_64());
}
@@ -1627,6 +1629,8 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
utime = stime = cputime_zero;
if (who == RUSAGE_THREAD) {
+ utime = task_utime(current);
+ stime = task_stime(current);
accumulate_thread_rusage(p, r);
goto out;
}
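force_successful_syscall_return(), used here and again in the sys_time() hunk further down, is for architectures that flag syscall errors out of band (e.g. via a status register) and would otherwise treat a return value that happens to look negative, such as a jiffies-derived count, as an errno. On most architectures it is a no-op, roughly:

#ifndef force_successful_syscall_return
#define force_successful_syscall_return()	do { } while (0)
#endif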
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index ff6d45c7626f..92f6e5bc3c24 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -87,10 +87,6 @@ extern int rcutorture_runnable;
#endif /* #ifdef CONFIG_RCU_TORTURE_TEST */
/* Constants used for minimum and maximum */
-#if defined(CONFIG_HIGHMEM) || defined(CONFIG_DETECT_SOFTLOCKUP)
-static int one = 1;
-#endif
-
#ifdef CONFIG_DETECT_SOFTLOCKUP
static int sixty = 60;
static int neg_one = -1;
@@ -101,6 +97,7 @@ static int two = 2;
#endif
static int zero;
+static int one = 1;
static int one_hundred = 100;
/* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */
@@ -952,12 +949,22 @@ static struct ctl_table vm_table[] = {
.data = &dirty_background_ratio,
.maxlen = sizeof(dirty_background_ratio),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
+ .proc_handler = &dirty_background_ratio_handler,
.strategy = &sysctl_intvec,
.extra1 = &zero,
.extra2 = &one_hundred,
},
{
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "dirty_background_bytes",
+ .data = &dirty_background_bytes,
+ .maxlen = sizeof(dirty_background_bytes),
+ .mode = 0644,
+ .proc_handler = &dirty_background_bytes_handler,
+ .strategy = &sysctl_intvec,
+ .extra1 = &one,
+ },
+ {
.ctl_name = VM_DIRTY_RATIO,
.procname = "dirty_ratio",
.data = &vm_dirty_ratio,
@@ -969,6 +976,16 @@ static struct ctl_table vm_table[] = {
.extra2 = &one_hundred,
},
{
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "dirty_bytes",
+ .data = &vm_dirty_bytes,
+ .maxlen = sizeof(vm_dirty_bytes),
+ .mode = 0644,
+ .proc_handler = &dirty_bytes_handler,
+ .strategy = &sysctl_intvec,
+ .extra1 = &one,
+ },
+ {
.procname = "dirty_writeback_centisecs",
.data = &dirty_writeback_interval,
.maxlen = sizeof(dirty_writeback_interval),
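The new *_bytes knobs are mutually exclusive with their *_ratio counterparts. The handlers referenced above live in mm/page-writeback.c, outside this diff; presumably each one clears the sibling setting after a successful write, roughly like this sketch:

int dirty_background_ratio_handler(struct ctl_table *table, int write,
		struct file *filp, void __user *buffer,
		size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);

	if (ret == 0 && write)
		dirty_background_bytes = 0;	/* the ratio is now authoritative */
	return ret;
}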
diff --git a/kernel/test_kprobes.c b/kernel/test_kprobes.c
index 06b6395b45b2..4f104515a19b 100644
--- a/kernel/test_kprobes.c
+++ b/kernel/test_kprobes.c
@@ -22,21 +22,11 @@
static u32 rand1, preh_val, posth_val, jph_val;
static int errors, handler_errors, num_tests;
+static u32 (*target)(u32 value);
+static u32 (*target2)(u32 value);
static noinline u32 kprobe_target(u32 value)
{
- /*
- * gcc ignores noinline on some architectures unless we stuff
- * sufficient lard into the function. The get_kprobe() here is
- * just for that.
- *
- * NOTE: We aren't concerned about the correctness of get_kprobe()
- * here; hence, this call is neither under !preempt nor with the
- * kprobe_mutex held. This is fine(tm)
- */
- if (get_kprobe((void *)0xdeadbeef))
- printk(KERN_INFO "Kprobe smoke test: probe on 0xdeadbeef!\n");
-
return (value / div_factor);
}
@@ -74,7 +64,7 @@ static int test_kprobe(void)
return ret;
}
- ret = kprobe_target(rand1);
+ ret = target(rand1);
unregister_kprobe(&kp);
if (preh_val == 0) {
@@ -92,6 +82,84 @@ static int test_kprobe(void)
return 0;
}
+static noinline u32 kprobe_target2(u32 value)
+{
+ return (value / div_factor) + 1;
+}
+
+static int kp_pre_handler2(struct kprobe *p, struct pt_regs *regs)
+{
+ preh_val = (rand1 / div_factor) + 1;
+ return 0;
+}
+
+static void kp_post_handler2(struct kprobe *p, struct pt_regs *regs,
+ unsigned long flags)
+{
+ if (preh_val != (rand1 / div_factor) + 1) {
+ handler_errors++;
+ printk(KERN_ERR "Kprobe smoke test failed: "
+ "incorrect value in post_handler2\n");
+ }
+ posth_val = preh_val + div_factor;
+}
+
+static struct kprobe kp2 = {
+ .symbol_name = "kprobe_target2",
+ .pre_handler = kp_pre_handler2,
+ .post_handler = kp_post_handler2
+};
+
+static int test_kprobes(void)
+{
+ int ret;
+ struct kprobe *kps[2] = {&kp, &kp2};
+
+	kp.addr = 0; /* addr should be cleared when reusing a kprobe. */
+ ret = register_kprobes(kps, 2);
+ if (ret < 0) {
+ printk(KERN_ERR "Kprobe smoke test failed: "
+ "register_kprobes returned %d\n", ret);
+ return ret;
+ }
+
+ preh_val = 0;
+ posth_val = 0;
+ ret = target(rand1);
+
+ if (preh_val == 0) {
+ printk(KERN_ERR "Kprobe smoke test failed: "
+ "kprobe pre_handler not called\n");
+ handler_errors++;
+ }
+
+ if (posth_val == 0) {
+ printk(KERN_ERR "Kprobe smoke test failed: "
+ "kprobe post_handler not called\n");
+ handler_errors++;
+ }
+
+ preh_val = 0;
+ posth_val = 0;
+ ret = target2(rand1);
+
+ if (preh_val == 0) {
+ printk(KERN_ERR "Kprobe smoke test failed: "
+ "kprobe pre_handler2 not called\n");
+ handler_errors++;
+ }
+
+ if (posth_val == 0) {
+ printk(KERN_ERR "Kprobe smoke test failed: "
+ "kprobe post_handler2 not called\n");
+ handler_errors++;
+ }
+
+ unregister_kprobes(kps, 2);
+ return 0;
+}
+
static u32 j_kprobe_target(u32 value)
{
if (value != rand1) {
@@ -121,7 +189,7 @@ static int test_jprobe(void)
return ret;
}
- ret = kprobe_target(rand1);
+ ret = target(rand1);
unregister_jprobe(&jp);
if (jph_val == 0) {
printk(KERN_ERR "Kprobe smoke test failed: "
@@ -132,6 +200,43 @@ static int test_jprobe(void)
return 0;
}
+static struct jprobe jp2 = {
+ .entry = j_kprobe_target,
+ .kp.symbol_name = "kprobe_target2"
+};
+
+static int test_jprobes(void)
+{
+ int ret;
+ struct jprobe *jps[2] = {&jp, &jp2};
+
+	jp.kp.addr = 0; /* addr should be cleared when reusing a kprobe. */
+ ret = register_jprobes(jps, 2);
+ if (ret < 0) {
+ printk(KERN_ERR "Kprobe smoke test failed: "
+ "register_jprobes returned %d\n", ret);
+ return ret;
+ }
+
+ jph_val = 0;
+ ret = target(rand1);
+ if (jph_val == 0) {
+ printk(KERN_ERR "Kprobe smoke test failed: "
+ "jprobe handler not called\n");
+ handler_errors++;
+ }
+
+ jph_val = 0;
+ ret = target2(rand1);
+ if (jph_val == 0) {
+ printk(KERN_ERR "Kprobe smoke test failed: "
+ "jprobe handler2 not called\n");
+ handler_errors++;
+ }
+ unregister_jprobes(jps, 2);
+
+ return 0;
+}
#ifdef CONFIG_KRETPROBES
static u32 krph_val;
@@ -177,7 +282,7 @@ static int test_kretprobe(void)
return ret;
}
- ret = kprobe_target(rand1);
+ ret = target(rand1);
unregister_kretprobe(&rp);
if (krph_val != rand1) {
printk(KERN_ERR "Kprobe smoke test failed: "
@@ -187,12 +292,72 @@ static int test_kretprobe(void)
return 0;
}
+
+static int return_handler2(struct kretprobe_instance *ri, struct pt_regs *regs)
+{
+ unsigned long ret = regs_return_value(regs);
+
+ if (ret != (rand1 / div_factor) + 1) {
+ handler_errors++;
+ printk(KERN_ERR "Kprobe smoke test failed: "
+ "incorrect value in kretprobe handler2\n");
+ }
+ if (krph_val == 0) {
+ handler_errors++;
+ printk(KERN_ERR "Kprobe smoke test failed: "
+ "call to kretprobe entry handler failed\n");
+ }
+
+ krph_val = rand1;
+ return 0;
+}
+
+static struct kretprobe rp2 = {
+ .handler = return_handler2,
+ .entry_handler = entry_handler,
+ .kp.symbol_name = "kprobe_target2"
+};
+
+static int test_kretprobes(void)
+{
+ int ret;
+ struct kretprobe *rps[2] = {&rp, &rp2};
+
+	rp.kp.addr = 0; /* addr should be cleared when reusing a kprobe. */
+ ret = register_kretprobes(rps, 2);
+ if (ret < 0) {
+ printk(KERN_ERR "Kprobe smoke test failed: "
+ "register_kretprobe returned %d\n", ret);
+ return ret;
+ }
+
+ krph_val = 0;
+ ret = target(rand1);
+ if (krph_val != rand1) {
+ printk(KERN_ERR "Kprobe smoke test failed: "
+ "kretprobe handler not called\n");
+ handler_errors++;
+ }
+
+ krph_val = 0;
+ ret = target2(rand1);
+ if (krph_val != rand1) {
+ printk(KERN_ERR "Kprobe smoke test failed: "
+ "kretprobe handler2 not called\n");
+ handler_errors++;
+ }
+ unregister_kretprobes(rps, 2);
+ return 0;
+}
#endif /* CONFIG_KRETPROBES */
int init_test_probes(void)
{
int ret;
+ target = kprobe_target;
+ target2 = kprobe_target2;
+
do {
rand1 = random32();
} while (rand1 <= div_factor);
@@ -204,15 +369,30 @@ int init_test_probes(void)
errors++;
num_tests++;
+ ret = test_kprobes();
+ if (ret < 0)
+ errors++;
+
+ num_tests++;
ret = test_jprobe();
if (ret < 0)
errors++;
+ num_tests++;
+ ret = test_jprobes();
+ if (ret < 0)
+ errors++;
+
#ifdef CONFIG_KRETPROBES
num_tests++;
ret = test_kretprobe();
if (ret < 0)
errors++;
+
+ num_tests++;
+ ret = test_kretprobes();
+ if (ret < 0)
+ errors++;
#endif /* CONFIG_KRETPROBES */
if (errors)
diff --git a/kernel/time.c b/kernel/time.c
index d63a4336fad6..4886e3ce83a4 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -37,6 +37,7 @@
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/math64.h>
+#include <linux/ptrace.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
@@ -65,8 +66,9 @@ asmlinkage long sys_time(time_t __user * tloc)
if (tloc) {
if (put_user(i,tloc))
- i = -EFAULT;
+ return -EFAULT;
}
+ force_successful_syscall_return();
return i;
}
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index 1ca99557e929..06f197560f3b 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -45,7 +45,7 @@
*
* The value 8 is somewhat carefully chosen, as anything
* larger can result in overflows. NSEC_PER_JIFFY grows as
- * HZ shrinks, so values greater then 8 overflow 32bits when
+ * HZ shrinks, so values greater than 8 overflow 32bits when
* HZ=100.
*/
#define JIFFIES_SHIFT 8
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index fa05e88aa76f..900f1b6598d1 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -46,6 +46,9 @@ struct timespec xtime __attribute__ ((aligned (16)));
struct timespec wall_to_monotonic __attribute__ ((aligned (16)));
static unsigned long total_sleep_time; /* seconds */
+/* flag for if timekeeping is suspended */
+int __read_mostly timekeeping_suspended;
+
static struct timespec xtime_cache __attribute__ ((aligned (16)));
void update_xtime_cache(u64 nsec)
{
@@ -92,6 +95,8 @@ void getnstimeofday(struct timespec *ts)
unsigned long seq;
s64 nsecs;
+ WARN_ON(timekeeping_suspended);
+
do {
seq = read_seqbegin(&xtime_lock);
@@ -299,8 +304,6 @@ void __init timekeeping_init(void)
write_sequnlock_irqrestore(&xtime_lock, flags);
}
-/* flag for if timekeeping is suspended */
-static int timekeeping_suspended;
/* time in seconds when suspend began */
static unsigned long timekeeping_suspend_time;
diff --git a/kernel/tsacct.c b/kernel/tsacct.c
index 2dc06ab35716..43f891b05a4b 100644
--- a/kernel/tsacct.c
+++ b/kernel/tsacct.c
@@ -92,8 +92,8 @@ void xacct_add_tsk(struct taskstats *stats, struct task_struct *p)
mm = get_task_mm(p);
if (mm) {
/* adjust to KB unit */
- stats->hiwater_rss = mm->hiwater_rss * PAGE_SIZE / KB;
- stats->hiwater_vm = mm->hiwater_vm * PAGE_SIZE / KB;
+ stats->hiwater_rss = get_mm_hiwater_rss(mm) * PAGE_SIZE / KB;
+ stats->hiwater_vm = get_mm_hiwater_vm(mm) * PAGE_SIZE / KB;
mmput(mm);
}
stats->read_char = p->ioac.rchar;