path: root/kernel
author    Takashi Iwai <tiwai@suse.de>    2013-08-23 14:12:22 +0200
committer Takashi Iwai <tiwai@suse.de>    2013-08-23 14:12:22 +0200
commit    68538bf2bce557c3b5fe8c59b034d45352500db1 (patch)
tree      a84b68990cadcbfc277acc7b7f2b75716750e203 /kernel
parent    a8cc20999799a94929a56393ff39b32245e33d64 (diff)
parent    43bcb402f84fe459102120b4e2d28d7117f16cd0 (diff)
Merge tag 'asoc-v3.12' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound into for-next
ASoC: Updates for v3.12

 - DAPM is now mandatory for CODEC drivers in order to avoid the repeated
   regressions in the special cases for non-DAPM CODECs and to make it easier
   to integrate with other components on boards. All existing drivers have had
   some level of DAPM support added.
 - A lot of cleanups in DAPM plus support for maintaining controls in a
   specific state while a DAPM widget, all contributed by Lars-Peter Clausen.
 - Core helpers for bitbanged AC'97 reset from Markus Pargmann.
 - New drivers and support for Analog Devices ADAU1702 and ADAU1401(a),
   Asahi Kasei Microdevices AK4554, Atmel AT91SAM9x5 and WM8904 based
   machines, Freescale S/PDIF and SSI AC'97, Renesas R-Car SoCs, Samsung
   Exynos5420 SoCs, Texas Instruments PCM1681 and PCM1792A and Wolfson
   Microelectronics WM8997.
 - Support for building drivers that can support it cross-platform for
   compile test.
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Makefile | 3
-rw-r--r--  kernel/cgroup.c | 35
-rw-r--r--  kernel/cpu.c | 6
-rw-r--r--  kernel/cpuset.c | 6
-rw-r--r--  kernel/events/core.c | 6
-rw-r--r--  kernel/fork.c | 8
-rw-r--r--  kernel/freezer.c | 2
-rw-r--r--  kernel/hrtimer.c | 6
-rw-r--r--  kernel/mutex.c | 4
-rw-r--r--  kernel/power/autosleep.c | 3
-rw-r--r--  kernel/power/process.c | 11
-rw-r--r--  kernel/power/qos.c | 20
-rw-r--r--  kernel/printk/Makefile | 2
-rw-r--r--  kernel/printk/braille.c | 49
-rw-r--r--  kernel/printk/braille.h | 48
-rw-r--r--  kernel/printk/console_cmdline.h | 14
-rw-r--r--  kernel/printk/printk.c (renamed from kernel/printk.c) | 185
-rw-r--r--  kernel/profile.c | 2
-rw-r--r--  kernel/ptrace.c | 1
-rw-r--r--  kernel/rcutorture.c | 6
-rw-r--r--  kernel/rcutree.c | 6
-rw-r--r--  kernel/rcutree.h | 4
-rw-r--r--  kernel/rcutree_plugin.h | 6
-rw-r--r--  kernel/relay.c | 2
-rw-r--r--  kernel/sched/core.c | 108
-rw-r--r--  kernel/sched/cpupri.c | 4
-rw-r--r--  kernel/sched/fair.c | 16
-rw-r--r--  kernel/smp.c | 2
-rw-r--r--  kernel/smpboot.c | 2
-rw-r--r--  kernel/softirq.c | 8
-rw-r--r--  kernel/sysctl.c | 6
-rw-r--r--  kernel/time/tick-sched.c | 11
-rw-r--r--  kernel/timer.c | 10
-rw-r--r--  kernel/trace/ftrace.c | 105
-rw-r--r--  kernel/trace/ring_buffer.c | 26
-rw-r--r--  kernel/trace/trace.c | 254
-rw-r--r--  kernel/trace/trace.h | 18
-rw-r--r--  kernel/trace/trace_event_perf.c | 10
-rw-r--r--  kernel/trace/trace_events.c | 292
-rw-r--r--  kernel/trace/trace_events_filter.c | 21
-rw-r--r--  kernel/trace/trace_functions.c | 2
-rw-r--r--  kernel/trace/trace_functions_graph.c | 54
-rw-r--r--  kernel/trace/trace_kprobe.c | 50
-rw-r--r--  kernel/trace/trace_mmiotrace.c | 8
-rw-r--r--  kernel/trace/trace_output.c | 14
-rw-r--r--  kernel/trace/trace_syscalls.c | 26
-rw-r--r--  kernel/trace/trace_uprobe.c | 53
-rw-r--r--  kernel/user_namespace.c | 17
-rw-r--r--  kernel/wait.c | 3
-rw-r--r--  kernel/workqueue.c | 48
50 files changed, 971 insertions(+), 632 deletions(-)
diff --git a/kernel/Makefile b/kernel/Makefile
index 470839d1a30e..35ef1185e359 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -2,7 +2,7 @@
# Makefile for the linux kernel.
#
-obj-y = fork.o exec_domain.o panic.o printk.o \
+obj-y = fork.o exec_domain.o panic.o \
cpu.o exit.o itimer.o time.o softirq.o resource.o \
sysctl.o sysctl_binary.o capability.o ptrace.o timer.o user.o \
signal.o sys.o kmod.o workqueue.o pid.o task_work.o \
@@ -24,6 +24,7 @@ endif
obj-y += sched/
obj-y += power/
+obj-y += printk/
obj-y += cpu/
obj-$(CONFIG_CHECKPOINT_RESTORE) += kcmp.o
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 0e0b20b8c5db..781845a013ab 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1845,36 +1845,43 @@ out:
EXPORT_SYMBOL_GPL(cgroup_path);
/**
- * task_cgroup_path_from_hierarchy - cgroup path of a task on a hierarchy
+ * task_cgroup_path - cgroup path of a task in the first cgroup hierarchy
* @task: target task
- * @hierarchy_id: the hierarchy to look up @task's cgroup from
* @buf: the buffer to write the path into
* @buflen: the length of the buffer
*
- * Determine @task's cgroup on the hierarchy specified by @hierarchy_id and
- * copy its path into @buf. This function grabs cgroup_mutex and shouldn't
- * be used inside locks used by cgroup controller callbacks.
+ * Determine @task's cgroup on the first (the one with the lowest non-zero
+ * hierarchy_id) cgroup hierarchy and copy its path into @buf. This
+ * function grabs cgroup_mutex and shouldn't be used inside locks used by
+ * cgroup controller callbacks.
+ *
+ * Returns 0 on success, fails with -%ENAMETOOLONG if @buflen is too short.
*/
-int task_cgroup_path_from_hierarchy(struct task_struct *task, int hierarchy_id,
- char *buf, size_t buflen)
+int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
{
struct cgroupfs_root *root;
- struct cgroup *cgrp = NULL;
- int ret = -ENOENT;
+ struct cgroup *cgrp;
+ int hierarchy_id = 1, ret = 0;
+
+ if (buflen < 2)
+ return -ENAMETOOLONG;
mutex_lock(&cgroup_mutex);
- root = idr_find(&cgroup_hierarchy_idr, hierarchy_id);
+ root = idr_get_next(&cgroup_hierarchy_idr, &hierarchy_id);
+
if (root) {
cgrp = task_cgroup_from_root(task, root);
ret = cgroup_path(cgrp, buf, buflen);
+ } else {
+ /* if no hierarchy exists, everyone is in "/" */
+ memcpy(buf, "/", 2);
}
mutex_unlock(&cgroup_mutex);
-
return ret;
}
-EXPORT_SYMBOL_GPL(task_cgroup_path_from_hierarchy);
+EXPORT_SYMBOL_GPL(task_cgroup_path);
/*
* Control Group taskset
@@ -4328,8 +4335,10 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
}
err = percpu_ref_init(&css->refcnt, css_release);
- if (err)
+ if (err) {
+ ss->css_free(cgrp);
goto err_free_all;
+ }
init_cgroup_css(css, ss, cgrp);
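
The cgroup.c hunks replace task_cgroup_path_from_hierarchy() with task_cgroup_path(), which always reports the path on the lowest-numbered hierarchy and takes cgroup_mutex internally (so it must not be called from cgroup controller callbacks). A minimal caller sketch, with an arbitrarily sized buffer chosen only for illustration:

	char path[256];		/* hypothetical size; -ENAMETOOLONG if too small */

	if (!task_cgroup_path(current, path, sizeof(path)))
		pr_info("task %d cgroup: %s\n", task_pid_nr(current), path);
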
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 198a38883e64..b2b227b82123 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -366,7 +366,7 @@ EXPORT_SYMBOL(cpu_down);
#endif /*CONFIG_HOTPLUG_CPU*/
/* Requires cpu_add_remove_lock to be held */
-static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
+static int _cpu_up(unsigned int cpu, int tasks_frozen)
{
int ret, nr_calls = 0;
void *hcpu = (void *)(long)cpu;
@@ -419,7 +419,7 @@ out:
return ret;
}
-int __cpuinit cpu_up(unsigned int cpu)
+int cpu_up(unsigned int cpu)
{
int err = 0;
@@ -618,7 +618,7 @@ core_initcall(cpu_hotplug_pm_sync_init);
* It must be called by the arch code on the new cpu, before the new cpu
* enables interrupts and before the "boot" cpu returns from __cpu_up().
*/
-void __cpuinit notify_cpu_starting(unsigned int cpu)
+void notify_cpu_starting(unsigned int cpu)
{
unsigned long val = CPU_STARTING;
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index e5657788fedd..010a0083c0ae 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1608,11 +1608,13 @@ static int cpuset_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
struct cpuset *cs = cgroup_cs(cgrp);
cpuset_filetype_t type = cft->private;
- int retval = -ENODEV;
+ int retval = 0;
mutex_lock(&cpuset_mutex);
- if (!is_cpuset_online(cs))
+ if (!is_cpuset_online(cs)) {
+ retval = -ENODEV;
goto out_unlock;
+ }
switch (type) {
case FILE_CPU_EXCLUSIVE:
diff --git a/kernel/events/core.c b/kernel/events/core.c
index eba8fb5834ae..f86599e8c123 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6234,8 +6234,6 @@ perf_event_mux_interval_ms_store(struct device *dev,
return count;
}
-#define __ATTR_RW(attr) __ATTR(attr, 0644, attr##_show, attr##_store)
-
static struct device_attribute pmu_dev_attrs[] = {
__ATTR_RO(type),
__ATTR_RW(perf_event_mux_interval_ms),
@@ -7630,7 +7628,7 @@ static void __init perf_event_init_all_cpus(void)
}
}
-static void __cpuinit perf_event_init_cpu(int cpu)
+static void perf_event_init_cpu(int cpu)
{
struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
@@ -7719,7 +7717,7 @@ static struct notifier_block perf_reboot_notifier = {
.priority = INT_MIN,
};
-static int __cpuinit
+static int
perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
unsigned int cpu = (long)hcpu;
diff --git a/kernel/fork.c b/kernel/fork.c
index 66635c80a813..e23bb19e2a3e 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1546,7 +1546,7 @@ static inline void init_idle_pids(struct pid_link *links)
}
}
-struct task_struct * __cpuinit fork_idle(int cpu)
+struct task_struct *fork_idle(int cpu)
{
struct task_struct *task;
task = copy_process(CLONE_VM, 0, 0, NULL, &init_struct_pid, 0);
@@ -1679,6 +1679,12 @@ SYSCALL_DEFINE5(clone, unsigned long, newsp, unsigned long, clone_flags,
int __user *, parent_tidptr,
int __user *, child_tidptr,
int, tls_val)
+#elif defined(CONFIG_CLONE_BACKWARDS3)
+SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp,
+ int, stack_size,
+ int __user *, parent_tidptr,
+ int __user *, child_tidptr,
+ int, tls_val)
#else
SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
int __user *, parent_tidptr,
diff --git a/kernel/freezer.c b/kernel/freezer.c
index 8b2afc1c9df0..b462fa197517 100644
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -33,7 +33,7 @@ static DEFINE_SPINLOCK(freezer_lock);
*/
bool freezing_slow_path(struct task_struct *p)
{
- if (p->flags & PF_NOFREEZE)
+ if (p->flags & (PF_NOFREEZE | PF_SUSPEND_TASK))
return false;
if (pm_nosig_freezing || cgroup_freezing(p))
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index f0f4fe29cd21..383319bae3f7 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1659,7 +1659,7 @@ SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
/*
* Functions related to boot-time initialization:
*/
-static void __cpuinit init_hrtimers_cpu(int cpu)
+static void init_hrtimers_cpu(int cpu)
{
struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
int i;
@@ -1740,7 +1740,7 @@ static void migrate_hrtimers(int scpu)
#endif /* CONFIG_HOTPLUG_CPU */
-static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
+static int hrtimer_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
int scpu = (long)hcpu;
@@ -1773,7 +1773,7 @@ static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
return NOTIFY_OK;
}
-static struct notifier_block __cpuinitdata hrtimers_nb = {
+static struct notifier_block hrtimers_nb = {
.notifier_call = hrtimer_cpu_notify,
};
diff --git a/kernel/mutex.c b/kernel/mutex.c
index ff05f4bd86eb..a52ee7bb830d 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -686,7 +686,7 @@ __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
might_sleep();
ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
0, &ctx->dep_map, _RET_IP_, ctx);
- if (!ret && ctx->acquired > 0)
+ if (!ret && ctx->acquired > 1)
return ww_mutex_deadlock_injection(lock, ctx);
return ret;
@@ -702,7 +702,7 @@ __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
0, &ctx->dep_map, _RET_IP_, ctx);
- if (!ret && ctx->acquired > 0)
+ if (!ret && ctx->acquired > 1)
return ww_mutex_deadlock_injection(lock, ctx);
return ret;
diff --git a/kernel/power/autosleep.c b/kernel/power/autosleep.c
index c6422ffeda9a..9012ecf7b814 100644
--- a/kernel/power/autosleep.c
+++ b/kernel/power/autosleep.c
@@ -32,7 +32,8 @@ static void try_to_suspend(struct work_struct *work)
mutex_lock(&autosleep_lock);
- if (!pm_save_wakeup_count(initial_count)) {
+ if (!pm_save_wakeup_count(initial_count) ||
+ system_state != SYSTEM_RUNNING) {
mutex_unlock(&autosleep_lock);
goto out;
}
diff --git a/kernel/power/process.c b/kernel/power/process.c
index fc0df8486449..06ec8869dbf1 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -109,6 +109,8 @@ static int try_to_freeze_tasks(bool user_only)
/**
* freeze_processes - Signal user space processes to enter the refrigerator.
+ * The current thread will not be frozen. The same process that calls
+ * freeze_processes must later call thaw_processes.
*
* On success, returns 0. On failure, -errno and system is fully thawed.
*/
@@ -120,6 +122,9 @@ int freeze_processes(void)
if (error)
return error;
+ /* Make sure this task doesn't get frozen */
+ current->flags |= PF_SUSPEND_TASK;
+
if (!pm_freezing)
atomic_inc(&system_freezing_cnt);
@@ -168,6 +173,7 @@ int freeze_kernel_threads(void)
void thaw_processes(void)
{
struct task_struct *g, *p;
+ struct task_struct *curr = current;
if (pm_freezing)
atomic_dec(&system_freezing_cnt);
@@ -182,10 +188,15 @@ void thaw_processes(void)
read_lock(&tasklist_lock);
do_each_thread(g, p) {
+ /* No other threads should have PF_SUSPEND_TASK set */
+ WARN_ON((p != curr) && (p->flags & PF_SUSPEND_TASK));
__thaw_task(p);
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
+ WARN_ON(!(curr->flags & PF_SUSPEND_TASK));
+ curr->flags &= ~PF_SUSPEND_TASK;
+
usermodehelper_enable();
schedule();
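
A condensed sketch of the calling convention the new PF_SUSPEND_TASK handling and kernel-doc describe: the task that calls freeze_processes() tags itself so the freezer skips it, and that same task is expected to call thaw_processes(), which clears the flag again (error handling abbreviated, illustrative only):

	int error = freeze_processes();	/* marks current with PF_SUSPEND_TASK */
	if (error)
		return error;		/* on failure the system is already fully thawed */

	/* ... the suspend/hibernation sequence runs in this same task ... */

	thaw_processes();		/* warns if any other task carries the flag,
					   then clears it on current */
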
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 06fe28589e9c..a394297f8b2f 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -296,6 +296,17 @@ int pm_qos_request_active(struct pm_qos_request *req)
}
EXPORT_SYMBOL_GPL(pm_qos_request_active);
+static void __pm_qos_update_request(struct pm_qos_request *req,
+ s32 new_value)
+{
+ trace_pm_qos_update_request(req->pm_qos_class, new_value);
+
+ if (new_value != req->node.prio)
+ pm_qos_update_target(
+ pm_qos_array[req->pm_qos_class]->constraints,
+ &req->node, PM_QOS_UPDATE_REQ, new_value);
+}
+
/**
* pm_qos_work_fn - the timeout handler of pm_qos_update_request_timeout
* @work: work struct for the delayed work (timeout)
@@ -308,7 +319,7 @@ static void pm_qos_work_fn(struct work_struct *work)
struct pm_qos_request,
work);
- pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE);
+ __pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE);
}
/**
@@ -364,12 +375,7 @@ void pm_qos_update_request(struct pm_qos_request *req,
}
cancel_delayed_work_sync(&req->work);
-
- trace_pm_qos_update_request(req->pm_qos_class, new_value);
- if (new_value != req->node.prio)
- pm_qos_update_target(
- pm_qos_array[req->pm_qos_class]->constraints,
- &req->node, PM_QOS_UPDATE_REQ, new_value);
+ __pm_qos_update_request(req, new_value);
}
EXPORT_SYMBOL_GPL(pm_qos_update_request);
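
For context on the path being reworked here, a hedged sketch of how a driver might use the timeout variant whose work handler now goes through the new internal helper (the class and values below are example choices, not taken from this patch):

	struct pm_qos_request req;

	/* add a CPU/DMA latency constraint of 20 usec ... */
	pm_qos_add_request(&req, PM_QOS_CPU_DMA_LATENCY, 20);
	/* ... and let the delayed work drop it back to the default after one second */
	pm_qos_update_request_timeout(&req, 20, 1000 * USEC_PER_MSEC);
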
diff --git a/kernel/printk/Makefile b/kernel/printk/Makefile
new file mode 100644
index 000000000000..85405bdcf2b3
--- /dev/null
+++ b/kernel/printk/Makefile
@@ -0,0 +1,2 @@
+obj-y = printk.o
+obj-$(CONFIG_A11Y_BRAILLE_CONSOLE) += braille.o
diff --git a/kernel/printk/braille.c b/kernel/printk/braille.c
new file mode 100644
index 000000000000..276762f3a460
--- /dev/null
+++ b/kernel/printk/braille.c
@@ -0,0 +1,49 @@
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/console.h>
+#include <linux/string.h>
+
+#include "console_cmdline.h"
+#include "braille.h"
+
+char *_braille_console_setup(char **str, char **brl_options)
+{
+ if (!memcmp(*str, "brl,", 4)) {
+ *brl_options = "";
+ *str += 4;
+ } else if (!memcmp(str, "brl=", 4)) {
+ *brl_options = *str + 4;
+ *str = strchr(*brl_options, ',');
+ if (!*str)
+ pr_err("need port name after brl=\n");
+ else
+ *((*str)++) = 0;
+ } else
+ return NULL;
+
+ return *str;
+}
+
+int
+_braille_register_console(struct console *console, struct console_cmdline *c)
+{
+ int rtn = 0;
+
+ if (c->brl_options) {
+ console->flags |= CON_BRL;
+ rtn = braille_register_console(console, c->index, c->options,
+ c->brl_options);
+ }
+
+ return rtn;
+}
+
+int
+_braille_unregister_console(struct console *console)
+{
+ if (console->flags & CON_BRL)
+ return braille_unregister_console(console);
+
+ return 0;
+}
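
To illustrate the parsing that _braille_console_setup() now factors out of console_setup(): for a command-line argument such as console=brl,ttyS0 (an example value, not taken from this patch), the helper strips the braille prefix and leaves the remaining console name in place:

	char arg[] = "brl,ttyS0";		/* example console= argument */
	char *str = arg, *brl_options = NULL;

	_braille_console_setup(&str, &brl_options);
	/* str now points at "ttyS0" and brl_options at "", i.e. a braille
	   console was requested with no extra port options */
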
diff --git a/kernel/printk/braille.h b/kernel/printk/braille.h
new file mode 100644
index 000000000000..769d771145c8
--- /dev/null
+++ b/kernel/printk/braille.h
@@ -0,0 +1,48 @@
+#ifndef _PRINTK_BRAILLE_H
+#define _PRINTK_BRAILLE_H
+
+#ifdef CONFIG_A11Y_BRAILLE_CONSOLE
+
+static inline void
+braille_set_options(struct console_cmdline *c, char *brl_options)
+{
+ c->brl_options = brl_options;
+}
+
+char *
+_braille_console_setup(char **str, char **brl_options);
+
+int
+_braille_register_console(struct console *console, struct console_cmdline *c);
+
+int
+_braille_unregister_console(struct console *console);
+
+#else
+
+static inline void
+braille_set_options(struct console_cmdline *c, char *brl_options)
+{
+}
+
+static inline char *
+_braille_console_setup(char **str, char **brl_options)
+{
+ return NULL;
+}
+
+static inline int
+_braille_register_console(struct console *console, struct console_cmdline *c)
+{
+ return 0;
+}
+
+static inline int
+_braille_unregister_console(struct console *console)
+{
+ return 0;
+}
+
+#endif
+
+#endif
diff --git a/kernel/printk/console_cmdline.h b/kernel/printk/console_cmdline.h
new file mode 100644
index 000000000000..cbd69d842341
--- /dev/null
+++ b/kernel/printk/console_cmdline.h
@@ -0,0 +1,14 @@
+#ifndef _CONSOLE_CMDLINE_H
+#define _CONSOLE_CMDLINE_H
+
+struct console_cmdline
+{
+ char name[8]; /* Name of the driver */
+ int index; /* Minor dev. to use */
+ char *options; /* Options for the driver */
+#ifdef CONFIG_A11Y_BRAILLE_CONSOLE
+ char *brl_options; /* Options for braille driver */
+#endif
+};
+
+#endif
diff --git a/kernel/printk.c b/kernel/printk/printk.c
index d37d45c90ae6..5b5a7080e2a5 100644
--- a/kernel/printk.c
+++ b/kernel/printk/printk.c
@@ -51,6 +51,9 @@
#define CREATE_TRACE_POINTS
#include <trace/events/printk.h>
+#include "console_cmdline.h"
+#include "braille.h"
+
/* printk's without a loglevel use this.. */
#define DEFAULT_MESSAGE_LOGLEVEL CONFIG_DEFAULT_MESSAGE_LOGLEVEL
@@ -105,19 +108,11 @@ static struct console *exclusive_console;
/*
* Array of consoles built from command line options (console=)
*/
-struct console_cmdline
-{
- char name[8]; /* Name of the driver */
- int index; /* Minor dev. to use */
- char *options; /* Options for the driver */
-#ifdef CONFIG_A11Y_BRAILLE_CONSOLE
- char *brl_options; /* Options for braille driver */
-#endif
-};
#define MAX_CMDLINECONSOLES 8
static struct console_cmdline console_cmdline[MAX_CMDLINECONSOLES];
+
static int selected_console = -1;
static int preferred_console = -1;
int console_set_on_cmdline;
@@ -178,7 +173,7 @@ static int console_may_schedule;
* 67 "g"
* 0032 00 00 00 padding to next message header
*
- * The 'struct log' buffer header must never be directly exported to
+ * The 'struct printk_log' buffer header must never be directly exported to
* userspace, it is a kernel-private implementation detail that might
* need to be changed in the future, when the requirements change.
*
@@ -200,7 +195,7 @@ enum log_flags {
LOG_CONT = 8, /* text is a fragment of a continuation line */
};
-struct log {
+struct printk_log {
u64 ts_nsec; /* timestamp in nanoseconds */
u16 len; /* length of entire record */
u16 text_len; /* length of text buffer */
@@ -248,7 +243,7 @@ static u32 clear_idx;
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
#define LOG_ALIGN 4
#else
-#define LOG_ALIGN __alignof__(struct log)
+#define LOG_ALIGN __alignof__(struct printk_log)
#endif
#define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN);
@@ -259,35 +254,35 @@ static u32 log_buf_len = __LOG_BUF_LEN;
static volatile unsigned int logbuf_cpu = UINT_MAX;
/* human readable text of the record */
-static char *log_text(const struct log *msg)
+static char *log_text(const struct printk_log *msg)
{
- return (char *)msg + sizeof(struct log);
+ return (char *)msg + sizeof(struct printk_log);
}
/* optional key/value pair dictionary attached to the record */
-static char *log_dict(const struct log *msg)
+static char *log_dict(const struct printk_log *msg)
{
- return (char *)msg + sizeof(struct log) + msg->text_len;
+ return (char *)msg + sizeof(struct printk_log) + msg->text_len;
}
/* get record by index; idx must point to valid msg */
-static struct log *log_from_idx(u32 idx)
+static struct printk_log *log_from_idx(u32 idx)
{
- struct log *msg = (struct log *)(log_buf + idx);
+ struct printk_log *msg = (struct printk_log *)(log_buf + idx);
/*
* A length == 0 record is the end of buffer marker. Wrap around and
* read the message at the start of the buffer.
*/
if (!msg->len)
- return (struct log *)log_buf;
+ return (struct printk_log *)log_buf;
return msg;
}
/* get next record; idx must point to valid msg */
static u32 log_next(u32 idx)
{
- struct log *msg = (struct log *)(log_buf + idx);
+ struct printk_log *msg = (struct printk_log *)(log_buf + idx);
/* length == 0 indicates the end of the buffer; wrap */
/*
@@ -296,7 +291,7 @@ static u32 log_next(u32 idx)
* return the one after that.
*/
if (!msg->len) {
- msg = (struct log *)log_buf;
+ msg = (struct printk_log *)log_buf;
return msg->len;
}
return idx + msg->len;
@@ -308,11 +303,11 @@ static void log_store(int facility, int level,
const char *dict, u16 dict_len,
const char *text, u16 text_len)
{
- struct log *msg;
+ struct printk_log *msg;
u32 size, pad_len;
/* number of '\0' padding bytes to next message */
- size = sizeof(struct log) + text_len + dict_len;
+ size = sizeof(struct printk_log) + text_len + dict_len;
pad_len = (-size) & (LOG_ALIGN - 1);
size += pad_len;
@@ -324,7 +319,7 @@ static void log_store(int facility, int level,
else
free = log_first_idx - log_next_idx;
- if (free > size + sizeof(struct log))
+ if (free > size + sizeof(struct printk_log))
break;
/* drop old messages until we have enough contiuous space */
@@ -332,18 +327,18 @@ static void log_store(int facility, int level,
log_first_seq++;
}
- if (log_next_idx + size + sizeof(struct log) >= log_buf_len) {
+ if (log_next_idx + size + sizeof(struct printk_log) >= log_buf_len) {
/*
* This message + an additional empty header does not fit
* at the end of the buffer. Add an empty header with len == 0
* to signify a wrap around.
*/
- memset(log_buf + log_next_idx, 0, sizeof(struct log));
+ memset(log_buf + log_next_idx, 0, sizeof(struct printk_log));
log_next_idx = 0;
}
/* fill message */
- msg = (struct log *)(log_buf + log_next_idx);
+ msg = (struct printk_log *)(log_buf + log_next_idx);
memcpy(log_text(msg), text, text_len);
msg->text_len = text_len;
memcpy(log_dict(msg), dict, dict_len);
@@ -356,7 +351,7 @@ static void log_store(int facility, int level,
else
msg->ts_nsec = local_clock();
memset(log_dict(msg) + dict_len, 0, pad_len);
- msg->len = sizeof(struct log) + text_len + dict_len + pad_len;
+ msg->len = sizeof(struct printk_log) + text_len + dict_len + pad_len;
/* insert message */
log_next_idx += msg->len;
@@ -479,7 +474,7 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
struct devkmsg_user *user = file->private_data;
- struct log *msg;
+ struct printk_log *msg;
u64 ts_usec;
size_t i;
char cont = '-';
@@ -724,14 +719,14 @@ void log_buf_kexec_setup(void)
VMCOREINFO_SYMBOL(log_first_idx);
VMCOREINFO_SYMBOL(log_next_idx);
/*
- * Export struct log size and field offsets. User space tools can
+ * Export struct printk_log size and field offsets. User space tools can
* parse it and detect any changes to structure down the line.
*/
- VMCOREINFO_STRUCT_SIZE(log);
- VMCOREINFO_OFFSET(log, ts_nsec);
- VMCOREINFO_OFFSET(log, len);
- VMCOREINFO_OFFSET(log, text_len);
- VMCOREINFO_OFFSET(log, dict_len);
+ VMCOREINFO_STRUCT_SIZE(printk_log);
+ VMCOREINFO_OFFSET(printk_log, ts_nsec);
+ VMCOREINFO_OFFSET(printk_log, len);
+ VMCOREINFO_OFFSET(printk_log, text_len);
+ VMCOREINFO_OFFSET(printk_log, dict_len);
}
#endif
@@ -884,7 +879,7 @@ static size_t print_time(u64 ts, char *buf)
(unsigned long)ts, rem_nsec / 1000);
}
-static size_t print_prefix(const struct log *msg, bool syslog, char *buf)
+static size_t print_prefix(const struct printk_log *msg, bool syslog, char *buf)
{
size_t len = 0;
unsigned int prefix = (msg->facility << 3) | msg->level;
@@ -907,7 +902,7 @@ static size_t print_prefix(const struct log *msg, bool syslog, char *buf)
return len;
}
-static size_t msg_print_text(const struct log *msg, enum log_flags prev,
+static size_t msg_print_text(const struct printk_log *msg, enum log_flags prev,
bool syslog, char *buf, size_t size)
{
const char *text = log_text(msg);
@@ -969,7 +964,7 @@ static size_t msg_print_text(const struct log *msg, enum log_flags prev,
static int syslog_print(char __user *buf, int size)
{
char *text;
- struct log *msg;
+ struct printk_log *msg;
int len = 0;
text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
@@ -1060,7 +1055,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
idx = clear_idx;
prev = 0;
while (seq < log_next_seq) {
- struct log *msg = log_from_idx(idx);
+ struct printk_log *msg = log_from_idx(idx);
len += msg_print_text(msg, prev, true, NULL, 0);
prev = msg->flags;
@@ -1073,7 +1068,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
idx = clear_idx;
prev = 0;
while (len > size && seq < log_next_seq) {
- struct log *msg = log_from_idx(idx);
+ struct printk_log *msg = log_from_idx(idx);
len -= msg_print_text(msg, prev, true, NULL, 0);
prev = msg->flags;
@@ -1087,7 +1082,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
len = 0;
prev = 0;
while (len >= 0 && seq < next_seq) {
- struct log *msg = log_from_idx(idx);
+ struct printk_log *msg = log_from_idx(idx);
int textlen;
textlen = msg_print_text(msg, prev, true, text,
@@ -1233,7 +1228,7 @@ int do_syslog(int type, char __user *buf, int len, bool from_file)
error = 0;
while (seq < log_next_seq) {
- struct log *msg = log_from_idx(idx);
+ struct printk_log *msg = log_from_idx(idx);
error += msg_print_text(msg, prev, true, NULL, 0);
idx = log_next(idx);
@@ -1719,10 +1714,10 @@ static struct cont {
u8 level;
bool flushed:1;
} cont;
-static struct log *log_from_idx(u32 idx) { return NULL; }
+static struct printk_log *log_from_idx(u32 idx) { return NULL; }
static u32 log_next(u32 idx) { return 0; }
static void call_console_drivers(int level, const char *text, size_t len) {}
-static size_t msg_print_text(const struct log *msg, enum log_flags prev,
+static size_t msg_print_text(const struct printk_log *msg, enum log_flags prev,
bool syslog, char *buf, size_t size) { return 0; }
static size_t cont_print_text(char *text, size_t size) { return 0; }
@@ -1761,23 +1756,23 @@ static int __add_preferred_console(char *name, int idx, char *options,
* See if this tty is not yet registered, and
* if we have a slot free.
*/
- for (i = 0; i < MAX_CMDLINECONSOLES && console_cmdline[i].name[0]; i++)
- if (strcmp(console_cmdline[i].name, name) == 0 &&
- console_cmdline[i].index == idx) {
- if (!brl_options)
- selected_console = i;
- return 0;
+ for (i = 0, c = console_cmdline;
+ i < MAX_CMDLINECONSOLES && c->name[0];
+ i++, c++) {
+ if (strcmp(c->name, name) == 0 && c->index == idx) {
+ if (!brl_options)
+ selected_console = i;
+ return 0;
}
+ }
if (i == MAX_CMDLINECONSOLES)
return -E2BIG;
if (!brl_options)
selected_console = i;
- c = &console_cmdline[i];
strlcpy(c->name, name, sizeof(c->name));
c->options = options;
-#ifdef CONFIG_A11Y_BRAILLE_CONSOLE
- c->brl_options = brl_options;
-#endif
+ braille_set_options(c, brl_options);
+
c->index = idx;
return 0;
}
@@ -1790,20 +1785,8 @@ static int __init console_setup(char *str)
char *s, *options, *brl_options = NULL;
int idx;
-#ifdef CONFIG_A11Y_BRAILLE_CONSOLE
- if (!memcmp(str, "brl,", 4)) {
- brl_options = "";
- str += 4;
- } else if (!memcmp(str, "brl=", 4)) {
- brl_options = str + 4;
- str = strchr(brl_options, ',');
- if (!str) {
- printk(KERN_ERR "need port name after brl=\n");
- return 1;
- }
- *(str++) = 0;
- }
-#endif
+ if (_braille_console_setup(&str, &brl_options))
+ return 1;
/*
* Decode str into name, index, options.
@@ -1858,15 +1841,15 @@ int update_console_cmdline(char *name, int idx, char *name_new, int idx_new, cha
struct console_cmdline *c;
int i;
- for (i = 0; i < MAX_CMDLINECONSOLES && console_cmdline[i].name[0]; i++)
- if (strcmp(console_cmdline[i].name, name) == 0 &&
- console_cmdline[i].index == idx) {
- c = &console_cmdline[i];
- strlcpy(c->name, name_new, sizeof(c->name));
- c->name[sizeof(c->name) - 1] = 0;
- c->options = options;
- c->index = idx_new;
- return i;
+ for (i = 0, c = console_cmdline;
+ i < MAX_CMDLINECONSOLES && c->name[0];
+ i++, c++)
+ if (strcmp(c->name, name) == 0 && c->index == idx) {
+ strlcpy(c->name, name_new, sizeof(c->name));
+ c->name[sizeof(c->name) - 1] = 0;
+ c->options = options;
+ c->index = idx_new;
+ return i;
}
/* not found */
return -1;
@@ -1921,7 +1904,7 @@ void resume_console(void)
* called when a new CPU comes online (or fails to come up), and ensures
* that any such output gets printed.
*/
-static int __cpuinit console_cpu_notify(struct notifier_block *self,
+static int console_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
switch (action) {
@@ -2046,7 +2029,7 @@ void console_unlock(void)
console_cont_flush(text, sizeof(text));
again:
for (;;) {
- struct log *msg;
+ struct printk_log *msg;
size_t len;
int level;
@@ -2241,6 +2224,7 @@ void register_console(struct console *newcon)
int i;
unsigned long flags;
struct console *bcon = NULL;
+ struct console_cmdline *c;
/*
* before we register a new CON_BOOT console, make sure we don't
@@ -2288,30 +2272,25 @@ void register_console(struct console *newcon)
* See if this console matches one we selected on
* the command line.
*/
- for (i = 0; i < MAX_CMDLINECONSOLES && console_cmdline[i].name[0];
- i++) {
- if (strcmp(console_cmdline[i].name, newcon->name) != 0)
+ for (i = 0, c = console_cmdline;
+ i < MAX_CMDLINECONSOLES && c->name[0];
+ i++, c++) {
+ if (strcmp(c->name, newcon->name) != 0)
continue;
if (newcon->index >= 0 &&
- newcon->index != console_cmdline[i].index)
+ newcon->index != c->index)
continue;
if (newcon->index < 0)
- newcon->index = console_cmdline[i].index;
-#ifdef CONFIG_A11Y_BRAILLE_CONSOLE
- if (console_cmdline[i].brl_options) {
- newcon->flags |= CON_BRL;
- braille_register_console(newcon,
- console_cmdline[i].index,
- console_cmdline[i].options,
- console_cmdline[i].brl_options);
+ newcon->index = c->index;
+
+ if (_braille_register_console(newcon, c))
return;
- }
-#endif
+
if (newcon->setup &&
newcon->setup(newcon, console_cmdline[i].options) != 0)
break;
newcon->flags |= CON_ENABLED;
- newcon->index = console_cmdline[i].index;
+ newcon->index = c->index;
if (i == selected_console) {
newcon->flags |= CON_CONSDEV;
preferred_console = selected_console;
@@ -2394,13 +2373,13 @@ EXPORT_SYMBOL(register_console);
int unregister_console(struct console *console)
{
struct console *a, *b;
- int res = 1;
+ int res;
-#ifdef CONFIG_A11Y_BRAILLE_CONSOLE
- if (console->flags & CON_BRL)
- return braille_unregister_console(console);
-#endif
+ res = _braille_unregister_console(console);
+ if (res)
+ return res;
+ res = 1;
console_lock();
if (console_drivers == console) {
console_drivers=console->next;
@@ -2666,7 +2645,7 @@ void kmsg_dump(enum kmsg_dump_reason reason)
bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog,
char *line, size_t size, size_t *len)
{
- struct log *msg;
+ struct printk_log *msg;
size_t l = 0;
bool ret = false;
@@ -2778,7 +2757,7 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
idx = dumper->cur_idx;
prev = 0;
while (seq < dumper->next_seq) {
- struct log *msg = log_from_idx(idx);
+ struct printk_log *msg = log_from_idx(idx);
l += msg_print_text(msg, prev, true, NULL, 0);
idx = log_next(idx);
@@ -2791,7 +2770,7 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
idx = dumper->cur_idx;
prev = 0;
while (l > size && seq < dumper->next_seq) {
- struct log *msg = log_from_idx(idx);
+ struct printk_log *msg = log_from_idx(idx);
l -= msg_print_text(msg, prev, true, NULL, 0);
idx = log_next(idx);
@@ -2806,7 +2785,7 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
l = 0;
prev = 0;
while (seq < dumper->next_seq) {
- struct log *msg = log_from_idx(idx);
+ struct printk_log *msg = log_from_idx(idx);
l += msg_print_text(msg, prev, syslog, buf + l, size - l);
idx = log_next(idx);
diff --git a/kernel/profile.c b/kernel/profile.c
index 0bf400737660..6631e1ef55ab 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -331,7 +331,7 @@ out:
put_cpu();
}
-static int __cpuinit profile_cpu_callback(struct notifier_block *info,
+static int profile_cpu_callback(struct notifier_block *info,
unsigned long action, void *__cpu)
{
int node, cpu = (unsigned long)__cpu;
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 4041f5747e73..a146ee327f6a 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -469,7 +469,6 @@ static int ptrace_detach(struct task_struct *child, unsigned int data)
/* Architecture-specific hardware disable .. */
ptrace_disable(child);
clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
- flush_ptrace_hw_breakpoint(child);
write_lock_irq(&tasklist_lock);
/*
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index b1fa5510388d..f4871e52c546 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -1476,7 +1476,7 @@ rcu_torture_shutdown(void *arg)
* Execute random CPU-hotplug operations at the interval specified
* by the onoff_interval.
*/
-static int __cpuinit
+static int
rcu_torture_onoff(void *arg)
{
int cpu;
@@ -1558,7 +1558,7 @@ rcu_torture_onoff(void *arg)
return 0;
}
-static int __cpuinit
+static int
rcu_torture_onoff_init(void)
{
int ret;
@@ -1601,7 +1601,7 @@ static void rcu_torture_onoff_cleanup(void)
* CPU-stall kthread. It waits as specified by stall_cpu_holdoff, then
* induces a CPU stall for the time specified by stall_cpu.
*/
-static int __cpuinit rcu_torture_stall(void *args)
+static int rcu_torture_stall(void *args)
{
unsigned long stop_at;
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index e08abb9461ac..068de3a93606 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -2910,7 +2910,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
* can accept some slop in the rsp->completed access due to the fact
* that this CPU cannot possibly have any RCU callbacks in flight yet.
*/
-static void __cpuinit
+static void
rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
{
unsigned long flags;
@@ -2962,7 +2962,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
mutex_unlock(&rsp->onoff_mutex);
}
-static void __cpuinit rcu_prepare_cpu(int cpu)
+static void rcu_prepare_cpu(int cpu)
{
struct rcu_state *rsp;
@@ -2974,7 +2974,7 @@ static void __cpuinit rcu_prepare_cpu(int cpu)
/*
* Handle CPU online/offline notification events.
*/
-static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
+static int rcu_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
long cpu = (long)hcpu;
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 4a39d364493c..b3832581043c 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -521,10 +521,10 @@ static void invoke_rcu_callbacks_kthread(void);
static bool rcu_is_callbacks_kthread(void);
#ifdef CONFIG_RCU_BOOST
static void rcu_preempt_do_callbacks(void);
-static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
+static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
struct rcu_node *rnp);
#endif /* #ifdef CONFIG_RCU_BOOST */
-static void __cpuinit rcu_prepare_kthreads(int cpu);
+static void rcu_prepare_kthreads(int cpu);
static void rcu_cleanup_after_idle(int cpu);
static void rcu_prepare_for_idle(int cpu);
static void rcu_idle_count_callbacks_posted(void);
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 63098a59216e..769e12e3151b 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -1352,7 +1352,7 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
* already exist. We only create this kthread for preemptible RCU.
* Returns zero if all is well, a negated errno otherwise.
*/
-static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
+static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
struct rcu_node *rnp)
{
int rnp_index = rnp - &rsp->node[0];
@@ -1507,7 +1507,7 @@ static int __init rcu_spawn_kthreads(void)
}
early_initcall(rcu_spawn_kthreads);
-static void __cpuinit rcu_prepare_kthreads(int cpu)
+static void rcu_prepare_kthreads(int cpu)
{
struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
struct rcu_node *rnp = rdp->mynode;
@@ -1549,7 +1549,7 @@ static int __init rcu_scheduler_really_started(void)
}
early_initcall(rcu_scheduler_really_started);
-static void __cpuinit rcu_prepare_kthreads(int cpu)
+static void rcu_prepare_kthreads(int cpu)
{
}
diff --git a/kernel/relay.c b/kernel/relay.c
index b91488ba2e5a..5001c9887db1 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -516,7 +516,7 @@ static void setup_callbacks(struct rchan *chan,
*
* Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
*/
-static int __cpuinit relay_hotcpu_callback(struct notifier_block *nb,
+static int relay_hotcpu_callback(struct notifier_block *nb,
unsigned long action,
void *hcpu)
{
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 0d8eb4525e76..05c39f030314 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -933,6 +933,8 @@ static int effective_prio(struct task_struct *p)
/**
* task_curr - is this task currently executing on a CPU?
* @p: the task in question.
+ *
+ * Return: 1 if the task is currently executing. 0 otherwise.
*/
inline int task_curr(const struct task_struct *p)
{
@@ -1482,7 +1484,7 @@ static void ttwu_queue(struct task_struct *p, int cpu)
* the simpler "current->state = TASK_RUNNING" to mark yourself
* runnable without the overhead of this.
*
- * Returns %true if @p was woken up, %false if it was already running
+ * Return: %true if @p was woken up, %false if it was already running.
* or @state didn't match @p's state.
*/
static int
@@ -1491,7 +1493,13 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
unsigned long flags;
int cpu, success = 0;
- smp_wmb();
+ /*
+ * If we are going to wake up a thread waiting for CONDITION we
+ * need to ensure that CONDITION=1 done by the caller can not be
+ * reordered with p->state check below. This pairs with mb() in
+ * set_current_state() the waiting thread does.
+ */
+ smp_mb__before_spinlock();
raw_spin_lock_irqsave(&p->pi_lock, flags);
if (!(p->state & state))
goto out;
@@ -1577,8 +1585,9 @@ out:
* @p: The process to be woken up.
*
* Attempt to wake up the nominated process and move it to the set of runnable
- * processes. Returns 1 if the process was woken up, 0 if it was already
- * running.
+ * processes.
+ *
+ * Return: 1 if the process was woken up, 0 if it was already running.
*
* It may be assumed that this function implies a write memory barrier before
* changing the task state if and only if any tasks are woken up.
@@ -2191,6 +2200,8 @@ void scheduler_tick(void)
* This makes sure that uptime, CFS vruntime, load
* balancing, etc... continue to move forward, even
* with a very low granularity.
+ *
+ * Return: Maximum deferment in nanoseconds.
*/
u64 scheduler_tick_max_deferment(void)
{
@@ -2394,6 +2405,12 @@ need_resched:
if (sched_feat(HRTICK))
hrtick_clear(rq);
+ /*
+ * Make sure that signal_pending_state()->signal_pending() below
+ * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
+ * done by the caller to avoid the race with signal_wake_up().
+ */
+ smp_mb__before_spinlock();
raw_spin_lock_irq(&rq->lock);
switch_count = &prev->nivcsw;
@@ -2796,8 +2813,8 @@ EXPORT_SYMBOL(wait_for_completion);
* specified timeout to expire. The timeout is in jiffies. It is not
* interruptible.
*
- * The return value is 0 if timed out, and positive (at least 1, or number of
- * jiffies left till timeout) if completed.
+ * Return: 0 if timed out, and positive (at least 1, or number of jiffies left
+ * till timeout) if completed.
*/
unsigned long __sched
wait_for_completion_timeout(struct completion *x, unsigned long timeout)
@@ -2829,8 +2846,8 @@ EXPORT_SYMBOL(wait_for_completion_io);
* specified timeout to expire. The timeout is in jiffies. It is not
* interruptible. The caller is accounted as waiting for IO.
*
- * The return value is 0 if timed out, and positive (at least 1, or number of
- * jiffies left till timeout) if completed.
+ * Return: 0 if timed out, and positive (at least 1, or number of jiffies left
+ * till timeout) if completed.
*/
unsigned long __sched
wait_for_completion_io_timeout(struct completion *x, unsigned long timeout)
@@ -2846,7 +2863,7 @@ EXPORT_SYMBOL(wait_for_completion_io_timeout);
* This waits for completion of a specific task to be signaled. It is
* interruptible.
*
- * The return value is -ERESTARTSYS if interrupted, 0 if completed.
+ * Return: -ERESTARTSYS if interrupted, 0 if completed.
*/
int __sched wait_for_completion_interruptible(struct completion *x)
{
@@ -2865,8 +2882,8 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
* This waits for either a completion of a specific task to be signaled or for a
* specified timeout to expire. It is interruptible. The timeout is in jiffies.
*
- * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
- * positive (at least 1, or number of jiffies left till timeout) if completed.
+ * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
+ * or number of jiffies left till timeout) if completed.
*/
long __sched
wait_for_completion_interruptible_timeout(struct completion *x,
@@ -2883,7 +2900,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
* This waits to be signaled for completion of a specific task. It can be
* interrupted by a kill signal.
*
- * The return value is -ERESTARTSYS if interrupted, 0 if completed.
+ * Return: -ERESTARTSYS if interrupted, 0 if completed.
*/
int __sched wait_for_completion_killable(struct completion *x)
{
@@ -2903,8 +2920,8 @@ EXPORT_SYMBOL(wait_for_completion_killable);
* signaled or for a specified timeout to expire. It can be
* interrupted by a kill signal. The timeout is in jiffies.
*
- * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
- * positive (at least 1, or number of jiffies left till timeout) if completed.
+ * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
+ * or number of jiffies left till timeout) if completed.
*/
long __sched
wait_for_completion_killable_timeout(struct completion *x,
@@ -2918,7 +2935,7 @@ EXPORT_SYMBOL(wait_for_completion_killable_timeout);
* try_wait_for_completion - try to decrement a completion without blocking
* @x: completion structure
*
- * Returns: 0 if a decrement cannot be done without blocking
+ * Return: 0 if a decrement cannot be done without blocking
* 1 if a decrement succeeded.
*
* If a completion is being used as a counting completion,
@@ -2945,7 +2962,7 @@ EXPORT_SYMBOL(try_wait_for_completion);
* completion_done - Test to see if a completion has any waiters
* @x: completion structure
*
- * Returns: 0 if there are waiters (wait_for_completion() in progress)
+ * Return: 0 if there are waiters (wait_for_completion() in progress)
* 1 if there are no waiters.
*
*/
@@ -3182,7 +3199,7 @@ SYSCALL_DEFINE1(nice, int, increment)
* task_prio - return the priority value of a given task.
* @p: the task in question.
*
- * This is the priority value as seen by users in /proc.
+ * Return: The priority value as seen by users in /proc.
* RT tasks are offset by -200. Normal tasks are centered
* around 0, value goes from -16 to +15.
*/
@@ -3194,6 +3211,8 @@ int task_prio(const struct task_struct *p)
/**
* task_nice - return the nice value of a given task.
* @p: the task in question.
+ *
+ * Return: The nice value [ -20 ... 0 ... 19 ].
*/
int task_nice(const struct task_struct *p)
{
@@ -3204,6 +3223,8 @@ EXPORT_SYMBOL(task_nice);
/**
* idle_cpu - is a given cpu idle currently?
* @cpu: the processor in question.
+ *
+ * Return: 1 if the CPU is currently idle. 0 otherwise.
*/
int idle_cpu(int cpu)
{
@@ -3226,6 +3247,8 @@ int idle_cpu(int cpu)
/**
* idle_task - return the idle task for a given cpu.
* @cpu: the processor in question.
+ *
+ * Return: The idle task for the cpu @cpu.
*/
struct task_struct *idle_task(int cpu)
{
@@ -3235,6 +3258,8 @@ struct task_struct *idle_task(int cpu)
/**
* find_process_by_pid - find a process with a matching PID value.
* @pid: the pid in question.
+ *
+ * The task of @pid, if found. %NULL otherwise.
*/
static struct task_struct *find_process_by_pid(pid_t pid)
{
@@ -3432,6 +3457,8 @@ recheck:
* @policy: new policy.
* @param: structure containing the new RT priority.
*
+ * Return: 0 on success. An error code otherwise.
+ *
* NOTE that the task may be already dead.
*/
int sched_setscheduler(struct task_struct *p, int policy,
@@ -3451,6 +3478,8 @@ EXPORT_SYMBOL_GPL(sched_setscheduler);
* current context has permission. For example, this is needed in
* stop_machine(): we create temporary high priority worker threads,
* but our caller might not have that capability.
+ *
+ * Return: 0 on success. An error code otherwise.
*/
int sched_setscheduler_nocheck(struct task_struct *p, int policy,
const struct sched_param *param)
@@ -3485,6 +3514,8 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
* @pid: the pid in question.
* @policy: new policy.
* @param: structure containing the new RT priority.
+ *
+ * Return: 0 on success. An error code otherwise.
*/
SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
struct sched_param __user *, param)
@@ -3500,6 +3531,8 @@ SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
* sys_sched_setparam - set/change the RT priority of a thread
* @pid: the pid in question.
* @param: structure containing the new RT priority.
+ *
+ * Return: 0 on success. An error code otherwise.
*/
SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
{
@@ -3509,6 +3542,9 @@ SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
/**
* sys_sched_getscheduler - get the policy (scheduling class) of a thread
* @pid: the pid in question.
+ *
+ * Return: On success, the policy of the thread. Otherwise, a negative error
+ * code.
*/
SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
{
@@ -3535,6 +3571,9 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
* sys_sched_getparam - get the RT priority of a thread
* @pid: the pid in question.
* @param: structure containing the RT priority.
+ *
+ * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
+ * code.
*/
SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
{
@@ -3659,6 +3698,8 @@ static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
* @pid: pid of the process
* @len: length in bytes of the bitmask pointed to by user_mask_ptr
* @user_mask_ptr: user-space pointer to the new cpu mask
+ *
+ * Return: 0 on success. An error code otherwise.
*/
SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
unsigned long __user *, user_mask_ptr)
@@ -3710,6 +3751,8 @@ out_unlock:
* @pid: pid of the process
* @len: length in bytes of the bitmask pointed to by user_mask_ptr
* @user_mask_ptr: user-space pointer to hold the current cpu mask
+ *
+ * Return: 0 on success. An error code otherwise.
*/
SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
unsigned long __user *, user_mask_ptr)
@@ -3744,6 +3787,8 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
*
* This function yields the current CPU to other tasks. If there are no
* other threads running on this CPU then this function will return.
+ *
+ * Return: 0.
*/
SYSCALL_DEFINE0(sched_yield)
{
@@ -3869,7 +3914,7 @@ EXPORT_SYMBOL(yield);
* It's the caller's job to ensure that the target task struct
* can't go away on us before we can do any checks.
*
- * Returns:
+ * Return:
* true (>0) if we indeed boosted the target task.
* false (0) if we failed to boost the target.
* -ESRCH if there's no task to yield to.
@@ -3972,8 +4017,9 @@ long __sched io_schedule_timeout(long timeout)
* sys_sched_get_priority_max - return maximum RT priority.
* @policy: scheduling class.
*
- * this syscall returns the maximum rt_priority that can be used
- * by a given scheduling class.
+ * Return: On success, this syscall returns the maximum
+ * rt_priority that can be used by a given scheduling class.
+ * On failure, a negative error code is returned.
*/
SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
{
@@ -3997,8 +4043,9 @@ SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
* sys_sched_get_priority_min - return minimum RT priority.
* @policy: scheduling class.
*
- * this syscall returns the minimum rt_priority that can be used
- * by a given scheduling class.
+ * Return: On success, this syscall returns the minimum
+ * rt_priority that can be used by a given scheduling class.
+ * On failure, a negative error code is returned.
*/
SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
{
@@ -4024,6 +4071,9 @@ SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
*
* this syscall writes the default timeslice value of a given process
* into the user-space timespec buffer. A value of '0' means infinity.
+ *
+ * Return: On success, 0 and the timeslice is in @interval. Otherwise,
+ * an error code.
*/
SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
struct timespec __user *, interval)
@@ -4133,7 +4183,7 @@ void show_state_filter(unsigned long state_filter)
debug_show_all_locks();
}
-void __cpuinit init_idle_bootup_task(struct task_struct *idle)
+void init_idle_bootup_task(struct task_struct *idle)
{
idle->sched_class = &idle_sched_class;
}
@@ -4146,7 +4196,7 @@ void __cpuinit init_idle_bootup_task(struct task_struct *idle)
* NOTE: this function does not set the idle thread's NEED_RESCHED
* flag, to make booting more robust.
*/
-void __cpuinit init_idle(struct task_struct *idle, int cpu)
+void init_idle(struct task_struct *idle, int cpu)
{
struct rq *rq = cpu_rq(cpu);
unsigned long flags;
@@ -4630,7 +4680,7 @@ static void set_rq_offline(struct rq *rq)
* migration_call - callback that gets triggered when a CPU is added.
* Here we can start up the necessary migration thread for the new CPU.
*/
-static int __cpuinit
+static int
migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
int cpu = (long)hcpu;
@@ -4684,12 +4734,12 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
* happens before everything else. This has to be lower priority than
* the notifier in the perf_event subsystem, though.
*/
-static struct notifier_block __cpuinitdata migration_notifier = {
+static struct notifier_block migration_notifier = {
.notifier_call = migration_call,
.priority = CPU_PRI_MIGRATION,
};
-static int __cpuinit sched_cpu_active(struct notifier_block *nfb,
+static int sched_cpu_active(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
switch (action & ~CPU_TASKS_FROZEN) {
@@ -4702,7 +4752,7 @@ static int __cpuinit sched_cpu_active(struct notifier_block *nfb,
}
}
-static int __cpuinit sched_cpu_inactive(struct notifier_block *nfb,
+static int sched_cpu_inactive(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
switch (action & ~CPU_TASKS_FROZEN) {
@@ -6632,6 +6682,8 @@ void normalize_rt_tasks(void)
* @cpu: the processor in question.
*
* ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
+ *
+ * Return: The current task for @cpu.
*/
struct task_struct *curr_task(int cpu)
{
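
The two new smp_mb__before_spinlock() sites pair with the full barrier in set_current_state() on the sleeping side. A condensed sketch of the idiom the added comments refer to, with CONDITION standing in for any caller-defined flag (a placeholder, not something defined by this patch):

	/* waiting side */
	set_current_state(TASK_UNINTERRUPTIBLE);	/* sets ->state, full barrier */
	if (!CONDITION)
		schedule();
	__set_current_state(TASK_RUNNING);

	/* waking side */
	CONDITION = 1;			/* must not be reordered past the ->state check */
	wake_up_process(p);		/* try_to_wake_up(): barrier, then test p->state */
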
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index 1095e878a46f..8b836b376d91 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -62,7 +62,7 @@ static int convert_prio(int prio)
* any discrepancies created by racing against the uncertainty of the current
* priority configuration.
*
- * Returns: (int)bool - CPUs were found
+ * Return: (int)bool - CPUs were found
*/
int cpupri_find(struct cpupri *cp, struct task_struct *p,
struct cpumask *lowest_mask)
@@ -203,7 +203,7 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
* cpupri_init - initialize the cpupri structure
* @cp: The cpupri context
*
- * Returns: -ENOMEM if memory fails.
+ * Return: -ENOMEM on memory allocation failure.
*/
int cpupri_init(struct cpupri *cp)
{
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f77f9c527449..68f1609ca149 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -851,7 +851,7 @@ void task_numa_fault(int node, int pages, bool migrated)
{
struct task_struct *p = current;
- if (!sched_feat_numa(NUMA))
+ if (!numabalancing_enabled)
return;
/* FIXME: Allocate task-specific structure for placement policy here */
@@ -2032,6 +2032,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
*/
update_entity_load_avg(curr, 1);
update_cfs_rq_blocked_load(cfs_rq, 1);
+ update_cfs_shares(cfs_rq);
#ifdef CONFIG_SCHED_HRTICK
/*
@@ -4280,6 +4281,8 @@ struct sg_lb_stats {
* get_sd_load_idx - Obtain the load index for a given sched domain.
* @sd: The sched_domain whose load_idx is to be obtained.
* @idle: The Idle status of the CPU for whose sd load_icx is obtained.
+ *
+ * Return: The load index.
*/
static inline int get_sd_load_idx(struct sched_domain *sd,
enum cpu_idle_type idle)
@@ -4574,6 +4577,9 @@ static inline void update_sg_lb_stats(struct lb_env *env,
*
* Determine if @sg is a busier group than the previously selected
* busiest group.
+ *
+ * Return: %true if @sg is a busier group than the previously selected
+ * busiest group. %false otherwise.
*/
static bool update_sd_pick_busiest(struct lb_env *env,
struct sd_lb_stats *sds,
@@ -4691,7 +4697,7 @@ static inline void update_sd_lb_stats(struct lb_env *env,
* assuming lower CPU number will be equivalent to lower a SMT thread
* number.
*
- * Returns 1 when packing is required and a task should be moved to
+ * Return: 1 when packing is required and a task should be moved to
* this CPU. The amount of the imbalance is returned in *imbalance.
*
* @env: The load balancing environment.
@@ -4869,7 +4875,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
* @balance: Pointer to a variable indicating if this_cpu
* is the appropriate cpu to perform load balancing at this_level.
*
- * Returns: - the busiest group if imbalance exists.
+ * Return: - The busiest group if imbalance exists.
* - If no imbalance and user has opted for power-savings balance,
* return the least loaded group whose CPUs can be
* put to idle by rebalancing its tasks onto our group.
@@ -5506,7 +5512,7 @@ void nohz_balance_enter_idle(int cpu)
set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
}
-static int __cpuinit sched_ilb_notifier(struct notifier_block *nfb,
+static int sched_ilb_notifier(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
switch (action & ~CPU_TASKS_FROZEN) {
@@ -5786,7 +5792,7 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
entity_tick(cfs_rq, se, queued);
}
- if (sched_feat_numa(NUMA))
+ if (numabalancing_enabled)
task_tick_numa(rq, curr);
update_rq_runnable_avg(rq, 1);
diff --git a/kernel/smp.c b/kernel/smp.c
index 4dba0f7b72ad..fe9f773d7114 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -73,7 +73,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
return NOTIFY_OK;
}
-static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
+static struct notifier_block hotplug_cfd_notifier = {
.notifier_call = hotplug_cfd,
};
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index 02fc5c933673..eb89e1807408 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -24,7 +24,7 @@
*/
static DEFINE_PER_CPU(struct task_struct *, idle_threads);
-struct task_struct * __cpuinit idle_thread_get(unsigned int cpu)
+struct task_struct *idle_thread_get(unsigned int cpu)
{
struct task_struct *tsk = per_cpu(idle_threads, cpu);
diff --git a/kernel/softirq.c b/kernel/softirq.c
index ca25e6e704a2..be3d3514c325 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -699,7 +699,7 @@ void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
}
EXPORT_SYMBOL(send_remote_softirq);
-static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
+static int remote_softirq_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
/*
@@ -728,7 +728,7 @@ static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
return NOTIFY_OK;
}
-static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
+static struct notifier_block remote_softirq_cpu_notifier = {
.notifier_call = remote_softirq_cpu_notify,
};
@@ -830,7 +830,7 @@ static void takeover_tasklets(unsigned int cpu)
}
#endif /* CONFIG_HOTPLUG_CPU */
-static int __cpuinit cpu_callback(struct notifier_block *nfb,
+static int cpu_callback(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
{
@@ -845,7 +845,7 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
return NOTIFY_OK;
}
-static struct notifier_block __cpuinitdata cpu_nfb = {
+static struct notifier_block cpu_nfb = {
.notifier_call = cpu_callback
};
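
The notifier hunks above (and the timer and tick-sched hunks that follow) simply drop the __cpuinit/__cpuinitdata annotations. Those markers used to place CPU-hotplug setup code and data in sections that could be discarded after boot on kernels without CONFIG_HOTPLUG_CPU; once that space saving no longer applied, the annotations were being removed tree-wide. As a rough illustration of what such annotations do, here is a user-space sketch with stand-in section names (these are not the kernel's definitions):

	#include <stdio.h>

	/* Stand-ins only: the real __cpuinit/__cpuinitdata macros placed code
	 * and data in sections that could be dropped when CPU hotplug was
	 * compiled out. */
	#define CPU_SETUP_TEXT __attribute__((section("cpu_setup_text")))
	#define CPU_SETUP_DATA __attribute__((section("cpu_setup_data")))

	CPU_SETUP_DATA static int setup_done[4];

	CPU_SETUP_TEXT static int setup_cpu(int cpu)
	{
		setup_done[cpu] = 1;
		return 0;
	}

	int main(void)
	{
		setup_cpu(0);
		printf("%d\n", setup_done[0]);	/* 1 */
		return 0;
	}

Dropping the annotation, as the patch does, just means the function and its data live in the ordinary .text/.data sections.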
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index ac09d98490aa..07f6fc468e17 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -2346,7 +2346,11 @@ static int do_proc_dointvec_ms_jiffies_conv(bool *negp, unsigned long *lvalp,
int write, void *data)
{
if (write) {
- *valp = msecs_to_jiffies(*negp ? -*lvalp : *lvalp);
+ unsigned long jif = msecs_to_jiffies(*negp ? -*lvalp : *lvalp);
+
+ if (jif > INT_MAX)
+ return 1;
+ *valp = (int)jif;
} else {
int val = *valp;
unsigned long lval;
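
The sysctl.c hunk above adds a range check so that a millisecond value whose jiffies conversion exceeds INT_MAX is rejected instead of being silently truncated into the int-sized sysctl slot. A minimal user-space sketch of the same guard (not from the patch; msecs_to_jiffies below is an illustrative stand-in assuming HZ=1000):

	#include <limits.h>
	#include <stdio.h>

	/* Illustrative stand-in for the kernel's msecs_to_jiffies(); at HZ=1000
	 * one jiffy corresponds to one millisecond. */
	static unsigned long msecs_to_jiffies(unsigned long msecs)
	{
		return msecs;
	}

	/* Mirrors the added guard: refuse values whose jiffies form does not
	 * fit in the int-sized slot instead of silently truncating. */
	static int store_ms_as_jiffies(int *valp, unsigned long msecs)
	{
		unsigned long jif = msecs_to_jiffies(msecs);

		if (jif > INT_MAX)
			return 1;
		*valp = (int)jif;
		return 0;
	}

	int main(void)
	{
		int val = 0;

		printf("%d\n", store_ms_as_jiffies(&val, 5000UL));		/* 0: stored */
		printf("%d\n", store_ms_as_jiffies(&val, 3000000000UL));	/* 1: rejected */
		return 0;
	}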
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 69601726a745..e77edc97e036 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -298,7 +298,7 @@ static int __init tick_nohz_full_setup(char *str)
}
__setup("nohz_full=", tick_nohz_full_setup);
-static int __cpuinit tick_nohz_cpu_down_callback(struct notifier_block *nfb,
+static int tick_nohz_cpu_down_callback(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
{
@@ -827,13 +827,10 @@ void tick_nohz_irq_exit(void)
{
struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
- if (ts->inidle) {
- /* Cancel the timer because CPU already waken up from the C-states*/
- menu_hrtimer_cancel();
+ if (ts->inidle)
__tick_nohz_idle_enter(ts);
- } else {
+ else
tick_nohz_full_stop_tick(ts);
- }
}
/**
@@ -931,8 +928,6 @@ void tick_nohz_idle_exit(void)
ts->inidle = 0;
- /* Cancel the timer because CPU already waken up from the C-states*/
- menu_hrtimer_cancel();
if (ts->idle_active || ts->tick_stopped)
now = ktime_get();
diff --git a/kernel/timer.c b/kernel/timer.c
index 15bc1b41021d..4296d13db3d1 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1505,11 +1505,11 @@ signed long __sched schedule_timeout_uninterruptible(signed long timeout)
}
EXPORT_SYMBOL(schedule_timeout_uninterruptible);
-static int __cpuinit init_timers_cpu(int cpu)
+static int init_timers_cpu(int cpu)
{
int j;
struct tvec_base *base;
- static char __cpuinitdata tvec_base_done[NR_CPUS];
+ static char tvec_base_done[NR_CPUS];
if (!tvec_base_done[cpu]) {
static char boot_done;
@@ -1577,7 +1577,7 @@ static void migrate_timer_list(struct tvec_base *new_base, struct list_head *hea
}
}
-static void __cpuinit migrate_timers(int cpu)
+static void migrate_timers(int cpu)
{
struct tvec_base *old_base;
struct tvec_base *new_base;
@@ -1610,7 +1610,7 @@ static void __cpuinit migrate_timers(int cpu)
}
#endif /* CONFIG_HOTPLUG_CPU */
-static int __cpuinit timer_cpu_notify(struct notifier_block *self,
+static int timer_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
long cpu = (long)hcpu;
@@ -1635,7 +1635,7 @@ static int __cpuinit timer_cpu_notify(struct notifier_block *self,
return NOTIFY_OK;
}
-static struct notifier_block __cpuinitdata timers_nb = {
+static struct notifier_block timers_nb = {
.notifier_call = timer_cpu_notify,
};
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 67708f46baae..a6d098c6df3f 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1441,12 +1441,22 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
* the hashes are freed with call_rcu_sched().
*/
static int
-ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
+ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
{
struct ftrace_hash *filter_hash;
struct ftrace_hash *notrace_hash;
int ret;
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+ /*
+ * There's a small race when adding ops that the ftrace handler
+ * that wants regs, may be called without them. We can not
+ * allow that handler to be called if regs is NULL.
+ */
+ if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
+ return 0;
+#endif
+
filter_hash = rcu_dereference_raw_notrace(ops->filter_hash);
notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash);
@@ -2159,12 +2169,57 @@ static cycle_t ftrace_update_time;
static unsigned long ftrace_update_cnt;
unsigned long ftrace_update_tot_cnt;
-static int ops_traces_mod(struct ftrace_ops *ops)
+static inline int ops_traces_mod(struct ftrace_ops *ops)
{
- struct ftrace_hash *hash;
+ /*
+ * Filter_hash being empty will default to trace module.
+ * But notrace hash requires a test of individual module functions.
+ */
+ return ftrace_hash_empty(ops->filter_hash) &&
+ ftrace_hash_empty(ops->notrace_hash);
+}
+
+/*
+ * Check if the current ops references the record.
+ *
+ * If the ops traces all functions, then it was already accounted for.
+ * If the ops does not trace the current record function, skip it.
+ * If the ops ignores the function via notrace filter, skip it.
+ */
+static inline bool
+ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
+{
+ /* If ops isn't enabled, ignore it */
+ if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
+ return 0;
+
+ /* If ops traces all mods, we already accounted for it */
+ if (ops_traces_mod(ops))
+ return 0;
+
+ /* The function must be in the filter */
+ if (!ftrace_hash_empty(ops->filter_hash) &&
+ !ftrace_lookup_ip(ops->filter_hash, rec->ip))
+ return 0;
+
+ /* If in notrace hash, we ignore it too */
+ if (ftrace_lookup_ip(ops->notrace_hash, rec->ip))
+ return 0;
- hash = ops->filter_hash;
- return ftrace_hash_empty(hash);
+ return 1;
+}
+
+static int referenced_filters(struct dyn_ftrace *rec)
+{
+ struct ftrace_ops *ops;
+ int cnt = 0;
+
+ for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
+ if (ops_references_rec(ops, rec))
+ cnt++;
+ }
+
+ return cnt;
}
static int ftrace_update_code(struct module *mod)
@@ -2173,6 +2228,7 @@ static int ftrace_update_code(struct module *mod)
struct dyn_ftrace *p;
cycle_t start, stop;
unsigned long ref = 0;
+ bool test = false;
int i;
/*
@@ -2186,9 +2242,12 @@ static int ftrace_update_code(struct module *mod)
for (ops = ftrace_ops_list;
ops != &ftrace_list_end; ops = ops->next) {
- if (ops->flags & FTRACE_OPS_FL_ENABLED &&
- ops_traces_mod(ops))
- ref++;
+ if (ops->flags & FTRACE_OPS_FL_ENABLED) {
+ if (ops_traces_mod(ops))
+ ref++;
+ else
+ test = true;
+ }
}
}
@@ -2198,12 +2257,16 @@ static int ftrace_update_code(struct module *mod)
for (pg = ftrace_new_pgs; pg; pg = pg->next) {
for (i = 0; i < pg->index; i++) {
+ int cnt = ref;
+
/* If something went wrong, bail without enabling anything */
if (unlikely(ftrace_disabled))
return -1;
p = &pg->records[i];
- p->flags = ref;
+ if (test)
+ cnt += referenced_filters(p);
+ p->flags = cnt;
/*
* Do the initial record conversion from mcount jump
@@ -2223,7 +2286,7 @@ static int ftrace_update_code(struct module *mod)
* conversion puts the module to the correct state, thus
* passing the ftrace_make_call check.
*/
- if (ftrace_start_up && ref) {
+ if (ftrace_start_up && cnt) {
int failed = __ftrace_replace_code(p, 1);
if (failed)
ftrace_bug(failed, p->ip);
@@ -3374,6 +3437,12 @@ ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
return add_hash_entry(hash, ip);
}
+static void ftrace_ops_update_code(struct ftrace_ops *ops)
+{
+ if (ops->flags & FTRACE_OPS_FL_ENABLED && ftrace_enabled)
+ ftrace_run_update_code(FTRACE_UPDATE_CALLS);
+}
+
static int
ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
unsigned long ip, int remove, int reset, int enable)
@@ -3416,9 +3485,8 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
mutex_lock(&ftrace_lock);
ret = ftrace_hash_move(ops, enable, orig_hash, hash);
- if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED
- && ftrace_enabled)
- ftrace_run_update_code(FTRACE_UPDATE_CALLS);
+ if (!ret)
+ ftrace_ops_update_code(ops);
mutex_unlock(&ftrace_lock);
@@ -3645,9 +3713,8 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
mutex_lock(&ftrace_lock);
ret = ftrace_hash_move(iter->ops, filter_hash,
orig_hash, iter->hash);
- if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
- && ftrace_enabled)
- ftrace_run_update_code(FTRACE_UPDATE_CALLS);
+ if (!ret)
+ ftrace_ops_update_code(iter->ops);
mutex_unlock(&ftrace_lock);
}
@@ -4218,7 +4285,7 @@ static inline void ftrace_startup_enable(int command) { }
# define ftrace_shutdown_sysctl() do { } while (0)
static inline int
-ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
+ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
{
return 1;
}
@@ -4241,7 +4308,7 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
do_for_each_ftrace_op(op, ftrace_control_list) {
if (!(op->flags & FTRACE_OPS_FL_STUB) &&
!ftrace_function_local_disabled(op) &&
- ftrace_ops_test(op, ip))
+ ftrace_ops_test(op, ip, regs))
op->func(ip, parent_ip, op, regs);
} while_for_each_ftrace_op(op);
trace_recursion_clear(TRACE_CONTROL_BIT);
@@ -4274,7 +4341,7 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
*/
preempt_disable_notrace();
do_for_each_ftrace_op(op, ftrace_ops_list) {
- if (ftrace_ops_test(op, ip))
+ if (ftrace_ops_test(op, ip, regs))
op->func(ip, parent_ip, op, regs);
} while_for_each_ftrace_op(op);
preempt_enable_notrace();
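
The ftrace.c changes above make module-load accounting more precise: instead of bumping every new record's count for any enabled ops, referenced_filters() counts only the ops whose filter/notrace hashes actually reference the record, and ftrace_ops_test() now refuses to call a regs-requiring handler when no regs are available. A user-space sketch of the counting logic, with heavily simplified stand-ins for ftrace_ops and its hashes (all names and structures below are illustrative, not the kernel's):

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	struct hash {
		const unsigned long *ips;	/* addresses in the hash */
		size_t nr;			/* nr == 0 means "match everything" */
	};

	struct ops {
		bool enabled;
		struct hash filter;		/* trace only these, unless empty */
		struct hash notrace;		/* never trace these */
		struct ops *next;
	};

	static bool hash_empty(const struct hash *h)
	{
		return h->nr == 0;
	}

	static bool hash_lookup(const struct hash *h, unsigned long ip)
	{
		for (size_t i = 0; i < h->nr; i++)
			if (h->ips[i] == ip)
				return true;
		return false;
	}

	/* Mirrors ops_references_rec(): does this ops care about @ip? */
	static bool ops_references_ip(const struct ops *ops, unsigned long ip)
	{
		if (!ops->enabled)
			return false;
		/* both hashes empty: ops traces all modules, counted elsewhere */
		if (hash_empty(&ops->filter) && hash_empty(&ops->notrace))
			return false;
		if (!hash_empty(&ops->filter) && !hash_lookup(&ops->filter, ip))
			return false;
		if (hash_lookup(&ops->notrace, ip))
			return false;
		return true;
	}

	/* Mirrors referenced_filters(): count ops that reference @ip. */
	static int referenced_filters(const struct ops *list, unsigned long ip)
	{
		int cnt = 0;

		for (const struct ops *ops = list; ops; ops = ops->next)
			if (ops_references_ip(ops, ip))
				cnt++;
		return cnt;
	}

	int main(void)
	{
		unsigned long filtered[] = { 0x1000 };
		struct ops a = { .enabled = true, .filter = { filtered, 1 } };
		struct ops b = { .enabled = true, .next = &a };	/* traces everything */

		printf("%d\n", referenced_filters(&b, 0x1000));	/* 1: only 'a' counts here */
		printf("%d\n", referenced_filters(&b, 0x2000));	/* 0 */
		return 0;
	}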
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index e444ff88f0a4..cc2f66f68dc5 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -36,11 +36,11 @@ int ring_buffer_print_entry_header(struct trace_seq *s)
{
int ret;
- ret = trace_seq_printf(s, "# compressed entry header\n");
- ret = trace_seq_printf(s, "\ttype_len : 5 bits\n");
- ret = trace_seq_printf(s, "\ttime_delta : 27 bits\n");
- ret = trace_seq_printf(s, "\tarray : 32 bits\n");
- ret = trace_seq_printf(s, "\n");
+ ret = trace_seq_puts(s, "# compressed entry header\n");
+ ret = trace_seq_puts(s, "\ttype_len : 5 bits\n");
+ ret = trace_seq_puts(s, "\ttime_delta : 27 bits\n");
+ ret = trace_seq_puts(s, "\tarray : 32 bits\n");
+ ret = trace_seq_putc(s, '\n');
ret = trace_seq_printf(s, "\tpadding : type == %d\n",
RINGBUF_TYPE_PADDING);
ret = trace_seq_printf(s, "\ttime_extend : type == %d\n",
@@ -1066,7 +1066,7 @@ static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
}
/**
- * check_pages - integrity check of buffer pages
+ * rb_check_pages - integrity check of buffer pages
* @cpu_buffer: CPU buffer with pages to test
*
* As a safety measure we check to make sure the data pages have not
@@ -1258,7 +1258,7 @@ static int rb_cpu_notify(struct notifier_block *self,
#endif
/**
- * ring_buffer_alloc - allocate a new ring_buffer
+ * __ring_buffer_alloc - allocate a new ring_buffer
* @size: the size in bytes per cpu that is needed.
* @flags: attributes to set for the ring buffer.
*
@@ -1607,6 +1607,7 @@ static void update_pages_handler(struct work_struct *work)
* ring_buffer_resize - resize the ring buffer
* @buffer: the buffer to resize.
* @size: the new size.
+ * @cpu_id: the cpu buffer to resize
*
* Minimum size is 2 * BUF_PAGE_SIZE.
*
@@ -3956,11 +3957,11 @@ EXPORT_SYMBOL_GPL(ring_buffer_consume);
* expected.
*
* After a sequence of ring_buffer_read_prepare calls, the user is
- * expected to make at least one call to ring_buffer_prepare_sync.
+ * expected to make at least one call to ring_buffer_read_prepare_sync.
* Afterwards, ring_buffer_read_start is invoked to get things going
* for real.
*
- * This overall must be paired with ring_buffer_finish.
+ * This overall must be paired with ring_buffer_read_finish.
*/
struct ring_buffer_iter *
ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
@@ -4009,7 +4010,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
* an intervening ring_buffer_read_prepare_sync must have been
* performed.
*
- * Must be paired with ring_buffer_finish.
+ * Must be paired with ring_buffer_read_finish.
*/
void
ring_buffer_read_start(struct ring_buffer_iter *iter)
@@ -4031,7 +4032,7 @@ ring_buffer_read_start(struct ring_buffer_iter *iter)
EXPORT_SYMBOL_GPL(ring_buffer_read_start);
/**
- * ring_buffer_finish - finish reading the iterator of the buffer
+ * ring_buffer_read_finish - finish reading the iterator of the buffer
* @iter: The iterator retrieved by ring_buffer_start
*
* This re-enables the recording to the buffer, and frees the
@@ -4346,6 +4347,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
/**
* ring_buffer_alloc_read_page - allocate a page to read from buffer
* @buffer: the buffer to allocate for.
+ * @cpu: the cpu buffer to allocate.
*
* This function is used in conjunction with ring_buffer_read_page.
* When reading a full page from the ring buffer, these functions
@@ -4403,7 +4405,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
* to swap with a page in the ring buffer.
*
* for example:
- * rpage = ring_buffer_alloc_read_page(buffer);
+ * rpage = ring_buffer_alloc_read_page(buffer, cpu);
* if (!rpage)
* return error;
* ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
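
The ring_buffer.c hunks above (and similar ones in trace_functions_graph.c further down) replace trace_seq_printf() with trace_seq_puts()/trace_seq_putc() for constant strings, avoiding needless format parsing, and fix several kerneldoc names to match the functions they document. A small sketch of why a puts-style helper is preferable for fixed strings; struct and names below are simplified stand-ins for trace_seq:

	#include <stdio.h>
	#include <string.h>

	struct seq { char buf[256]; size_t len; };

	/* puts-style helper: no format parsing, and a literal '%' in the string
	 * cannot be misread as a conversion specifier. */
	static int seq_puts(struct seq *s, const char *str)
	{
		size_t n = strlen(str);

		if (s->len + n >= sizeof(s->buf))
			return 0;
		memcpy(s->buf + s->len, str, n);
		s->len += n;
		return 1;
	}

	static int seq_putc(struct seq *s, char c)
	{
		if (s->len + 1 >= sizeof(s->buf))
			return 0;
		s->buf[s->len++] = c;
		return 1;
	}

	int main(void)
	{
		struct seq s = { .len = 0 };

		seq_puts(&s, "# compressed entry header");
		seq_putc(&s, '\n');
		fwrite(s.buf, 1, s.len, stdout);
		return 0;
	}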
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 0cd500bffd9b..496f94d57698 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -243,20 +243,25 @@ int filter_current_check_discard(struct ring_buffer *buffer,
}
EXPORT_SYMBOL_GPL(filter_current_check_discard);
-cycle_t ftrace_now(int cpu)
+cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
u64 ts;
/* Early boot up does not have a buffer yet */
- if (!global_trace.trace_buffer.buffer)
+ if (!buf->buffer)
return trace_clock_local();
- ts = ring_buffer_time_stamp(global_trace.trace_buffer.buffer, cpu);
- ring_buffer_normalize_time_stamp(global_trace.trace_buffer.buffer, cpu, &ts);
+ ts = ring_buffer_time_stamp(buf->buffer, cpu);
+ ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
return ts;
}
+cycle_t ftrace_now(int cpu)
+{
+ return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
+}
+
/**
* tracing_is_enabled - Show if global_trace has been disabled
*
@@ -1211,7 +1216,7 @@ void tracing_reset_online_cpus(struct trace_buffer *buf)
/* Make sure all commits have finished */
synchronize_sched();
- buf->time_start = ftrace_now(buf->cpu);
+ buf->time_start = buffer_ftrace_now(buf, buf->cpu);
for_each_online_cpu(cpu)
ring_buffer_reset_cpu(buffer, cpu);
@@ -1219,23 +1224,17 @@ void tracing_reset_online_cpus(struct trace_buffer *buf)
ring_buffer_record_enable(buffer);
}
-void tracing_reset_current(int cpu)
-{
- tracing_reset(&global_trace.trace_buffer, cpu);
-}
-
+/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus(void)
{
struct trace_array *tr;
- mutex_lock(&trace_types_lock);
list_for_each_entry(tr, &ftrace_trace_arrays, list) {
tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
tracing_reset_online_cpus(&tr->max_buffer);
#endif
}
- mutex_unlock(&trace_types_lock);
}
#define SAVED_CMDLINES 128
@@ -2843,6 +2842,17 @@ static int s_show(struct seq_file *m, void *v)
return 0;
}
+/*
+ * Should be used after trace_array_get(), trace_types_lock
+ * ensures that i_cdev was already initialized.
+ */
+static inline int tracing_get_cpu(struct inode *inode)
+{
+ if (inode->i_cdev) /* See trace_create_cpu_file() */
+ return (long)inode->i_cdev - 1;
+ return RING_BUFFER_ALL_CPUS;
+}
+
static const struct seq_operations tracer_seq_ops = {
.start = s_start,
.next = s_next,
@@ -2851,9 +2861,9 @@ static const struct seq_operations tracer_seq_ops = {
};
static struct trace_iterator *
-__tracing_open(struct trace_array *tr, struct trace_cpu *tc,
- struct inode *inode, struct file *file, bool snapshot)
+__tracing_open(struct inode *inode, struct file *file, bool snapshot)
{
+ struct trace_array *tr = inode->i_private;
struct trace_iterator *iter;
int cpu;
@@ -2894,8 +2904,8 @@ __tracing_open(struct trace_array *tr, struct trace_cpu *tc,
iter->trace_buffer = &tr->trace_buffer;
iter->snapshot = snapshot;
iter->pos = -1;
+ iter->cpu_file = tracing_get_cpu(inode);
mutex_init(&iter->mutex);
- iter->cpu_file = tc->cpu;
/* Notify the tracer early; before we stop tracing. */
if (iter->trace && iter->trace->open)
@@ -2971,45 +2981,22 @@ static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
filp->private_data = inode->i_private;
return 0;
-
-}
-
-static int tracing_open_generic_tc(struct inode *inode, struct file *filp)
-{
- struct trace_cpu *tc = inode->i_private;
- struct trace_array *tr = tc->tr;
-
- if (tracing_disabled)
- return -ENODEV;
-
- if (trace_array_get(tr) < 0)
- return -ENODEV;
-
- filp->private_data = inode->i_private;
-
- return 0;
-
}
static int tracing_release(struct inode *inode, struct file *file)
{
+ struct trace_array *tr = inode->i_private;
struct seq_file *m = file->private_data;
struct trace_iterator *iter;
- struct trace_array *tr;
int cpu;
- /* Writes do not use seq_file, need to grab tr from inode */
if (!(file->f_mode & FMODE_READ)) {
- struct trace_cpu *tc = inode->i_private;
-
- trace_array_put(tc->tr);
+ trace_array_put(tr);
return 0;
}
+ /* Writes do not use seq_file */
iter = m->private;
- tr = iter->tr;
- trace_array_put(tr);
-
mutex_lock(&trace_types_lock);
for_each_tracing_cpu(cpu) {
@@ -3023,6 +3010,9 @@ static int tracing_release(struct inode *inode, struct file *file)
if (!iter->snapshot)
/* reenable tracing if it was previously enabled */
tracing_start_tr(tr);
+
+ __trace_array_put(tr);
+
mutex_unlock(&trace_types_lock);
mutex_destroy(&iter->mutex);
@@ -3042,15 +3032,6 @@ static int tracing_release_generic_tr(struct inode *inode, struct file *file)
return 0;
}
-static int tracing_release_generic_tc(struct inode *inode, struct file *file)
-{
- struct trace_cpu *tc = inode->i_private;
- struct trace_array *tr = tc->tr;
-
- trace_array_put(tr);
- return 0;
-}
-
static int tracing_single_release_tr(struct inode *inode, struct file *file)
{
struct trace_array *tr = inode->i_private;
@@ -3062,8 +3043,7 @@ static int tracing_single_release_tr(struct inode *inode, struct file *file)
static int tracing_open(struct inode *inode, struct file *file)
{
- struct trace_cpu *tc = inode->i_private;
- struct trace_array *tr = tc->tr;
+ struct trace_array *tr = inode->i_private;
struct trace_iterator *iter;
int ret = 0;
@@ -3071,16 +3051,17 @@ static int tracing_open(struct inode *inode, struct file *file)
return -ENODEV;
/* If this file was open for write, then erase contents */
- if ((file->f_mode & FMODE_WRITE) &&
- (file->f_flags & O_TRUNC)) {
- if (tc->cpu == RING_BUFFER_ALL_CPUS)
+ if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
+ int cpu = tracing_get_cpu(inode);
+
+ if (cpu == RING_BUFFER_ALL_CPUS)
tracing_reset_online_cpus(&tr->trace_buffer);
else
- tracing_reset(&tr->trace_buffer, tc->cpu);
+ tracing_reset(&tr->trace_buffer, cpu);
}
if (file->f_mode & FMODE_READ) {
- iter = __tracing_open(tr, tc, inode, file, false);
+ iter = __tracing_open(inode, file, false);
if (IS_ERR(iter))
ret = PTR_ERR(iter);
else if (trace_flags & TRACE_ITER_LATENCY_FMT)
@@ -3447,6 +3428,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
static int tracing_trace_options_open(struct inode *inode, struct file *file)
{
struct trace_array *tr = inode->i_private;
+ int ret;
if (tracing_disabled)
return -ENODEV;
@@ -3454,7 +3436,11 @@ static int tracing_trace_options_open(struct inode *inode, struct file *file)
if (trace_array_get(tr) < 0)
return -ENODEV;
- return single_open(file, tracing_trace_options_show, inode->i_private);
+ ret = single_open(file, tracing_trace_options_show, inode->i_private);
+ if (ret < 0)
+ trace_array_put(tr);
+
+ return ret;
}
static const struct file_operations tracing_iter_fops = {
@@ -3537,14 +3523,14 @@ static const char readme_msg[] =
"\n snapshot\t\t- Like 'trace' but shows the content of the static snapshot buffer\n"
"\t\t\t Read the contents for more information\n"
#endif
-#ifdef CONFIG_STACKTRACE
+#ifdef CONFIG_STACK_TRACER
" stack_trace\t\t- Shows the max stack trace when active\n"
" stack_max_size\t- Shows current max stack size that was traced\n"
"\t\t\t Write into this file to reset the max size (trigger a new trace)\n"
#ifdef CONFIG_DYNAMIC_FTRACE
" stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace traces\n"
#endif
-#endif /* CONFIG_STACKTRACE */
+#endif /* CONFIG_STACK_TRACER */
;
static ssize_t
@@ -3941,8 +3927,7 @@ tracing_max_lat_write(struct file *filp, const char __user *ubuf,
static int tracing_open_pipe(struct inode *inode, struct file *filp)
{
- struct trace_cpu *tc = inode->i_private;
- struct trace_array *tr = tc->tr;
+ struct trace_array *tr = inode->i_private;
struct trace_iterator *iter;
int ret = 0;
@@ -3958,6 +3943,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
iter = kzalloc(sizeof(*iter), GFP_KERNEL);
if (!iter) {
ret = -ENOMEM;
+ __trace_array_put(tr);
goto out;
}
@@ -3987,9 +3973,9 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
if (trace_clocks[tr->clock_id].in_ns)
iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
- iter->cpu_file = tc->cpu;
- iter->tr = tc->tr;
- iter->trace_buffer = &tc->tr->trace_buffer;
+ iter->tr = tr;
+ iter->trace_buffer = &tr->trace_buffer;
+ iter->cpu_file = tracing_get_cpu(inode);
mutex_init(&iter->mutex);
filp->private_data = iter;
@@ -4012,8 +3998,7 @@ fail:
static int tracing_release_pipe(struct inode *inode, struct file *file)
{
struct trace_iterator *iter = file->private_data;
- struct trace_cpu *tc = inode->i_private;
- struct trace_array *tr = tc->tr;
+ struct trace_array *tr = inode->i_private;
mutex_lock(&trace_types_lock);
@@ -4166,6 +4151,7 @@ waitagain:
memset(&iter->seq, 0,
sizeof(struct trace_iterator) -
offsetof(struct trace_iterator, seq));
+ cpumask_clear(iter->started);
iter->pos = -1;
trace_event_read_lock();
@@ -4366,15 +4352,16 @@ static ssize_t
tracing_entries_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
- struct trace_cpu *tc = filp->private_data;
- struct trace_array *tr = tc->tr;
+ struct inode *inode = file_inode(filp);
+ struct trace_array *tr = inode->i_private;
+ int cpu = tracing_get_cpu(inode);
char buf[64];
int r = 0;
ssize_t ret;
mutex_lock(&trace_types_lock);
- if (tc->cpu == RING_BUFFER_ALL_CPUS) {
+ if (cpu == RING_BUFFER_ALL_CPUS) {
int cpu, buf_size_same;
unsigned long size;
@@ -4401,7 +4388,7 @@ tracing_entries_read(struct file *filp, char __user *ubuf,
} else
r = sprintf(buf, "X\n");
} else
- r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, tc->cpu)->entries >> 10);
+ r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
mutex_unlock(&trace_types_lock);
@@ -4413,7 +4400,8 @@ static ssize_t
tracing_entries_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
- struct trace_cpu *tc = filp->private_data;
+ struct inode *inode = file_inode(filp);
+ struct trace_array *tr = inode->i_private;
unsigned long val;
int ret;
@@ -4427,8 +4415,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
/* value is in KB */
val <<= 10;
-
- ret = tracing_resize_ring_buffer(tc->tr, val, tc->cpu);
+ ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
if (ret < 0)
return ret;
@@ -4482,7 +4469,7 @@ tracing_free_buffer_release(struct inode *inode, struct file *filp)
/* disable tracing ? */
if (trace_flags & TRACE_ITER_STOP_ON_FREE)
- tracing_off();
+ tracer_tracing_off(tr);
/* resize the ring buffer to 0 */
tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
@@ -4647,12 +4634,12 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
* New clock may not be consistent with the previous clock.
* Reset the buffer so that it doesn't have incomparable timestamps.
*/
- tracing_reset_online_cpus(&global_trace.trace_buffer);
+ tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
- tracing_reset_online_cpus(&global_trace.max_buffer);
+ tracing_reset_online_cpus(&tr->max_buffer);
#endif
mutex_unlock(&trace_types_lock);
@@ -4689,8 +4676,7 @@ struct ftrace_buffer_info {
#ifdef CONFIG_TRACER_SNAPSHOT
static int tracing_snapshot_open(struct inode *inode, struct file *file)
{
- struct trace_cpu *tc = inode->i_private;
- struct trace_array *tr = tc->tr;
+ struct trace_array *tr = inode->i_private;
struct trace_iterator *iter;
struct seq_file *m;
int ret = 0;
@@ -4699,26 +4685,29 @@ static int tracing_snapshot_open(struct inode *inode, struct file *file)
return -ENODEV;
if (file->f_mode & FMODE_READ) {
- iter = __tracing_open(tr, tc, inode, file, true);
+ iter = __tracing_open(inode, file, true);
if (IS_ERR(iter))
ret = PTR_ERR(iter);
} else {
/* Writes still need the seq_file to hold the private data */
+ ret = -ENOMEM;
m = kzalloc(sizeof(*m), GFP_KERNEL);
if (!m)
- return -ENOMEM;
+ goto out;
iter = kzalloc(sizeof(*iter), GFP_KERNEL);
if (!iter) {
kfree(m);
- return -ENOMEM;
+ goto out;
}
+ ret = 0;
+
iter->tr = tr;
- iter->trace_buffer = &tc->tr->max_buffer;
- iter->cpu_file = tc->cpu;
+ iter->trace_buffer = &tr->max_buffer;
+ iter->cpu_file = tracing_get_cpu(inode);
m->private = iter;
file->private_data = m;
}
-
+out:
if (ret < 0)
trace_array_put(tr);
@@ -4873,11 +4862,11 @@ static const struct file_operations tracing_pipe_fops = {
};
static const struct file_operations tracing_entries_fops = {
- .open = tracing_open_generic_tc,
+ .open = tracing_open_generic_tr,
.read = tracing_entries_read,
.write = tracing_entries_write,
.llseek = generic_file_llseek,
- .release = tracing_release_generic_tc,
+ .release = tracing_release_generic_tr,
};
static const struct file_operations tracing_total_entries_fops = {
@@ -4929,8 +4918,7 @@ static const struct file_operations snapshot_raw_fops = {
static int tracing_buffers_open(struct inode *inode, struct file *filp)
{
- struct trace_cpu *tc = inode->i_private;
- struct trace_array *tr = tc->tr;
+ struct trace_array *tr = inode->i_private;
struct ftrace_buffer_info *info;
int ret;
@@ -4948,10 +4936,8 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp)
mutex_lock(&trace_types_lock);
- tr->ref++;
-
info->iter.tr = tr;
- info->iter.cpu_file = tc->cpu;
+ info->iter.cpu_file = tracing_get_cpu(inode);
info->iter.trace = tr->current_trace;
info->iter.trace_buffer = &tr->trace_buffer;
info->spare = NULL;
@@ -5268,14 +5254,14 @@ static ssize_t
tracing_stats_read(struct file *filp, char __user *ubuf,
size_t count, loff_t *ppos)
{
- struct trace_cpu *tc = filp->private_data;
- struct trace_array *tr = tc->tr;
+ struct inode *inode = file_inode(filp);
+ struct trace_array *tr = inode->i_private;
struct trace_buffer *trace_buf = &tr->trace_buffer;
+ int cpu = tracing_get_cpu(inode);
struct trace_seq *s;
unsigned long cnt;
unsigned long long t;
unsigned long usec_rem;
- int cpu = tc->cpu;
s = kmalloc(sizeof(*s), GFP_KERNEL);
if (!s)
@@ -5328,9 +5314,10 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
}
static const struct file_operations tracing_stats_fops = {
- .open = tracing_open_generic,
+ .open = tracing_open_generic_tr,
.read = tracing_stats_read,
.llseek = generic_file_llseek,
+ .release = tracing_release_generic_tr,
};
#ifdef CONFIG_DYNAMIC_FTRACE
@@ -5519,10 +5506,20 @@ static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
return tr->percpu_dir;
}
+static struct dentry *
+trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
+ void *data, long cpu, const struct file_operations *fops)
+{
+ struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
+
+ if (ret) /* See tracing_get_cpu() */
+ ret->d_inode->i_cdev = (void *)(cpu + 1);
+ return ret;
+}
+
static void
tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
{
- struct trace_array_cpu *data = per_cpu_ptr(tr->trace_buffer.data, cpu);
struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
struct dentry *d_cpu;
char cpu_dir[30]; /* 30 characters should be more than enough */
@@ -5538,28 +5535,28 @@ tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
}
/* per cpu trace_pipe */
- trace_create_file("trace_pipe", 0444, d_cpu,
- (void *)&data->trace_cpu, &tracing_pipe_fops);
+ trace_create_cpu_file("trace_pipe", 0444, d_cpu,
+ tr, cpu, &tracing_pipe_fops);
/* per cpu trace */
- trace_create_file("trace", 0644, d_cpu,
- (void *)&data->trace_cpu, &tracing_fops);
+ trace_create_cpu_file("trace", 0644, d_cpu,
+ tr, cpu, &tracing_fops);
- trace_create_file("trace_pipe_raw", 0444, d_cpu,
- (void *)&data->trace_cpu, &tracing_buffers_fops);
+ trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
+ tr, cpu, &tracing_buffers_fops);
- trace_create_file("stats", 0444, d_cpu,
- (void *)&data->trace_cpu, &tracing_stats_fops);
+ trace_create_cpu_file("stats", 0444, d_cpu,
+ tr, cpu, &tracing_stats_fops);
- trace_create_file("buffer_size_kb", 0444, d_cpu,
- (void *)&data->trace_cpu, &tracing_entries_fops);
+ trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
+ tr, cpu, &tracing_entries_fops);
#ifdef CONFIG_TRACER_SNAPSHOT
- trace_create_file("snapshot", 0644, d_cpu,
- (void *)&data->trace_cpu, &snapshot_fops);
+ trace_create_cpu_file("snapshot", 0644, d_cpu,
+ tr, cpu, &snapshot_fops);
- trace_create_file("snapshot_raw", 0444, d_cpu,
- (void *)&data->trace_cpu, &snapshot_raw_fops);
+ trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
+ tr, cpu, &snapshot_raw_fops);
#endif
}
@@ -5868,17 +5865,6 @@ struct dentry *trace_instance_dir;
static void
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);
-static void init_trace_buffers(struct trace_array *tr, struct trace_buffer *buf)
-{
- int cpu;
-
- for_each_tracing_cpu(cpu) {
- memset(per_cpu_ptr(buf->data, cpu), 0, sizeof(struct trace_array_cpu));
- per_cpu_ptr(buf->data, cpu)->trace_cpu.cpu = cpu;
- per_cpu_ptr(buf->data, cpu)->trace_cpu.tr = tr;
- }
-}
-
static int
allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
{
@@ -5896,8 +5882,6 @@ allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size
return -ENOMEM;
}
- init_trace_buffers(tr, buf);
-
/* Allocate the first page for all buffers */
set_buffer_entries(&tr->trace_buffer,
ring_buffer_size(tr->trace_buffer.buffer, 0));
@@ -5964,17 +5948,15 @@ static int new_instance_create(const char *name)
if (allocate_trace_buffers(tr, trace_buf_size) < 0)
goto out_free_tr;
- /* Holder for file callbacks */
- tr->trace_cpu.cpu = RING_BUFFER_ALL_CPUS;
- tr->trace_cpu.tr = tr;
-
tr->dir = debugfs_create_dir(name, trace_instance_dir);
if (!tr->dir)
goto out_free_tr;
ret = event_trace_add_tracer(tr->dir, tr);
- if (ret)
+ if (ret) {
+ debugfs_remove_recursive(tr->dir);
goto out_free_tr;
+ }
init_tracer_debugfs(tr, tr->dir);
@@ -6120,13 +6102,13 @@ init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
tr, &tracing_iter_fops);
trace_create_file("trace", 0644, d_tracer,
- (void *)&tr->trace_cpu, &tracing_fops);
+ tr, &tracing_fops);
trace_create_file("trace_pipe", 0444, d_tracer,
- (void *)&tr->trace_cpu, &tracing_pipe_fops);
+ tr, &tracing_pipe_fops);
trace_create_file("buffer_size_kb", 0644, d_tracer,
- (void *)&tr->trace_cpu, &tracing_entries_fops);
+ tr, &tracing_entries_fops);
trace_create_file("buffer_total_size_kb", 0444, d_tracer,
tr, &tracing_total_entries_fops);
@@ -6141,11 +6123,11 @@ init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
&trace_clock_fops);
trace_create_file("tracing_on", 0644, d_tracer,
- tr, &rb_simple_fops);
+ tr, &rb_simple_fops);
#ifdef CONFIG_TRACER_SNAPSHOT
trace_create_file("snapshot", 0644, d_tracer,
- (void *)&tr->trace_cpu, &snapshot_fops);
+ tr, &snapshot_fops);
#endif
for_each_tracing_cpu(cpu)
@@ -6439,10 +6421,6 @@ __init static int tracer_alloc_buffers(void)
global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
- /* Holder for file callbacks */
- global_trace.trace_cpu.cpu = RING_BUFFER_ALL_CPUS;
- global_trace.trace_cpu.tr = &global_trace;
-
INIT_LIST_HEAD(&global_trace.systems);
INIT_LIST_HEAD(&global_trace.events);
list_add(&global_trace.list, &ftrace_trace_arrays);
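
The trace.c changes above drop struct trace_cpu entirely and instead stash the CPU number for per-cpu debugfs files in the inode's i_cdev field, offset by one so that an untouched (NULL) field decodes to RING_BUFFER_ALL_CPUS. A minimal sketch of that encode/decode idiom, with an ordinary void * slot standing in for i_cdev (names below are illustrative):

	#include <stdio.h>

	#define ALL_CPUS (-1)	/* plays the role of RING_BUFFER_ALL_CPUS */

	/* Encode a small non-negative integer into a pointer slot; the +1 keeps
	 * NULL distinct, so an unset slot decodes to ALL_CPUS. */
	static void *encode_cpu(long cpu)
	{
		return (void *)(cpu + 1);
	}

	static long decode_cpu(void *slot)
	{
		if (slot)
			return (long)slot - 1;
		return ALL_CPUS;
	}

	int main(void)
	{
		void *slot0 = encode_cpu(0);
		void *unset = NULL;

		printf("%ld %ld\n", decode_cpu(slot0), decode_cpu(unset));	/* 0 -1 */
		return 0;
	}

This is why trace_create_cpu_file() stores cpu + 1 and tracing_get_cpu() subtracts one, falling back to "all CPUs" for files that were created without a CPU.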
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 4a4f6e1828b6..afaae41b0a02 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -130,19 +130,12 @@ enum trace_flag_type {
struct trace_array;
-struct trace_cpu {
- struct trace_array *tr;
- struct dentry *dir;
- int cpu;
-};
-
/*
* The CPU trace array - it consists of thousands of trace entries
* plus some other descriptor data: (for example which task started
* the trace, etc.)
*/
struct trace_array_cpu {
- struct trace_cpu trace_cpu;
atomic_t disabled;
void *buffer_page; /* ring buffer spare */
@@ -196,7 +189,6 @@ struct trace_array {
bool allocated_snapshot;
#endif
int buffer_disabled;
- struct trace_cpu trace_cpu; /* place holder */
#ifdef CONFIG_FTRACE_SYSCALLS
int sys_refcount_enter;
int sys_refcount_exit;
@@ -214,7 +206,6 @@ struct trace_array {
struct dentry *event_dir;
struct list_head systems;
struct list_head events;
- struct task_struct *waiter;
int ref;
};
@@ -680,6 +671,15 @@ extern int trace_selftest_startup_sched_switch(struct tracer *trace,
struct trace_array *tr);
extern int trace_selftest_startup_branch(struct tracer *trace,
struct trace_array *tr);
+/*
+ * Tracer data references selftest functions that only occur
+ * on boot up. These can be __init functions. Thus, when selftests
+ * are enabled, then the tracers need to reference __init functions.
+ */
+#define __tracer_data __refdata
+#else
+/* Tracers are seldom changed. Optimize when selftests are disabled. */
+#define __tracer_data __read_mostly
#endif /* CONFIG_FTRACE_STARTUP_TEST */
extern void *head_page(struct trace_array_cpu *data);
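
The new __tracer_data annotation above switches between __refdata and __read_mostly depending on CONFIG_FTRACE_STARTUP_TEST, because tracer structs may legitimately point at __init selftest functions when selftests are built in. A user-space sketch of the same select-an-annotation-at-build-time pattern; the section names below are stand-ins, not the kernel's:

	#include <stdio.h>

	/* Stand-in annotations: the real __refdata/__read_mostly macros place the
	 * object in specific kernel sections; these names are illustrative only. */
	#ifdef SELFTEST_BUILD
	# define TRACER_DATA __attribute__((section("tracer_ref_data")))
	#else
	# define TRACER_DATA __attribute__((section("tracer_read_mostly")))
	#endif

	struct tracer {
		const char *name;
		int (*init)(void);
	};

	static int demo_init(void)
	{
		return 0;
	}

	/* One definition site; the build configuration decides its placement. */
	static struct tracer demo_tracer TRACER_DATA = {
		.name = "demo",
		.init = demo_init,
	};

	int main(void)
	{
		printf("%s -> %d\n", demo_tracer.name, demo_tracer.init());
		return 0;
	}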
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 84b1e045faba..80c36bcf66e8 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -236,6 +236,10 @@ __kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));
+ if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
+ "perf buffer not large enough"))
+ return NULL;
+
pc = preempt_count();
*rctxp = perf_swevent_get_recursion_context();
@@ -266,6 +270,10 @@ perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
struct pt_regs regs;
int rctx;
+ head = this_cpu_ptr(event_function.perf_events);
+ if (hlist_empty(head))
+ return;
+
#define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \
sizeof(u64)) - sizeof(u32))
@@ -279,8 +287,6 @@ perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
entry->ip = ip;
entry->parent_ip = parent_ip;
-
- head = this_cpu_ptr(event_function.perf_events);
perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, 0,
1, &regs, head, NULL);
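
The trace_event_perf.c hunk above centralizes the PERF_MAX_TRACE_SIZE check in perf_trace_buf_prepare() (the per-callsite copies are dropped from trace_kprobe.c further down) and bails out of perf_ftrace_function_call() early when no perf event is attached. A small sketch of putting the size guard in the prepare helper so every caller is covered once; sizes and names below are illustrative:

	#include <stdio.h>
	#include <string.h>

	#define MAX_TRACE_SIZE 128	/* stands in for PERF_MAX_TRACE_SIZE */

	static char buf[MAX_TRACE_SIZE];

	/* The check lives in the prepare helper, so every caller is covered. */
	static void *trace_buf_prepare(size_t size)
	{
		if (size > MAX_TRACE_SIZE) {
			fprintf(stderr, "trace buffer not large enough\n");
			return NULL;
		}
		memset(buf, 0, size);
		return buf;
	}

	static int record_event(size_t payload)
	{
		void *entry = trace_buf_prepare(payload);

		if (!entry)		/* callers only need the NULL check */
			return -1;
		return 0;
	}

	int main(void)
	{
		printf("%d %d\n", record_event(64), record_event(4096));	/* 0 -1 */
		return 0;
	}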
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 7d854290bf81..29a7ebcfb426 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -409,33 +409,42 @@ static void put_system(struct ftrace_subsystem_dir *dir)
mutex_unlock(&event_mutex);
}
-/*
- * Open and update trace_array ref count.
- * Must have the current trace_array passed to it.
- */
-static int tracing_open_generic_file(struct inode *inode, struct file *filp)
+static void remove_subsystem(struct ftrace_subsystem_dir *dir)
{
- struct ftrace_event_file *file = inode->i_private;
- struct trace_array *tr = file->tr;
- int ret;
+ if (!dir)
+ return;
- if (trace_array_get(tr) < 0)
- return -ENODEV;
+ if (!--dir->nr_events) {
+ debugfs_remove_recursive(dir->entry);
+ list_del(&dir->list);
+ __put_system_dir(dir);
+ }
+}
- ret = tracing_open_generic(inode, filp);
- if (ret < 0)
- trace_array_put(tr);
- return ret;
+static void *event_file_data(struct file *filp)
+{
+ return ACCESS_ONCE(file_inode(filp)->i_private);
}
-static int tracing_release_generic_file(struct inode *inode, struct file *filp)
+static void remove_event_file_dir(struct ftrace_event_file *file)
{
- struct ftrace_event_file *file = inode->i_private;
- struct trace_array *tr = file->tr;
+ struct dentry *dir = file->dir;
+ struct dentry *child;
- trace_array_put(tr);
+ if (dir) {
+ spin_lock(&dir->d_lock); /* probably unneeded */
+ list_for_each_entry(child, &dir->d_subdirs, d_u.d_child) {
+ if (child->d_inode) /* probably unneeded */
+ child->d_inode->i_private = NULL;
+ }
+ spin_unlock(&dir->d_lock);
- return 0;
+ debugfs_remove_recursive(dir);
+ }
+
+ list_del(&file->list);
+ remove_subsystem(file->system);
+ kmem_cache_free(file_cachep, file);
}
/*
@@ -679,15 +688,25 @@ static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
loff_t *ppos)
{
- struct ftrace_event_file *file = filp->private_data;
+ struct ftrace_event_file *file;
+ unsigned long flags;
char buf[4] = "0";
- if (file->flags & FTRACE_EVENT_FL_ENABLED &&
- !(file->flags & FTRACE_EVENT_FL_SOFT_DISABLED))
+ mutex_lock(&event_mutex);
+ file = event_file_data(filp);
+ if (likely(file))
+ flags = file->flags;
+ mutex_unlock(&event_mutex);
+
+ if (!file)
+ return -ENODEV;
+
+ if (flags & FTRACE_EVENT_FL_ENABLED &&
+ !(flags & FTRACE_EVENT_FL_SOFT_DISABLED))
strcpy(buf, "1");
- if (file->flags & FTRACE_EVENT_FL_SOFT_DISABLED ||
- file->flags & FTRACE_EVENT_FL_SOFT_MODE)
+ if (flags & FTRACE_EVENT_FL_SOFT_DISABLED ||
+ flags & FTRACE_EVENT_FL_SOFT_MODE)
strcat(buf, "*");
strcat(buf, "\n");
@@ -699,13 +718,10 @@ static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
loff_t *ppos)
{
- struct ftrace_event_file *file = filp->private_data;
+ struct ftrace_event_file *file;
unsigned long val;
int ret;
- if (!file)
- return -EINVAL;
-
ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
if (ret)
return ret;
@@ -717,8 +733,11 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
switch (val) {
case 0:
case 1:
+ ret = -ENODEV;
mutex_lock(&event_mutex);
- ret = ftrace_event_enable_disable(file, val);
+ file = event_file_data(filp);
+ if (likely(file))
+ ret = ftrace_event_enable_disable(file, val);
mutex_unlock(&event_mutex);
break;
@@ -825,65 +844,39 @@ enum {
static void *f_next(struct seq_file *m, void *v, loff_t *pos)
{
- struct ftrace_event_call *call = m->private;
- struct ftrace_event_field *field;
+ struct ftrace_event_call *call = event_file_data(m->private);
struct list_head *common_head = &ftrace_common_fields;
struct list_head *head = trace_get_fields(call);
+ struct list_head *node = v;
(*pos)++;
switch ((unsigned long)v) {
case FORMAT_HEADER:
- if (unlikely(list_empty(common_head)))
- return NULL;
-
- field = list_entry(common_head->prev,
- struct ftrace_event_field, link);
- return field;
+ node = common_head;
+ break;
case FORMAT_FIELD_SEPERATOR:
- if (unlikely(list_empty(head)))
- return NULL;
-
- field = list_entry(head->prev, struct ftrace_event_field, link);
- return field;
+ node = head;
+ break;
case FORMAT_PRINTFMT:
/* all done */
return NULL;
}
- field = v;
- if (field->link.prev == common_head)
+ node = node->prev;
+ if (node == common_head)
return (void *)FORMAT_FIELD_SEPERATOR;
- else if (field->link.prev == head)
+ else if (node == head)
return (void *)FORMAT_PRINTFMT;
-
- field = list_entry(field->link.prev, struct ftrace_event_field, link);
-
- return field;
-}
-
-static void *f_start(struct seq_file *m, loff_t *pos)
-{
- loff_t l = 0;
- void *p;
-
- /* Start by showing the header */
- if (!*pos)
- return (void *)FORMAT_HEADER;
-
- p = (void *)FORMAT_HEADER;
- do {
- p = f_next(m, p, &l);
- } while (p && l < *pos);
-
- return p;
+ else
+ return node;
}
static int f_show(struct seq_file *m, void *v)
{
- struct ftrace_event_call *call = m->private;
+ struct ftrace_event_call *call = event_file_data(m->private);
struct ftrace_event_field *field;
const char *array_descriptor;
@@ -904,8 +897,7 @@ static int f_show(struct seq_file *m, void *v)
return 0;
}
- field = v;
-
+ field = list_entry(v, struct ftrace_event_field, link);
/*
* Smartly shows the array type(except dynamic array).
* Normal:
@@ -932,8 +924,25 @@ static int f_show(struct seq_file *m, void *v)
return 0;
}
+static void *f_start(struct seq_file *m, loff_t *pos)
+{
+ void *p = (void *)FORMAT_HEADER;
+ loff_t l = 0;
+
+ /* ->stop() is called even if ->start() fails */
+ mutex_lock(&event_mutex);
+ if (!event_file_data(m->private))
+ return ERR_PTR(-ENODEV);
+
+ while (l < *pos && p)
+ p = f_next(m, p, &l);
+
+ return p;
+}
+
static void f_stop(struct seq_file *m, void *p)
{
+ mutex_unlock(&event_mutex);
}
static const struct seq_operations trace_format_seq_ops = {
@@ -945,7 +954,6 @@ static const struct seq_operations trace_format_seq_ops = {
static int trace_format_open(struct inode *inode, struct file *file)
{
- struct ftrace_event_call *call = inode->i_private;
struct seq_file *m;
int ret;
@@ -954,7 +962,7 @@ static int trace_format_open(struct inode *inode, struct file *file)
return ret;
m = file->private_data;
- m->private = call;
+ m->private = file;
return 0;
}
@@ -962,45 +970,47 @@ static int trace_format_open(struct inode *inode, struct file *file)
static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
- struct ftrace_event_call *call = filp->private_data;
- struct trace_seq *s;
- int r;
+ int id = (long)event_file_data(filp);
+ char buf[32];
+ int len;
if (*ppos)
return 0;
- s = kmalloc(sizeof(*s), GFP_KERNEL);
- if (!s)
- return -ENOMEM;
+ if (unlikely(!id))
+ return -ENODEV;
- trace_seq_init(s);
- trace_seq_printf(s, "%d\n", call->event.type);
+ len = sprintf(buf, "%d\n", id);
- r = simple_read_from_buffer(ubuf, cnt, ppos,
- s->buffer, s->len);
- kfree(s);
- return r;
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
}
static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
loff_t *ppos)
{
- struct ftrace_event_call *call = filp->private_data;
+ struct ftrace_event_call *call;
struct trace_seq *s;
- int r;
+ int r = -ENODEV;
if (*ppos)
return 0;
s = kmalloc(sizeof(*s), GFP_KERNEL);
+
if (!s)
return -ENOMEM;
trace_seq_init(s);
- print_event_filter(call, s);
- r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
+ mutex_lock(&event_mutex);
+ call = event_file_data(filp);
+ if (call)
+ print_event_filter(call, s);
+ mutex_unlock(&event_mutex);
+
+ if (call)
+ r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
kfree(s);
@@ -1011,9 +1021,9 @@ static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
loff_t *ppos)
{
- struct ftrace_event_call *call = filp->private_data;
+ struct ftrace_event_call *call;
char *buf;
- int err;
+ int err = -ENODEV;
if (cnt >= PAGE_SIZE)
return -EINVAL;
@@ -1028,7 +1038,12 @@ event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
}
buf[cnt] = '\0';
- err = apply_event_filter(call, buf);
+ mutex_lock(&event_mutex);
+ call = event_file_data(filp);
+ if (call)
+ err = apply_event_filter(call, buf);
+ mutex_unlock(&event_mutex);
+
free_page((unsigned long) buf);
if (err < 0)
return err;
@@ -1218,6 +1233,7 @@ show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
static int ftrace_event_avail_open(struct inode *inode, struct file *file);
static int ftrace_event_set_open(struct inode *inode, struct file *file);
+static int ftrace_event_release(struct inode *inode, struct file *file);
static const struct seq_operations show_event_seq_ops = {
.start = t_start,
@@ -1245,14 +1261,13 @@ static const struct file_operations ftrace_set_event_fops = {
.read = seq_read,
.write = ftrace_event_write,
.llseek = seq_lseek,
- .release = seq_release,
+ .release = ftrace_event_release,
};
static const struct file_operations ftrace_enable_fops = {
- .open = tracing_open_generic_file,
+ .open = tracing_open_generic,
.read = event_enable_read,
.write = event_enable_write,
- .release = tracing_release_generic_file,
.llseek = default_llseek,
};
@@ -1264,7 +1279,6 @@ static const struct file_operations ftrace_event_format_fops = {
};
static const struct file_operations ftrace_event_id_fops = {
- .open = tracing_open_generic,
.read = event_id_read,
.llseek = default_llseek,
};
@@ -1323,6 +1337,15 @@ ftrace_event_open(struct inode *inode, struct file *file,
return ret;
}
+static int ftrace_event_release(struct inode *inode, struct file *file)
+{
+ struct trace_array *tr = inode->i_private;
+
+ trace_array_put(tr);
+
+ return seq_release(inode, file);
+}
+
static int
ftrace_event_avail_open(struct inode *inode, struct file *file)
{
@@ -1336,12 +1359,19 @@ ftrace_event_set_open(struct inode *inode, struct file *file)
{
const struct seq_operations *seq_ops = &show_set_event_seq_ops;
struct trace_array *tr = inode->i_private;
+ int ret;
+
+ if (trace_array_get(tr) < 0)
+ return -ENODEV;
if ((file->f_mode & FMODE_WRITE) &&
(file->f_flags & O_TRUNC))
ftrace_clear_events(tr);
- return ftrace_event_open(inode, file, seq_ops);
+ ret = ftrace_event_open(inode, file, seq_ops);
+ if (ret < 0)
+ trace_array_put(tr);
+ return ret;
}
static struct event_subsystem *
@@ -1496,8 +1526,8 @@ event_create_dir(struct dentry *parent,
#ifdef CONFIG_PERF_EVENTS
if (call->event.type && call->class->reg)
- trace_create_file("id", 0444, file->dir, call,
- id);
+ trace_create_file("id", 0444, file->dir,
+ (void *)(long)call->event.type, id);
#endif
/*
@@ -1522,33 +1552,16 @@ event_create_dir(struct dentry *parent,
return 0;
}
-static void remove_subsystem(struct ftrace_subsystem_dir *dir)
-{
- if (!dir)
- return;
-
- if (!--dir->nr_events) {
- debugfs_remove_recursive(dir->entry);
- list_del(&dir->list);
- __put_system_dir(dir);
- }
-}
-
static void remove_event_from_tracers(struct ftrace_event_call *call)
{
struct ftrace_event_file *file;
struct trace_array *tr;
do_for_each_event_file_safe(tr, file) {
-
if (file->event_call != call)
continue;
- list_del(&file->list);
- debugfs_remove_recursive(file->dir);
- remove_subsystem(file->system);
- kmem_cache_free(file_cachep, file);
-
+ remove_event_file_dir(file);
/*
* The do_for_each_event_file_safe() is
* a double loop. After finding the call for this
@@ -1700,16 +1713,53 @@ static void __trace_remove_event_call(struct ftrace_event_call *call)
destroy_preds(call);
}
+static int probe_remove_event_call(struct ftrace_event_call *call)
+{
+ struct trace_array *tr;
+ struct ftrace_event_file *file;
+
+#ifdef CONFIG_PERF_EVENTS
+ if (call->perf_refcount)
+ return -EBUSY;
+#endif
+ do_for_each_event_file(tr, file) {
+ if (file->event_call != call)
+ continue;
+ /*
+ * We can't rely on ftrace_event_enable_disable(enable => 0)
+ * we are going to do, FTRACE_EVENT_FL_SOFT_MODE can suppress
+ * TRACE_REG_UNREGISTER.
+ */
+ if (file->flags & FTRACE_EVENT_FL_ENABLED)
+ return -EBUSY;
+ /*
+ * The do_for_each_event_file_safe() is
+ * a double loop. After finding the call for this
+ * trace_array, we use break to jump to the next
+ * trace_array.
+ */
+ break;
+ } while_for_each_event_file();
+
+ __trace_remove_event_call(call);
+
+ return 0;
+}
+
/* Remove an event_call */
-void trace_remove_event_call(struct ftrace_event_call *call)
+int trace_remove_event_call(struct ftrace_event_call *call)
{
+ int ret;
+
mutex_lock(&trace_types_lock);
mutex_lock(&event_mutex);
down_write(&trace_event_sem);
- __trace_remove_event_call(call);
+ ret = probe_remove_event_call(call);
up_write(&trace_event_sem);
mutex_unlock(&event_mutex);
mutex_unlock(&trace_types_lock);
+
+ return ret;
}
#define for_each_event(event, start, end) \
@@ -2278,12 +2328,8 @@ __trace_remove_event_dirs(struct trace_array *tr)
{
struct ftrace_event_file *file, *next;
- list_for_each_entry_safe(file, next, &tr->events, list) {
- list_del(&file->list);
- debugfs_remove_recursive(file->dir);
- remove_subsystem(file->system);
- kmem_cache_free(file_cachep, file);
- }
+ list_for_each_entry_safe(file, next, &tr->events, list)
+ remove_event_file_dir(file);
}
static void
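
The trace_events.c rework above routes every file operation through event_file_data(), which re-reads inode->i_private under event_mutex, while remove_event_file_dir() clears i_private on removal. A reader that still holds an open file therefore sees NULL and fails with -ENODEV instead of dereferencing freed memory. A user-space sketch of that re-read-under-lock pattern, with a plain pointer slot and a pthread mutex standing in for i_private and event_mutex (names illustrative):

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static int *slot;			/* set while the "event" exists */

	static int read_value(int *out)
	{
		int *obj;

		pthread_mutex_lock(&lock);
		obj = slot;			/* re-read on every access */
		if (obj)
			*out = *obj;
		pthread_mutex_unlock(&lock);

		return obj ? 0 : -1;		/* -ENODEV-style failure if gone */
	}

	static void remove_event(int *obj)
	{
		pthread_mutex_lock(&lock);
		slot = NULL;			/* later readers now see "gone" */
		pthread_mutex_unlock(&lock);
		free(obj);			/* no reader copies the pointer past the unlock */
	}

	int main(void)
	{
		int *obj = malloc(sizeof(*obj));
		int val;

		*obj = 42;
		slot = obj;
		printf("%d\n", read_value(&val) == 0 ? val : -1);	/* 42 */
		remove_event(obj);
		printf("%d\n", read_value(&val));			/* -1 */
		return 0;
	}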
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 0d883dc057d6..97daa8cf958d 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -637,17 +637,15 @@ static void append_filter_err(struct filter_parse_state *ps,
free_page((unsigned long) buf);
}
+/* caller must hold event_mutex */
void print_event_filter(struct ftrace_event_call *call, struct trace_seq *s)
{
- struct event_filter *filter;
+ struct event_filter *filter = call->filter;
- mutex_lock(&event_mutex);
- filter = call->filter;
if (filter && filter->filter_string)
trace_seq_printf(s, "%s\n", filter->filter_string);
else
- trace_seq_printf(s, "none\n");
- mutex_unlock(&event_mutex);
+ trace_seq_puts(s, "none\n");
}
void print_subsystem_event_filter(struct event_subsystem *system,
@@ -660,7 +658,7 @@ void print_subsystem_event_filter(struct event_subsystem *system,
if (filter && filter->filter_string)
trace_seq_printf(s, "%s\n", filter->filter_string);
else
- trace_seq_printf(s, DEFAULT_SYS_FILTER_MESSAGE "\n");
+ trace_seq_puts(s, DEFAULT_SYS_FILTER_MESSAGE "\n");
mutex_unlock(&event_mutex);
}
@@ -1841,23 +1839,22 @@ static int create_system_filter(struct event_subsystem *system,
return err;
}
+/* caller must hold event_mutex */
int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
{
struct event_filter *filter;
- int err = 0;
-
- mutex_lock(&event_mutex);
+ int err;
if (!strcmp(strstrip(filter_string), "0")) {
filter_disable(call);
filter = call->filter;
if (!filter)
- goto out_unlock;
+ return 0;
RCU_INIT_POINTER(call->filter, NULL);
/* Make sure the filter is not being used */
synchronize_sched();
__free_filter(filter);
- goto out_unlock;
+ return 0;
}
err = create_filter(call, filter_string, true, &filter);
@@ -1884,8 +1881,6 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
__free_filter(tmp);
}
}
-out_unlock:
- mutex_unlock(&event_mutex);
return err;
}
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index b863f93b30f3..38fe1483c508 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -199,7 +199,7 @@ static int func_set_flag(u32 old_flags, u32 bit, int set)
return 0;
}
-static struct tracer function_trace __read_mostly =
+static struct tracer function_trace __tracer_data =
{
.name = "function",
.init = function_trace_init,
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 8388bc99f2ee..b5c09242683d 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -446,7 +446,7 @@ print_graph_proc(struct trace_seq *s, pid_t pid)
/* First spaces to align center */
for (i = 0; i < spaces / 2; i++) {
- ret = trace_seq_printf(s, " ");
+ ret = trace_seq_putc(s, ' ');
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
}
@@ -457,7 +457,7 @@ print_graph_proc(struct trace_seq *s, pid_t pid)
/* Last spaces to align center */
for (i = 0; i < spaces - (spaces / 2); i++) {
- ret = trace_seq_printf(s, " ");
+ ret = trace_seq_putc(s, ' ');
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
}
@@ -503,7 +503,7 @@ verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
------------------------------------------
*/
- ret = trace_seq_printf(s,
+ ret = trace_seq_puts(s,
" ------------------------------------------\n");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
@@ -516,7 +516,7 @@ verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
if (ret == TRACE_TYPE_PARTIAL_LINE)
return TRACE_TYPE_PARTIAL_LINE;
- ret = trace_seq_printf(s, " => ");
+ ret = trace_seq_puts(s, " => ");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
@@ -524,7 +524,7 @@ verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
if (ret == TRACE_TYPE_PARTIAL_LINE)
return TRACE_TYPE_PARTIAL_LINE;
- ret = trace_seq_printf(s,
+ ret = trace_seq_puts(s,
"\n ------------------------------------------\n\n");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
@@ -645,7 +645,7 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr,
ret = print_graph_proc(s, pid);
if (ret == TRACE_TYPE_PARTIAL_LINE)
return TRACE_TYPE_PARTIAL_LINE;
- ret = trace_seq_printf(s, " | ");
+ ret = trace_seq_puts(s, " | ");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
}
@@ -657,9 +657,9 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr,
return ret;
if (type == TRACE_GRAPH_ENT)
- ret = trace_seq_printf(s, "==========>");
+ ret = trace_seq_puts(s, "==========>");
else
- ret = trace_seq_printf(s, "<==========");
+ ret = trace_seq_puts(s, "<==========");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
@@ -668,7 +668,7 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr,
if (ret != TRACE_TYPE_HANDLED)
return ret;
- ret = trace_seq_printf(s, "\n");
+ ret = trace_seq_putc(s, '\n');
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
@@ -705,13 +705,13 @@ trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
len += strlen(nsecs_str);
}
- ret = trace_seq_printf(s, " us ");
+ ret = trace_seq_puts(s, " us ");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
/* Print remaining spaces to fit the row's width */
for (i = len; i < 7; i++) {
- ret = trace_seq_printf(s, " ");
+ ret = trace_seq_putc(s, ' ');
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
}
@@ -731,13 +731,13 @@ print_graph_duration(unsigned long long duration, struct trace_seq *s,
/* No real adata, just filling the column with spaces */
switch (duration) {
case DURATION_FILL_FULL:
- ret = trace_seq_printf(s, " | ");
+ ret = trace_seq_puts(s, " | ");
return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
case DURATION_FILL_START:
- ret = trace_seq_printf(s, " ");
+ ret = trace_seq_puts(s, " ");
return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
case DURATION_FILL_END:
- ret = trace_seq_printf(s, " |");
+ ret = trace_seq_puts(s, " |");
return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
}
@@ -745,10 +745,10 @@ print_graph_duration(unsigned long long duration, struct trace_seq *s,
if (flags & TRACE_GRAPH_PRINT_OVERHEAD) {
/* Duration exceeded 100 msecs */
if (duration > 100000ULL)
- ret = trace_seq_printf(s, "! ");
+ ret = trace_seq_puts(s, "! ");
/* Duration exceeded 10 msecs */
else if (duration > 10000ULL)
- ret = trace_seq_printf(s, "+ ");
+ ret = trace_seq_puts(s, "+ ");
}
/*
@@ -757,7 +757,7 @@ print_graph_duration(unsigned long long duration, struct trace_seq *s,
* to fill out the space.
*/
if (ret == -1)
- ret = trace_seq_printf(s, " ");
+ ret = trace_seq_puts(s, " ");
/* Catching here any failure happenned above */
if (!ret)
@@ -767,7 +767,7 @@ print_graph_duration(unsigned long long duration, struct trace_seq *s,
if (ret != TRACE_TYPE_HANDLED)
return ret;
- ret = trace_seq_printf(s, "| ");
+ ret = trace_seq_puts(s, "| ");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
@@ -817,7 +817,7 @@ print_graph_entry_leaf(struct trace_iterator *iter,
/* Function */
for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
- ret = trace_seq_printf(s, " ");
+ ret = trace_seq_putc(s, ' ');
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
}
@@ -858,7 +858,7 @@ print_graph_entry_nested(struct trace_iterator *iter,
/* Function */
for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
- ret = trace_seq_printf(s, " ");
+ ret = trace_seq_putc(s, ' ');
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
}
@@ -917,7 +917,7 @@ print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
if (ret == TRACE_TYPE_PARTIAL_LINE)
return TRACE_TYPE_PARTIAL_LINE;
- ret = trace_seq_printf(s, " | ");
+ ret = trace_seq_puts(s, " | ");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
}
@@ -1117,7 +1117,7 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
/* Closing brace */
for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
- ret = trace_seq_printf(s, " ");
+ ret = trace_seq_putc(s, ' ');
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
}
@@ -1129,7 +1129,7 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
* belongs to, write out the function name.
*/
if (func_match) {
- ret = trace_seq_printf(s, "}\n");
+ ret = trace_seq_puts(s, "}\n");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
} else {
@@ -1179,13 +1179,13 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
/* Indentation */
if (depth > 0)
for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) {
- ret = trace_seq_printf(s, " ");
+ ret = trace_seq_putc(s, ' ');
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
}
/* The comment */
- ret = trace_seq_printf(s, "/* ");
+ ret = trace_seq_puts(s, "/* ");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
@@ -1216,7 +1216,7 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
s->len--;
}
- ret = trace_seq_printf(s, " */\n");
+ ret = trace_seq_puts(s, " */\n");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
@@ -1448,7 +1448,7 @@ static struct trace_event graph_trace_ret_event = {
.funcs = &graph_functions
};
-static struct tracer graph_trace __read_mostly = {
+static struct tracer graph_trace __tracer_data = {
.name = "function_graph",
.open = graph_trace_open,
.pipe_open = graph_trace_open,
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 7ed6976493c8..243f6834d026 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -95,7 +95,7 @@ static __kprobes bool trace_probe_is_on_module(struct trace_probe *tp)
}
static int register_probe_event(struct trace_probe *tp);
-static void unregister_probe_event(struct trace_probe *tp);
+static int unregister_probe_event(struct trace_probe *tp);
static DEFINE_MUTEX(probe_lock);
static LIST_HEAD(probe_list);
@@ -243,11 +243,11 @@ find_event_file_link(struct trace_probe *tp, struct ftrace_event_file *file)
static int
disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
{
+ struct event_file_link *link = NULL;
+ int wait = 0;
int ret = 0;
if (file) {
- struct event_file_link *link;
-
link = find_event_file_link(tp, file);
if (!link) {
ret = -EINVAL;
@@ -255,10 +255,7 @@ disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
}
list_del_rcu(&link->list);
- /* synchronize with kprobe_trace_func/kretprobe_trace_func */
- synchronize_sched();
- kfree(link);
-
+ wait = 1;
if (!list_empty(&tp->files))
goto out;
@@ -271,8 +268,22 @@ disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
disable_kretprobe(&tp->rp);
else
disable_kprobe(&tp->rp.kp);
+ wait = 1;
}
out:
+ if (wait) {
+ /*
+ * Synchronize with kprobe_trace_func/kretprobe_trace_func
+ * to ensure the probe is really disabled (all running handlers
+ * have finished). This is needed not only for the kfree() below,
+ * but also for the caller: trace_remove_event_call() relies on it
+ * when releasing event_call related objects, which are accessed
+ * from kprobe_trace_func/kretprobe_trace_func.
+ */
+ synchronize_sched();
+ kfree(link); /* Ignored if link == NULL */
+ }
+
return ret;
}
@@ -340,9 +351,12 @@ static int unregister_trace_probe(struct trace_probe *tp)
if (trace_probe_is_enabled(tp))
return -EBUSY;
+ /* Will fail if probe is being used by ftrace or perf */
+ if (unregister_probe_event(tp))
+ return -EBUSY;
+
__unregister_trace_probe(tp);
list_del(&tp->list);
- unregister_probe_event(tp);
return 0;
}
@@ -621,7 +635,9 @@ static int release_all_trace_probes(void)
/* TODO: Use batch unregistration */
while (!list_empty(&probe_list)) {
tp = list_entry(probe_list.next, struct trace_probe, list);
- unregister_trace_probe(tp);
+ ret = unregister_trace_probe(tp);
+ if (ret)
+ goto end;
free_trace_probe(tp);
}
@@ -1087,9 +1103,6 @@ kprobe_perf_func(struct trace_probe *tp, struct pt_regs *regs)
__size = sizeof(*entry) + tp->size + dsize;
size = ALIGN(__size + sizeof(u32), sizeof(u64));
size -= sizeof(u32);
- if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
- "profile buffer not large enough"))
- return;
entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
if (!entry)
@@ -1120,9 +1133,6 @@ kretprobe_perf_func(struct trace_probe *tp, struct kretprobe_instance *ri,
__size = sizeof(*entry) + tp->size + dsize;
size = ALIGN(__size + sizeof(u32), sizeof(u64));
size -= sizeof(u32);
- if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
- "profile buffer not large enough"))
- return;
entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
if (!entry)
@@ -1242,11 +1252,15 @@ static int register_probe_event(struct trace_probe *tp)
return ret;
}
-static void unregister_probe_event(struct trace_probe *tp)
+static int unregister_probe_event(struct trace_probe *tp)
{
+ int ret;
+
/* tp->event is unregistered in trace_remove_event_call() */
- trace_remove_event_call(&tp->call);
- kfree(tp->call.print_fmt);
+ ret = trace_remove_event_call(&tp->call);
+ if (!ret)
+ kfree(tp->call.print_fmt);
+ return ret;
}
/* Make a debugfs interface for controlling probe points */
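
The trace_kprobe.c hunks above make unregister_probe_event() return an error when the event is still in use by ftrace or perf, and the callers now bail out instead of unlinking and freeing a probe that could not actually be removed. A rough userspace sketch of that error-propagation shape (not the kernel code itself), with a made-up struct probe and an in_use flag standing in for the real busy check in trace_remove_event_call():

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct probe {
	struct probe *next;
	char name[32];
	int in_use;		/* stands in for "still referenced by ftrace/perf" */
};

static struct probe *probe_list;

static int unregister_probe_event(struct probe *p)
{
	/* Real code: trace_remove_event_call() fails if the event is busy. */
	return p->in_use ? -EBUSY : 0;
}

static int unregister_probe(struct probe *p)
{
	int ret = unregister_probe_event(p);

	if (ret)
		return ret;		/* leave the probe registered */

	/* unlink and free only after the event is really gone */
	struct probe **pp;
	for (pp = &probe_list; *pp; pp = &(*pp)->next) {
		if (*pp == p) {
			*pp = p->next;
			break;
		}
	}
	free(p);
	return 0;
}

static int release_all_probes(void)
{
	int ret = 0;

	while (probe_list) {
		ret = unregister_probe(probe_list);
		if (ret)
			break;		/* stop at the first busy probe */
	}
	return ret;
}

int main(void)
{
	struct probe *a = calloc(1, sizeof(*a));
	struct probe *b = calloc(1, sizeof(*b));

	if (!a || !b)
		return 1;
	strcpy(a->name, "p:myprobe");
	strcpy(b->name, "p:busyprobe");
	b->in_use = 1;
	a->next = b;
	probe_list = a;

	printf("release_all_probes() = %d\n", release_all_probes());
	return 0;
}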
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index a5e8f4878bfa..b3dcfb2f0fef 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -90,7 +90,7 @@ static int mmio_print_pcidev(struct trace_seq *s, const struct pci_dev *dev)
if (drv)
ret += trace_seq_printf(s, " %s\n", drv->name);
else
- ret += trace_seq_printf(s, " \n");
+ ret += trace_seq_puts(s, " \n");
return ret;
}
@@ -107,7 +107,7 @@ static void mmio_pipe_open(struct trace_iterator *iter)
struct header_iter *hiter;
struct trace_seq *s = &iter->seq;
- trace_seq_printf(s, "VERSION 20070824\n");
+ trace_seq_puts(s, "VERSION 20070824\n");
hiter = kzalloc(sizeof(*hiter), GFP_KERNEL);
if (!hiter)
@@ -209,7 +209,7 @@ static enum print_line_t mmio_print_rw(struct trace_iterator *iter)
(rw->value >> 0) & 0xff, rw->pc, 0);
break;
default:
- ret = trace_seq_printf(s, "rw what?\n");
+ ret = trace_seq_puts(s, "rw what?\n");
break;
}
if (ret)
@@ -245,7 +245,7 @@ static enum print_line_t mmio_print_map(struct trace_iterator *iter)
secs, usec_rem, m->map_id, 0UL, 0);
break;
default:
- ret = trace_seq_printf(s, "map what?\n");
+ ret = trace_seq_puts(s, "map what?\n");
break;
}
if (ret)
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index bb922d9ee51b..34e7cbac0c9c 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -78,7 +78,7 @@ enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
trace_assign_type(field, entry);
- ret = trace_seq_printf(s, "%s", field->buf);
+ ret = trace_seq_puts(s, field->buf);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
@@ -558,14 +558,14 @@ seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
if (ret)
ret = trace_seq_puts(s, "??");
if (ret)
- ret = trace_seq_puts(s, "\n");
+ ret = trace_seq_putc(s, '\n');
continue;
}
if (!ret)
break;
if (ret)
ret = seq_print_user_ip(s, mm, ip, sym_flags);
- ret = trace_seq_puts(s, "\n");
+ ret = trace_seq_putc(s, '\n');
}
if (mm)
@@ -579,7 +579,7 @@ seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
int ret;
if (!ip)
- return trace_seq_printf(s, "0");
+ return trace_seq_putc(s, '0');
if (sym_flags & TRACE_ITER_SYM_OFFSET)
ret = seq_print_sym_offset(s, "%s", ip);
@@ -964,14 +964,14 @@ static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags,
goto partial;
if ((flags & TRACE_ITER_PRINT_PARENT) && field->parent_ip) {
- if (!trace_seq_printf(s, " <-"))
+ if (!trace_seq_puts(s, " <-"))
goto partial;
if (!seq_print_ip_sym(s,
field->parent_ip,
flags))
goto partial;
}
- if (!trace_seq_printf(s, "\n"))
+ if (!trace_seq_putc(s, '\n'))
goto partial;
return TRACE_TYPE_HANDLED;
@@ -1210,7 +1210,7 @@ static enum print_line_t trace_stack_print(struct trace_iterator *iter,
if (!seq_print_ip_sym(s, *p, flags))
goto partial;
- if (!trace_seq_puts(s, "\n"))
+ if (!trace_seq_putc(s, '\n'))
goto partial;
}
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 322e16461072..8fd03657bc7d 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -175,7 +175,7 @@ print_syscall_exit(struct trace_iterator *iter, int flags,
entry = syscall_nr_to_meta(syscall);
if (!entry) {
- trace_seq_printf(s, "\n");
+ trace_seq_putc(s, '\n');
return TRACE_TYPE_HANDLED;
}
@@ -566,15 +566,15 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
if (!sys_data)
return;
+ head = this_cpu_ptr(sys_data->enter_event->perf_events);
+ if (hlist_empty(head))
+ return;
+
/* get the size after alignment with the u32 buffer size field */
size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
size = ALIGN(size + sizeof(u32), sizeof(u64));
size -= sizeof(u32);
- if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
- "perf buffer not large enough"))
- return;
-
rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
sys_data->enter_event->event.type, regs, &rctx);
if (!rec)
@@ -583,8 +583,6 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
rec->nr = syscall_nr;
syscall_get_arguments(current, regs, 0, sys_data->nb_args,
(unsigned long *)&rec->args);
-
- head = this_cpu_ptr(sys_data->enter_event->perf_events);
perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
}
@@ -642,18 +640,14 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
if (!sys_data)
return;
+ head = this_cpu_ptr(sys_data->exit_event->perf_events);
+ if (hlist_empty(head))
+ return;
+
/* We can probably do that at build time */
size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
size -= sizeof(u32);
- /*
- * Impossible, but be paranoid with the future
- * How to put this check outside runtime?
- */
- if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
- "exit event has grown above perf buffer size"))
- return;
-
rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
sys_data->exit_event->event.type, regs, &rctx);
if (!rec)
@@ -661,8 +655,6 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
rec->nr = syscall_nr;
rec->ret = syscall_get_return_value(current, regs);
-
- head = this_cpu_ptr(sys_data->exit_event->perf_events);
perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
}
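
In the perf_syscall_enter()/perf_syscall_exit() hunks above, the per-CPU list of perf consumers is checked before any record sizing or buffer preparation, so syscalls with no attached consumer return early. A minimal sketch of that ordering, with a hypothetical has_listeners() check and build_and_submit_record() standing in for the hlist_empty() test and perf_trace_buf_prepare()/perf_trace_buf_submit():

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for this_cpu_ptr(...->perf_events): pretend nobody is attached. */
static bool has_listeners(void)
{
	return false;
}

/* Stand-in for the expensive part: sizing, preparing and submitting. */
static void build_and_submit_record(long id)
{
	printf("built record for syscall %ld\n", id);
}

static void syscall_hook(long id)
{
	/* Cheap check first: bail out before any sizing or buffer work. */
	if (!has_listeners())
		return;

	build_and_submit_record(id);
}

int main(void)
{
	syscall_hook(42);	/* no listeners: nothing is built */
	return 0;
}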
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index d5d0cd368a56..272261b5f94f 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -70,7 +70,7 @@ struct trace_uprobe {
(sizeof(struct probe_arg) * (n)))
static int register_uprobe_event(struct trace_uprobe *tu);
-static void unregister_uprobe_event(struct trace_uprobe *tu);
+static int unregister_uprobe_event(struct trace_uprobe *tu);
static DEFINE_MUTEX(uprobe_lock);
static LIST_HEAD(uprobe_list);
@@ -164,11 +164,17 @@ static struct trace_uprobe *find_probe_event(const char *event, const char *grou
}
/* Unregister a trace_uprobe and probe_event: call with locking uprobe_lock */
-static void unregister_trace_uprobe(struct trace_uprobe *tu)
+static int unregister_trace_uprobe(struct trace_uprobe *tu)
{
+ int ret;
+
+ ret = unregister_uprobe_event(tu);
+ if (ret)
+ return ret;
+
list_del(&tu->list);
- unregister_uprobe_event(tu);
free_trace_uprobe(tu);
+ return 0;
}
/* Register a trace_uprobe and probe_event */
@@ -181,9 +187,12 @@ static int register_trace_uprobe(struct trace_uprobe *tu)
/* register as an event */
old_tp = find_probe_event(tu->call.name, tu->call.class->system);
- if (old_tp)
+ if (old_tp) {
/* delete old event */
- unregister_trace_uprobe(old_tp);
+ ret = unregister_trace_uprobe(old_tp);
+ if (ret)
+ goto end;
+ }
ret = register_uprobe_event(tu);
if (ret) {
@@ -256,6 +265,8 @@ static int create_trace_uprobe(int argc, char **argv)
group = UPROBE_EVENT_SYSTEM;
if (is_delete) {
+ int ret;
+
if (!event) {
pr_info("Delete command needs an event name.\n");
return -EINVAL;
@@ -269,9 +280,9 @@ static int create_trace_uprobe(int argc, char **argv)
return -ENOENT;
}
/* delete an event */
- unregister_trace_uprobe(tu);
+ ret = unregister_trace_uprobe(tu);
mutex_unlock(&uprobe_lock);
- return 0;
+ return ret;
}
if (argc < 2) {
@@ -408,16 +419,20 @@ fail_address_parse:
return ret;
}
-static void cleanup_all_probes(void)
+static int cleanup_all_probes(void)
{
struct trace_uprobe *tu;
+ int ret = 0;
mutex_lock(&uprobe_lock);
while (!list_empty(&uprobe_list)) {
tu = list_entry(uprobe_list.next, struct trace_uprobe, list);
- unregister_trace_uprobe(tu);
+ ret = unregister_trace_uprobe(tu);
+ if (ret)
+ break;
}
mutex_unlock(&uprobe_lock);
+ return ret;
}
/* Probes listing interfaces */
@@ -462,8 +477,13 @@ static const struct seq_operations probes_seq_op = {
static int probes_open(struct inode *inode, struct file *file)
{
- if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
- cleanup_all_probes();
+ int ret;
+
+ if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
+ ret = cleanup_all_probes();
+ if (ret)
+ return ret;
+ }
return seq_open(file, &probes_seq_op);
}
@@ -818,8 +838,6 @@ static void uprobe_perf_print(struct trace_uprobe *tu,
size = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
size = ALIGN(size + tu->size + sizeof(u32), sizeof(u64)) - sizeof(u32);
- if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
- return;
preempt_disable();
head = this_cpu_ptr(call->perf_events);
@@ -970,12 +988,17 @@ static int register_uprobe_event(struct trace_uprobe *tu)
return ret;
}
-static void unregister_uprobe_event(struct trace_uprobe *tu)
+static int unregister_uprobe_event(struct trace_uprobe *tu)
{
+ int ret;
+
/* tu->event is unregistered in trace_remove_event_call() */
- trace_remove_event_call(&tu->call);
+ ret = trace_remove_event_call(&tu->call);
+ if (ret)
+ return ret;
kfree(tu->call.print_fmt);
tu->call.print_fmt = NULL;
+ return 0;
}
/* Make a trace interface for controlling probe points */
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index d8c30db06c5b..9064b919a406 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -62,6 +62,9 @@ int create_user_ns(struct cred *new)
kgid_t group = new->egid;
int ret;
+ if (parent_ns->level > 32)
+ return -EUSERS;
+
/*
* Verify that we can not violate the policy of which files
* may be accessed that is specified by the root directory,
@@ -92,6 +95,7 @@ int create_user_ns(struct cred *new)
atomic_set(&ns->count, 1);
/* Leave the new->user_ns reference with the new user namespace. */
ns->parent = parent_ns;
+ ns->level = parent_ns->level + 1;
ns->owner = owner;
ns->group = group;
@@ -105,16 +109,21 @@ int create_user_ns(struct cred *new)
int unshare_userns(unsigned long unshare_flags, struct cred **new_cred)
{
struct cred *cred;
+ int err = -ENOMEM;
if (!(unshare_flags & CLONE_NEWUSER))
return 0;
cred = prepare_creds();
- if (!cred)
- return -ENOMEM;
+ if (cred) {
+ err = create_user_ns(cred);
+ if (err)
+ put_cred(cred);
+ else
+ *new_cred = cred;
+ }
- *new_cred = cred;
- return create_user_ns(cred);
+ return err;
}
void free_user_ns(struct user_namespace *ns)
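
The user_namespace.c change records each namespace's nesting level (parent->level + 1) and makes create_user_ns() fail with -EUSERS once the parent is already deeper than level 32. A small standalone sketch of the same depth accounting, using a made-up struct ns rather than struct user_namespace (EUSERS as defined in Linux errno.h):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Made-up miniature of struct user_namespace: parent pointer and level. */
struct ns {
	struct ns *parent;
	int level;
};

static struct ns init_ns = { .parent = NULL, .level = 0 };

static int create_ns(struct ns *parent, struct ns **out)
{
	struct ns *child;

	/* Same guard as create_user_ns(): refuse overly deep nesting. */
	if (parent->level > 32)
		return -EUSERS;

	child = calloc(1, sizeof(*child));
	if (!child)
		return -ENOMEM;

	child->parent = parent;
	child->level = parent->level + 1;
	*out = child;
	return 0;
}

int main(void)
{
	struct ns *cur = &init_ns;
	struct ns *next;
	int ret, depth = 0;

	while ((ret = create_ns(cur, &next)) == 0) {
		cur = next;
		depth++;
	}
	printf("created %d nested namespaces, then got %d\n", depth, ret);
	return 0;
}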
diff --git a/kernel/wait.c b/kernel/wait.c
index ce0daa320a26..dec68bd4e9d8 100644
--- a/kernel/wait.c
+++ b/kernel/wait.c
@@ -333,7 +333,8 @@ int __wait_on_atomic_t(wait_queue_head_t *wq, struct wait_bit_queue *q,
prepare_to_wait(wq, &q->wait, mode);
val = q->key.flags;
if (atomic_read(val) == 0)
- ret = (*action)(val);
+ break;
+ ret = (*action)(val);
} while (!ret && atomic_read(val) != 0);
finish_wait(wq, &q->wait);
return ret;
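
The __wait_on_atomic_t() fix makes the loop break as soon as the counter has already reached zero and only call the caller-supplied action (which is what actually sleeps) while the counter is still non-zero; before the change the action was only invoked once the value was already zero. A single-threaded sketch of the corrected loop shape, with a fake action that simply decrements the counter instead of sleeping:

#include <stdio.h>

/* Stand-in for the caller-supplied action: the real one sleeps until
 * someone wakes the queue; here it just makes progress by decrementing. */
static int fake_action(int *val)
{
	(*val)--;
	printf("action ran, val is now %d\n", *val);
	return 0;		/* 0 = keep waiting, non-zero = error */
}

static int wait_on_value(int *val, int (*action)(int *))
{
	int ret = 0;

	do {
		/* prepare_to_wait() would go here in the kernel version */
		if (*val == 0)
			break;		/* already done: never call the action */
		ret = (*action)(val);
	} while (!ret && *val != 0);
	/* finish_wait() would go here */
	return ret;
}

int main(void)
{
	int counter = 3;

	printf("wait returned %d\n", wait_on_value(&counter, fake_action));
	return 0;
}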
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index f02c4a4a0c3c..7f5d4be22034 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2817,6 +2817,19 @@ already_gone:
return false;
}
+static bool __flush_work(struct work_struct *work)
+{
+ struct wq_barrier barr;
+
+ if (start_flush_work(work, &barr)) {
+ wait_for_completion(&barr.done);
+ destroy_work_on_stack(&barr.work);
+ return true;
+ } else {
+ return false;
+ }
+}
+
/**
* flush_work - wait for a work to finish executing the last queueing instance
* @work: the work to flush
@@ -2830,18 +2843,10 @@ already_gone:
*/
bool flush_work(struct work_struct *work)
{
- struct wq_barrier barr;
-
lock_map_acquire(&work->lockdep_map);
lock_map_release(&work->lockdep_map);
- if (start_flush_work(work, &barr)) {
- wait_for_completion(&barr.done);
- destroy_work_on_stack(&barr.work);
- return true;
- } else {
- return false;
- }
+ return __flush_work(work);
}
EXPORT_SYMBOL_GPL(flush_work);
@@ -3411,6 +3416,12 @@ static void copy_workqueue_attrs(struct workqueue_attrs *to,
{
to->nice = from->nice;
cpumask_copy(to->cpumask, from->cpumask);
+ /*
+ * Unlike the hash and equality tests, this function doesn't ignore
+ * ->no_numa, as it is used for both pool and wq attrs. Instead,
+ * get_unbound_pool() explicitly clears ->no_numa after copying.
+ */
+ to->no_numa = from->no_numa;
}
/* hash value of the content of @attr */
@@ -3578,6 +3589,12 @@ static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
lockdep_set_subclass(&pool->lock, 1); /* see put_pwq() */
copy_workqueue_attrs(pool->attrs, attrs);
+ /*
+ * no_numa isn't a worker_pool attribute; always clear it. See
+ * 'struct workqueue_attrs' comments for detail.
+ */
+ pool->attrs->no_numa = false;
+
/* if cpumask is contained inside a NUMA node, we belong to that node */
if (wq_numa_enabled) {
for_each_node(node) {
@@ -4644,7 +4661,7 @@ static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
* Workqueues should be brought up before normal priority CPU notifiers.
* This will be registered high priority CPU notifier.
*/
-static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb,
+static int workqueue_cpu_up_callback(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
{
@@ -4697,7 +4714,7 @@ static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb,
* Workqueues should be brought down after normal priority CPU notifiers.
* This will be registered as low priority CPU notifier.
*/
-static int __cpuinit workqueue_cpu_down_callback(struct notifier_block *nfb,
+static int workqueue_cpu_down_callback(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
{
@@ -4756,7 +4773,14 @@ long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
schedule_work_on(cpu, &wfc.work);
- flush_work(&wfc.work);
+
+ /*
+ * The work item is on-stack and can't lead to deadlock through
+ * flushing. Use __flush_work() to avoid spurious lockdep warnings
+ * when work_on_cpu()s are nested.
+ */
+ __flush_work(&wfc.work);
+
return wfc.ret;
}
EXPORT_SYMBOL_GPL(work_on_cpu);
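
The workqueue.c hunks split the flush into an internal __flush_work(), which does the actual barrier wait, and the exported flush_work(), which additionally takes the lockdep annotations; work_on_cpu() then flushes its on-stack work item through the internal helper so that nested work_on_cpu() calls no longer trigger a spurious lockdep report. A rough sketch of that wrapper split, with printf() standing in for lock_map_acquire()/lock_map_release() and a run-to-completion toy model replacing the real barrier:

#include <stdbool.h>
#include <stdio.h>

struct work {
	void (*fn)(struct work *);
	bool queued;
};

/* Internal helper: just waits for (here: runs) the work, no annotations. */
static bool __flush_work(struct work *w)
{
	if (!w->queued)
		return false;
	w->fn(w);		/* stand-in for wait_for_completion(&barr.done) */
	w->queued = false;
	return true;
}

/* Public entry point: adds the "annotations" before doing the real flush. */
static bool flush_work(struct work *w)
{
	printf("lock_map_acquire/release on %p\n", (void *)w);
	return __flush_work(w);
}

static void say_hello(struct work *w)
{
	printf("work %p ran\n", (void *)w);
}

int main(void)
{
	struct work on_stack = { .fn = say_hello, .queued = true };

	/* External callers use the annotated wrapper ... */
	flush_work(&on_stack);

	/* ... while code like work_on_cpu() would reuse the item and flush
	 * it through the internal helper to avoid the false positive. */
	on_stack.queued = true;
	__flush_work(&on_stack);
	return 0;
}

Keeping the annotations only in the public entry point preserves the deadlock checking for ordinary callers while letting the known-safe internal user opt out.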