author     Rusty Russell <rusty@rustcorp.com.au>  2008-12-13 21:55:51 +1030
committer  Rusty Russell <rusty@rustcorp.com.au>  2008-12-13 21:55:51 +1030
commit     968ea6d80e395cf11a51143cfa1b9a14ada676df (patch)
tree       dc2acec8c9bdced33afe1e273ee5e0b0b93d2703 /include
parent     7be7585393d311866653564fbcd10a3232773c0b (diff)
parent     8299608f140ae321e4eb5d1306184265d2b9511e (diff)
download   linux-968ea6d80e395cf11a51143cfa1b9a14ada676df.tar.gz
           linux-968ea6d80e395cf11a51143cfa1b9a14ada676df.tar.bz2
           linux-968ea6d80e395cf11a51143cfa1b9a14ada676df.zip
Merge ../linux-2.6-x86
Conflicts:
	arch/x86/kernel/io_apic.c
	kernel/sched.c
	kernel/sched_stats.h
Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/vmlinux.lds.h |  21
-rw-r--r--  include/asm-m32r/system.h         |   2
-rw-r--r--  include/linux/Kbuild              |   1
-rw-r--r--  include/linux/blktrace_api.h      | 172
-rw-r--r--  include/linux/compiler.h          |  84
-rw-r--r--  include/linux/debug_locks.h       |   2
-rw-r--r--  include/linux/ftrace.h            | 272
-rw-r--r--  include/linux/ftrace_irq.h        |  13
-rw-r--r--  include/linux/futex.h             |   2
-rw-r--r--  include/linux/hardirq.h           |  15
-rw-r--r--  include/linux/interrupt.h         |   2
-rw-r--r--  include/linux/irq.h               |  45
-rw-r--r--  include/linux/irqnr.h             |  26
-rw-r--r--  include/linux/kernel.h            |  11
-rw-r--r--  include/linux/kernel_stat.h       |  14
-rw-r--r--  include/linux/lockdep.h           |  31
-rw-r--r--  include/linux/marker.h            |  75
-rw-r--r--  include/linux/msi.h               |   3
-rw-r--r--  include/linux/mutex.h             |   2
-rw-r--r--  include/linux/pid.h               |   4
-rw-r--r--  include/linux/random.h            |  51
-rw-r--r--  include/linux/rcuclassic.h        |   2
-rw-r--r--  include/linux/rcupdate.h          |   2
-rw-r--r--  include/linux/ring_buffer.h       |  16
-rw-r--r--  include/linux/sched.h             |  68
-rw-r--r--  include/linux/seq_file.h          |   1
-rw-r--r--  include/linux/stacktrace.h        |   8
-rw-r--r--  include/linux/topology.h          |   2
-rw-r--r--  include/linux/tracepoint.h        |  57
-rw-r--r--  include/linux/tty.h               |   2
-rw-r--r--  include/linux/uaccess.h           |   2
-rw-r--r--  include/trace/block.h             |  76
-rw-r--r--  include/trace/boot.h              |  60
-rw-r--r--  include/trace/sched.h             |  24
34 files changed, 889 insertions, 279 deletions
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 80744606bad1..eba835a2c2cd 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -45,6 +45,22 @@
#define MCOUNT_REC()
#endif
+#ifdef CONFIG_TRACE_BRANCH_PROFILING
+#define LIKELY_PROFILE() VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
+ *(_ftrace_annotated_branch) \
+ VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
+#else
+#define LIKELY_PROFILE()
+#endif
+
+#ifdef CONFIG_PROFILE_ALL_BRANCHES
+#define BRANCH_PROFILE() VMLINUX_SYMBOL(__start_branch_profile) = .; \
+ *(_ftrace_branch) \
+ VMLINUX_SYMBOL(__stop_branch_profile) = .;
+#else
+#define BRANCH_PROFILE()
+#endif
+
/* .data section */
#define DATA_DATA \
*(.data) \
@@ -60,9 +76,12 @@
VMLINUX_SYMBOL(__start___markers) = .; \
*(__markers) \
VMLINUX_SYMBOL(__stop___markers) = .; \
+ . = ALIGN(32); \
VMLINUX_SYMBOL(__start___tracepoints) = .; \
*(__tracepoints) \
- VMLINUX_SYMBOL(__stop___tracepoints) = .;
+ VMLINUX_SYMBOL(__stop___tracepoints) = .; \
+ LIKELY_PROFILE() \
+ BRANCH_PROFILE()
#define RO_DATA(align) \
. = ALIGN((align)); \
diff --git a/include/asm-m32r/system.h b/include/asm-m32r/system.h
index 70a57c8c002b..c980f5ba8de7 100644
--- a/include/asm-m32r/system.h
+++ b/include/asm-m32r/system.h
@@ -23,7 +23,7 @@
*/
#if defined(CONFIG_FRAME_POINTER) || \
- !defined(CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER)
+ !defined(CONFIG_SCHED_OMIT_FRAME_POINTER)
#define M32R_PUSH_FP " push fp\n"
#define M32R_POP_FP " pop fp\n"
#else
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index e531783e5d78..95ac82340c3b 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -313,6 +313,7 @@ unifdef-y += ptrace.h
unifdef-y += qnx4_fs.h
unifdef-y += quota.h
unifdef-y += random.h
+unifdef-y += irqnr.h
unifdef-y += reboot.h
unifdef-y += reiserfs_fs.h
unifdef-y += reiserfs_xattr.h
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index bdf505d33e77..1dba3493d520 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -160,7 +160,6 @@ struct blk_trace {
extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
extern void blk_trace_shutdown(struct request_queue *);
-extern void __blk_add_trace(struct blk_trace *, sector_t, int, int, u32, int, int, void *);
extern int do_blk_trace_setup(struct request_queue *q,
char *name, dev_t dev, struct blk_user_trace_setup *buts);
extern void __trace_note_message(struct blk_trace *, const char *fmt, ...);
@@ -186,168 +185,8 @@ extern void __trace_note_message(struct blk_trace *, const char *fmt, ...);
} while (0)
#define BLK_TN_MAX_MSG 128
-/**
- * blk_add_trace_rq - Add a trace for a request oriented action
- * @q: queue the io is for
- * @rq: the source request
- * @what: the action
- *
- * Description:
- * Records an action against a request. Will log the bio offset + size.
- *
- **/
-static inline void blk_add_trace_rq(struct request_queue *q, struct request *rq,
- u32 what)
-{
- struct blk_trace *bt = q->blk_trace;
- int rw = rq->cmd_flags & 0x03;
-
- if (likely(!bt))
- return;
-
- if (blk_discard_rq(rq))
- rw |= (1 << BIO_RW_DISCARD);
-
- if (blk_pc_request(rq)) {
- what |= BLK_TC_ACT(BLK_TC_PC);
- __blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors, sizeof(rq->cmd), rq->cmd);
- } else {
- what |= BLK_TC_ACT(BLK_TC_FS);
- __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9, rw, what, rq->errors, 0, NULL);
- }
-}
-
-/**
- * blk_add_trace_bio - Add a trace for a bio oriented action
- * @q: queue the io is for
- * @bio: the source bio
- * @what: the action
- *
- * Description:
- * Records an action against a bio. Will log the bio offset + size.
- *
- **/
-static inline void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
- u32 what)
-{
- struct blk_trace *bt = q->blk_trace;
-
- if (likely(!bt))
- return;
-
- __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, !bio_flagged(bio, BIO_UPTODATE), 0, NULL);
-}
-
-/**
- * blk_add_trace_generic - Add a trace for a generic action
- * @q: queue the io is for
- * @bio: the source bio
- * @rw: the data direction
- * @what: the action
- *
- * Description:
- * Records a simple trace
- *
- **/
-static inline void blk_add_trace_generic(struct request_queue *q,
- struct bio *bio, int rw, u32 what)
-{
- struct blk_trace *bt = q->blk_trace;
-
- if (likely(!bt))
- return;
-
- if (bio)
- blk_add_trace_bio(q, bio, what);
- else
- __blk_add_trace(bt, 0, 0, rw, what, 0, 0, NULL);
-}
-
-/**
- * blk_add_trace_pdu_int - Add a trace for a bio with an integer payload
- * @q: queue the io is for
- * @what: the action
- * @bio: the source bio
- * @pdu: the integer payload
- *
- * Description:
- * Adds a trace with some integer payload. This might be an unplug
- * option given as the action, with the depth at unplug time given
- * as the payload
- *
- **/
-static inline void blk_add_trace_pdu_int(struct request_queue *q, u32 what,
- struct bio *bio, unsigned int pdu)
-{
- struct blk_trace *bt = q->blk_trace;
- __be64 rpdu = cpu_to_be64(pdu);
-
- if (likely(!bt))
- return;
-
- if (bio)
- __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, !bio_flagged(bio, BIO_UPTODATE), sizeof(rpdu), &rpdu);
- else
- __blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu);
-}
-
-/**
- * blk_add_trace_remap - Add a trace for a remap operation
- * @q: queue the io is for
- * @bio: the source bio
- * @dev: target device
- * @from: source sector
- * @to: target sector
- *
- * Description:
- * Device mapper or raid target sometimes need to split a bio because
- * it spans a stripe (or similar). Add a trace for that action.
- *
- **/
-static inline void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
- dev_t dev, sector_t from, sector_t to)
-{
- struct blk_trace *bt = q->blk_trace;
- struct blk_io_trace_remap r;
-
- if (likely(!bt))
- return;
-
- r.device = cpu_to_be32(dev);
- r.device_from = cpu_to_be32(bio->bi_bdev->bd_dev);
- r.sector = cpu_to_be64(to);
-
- __blk_add_trace(bt, from, bio->bi_size, bio->bi_rw, BLK_TA_REMAP, !bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
-}
-
-/**
- * blk_add_driver_data - Add binary message with driver-specific data
- * @q: queue the io is for
- * @rq: io request
- * @data: driver-specific data
- * @len: length of driver-specific data
- *
- * Description:
- * Some drivers might want to write driver-specific data per request.
- *
- **/
-static inline void blk_add_driver_data(struct request_queue *q,
- struct request *rq,
- void *data, size_t len)
-{
- struct blk_trace *bt = q->blk_trace;
-
- if (likely(!bt))
- return;
-
- if (blk_pc_request(rq))
- __blk_add_trace(bt, 0, rq->data_len, 0, BLK_TA_DRV_DATA,
- rq->errors, len, data);
- else
- __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
- 0, BLK_TA_DRV_DATA, rq->errors, len, data);
-}
-
+extern void blk_add_driver_data(struct request_queue *q, struct request *rq,
+ void *data, size_t len);
extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
char __user *arg);
extern int blk_trace_startstop(struct request_queue *q, int start);
@@ -356,13 +195,8 @@ extern int blk_trace_remove(struct request_queue *q);
#else /* !CONFIG_BLK_DEV_IO_TRACE */
#define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY)
#define blk_trace_shutdown(q) do { } while (0)
-#define blk_add_trace_rq(q, rq, what) do { } while (0)
-#define blk_add_trace_bio(q, rq, what) do { } while (0)
-#define blk_add_trace_generic(q, rq, rw, what) do { } while (0)
-#define blk_add_trace_pdu_int(q, what, bio, pdu) do { } while (0)
-#define blk_add_trace_remap(q, bio, dev, f, t) do {} while (0)
-#define blk_add_driver_data(q, rq, data, len) do {} while (0)
#define do_blk_trace_setup(q, name, dev, buts) (-ENOTTY)
+#define blk_add_driver_data(q, rq, data, len) do {} while (0)
#define blk_trace_setup(q, name, dev, arg) (-ENOTTY)
#define blk_trace_startstop(q, start) (-ENOTTY)
#define blk_trace_remove(q) (-ENOTTY)
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 98115d9d04da..ea7c6be354b7 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -59,8 +59,88 @@ extern void __chk_io_ptr(const volatile void __iomem *);
* specific implementations come from the above header files
*/
-#define likely(x) __builtin_expect(!!(x), 1)
-#define unlikely(x) __builtin_expect(!!(x), 0)
+struct ftrace_branch_data {
+ const char *func;
+ const char *file;
+ unsigned line;
+ union {
+ struct {
+ unsigned long correct;
+ unsigned long incorrect;
+ };
+ struct {
+ unsigned long miss;
+ unsigned long hit;
+ };
+ };
+};
+
+/*
+ * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
+ * to disable branch tracing on a per file basis.
+ */
+#if defined(CONFIG_TRACE_BRANCH_PROFILING) && !defined(DISABLE_BRANCH_PROFILING)
+void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
+
+#define likely_notrace(x) __builtin_expect(!!(x), 1)
+#define unlikely_notrace(x) __builtin_expect(!!(x), 0)
+
+#define __branch_check__(x, expect) ({ \
+ int ______r; \
+ static struct ftrace_branch_data \
+ __attribute__((__aligned__(4))) \
+ __attribute__((section("_ftrace_annotated_branch"))) \
+ ______f = { \
+ .func = __func__, \
+ .file = __FILE__, \
+ .line = __LINE__, \
+ }; \
+ ______r = likely_notrace(x); \
+ ftrace_likely_update(&______f, ______r, expect); \
+ ______r; \
+ })
+
+/*
+ * Using __builtin_constant_p(x) to ignore cases where the return
+ * value is always the same. This idea is taken from a similar patch
+ * written by Daniel Walker.
+ */
+# ifndef likely
+# define likely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 1))
+# endif
+# ifndef unlikely
+# define unlikely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 0))
+# endif
+
+#ifdef CONFIG_PROFILE_ALL_BRANCHES
+/*
+ * "Define 'is'", Bill Clinton
+ * "Define 'if'", Steven Rostedt
+ */
+#define if(cond) if (__builtin_constant_p((cond)) ? !!(cond) : \
+ ({ \
+ int ______r; \
+ static struct ftrace_branch_data \
+ __attribute__((__aligned__(4))) \
+ __attribute__((section("_ftrace_branch"))) \
+ ______f = { \
+ .func = __func__, \
+ .file = __FILE__, \
+ .line = __LINE__, \
+ }; \
+ ______r = !!(cond); \
+ if (______r) \
+ ______f.hit++; \
+ else \
+ ______f.miss++; \
+ ______r; \
+ }))
+#endif /* CONFIG_PROFILE_ALL_BRANCHES */
+
+#else
+# define likely(x) __builtin_expect(!!(x), 1)
+# define unlikely(x) __builtin_expect(!!(x), 0)
+#endif
/* Optimization barrier */
#ifndef barrier
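
For readers new to the mechanism this hunk introduces: every instrumented likely()/unlikely() site gets its own static record, and each evaluation scores whether the annotation matched reality. A minimal userspace sketch of the same idea, with the linker-section bookkeeping stripped out (struct branch_site is an invented name; the kernel instead collects its records in the _ftrace_annotated_branch section delimited in vmlinux.lds.h above):

    #include <stdio.h>

    /* One record per annotated branch, like struct ftrace_branch_data. */
    struct branch_site {                /* hypothetical name */
        const char *func, *file;
        unsigned line;
        unsigned long correct, incorrect;
    };

    /* Profiled likely(): evaluate the condition, then score the guess. */
    #define likely(x) ({                                  \
        static struct branch_site __site = {              \
            .func = __func__, .file = __FILE__,           \
            .line = __LINE__,                             \
        };                                                \
        int __r = !!(x);                                  \
        if (__r)            /* we predicted taken... */   \
            __site.correct++;                             \
        else                /* ...and were wrong here */  \
            __site.incorrect++;                           \
        __builtin_expect(__r, 1);                         \
    })

    int main(void)
    {
        for (int i = 0; i < 100; i++)
            if (likely(i % 10 != 0))    /* wrong 10% of the time */
                ;
        return 0;
    }

The kernel gets its report for free by walking the records between __start_annotated_branch_profile and __stop_annotated_branch_profile; this sketch omits that step.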
diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h
index 4aaa4afb1cb9..096476f1fb35 100644
--- a/include/linux/debug_locks.h
+++ b/include/linux/debug_locks.h
@@ -17,7 +17,7 @@ extern int debug_locks_off(void);
({ \
int __ret = 0; \
\
- if (unlikely(c)) { \
+ if (!oops_in_progress && unlikely(c)) { \
if (debug_locks_off() && !debug_locks_silent) \
WARN_ON(1); \
__ret = 1; \
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 9c5bc6be2b09..985b28dc2ba9 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -8,6 +8,8 @@
#include <linux/types.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
+#include <linux/bitops.h>
+#include <linux/sched.h>
#ifdef CONFIG_FUNCTION_TRACER
@@ -24,6 +26,45 @@ struct ftrace_ops {
struct ftrace_ops *next;
};
+extern int function_trace_stop;
+
+/*
+ * Type of the current tracing.
+ */
+enum ftrace_tracing_type_t {
+ FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */
+ FTRACE_TYPE_RETURN, /* Hook the return of the function */
+};
+
+/* Current tracing type, default is FTRACE_TYPE_ENTER */
+extern enum ftrace_tracing_type_t ftrace_tracing_type;
+
+/**
+ * ftrace_stop - stop function tracer.
+ *
+ * A quick way to stop the function tracer. Note this is an on/off switch,
+ * it is not something that is recursive like preempt_disable.
+ * This does not disable the calling of mcount, it only stops the
+ * calling of functions from mcount.
+ */
+static inline void ftrace_stop(void)
+{
+ function_trace_stop = 1;
+}
+
+/**
+ * ftrace_start - start the function tracer.
+ *
+ * This function is the inverse of ftrace_stop. This does not enable
+ * the function tracing if the function tracer is disabled. This only
+ * sets the function tracer flag to continue calling the functions
+ * from mcount.
+ */
+static inline void ftrace_start(void)
+{
+ function_trace_stop = 0;
+}
+
/*
* The ftrace_ops must be a static and should also
* be read_mostly. These functions do modify read_mostly variables
@@ -42,9 +83,13 @@ extern void ftrace_stub(unsigned long a0, unsigned long a1);
# define unregister_ftrace_function(ops) do { } while (0)
# define clear_ftrace_function(ops) do { } while (0)
static inline void ftrace_kill(void) { }
+static inline void ftrace_stop(void) { }
+static inline void ftrace_start(void) { }
#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_DYNAMIC_FTRACE
+/* asm/ftrace.h must be defined for archs supporting dynamic ftrace */
+#include <asm/ftrace.h>
enum {
FTRACE_FL_FREE = (1 << 0),
@@ -60,6 +105,7 @@ struct dyn_ftrace {
struct list_head list;
unsigned long ip; /* address of mcount call-site */
unsigned long flags;
+ struct dyn_arch_ftrace arch;
};
int ftrace_force_update(void);
@@ -67,19 +113,25 @@ void ftrace_set_filter(unsigned char *buf, int len, int reset);
/* defined in arch */
extern int ftrace_ip_converted(unsigned long ip);
-extern unsigned char *ftrace_nop_replace(void);
-extern unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr);
extern int ftrace_dyn_arch_init(void *data);
extern int ftrace_update_ftrace_func(ftrace_func_t func);
extern void ftrace_caller(void);
extern void ftrace_call(void);
extern void mcount_call(void);
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+extern void ftrace_graph_caller(void);
+extern int ftrace_enable_ftrace_graph_caller(void);
+extern int ftrace_disable_ftrace_graph_caller(void);
+#else
+static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
+static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
+#endif
/**
- * ftrace_modify_code - modify code segment
- * @ip: the address of the code segment
- * @old_code: the contents of what is expected to be there
- * @new_code: the code to patch in
+ * ftrace_make_nop - convert code into a nop
+ * @mod: module structure if called by module load initialization
+ * @rec: the mcount call site record
+ * @addr: the address that the call site should be calling
*
* This is a very sensitive operation and great care needs
* to be taken by the arch. The operation should carefully
@@ -87,6 +139,8 @@ extern void mcount_call(void);
* what we expect it to be, and then on success of the compare,
* it should write to the location.
*
+ * The code segment at @rec->ip should be a caller to @addr
+ *
* Return must be:
* 0 on success
* -EFAULT on error reading the location
@@ -94,8 +148,34 @@ extern void mcount_call(void);
* -EPERM on error writing to the location
* Any other value will be considered a failure.
*/
-extern int ftrace_modify_code(unsigned long ip, unsigned char *old_code,
- unsigned char *new_code);
+extern int ftrace_make_nop(struct module *mod,
+ struct dyn_ftrace *rec, unsigned long addr);
+
+/**
+ * ftrace_make_call - convert a nop call site into a call to addr
+ * @rec: the mcount call site record
+ * @addr: the address that the call site should call
+ *
+ * This is a very sensitive operation and great care needs
+ * to be taken by the arch. The operation should carefully
+ * read the location, check to see if what is read is indeed
+ * what we expect it to be, and then on success of the compare,
+ * it should write to the location.
+ *
+ * The code segment at @rec->ip should be a nop
+ *
+ * Return must be:
+ * 0 on success
+ * -EFAULT on error reading the location
+ * -EINVAL on a failed compare of the contents
+ * -EPERM on error writing to the location
+ * Any other value will be considered a failure.
+ */
+extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
+
+
+/* May be defined in arch */
+extern int ftrace_arch_read_dyn_info(char *buf, int size);
extern int skip_trace(unsigned long ip);
@@ -103,7 +183,6 @@ extern void ftrace_release(void *start, unsigned long size);
extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void);
-
#else
# define skip_trace(ip) ({ 0; })
# define ftrace_force_update() ({ 0; })
@@ -182,6 +261,12 @@ static inline void __ftrace_enabled_restore(int enabled)
#endif
#ifdef CONFIG_TRACING
+extern int ftrace_dump_on_oops;
+
+extern void tracing_start(void);
+extern void tracing_stop(void);
+extern void ftrace_off_permanent(void);
+
extern void
ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);
@@ -212,6 +297,9 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { }
static inline int
ftrace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 0)));
+static inline void tracing_start(void) { }
+static inline void tracing_stop(void) { }
+static inline void ftrace_off_permanent(void) { }
static inline int
ftrace_printk(const char *fmt, ...)
{
@@ -222,33 +310,167 @@ static inline void ftrace_dump(void) { }
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
extern void ftrace_init(void);
-extern void ftrace_init_module(unsigned long *start, unsigned long *end);
+extern void ftrace_init_module(struct module *mod,
+ unsigned long *start, unsigned long *end);
#else
static inline void ftrace_init(void) { }
static inline void
-ftrace_init_module(unsigned long *start, unsigned long *end) { }
+ftrace_init_module(struct module *mod,
+ unsigned long *start, unsigned long *end) { }
#endif
+enum {
+ POWER_NONE = 0,
+ POWER_CSTATE = 1,
+ POWER_PSTATE = 2,
+};
+
+struct power_trace {
+#ifdef CONFIG_POWER_TRACER
+ ktime_t stamp;
+ ktime_t end;
+ int type;
+ int state;
+#endif
+};
+
+#ifdef CONFIG_POWER_TRACER
+extern void trace_power_start(struct power_trace *it, unsigned int type,
+ unsigned int state);
+extern void trace_power_mark(struct power_trace *it, unsigned int type,
+ unsigned int state);
+extern void trace_power_end(struct power_trace *it);
+#else
+static inline void trace_power_start(struct power_trace *it, unsigned int type,
+ unsigned int state) { }
+static inline void trace_power_mark(struct power_trace *it, unsigned int type,
+ unsigned int state) { }
+static inline void trace_power_end(struct power_trace *it) { }
+#endif
+
+
+/*
+ * Structure that defines an entry function trace.
+ */
+struct ftrace_graph_ent {
+ unsigned long func; /* Current function */
+ int depth;
+};
-struct boot_trace {
- pid_t caller;
- char func[KSYM_SYMBOL_LEN];
- int result;
- unsigned long long duration; /* usecs */
- ktime_t calltime;
- ktime_t rettime;
+/*
+ * Structure that defines a return function trace.
+ */
+struct ftrace_graph_ret {
+ unsigned long func; /* Current function */
+ unsigned long long calltime;
+ unsigned long long rettime;
+ /* Number of functions that overran the depth limit for current task */
+ unsigned long overrun;
+ int depth;
};
-#ifdef CONFIG_BOOT_TRACER
-extern void trace_boot(struct boot_trace *it, initcall_t fn);
-extern void start_boot_trace(void);
-extern void stop_boot_trace(void);
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+/*
+ * Sometimes we don't want to trace a function with the function
+ * graph tracer, but we still want it traced by the usual function
+ * tracer if the function graph tracer is not configured.
+ */
+#define __notrace_funcgraph notrace
+
+#define FTRACE_RETFUNC_DEPTH 50
+#define FTRACE_RETSTACK_ALLOC_SIZE 32
+/* Type of the callback handlers for tracing function graph */
+typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
+typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
+
+extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
+ trace_func_graph_ent_t entryfunc);
+
+extern void ftrace_graph_stop(void);
+
+/* The current handlers in use */
+extern trace_func_graph_ret_t ftrace_graph_return;
+extern trace_func_graph_ent_t ftrace_graph_entry;
+
+extern void unregister_ftrace_graph(void);
+
+extern void ftrace_graph_init_task(struct task_struct *t);
+extern void ftrace_graph_exit_task(struct task_struct *t);
+
+static inline int task_curr_ret_stack(struct task_struct *t)
+{
+ return t->curr_ret_stack;
+}
+
+static inline void pause_graph_tracing(void)
+{
+ atomic_inc(&current->tracing_graph_pause);
+}
+
+static inline void unpause_graph_tracing(void)
+{
+ atomic_dec(&current->tracing_graph_pause);
+}
#else
-static inline void trace_boot(struct boot_trace *it, initcall_t fn) { }
-static inline void start_boot_trace(void) { }
-static inline void stop_boot_trace(void) { }
+
+#define __notrace_funcgraph
+
+static inline void ftrace_graph_init_task(struct task_struct *t) { }
+static inline void ftrace_graph_exit_task(struct task_struct *t) { }
+
+static inline int task_curr_ret_stack(struct task_struct *tsk)
+{
+ return -1;
+}
+
+static inline void pause_graph_tracing(void) { }
+static inline void unpause_graph_tracing(void) { }
#endif
+#ifdef CONFIG_TRACING
+#include <linux/sched.h>
+
+/* flags for current->trace */
+enum {
+ TSK_TRACE_FL_TRACE_BIT = 0,
+ TSK_TRACE_FL_GRAPH_BIT = 1,
+};
+enum {
+ TSK_TRACE_FL_TRACE = 1 << TSK_TRACE_FL_TRACE_BIT,
+ TSK_TRACE_FL_GRAPH = 1 << TSK_TRACE_FL_GRAPH_BIT,
+};
+
+static inline void set_tsk_trace_trace(struct task_struct *tsk)
+{
+ set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
+}
+
+static inline void clear_tsk_trace_trace(struct task_struct *tsk)
+{
+ clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
+}
+
+static inline int test_tsk_trace_trace(struct task_struct *tsk)
+{
+ return tsk->trace & TSK_TRACE_FL_TRACE;
+}
+
+static inline void set_tsk_trace_graph(struct task_struct *tsk)
+{
+ set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
+}
+
+static inline void clear_tsk_trace_graph(struct task_struct *tsk)
+{
+ clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
+}
+
+static inline int test_tsk_trace_graph(struct task_struct *tsk)
+{
+ return tsk->trace & TSK_TRACE_FL_GRAPH;
+}
+#endif /* CONFIG_TRACING */
#endif /* _LINUX_FTRACE_H */
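
The CONFIG_FUNCTION_GRAPH_TRACER pieces above (curr_ret_stack, FTRACE_RETFUNC_DEPTH, the overrun counter) amount to a bounded shadow stack of return addresses kept per task. A rough userspace-only sketch of that bookkeeping, under invented names (ret_stack_push/ret_stack_pop) and with a fake clock:

    #include <stdio.h>

    #define RETFUNC_DEPTH 50        /* mirrors FTRACE_RETFUNC_DEPTH */

    struct ret_stack_entry {
        unsigned long ret;              /* shadowed return address */
        unsigned long long calltime;
    };

    static struct ret_stack_entry ret_stack[RETFUNC_DEPTH];
    static int curr_ret_stack = -1;     /* index of current stored address */
    static unsigned long trace_overrun;

    /* On entry: remember where to return to; refuse past the limit. */
    static int ret_stack_push(unsigned long ret, unsigned long long now)
    {
        if (curr_ret_stack >= RETFUNC_DEPTH - 1) {
            trace_overrun++;            /* too deep: drop this frame */
            return -1;
        }
        curr_ret_stack++;
        ret_stack[curr_ret_stack].ret = ret;
        ret_stack[curr_ret_stack].calltime = now;
        return 0;
    }

    /* On exit: recover the original return address and the duration. */
    static unsigned long ret_stack_pop(unsigned long long now,
                                       unsigned long long *duration)
    {
        unsigned long ret = ret_stack[curr_ret_stack].ret;

        *duration = now - ret_stack[curr_ret_stack].calltime;
        curr_ret_stack--;
        return ret;
    }

    int main(void)
    {
        unsigned long long d;
        unsigned long r;

        ret_stack_push(0x1234, 100);
        r = ret_stack_pop(200, &d);
        printf("ret=%#lx took %llu\n", r, d);
        printf("overruns: %lu\n", trace_overrun);
        return 0;
    }

In the kernel the push happens in the mcount entry stub and the pop in the patched return path, which is why the real ftrace_graph_ret records depth and overrun per event.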
diff --git a/include/linux/ftrace_irq.h b/include/linux/ftrace_irq.h
new file mode 100644
index 000000000000..366a054d0b05
--- /dev/null
+++ b/include/linux/ftrace_irq.h
@@ -0,0 +1,13 @@
+#ifndef _LINUX_FTRACE_IRQ_H
+#define _LINUX_FTRACE_IRQ_H
+
+
+#if defined(CONFIG_DYNAMIC_FTRACE) || defined(CONFIG_FUNCTION_GRAPH_TRACER)
+extern void ftrace_nmi_enter(void);
+extern void ftrace_nmi_exit(void);
+#else
+static inline void ftrace_nmi_enter(void) { }
+static inline void ftrace_nmi_exit(void) { }
+#endif
+
+#endif /* _LINUX_FTRACE_IRQ_H */
diff --git a/include/linux/futex.h b/include/linux/futex.h
index 586ab56a3ec3..8f627b9ae2b1 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -164,6 +164,8 @@ union futex_key {
} both;
};
+#define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = NULL } }
+
#ifdef CONFIG_FUTEX
extern void exit_robust_list(struct task_struct *curr);
extern void exit_pi_state_list(struct task_struct *curr);
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 181006cc94a0..89a56d79e4c6 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -4,6 +4,7 @@
#include <linux/preempt.h>
#include <linux/smp_lock.h>
#include <linux/lockdep.h>
+#include <linux/ftrace_irq.h>
#include <asm/hardirq.h>
#include <asm/system.h>
@@ -161,7 +162,17 @@ extern void irq_enter(void);
*/
extern void irq_exit(void);
-#define nmi_enter() do { lockdep_off(); __irq_enter(); } while (0)
-#define nmi_exit() do { __irq_exit(); lockdep_on(); } while (0)
+#define nmi_enter() \
+ do { \
+ ftrace_nmi_enter(); \
+ lockdep_off(); \
+ __irq_enter(); \
+ } while (0)
+#define nmi_exit() \
+ do { \
+ __irq_exit(); \
+ lockdep_on(); \
+ ftrace_nmi_exit(); \
+ } while (0)
#endif /* LINUX_HARDIRQ_H */
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 48e63934fabe..7e85a6e89e41 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -14,6 +14,8 @@
#include <linux/irqflags.h>
#include <linux/smp.h>
#include <linux/percpu.h>
+#include <linux/irqnr.h>
+
#include <asm/atomic.h>
#include <asm/ptrace.h>
#include <asm/system.h>
diff --git a/include/linux/irq.h b/include/linux/irq.h
index ab70fd604d3a..59525b74979f 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -130,6 +130,8 @@ struct irq_chip {
const char *typename;
};
+struct timer_rand_state;
+struct irq_2_iommu;
/**
* struct irq_desc - interrupt descriptor
* @irq: interrupt number for this descriptor
@@ -155,6 +157,13 @@ struct irq_chip {
*/
struct irq_desc {
unsigned int irq;
+#ifdef CONFIG_SPARSE_IRQ
+ struct timer_rand_state *timer_rand_state;
+ unsigned int *kstat_irqs;
+# ifdef CONFIG_INTR_REMAP
+ struct irq_2_iommu *irq_2_iommu;
+# endif
+#endif
irq_flow_handler_t handle_irq;
struct irq_chip *chip;
struct msi_desc *msi_desc;
@@ -182,14 +191,43 @@ struct irq_desc {
const char *name;
} ____cacheline_internodealigned_in_smp;
+extern void early_irq_init(void);
+extern void arch_early_irq_init(void);
+extern void arch_init_chip_data(struct irq_desc *desc, int cpu);
+extern void arch_init_copy_chip_data(struct irq_desc *old_desc,
+ struct irq_desc *desc, int cpu);
+extern void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc);
+#ifndef CONFIG_SPARSE_IRQ
extern struct irq_desc irq_desc[NR_IRQS];
static inline struct irq_desc *irq_to_desc(unsigned int irq)
{
- return (irq < nr_irqs) ? irq_desc + irq : NULL;
+ return (irq < NR_IRQS) ? irq_desc + irq : NULL;
+}
+static inline struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
+{
+ return irq_to_desc(irq);
}
+#else
+
+extern struct irq_desc *irq_to_desc(unsigned int irq);
+extern struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu);
+extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int cpu);
+
+# define for_each_irq_desc(irq, desc) \
+ for (irq = 0, desc = irq_to_desc(irq); irq < nr_irqs; irq++, desc = irq_to_desc(irq))
+# define for_each_irq_desc_reverse(irq, desc) \
+ for (irq = nr_irqs - 1, desc = irq_to_desc(irq); irq >= 0; irq--, desc = irq_to_desc(irq))
+
+#define kstat_irqs_this_cpu(DESC) \
+ ((DESC)->kstat_irqs[smp_processor_id()])
+#define kstat_incr_irqs_this_cpu(irqno, DESC) \
+ ((DESC)->kstat_irqs[smp_processor_id()]++)
+
+#endif
+
/*
* Migration helpers for obsolete names, they will go away:
*/
@@ -381,6 +419,11 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
#define get_irq_data(irq) (irq_to_desc(irq)->handler_data)
#define get_irq_msi(irq) (irq_to_desc(irq)->msi_desc)
+#define get_irq_desc_chip(desc) ((desc)->chip)
+#define get_irq_desc_chip_data(desc) ((desc)->chip_data)
+#define get_irq_desc_data(desc) ((desc)->handler_data)
+#define get_irq_desc_msi(desc) ((desc)->msi_desc)
+
#endif /* CONFIG_GENERIC_HARDIRQS */
#endif /* !CONFIG_S390 */
diff --git a/include/linux/irqnr.h b/include/linux/irqnr.h
index 452c280c8115..95d2b74641f5 100644
--- a/include/linux/irqnr.h
+++ b/include/linux/irqnr.h
@@ -1,24 +1,38 @@
#ifndef _LINUX_IRQNR_H
#define _LINUX_IRQNR_H
+/*
+ * Generic irq_desc iterators:
+ */
+#ifdef __KERNEL__
+
#ifndef CONFIG_GENERIC_HARDIRQS
#include <asm/irq.h>
# define nr_irqs NR_IRQS
# define for_each_irq_desc(irq, desc) \
for (irq = 0; irq < nr_irqs; irq++)
+
+# define for_each_irq_desc_reverse(irq, desc) \
+ for (irq = nr_irqs - 1; irq >= 0; irq--)
#else
+
extern int nr_irqs;
+#ifndef CONFIG_SPARSE_IRQ
+
+struct irq_desc;
# define for_each_irq_desc(irq, desc) \
for (irq = 0, desc = irq_desc; irq < nr_irqs; irq++, desc++)
-
-# define for_each_irq_desc_reverse(irq, desc) \
- for (irq = nr_irqs - 1, desc = irq_desc + (nr_irqs - 1); \
- irq >= 0; irq--, desc--)
+# define for_each_irq_desc_reverse(irq, desc) \
+ for (irq = nr_irqs - 1, desc = irq_desc + (nr_irqs - 1); \
+ irq >= 0; irq--, desc--)
+#endif
#endif
-#define for_each_irq_nr(irq) \
- for (irq = 0; irq < nr_irqs; irq++)
+#define for_each_irq_nr(irq) \
+ for (irq = 0; irq < nr_irqs; irq++)
+
+#endif /* __KERNEL__ */
#endif
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index dc7e0d0a6474..269df5a17b30 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -141,6 +141,15 @@ extern int _cond_resched(void);
(__x < 0) ? -__x : __x; \
})
+#ifdef CONFIG_PROVE_LOCKING
+void might_fault(void);
+#else
+static inline void might_fault(void)
+{
+ might_sleep();
+}
+#endif
+
extern struct atomic_notifier_head panic_notifier_list;
extern long (*panic_blink)(long time);
NORET_TYPE void panic(const char * fmt, ...)
@@ -188,6 +197,8 @@ extern unsigned long long memparse(const char *ptr, char **retptr);
extern int core_kernel_text(unsigned long addr);
extern int __kernel_text_address(unsigned long addr);
extern int kernel_text_address(unsigned long addr);
+extern int func_ptr_is_kernel_text(void *ptr);
+
struct pid;
extern struct pid *session_of_pgrp(struct pid *pgrp);
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index 4a145caeee07..4ee4b3d2316f 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -28,7 +28,9 @@ struct cpu_usage_stat {
struct kernel_stat {
struct cpu_usage_stat cpustat;
- unsigned int irqs[NR_IRQS];
+#ifndef CONFIG_SPARSE_IRQ
+ unsigned int irqs[NR_IRQS];
+#endif
};
DECLARE_PER_CPU(struct kernel_stat, kstat);
@@ -39,6 +41,10 @@ DECLARE_PER_CPU(struct kernel_stat, kstat);
extern unsigned long long nr_context_switches(void);
+#ifndef CONFIG_SPARSE_IRQ
+#define kstat_irqs_this_cpu(irq) \
+ (kstat_this_cpu.irqs[irq])
+
struct irq_desc;
static inline void kstat_incr_irqs_this_cpu(unsigned int irq,
@@ -46,11 +52,17 @@ static inline void kstat_incr_irqs_this_cpu(unsigned int irq,
{
kstat_this_cpu.irqs[irq]++;
}
+#endif
+
+#ifndef CONFIG_SPARSE_IRQ
static inline unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
return kstat_cpu(cpu).irqs[irq];
}
+#else
+extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
+#endif
/*
* Number of interrupts per specific IRQ source, since bootup
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 29aec6e10020..8956daf64abd 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -73,6 +73,8 @@ struct lock_class_key {
struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES];
};
+#define LOCKSTAT_POINTS 4
+
/*
* The lock-class itself:
*/
@@ -119,7 +121,8 @@ struct lock_class {
int name_version;
#ifdef CONFIG_LOCK_STAT
- unsigned long contention_point[4];
+ unsigned long contention_point[LOCKSTAT_POINTS];
+ unsigned long contending_point[LOCKSTAT_POINTS];
#endif
};
@@ -144,6 +147,7 @@ enum bounce_type {
struct lock_class_stats {
unsigned long contention_point[4];
+ unsigned long contending_point[4];
struct lock_time read_waittime;
struct lock_time write_waittime;
struct lock_time read_holdtime;
@@ -165,6 +169,7 @@ struct lockdep_map {
const char *name;
#ifdef CONFIG_LOCK_STAT
int cpu;
+ unsigned long ip;
#endif
};
@@ -356,7 +361,7 @@ struct lock_class_key { };
#ifdef CONFIG_LOCK_STAT
extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
-extern void lock_acquired(struct lockdep_map *lock);
+extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);
#define LOCK_CONTENDED(_lock, try, lock) \
do { \
@@ -364,13 +369,13 @@ do { \
lock_contended(&(_lock)->dep_map, _RET_IP_); \
lock(_lock); \
} \
- lock_acquired(&(_lock)->dep_map); \
+ lock_acquired(&(_lock)->dep_map, _RET_IP_); \
} while (0)
#else /* CONFIG_LOCK_STAT */
#define lock_contended(lockdep_map, ip) do {} while (0)
-#define lock_acquired(lockdep_map) do {} while (0)
+#define lock_acquired(lockdep_map, ip) do {} while (0)
#define LOCK_CONTENDED(_lock, try, lock) \
lock(_lock)
@@ -481,4 +486,22 @@ static inline void print_irqtrace_events(struct task_struct *curr)
# define lock_map_release(l) do { } while (0)
#endif
+#ifdef CONFIG_PROVE_LOCKING
+# define might_lock(lock) \
+do { \
+ typecheck(struct lockdep_map *, &(lock)->dep_map); \
+ lock_acquire(&(lock)->dep_map, 0, 0, 0, 2, NULL, _THIS_IP_); \
+ lock_release(&(lock)->dep_map, 0, _THIS_IP_); \
+} while (0)
+# define might_lock_read(lock) \
+do { \
+ typecheck(struct lockdep_map *, &(lock)->dep_map); \
+ lock_acquire(&(lock)->dep_map, 0, 0, 1, 2, NULL, _THIS_IP_); \
+ lock_release(&(lock)->dep_map, 0, _THIS_IP_); \
+} while (0)
+#else
+# define might_lock(lock) do { } while (0)
+# define might_lock_read(lock) do { } while (0)
+#endif
+
#endif /* __LINUX_LOCKDEP_H */
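
might_lock()/might_lock_read() let a code path tell lockdep "I may take this lock here" without actually taking it, by issuing a matched dummy acquire/release. A crude userspace analogue of the same annotation idea, assuming a debug build, is to really take and drop the mutex so ordering bugs surface in testing (MIGHT_LOCK is an invented macro, not a pthreads API):

    #include <pthread.h>
    #include <stdio.h>

    /* Debug-only annotation: prove this path could take the lock. */
    #ifndef NDEBUG
    # define MIGHT_LOCK(m)              \
        do {                            \
            pthread_mutex_lock(m);      \
            pthread_mutex_unlock(m);    \
        } while (0)
    #else
    # define MIGHT_LOCK(m) do { } while (0)
    #endif

    static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

    int main(void)
    {
        MIGHT_LOCK(&m);     /* would deadlock here if m were already held */
        puts("this path may take m");
        return 0;
    }

Unlike this sketch, the kernel version never blocks: lock_acquire()/lock_release() only feed lockdep's dependency graph.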
diff --git a/include/linux/marker.h b/include/linux/marker.h
index 889196c7fbb1..b85e74ca782f 100644
--- a/include/linux/marker.h
+++ b/include/linux/marker.h
@@ -12,6 +12,7 @@
* See the file COPYING for more details.
*/
+#include <stdarg.h>
#include <linux/types.h>
struct module;
@@ -48,10 +49,28 @@ struct marker {
void (*call)(const struct marker *mdata, void *call_private, ...);
struct marker_probe_closure single;
struct marker_probe_closure *multi;
+ const char *tp_name; /* Optional tracepoint name */
+ void *tp_cb; /* Optional tracepoint callback */
} __attribute__((aligned(8)));
#ifdef CONFIG_MARKERS
+#define _DEFINE_MARKER(name, tp_name_str, tp_cb, format) \
+ static const char __mstrtab_##name[] \
+ __attribute__((section("__markers_strings"))) \
+ = #name "\0" format; \
+ static struct marker __mark_##name \
+ __attribute__((section("__markers"), aligned(8))) = \
+ { __mstrtab_##name, &__mstrtab_##name[sizeof(#name)], \
+ 0, 0, marker_probe_cb, { __mark_empty_function, NULL},\
+ NULL, tp_name_str, tp_cb }
+
+#define DEFINE_MARKER(name, format) \
+ _DEFINE_MARKER(name, NULL, NULL, format)
+
+#define DEFINE_MARKER_TP(name, tp_name, tp_cb, format) \
+ _DEFINE_MARKER(name, #tp_name, tp_cb, format)
+
/*
* Note : the empty asm volatile with read constraint is used here instead of a
* "used" attribute to fix a gcc 4.1.x bug.
@@ -65,14 +84,7 @@ struct marker {
*/
#define __trace_mark(generic, name, call_private, format, args...) \
do { \
- static const char __mstrtab_##name[] \
- __attribute__((section("__markers_strings"))) \
- = #name "\0" format; \
- static struct marker __mark_##name \
- __attribute__((section("__markers"), aligned(8))) = \
- { __mstrtab_##name, &__mstrtab_##name[sizeof(#name)], \
- 0, 0, marker_probe_cb, \
- { __mark_empty_function, NULL}, NULL }; \
+ DEFINE_MARKER(name, format); \
__mark_check_format(format, ## args); \
if (unlikely(__mark_##name.state)) { \
(*__mark_##name.call) \
@@ -80,14 +92,39 @@ struct marker {
} \
} while (0)
+#define __trace_mark_tp(name, call_private, tp_name, tp_cb, format, args...) \
+ do { \
+ void __check_tp_type(void) \
+ { \
+ register_trace_##tp_name(tp_cb); \
+ } \
+ DEFINE_MARKER_TP(name, tp_name, tp_cb, format); \
+ __mark_check_format(format, ## args); \
+ (*__mark_##name.call)(&__mark_##name, call_private, \
+ ## args); \
+ } while (0)
+
extern void marker_update_probe_range(struct marker *begin,
struct marker *end);
+
+#define GET_MARKER(name) (__mark_##name)
+
#else /* !CONFIG_MARKERS */
+#define DEFINE_MARKER(name, tp_name, tp_cb, format)
#define __trace_mark(generic, name, call_private, format, args...) \
__mark_check_format(format, ## args)
+#define __trace_mark_tp(name, call_private, tp_name, tp_cb, format, args...) \
+ do { \
+ void __check_tp_type(void) \
+ { \
+ register_trace_##tp_name(tp_cb); \
+ } \
+ __mark_check_format(format, ## args); \
+ } while (0)
static inline void marker_update_probe_range(struct marker *begin,
struct marker *end)
{ }
+#define GET_MARKER(name)
#endif /* CONFIG_MARKERS */
/**
@@ -117,6 +154,20 @@ static inline void marker_update_probe_range(struct marker *begin,
__trace_mark(1, name, NULL, format, ## args)
/**
+ * trace_mark_tp - Marker in a tracepoint callback
+ * @name: marker name, not quoted.
+ * @tp_name: tracepoint name, not quoted.
+ * @tp_cb: tracepoint callback. Should have an associated global symbol so it
+ * is not optimized away by the compiler (should not be static).
+ * @format: format string
+ * @args...: variable argument list
+ *
+ * Places a marker in a tracepoint callback.
+ */
+#define trace_mark_tp(name, tp_name, tp_cb, format, args...) \
+ __trace_mark_tp(name, NULL, tp_name, tp_cb, format, ## args)
+
+/**
* MARK_NOARGS - Format string for a marker with no argument.
*/
#define MARK_NOARGS " "
@@ -136,8 +187,6 @@ extern marker_probe_func __mark_empty_function;
extern void marker_probe_cb(const struct marker *mdata,
void *call_private, ...);
-extern void marker_probe_cb_noarg(const struct marker *mdata,
- void *call_private, ...);
/*
* Connect a probe to a marker.
@@ -162,8 +211,10 @@ extern void *marker_get_private_data(const char *name, marker_probe_func *probe,
/*
* marker_synchronize_unregister must be called between the last marker probe
- * unregistration and the end of module exit to make sure there is no caller
- * executing a probe when it is freed.
+ * unregistration and the earlier of
+ * - the end of the module exit function
+ * - the freeing of any resource used by the probes
+ * to ensure the code and data are valid for any possibly running probes.
*/
#define marker_synchronize_unregister() synchronize_sched()
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 8f2939227207..d2b8a1e8ca11 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -10,8 +10,11 @@ struct msi_msg {
};
/* Helper functions */
+struct irq_desc;
extern void mask_msi_irq(unsigned int irq);
extern void unmask_msi_irq(unsigned int irq);
+extern void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg);
+extern void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg);
extern void read_msi_msg(unsigned int irq, struct msi_msg *msg);
extern void write_msi_msg(unsigned int irq, struct msi_msg *msg);
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index bc6da10ceee0..7a0e5c4f8072 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -144,6 +144,8 @@ extern int __must_check mutex_lock_killable(struct mutex *lock);
/*
* NOTE: mutex_trylock() follows the spin_trylock() convention,
* not the down_trylock() convention!
+ *
+ * Returns 1 if the mutex has been acquired successfully, and 0 on contention.
*/
extern int mutex_trylock(struct mutex *lock);
extern void mutex_unlock(struct mutex *lock);
diff --git a/include/linux/pid.h b/include/linux/pid.h
index d7e98ff8021e..bb206c56d1f0 100644
--- a/include/linux/pid.h
+++ b/include/linux/pid.h
@@ -147,9 +147,9 @@ pid_t pid_vnr(struct pid *pid);
#define do_each_pid_task(pid, type, task) \
do { \
struct hlist_node *pos___; \
- if (pid != NULL) \
+ if ((pid) != NULL) \
hlist_for_each_entry_rcu((task), pos___, \
- &pid->tasks[type], pids[type].node) {
+ &(pid)->tasks[type], pids[type].node) {
/*
* Both old and new leaders may be attached to
diff --git a/include/linux/random.h b/include/linux/random.h
index 36f125c0c603..adbf3bd3c6b3 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -8,6 +8,7 @@
#define _LINUX_RANDOM_H
#include <linux/ioctl.h>
+#include <linux/irqnr.h>
/* ioctl()'s for the random number generator */
@@ -44,6 +45,56 @@ struct rand_pool_info {
extern void rand_initialize_irq(int irq);
+struct timer_rand_state;
+#ifndef CONFIG_SPARSE_IRQ
+
+extern struct timer_rand_state *irq_timer_state[];
+
+static inline struct timer_rand_state *get_timer_rand_state(unsigned int irq)
+{
+ if (irq >= nr_irqs)
+ return NULL;
+
+ return irq_timer_state[irq];
+}
+
+static inline void set_timer_rand_state(unsigned int irq, struct timer_rand_state *state)
+{
+ if (irq >= nr_irqs)
+ return;
+
+ irq_timer_state[irq] = state;
+}
+
+#else
+
+#include <linux/irq.h>
+static inline struct timer_rand_state *get_timer_rand_state(unsigned int irq)
+{
+ struct irq_desc *desc;
+
+ desc = irq_to_desc(irq);
+
+ if (!desc)
+ return NULL;
+
+ return desc->timer_rand_state;
+}
+
+static inline void set_timer_rand_state(unsigned int irq, struct timer_rand_state *state)
+{
+ struct irq_desc *desc;
+
+ desc = irq_to_desc(irq);
+
+ if (!desc)
+ return;
+
+ desc->timer_rand_state = state;
+}
+#endif
+
+
extern void add_input_randomness(unsigned int type, unsigned int code,
unsigned int value);
extern void add_interrupt_randomness(int irq);
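
Both variants of get_timer_rand_state() above satisfy the same contract: the dense build indexes a flat per-IRQ array, while the CONFIG_SPARSE_IRQ build chases the allocated descriptor and may legitimately find nothing. A small sketch of the two strategies behind one interface (all names here are invented):

    #include <stdio.h>

    struct timer_rand_state_like { unsigned last_time; };

    #ifdef SPARSE_IRQ_SKETCH
    /* Sparse: state hangs off a dynamically allocated descriptor. */
    struct irq_desc_like { struct timer_rand_state_like *timer_rand_state; };
    static struct irq_desc_like *descs[1024];  /* NULL = never allocated */

    static struct timer_rand_state_like *get_state(unsigned irq)
    {
        struct irq_desc_like *desc = irq < 1024 ? descs[irq] : NULL;

        return desc ? desc->timer_rand_state : NULL;
    }
    #else
    /* Dense: one flat array slot per possible IRQ. */
    static struct timer_rand_state_like *irq_timer_state[1024];

    static struct timer_rand_state_like *get_state(unsigned irq)
    {
        return irq < 1024 ? irq_timer_state[irq] : NULL;
    }
    #endif

    int main(void)
    {
        printf("irq 5 state: %p\n", (void *)get_state(5));
        return 0;
    }

The payoff of the sparse form is that systems with huge IRQ spaces only pay for descriptors actually in use, the same theme as the irq.h and kernel_stat.h hunks earlier in this diff.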
diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h
index 5f89b62e6983..301dda829e37 100644
--- a/include/linux/rcuclassic.h
+++ b/include/linux/rcuclassic.h
@@ -41,7 +41,7 @@
#include <linux/seqlock.h>
#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
-#define RCU_SECONDS_TILL_STALL_CHECK ( 3 * HZ) /* for rcp->jiffies_stall */
+#define RCU_SECONDS_TILL_STALL_CHECK (10 * HZ) /* for rcp->jiffies_stall */
#define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ) /* for rcp->jiffies_stall */
#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 86f1f5e43e33..895dc9c1088c 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -142,6 +142,7 @@ struct rcu_head {
* on the write-side to insure proper synchronization.
*/
#define rcu_read_lock_sched() preempt_disable()
+#define rcu_read_lock_sched_notrace() preempt_disable_notrace()
/*
* rcu_read_unlock_sched - marks the end of a RCU-classic critical section
@@ -149,6 +150,7 @@ struct rcu_head {
* See rcu_read_lock_sched for more information.
*/
#define rcu_read_unlock_sched() preempt_enable()
+#define rcu_read_unlock_sched_notrace() preempt_enable_notrace()
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index e097c2e6b6dc..d363467c8f13 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -28,17 +28,19 @@ struct ring_buffer_event {
* size = 8 bytes
*
* @RINGBUF_TYPE_TIME_STAMP: Sync time stamp with external clock
- * array[0] = tv_nsec
- * array[1] = tv_sec
+ * array[0] = tv_nsec
+ * array[1..2] = tv_sec
* size = 16 bytes
*
* @RINGBUF_TYPE_DATA: Data record
* If len is zero:
* array[0] holds the actual length
- * array[1..(length+3)/4-1] holds data
+ * array[1..(length+3)/4] holds data
+ * size = 4 + 4 + length (bytes)
* else
* length = len << 2
- * array[0..(length+3)/4] holds data
+ * array[0..(length+3)/4-1] holds data
+ * size = 4 + length (bytes)
*/
enum ring_buffer_type {
RINGBUF_TYPE_PADDING,
@@ -122,6 +124,12 @@ void ring_buffer_normalize_time_stamp(int cpu, u64 *ts);
void tracing_on(void);
void tracing_off(void);
+void tracing_off_permanent(void);
+
+void *ring_buffer_alloc_read_page(struct ring_buffer *buffer);
+void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data);
+int ring_buffer_read_page(struct ring_buffer *buffer,
+ void **data_page, int cpu, int full);
enum ring_buffer_flags {
RB_FL_OVERWRITE = 1 << 0,
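
To make the corrected encoding comment concrete, here is a sketch of recovering a data event's payload length, assuming a simplified header with a 2-bit type and a 3-bit len field (an assumption for illustration; the real struct ring_buffer_event layout may differ):

    #include <stdio.h>

    /* Simplified stand-in for the event header described above. */
    struct rb_event_like {
        unsigned type:2, len:3, time_delta:27;
        unsigned array[];
    };

    static unsigned event_length(const struct rb_event_like *e)
    {
        if (e->len)                 /* short form: size = 4 + length     */
            return e->len << 2;     /* payload lives in array[0..]       */
        return e->array[0];         /* long form: size = 4 + 4 + length, */
                                    /* payload lives in array[1..]       */
    }

    int main(void)
    {
        unsigned storage[4] = { 0 };
        struct rb_event_like *e = (struct rb_event_like *)storage;

        e->type = 3;    /* data record */
        e->len = 2;     /* short form: 2 << 2 = 8 payload bytes */
        printf("payload: %u bytes\n", event_length(e));
        return 0;
    }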
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 55e30d114477..4240f6bfa812 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -96,6 +96,7 @@ struct exec_domain;
struct futex_pi_state;
struct robust_list_head;
struct bio;
+struct bts_tracer;
/*
* List of flags we want to share for kernel threads,
@@ -249,7 +250,7 @@ extern void init_idle_bootup_task(struct task_struct *idle);
extern int runqueue_is_locked(void);
extern void task_rq_unlock_wait(struct task_struct *p);
-extern cpumask_t nohz_cpu_mask;
+extern cpumask_var_t nohz_cpu_mask;
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
extern int select_nohz_load_balancer(int cpu);
#else
@@ -259,8 +260,6 @@ static inline int select_nohz_load_balancer(int cpu)
}
#endif
-extern unsigned long rt_needs_cpu(int cpu);
-
/*
* Only dump TASK_* tasks. (0 for all tasks)
*/
@@ -777,7 +776,6 @@ enum cpu_idle_type {
struct sched_group {
struct sched_group *next; /* Must be a circular list */
- cpumask_t cpumask;
/*
* CPU power of this group, SCHED_LOAD_SCALE being max power for a
@@ -790,8 +788,15 @@ struct sched_group {
* (see include/linux/reciprocal_div.h)
*/
u32 reciprocal_cpu_power;
+
+ unsigned long cpumask[];
};
+static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
+{
+ return to_cpumask(sg->cpumask);
+}
+
enum sched_domain_level {
SD_LV_NONE = 0,
SD_LV_SIBLING,
@@ -815,7 +820,6 @@ struct sched_domain {
struct sched_domain *parent; /* top domain must be null terminated */
struct sched_domain *child; /* bottom domain must be null terminated */
struct sched_group *groups; /* the balancing groups of the domain */
- cpumask_t span; /* span of all CPUs in this domain */
unsigned long min_interval; /* Minimum balance interval ms */
unsigned long max_interval; /* Maximum balance interval ms */
unsigned int busy_factor; /* less balancing by factor if busy */
@@ -870,9 +874,17 @@ struct sched_domain {
#ifdef CONFIG_SCHED_DEBUG
char *name;
#endif
+
+ /* span of all CPUs in this domain */
+ unsigned long span[];
};
-extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
+static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
+{
+ return to_cpumask(sd->span);
+}
+
+extern void partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
struct sched_domain_attr *dattr_new);
extern int arch_reinit_sched_domains(void);
@@ -881,7 +893,7 @@ extern int arch_reinit_sched_domains(void);
struct sched_domain_attr;
static inline void
-partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
+partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
struct sched_domain_attr *dattr_new)
{
}
@@ -963,7 +975,7 @@ struct sched_class {
void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
void (*set_cpus_allowed)(struct task_struct *p,
- const cpumask_t *newmask);
+ const struct cpumask *newmask);
void (*rq_online)(struct rq *rq);
void (*rq_offline)(struct rq *rq);
@@ -1165,6 +1177,18 @@ struct task_struct {
struct list_head ptraced;
struct list_head ptrace_entry;
+#ifdef CONFIG_X86_PTRACE_BTS
+ /*
+ * This is the tracer handle for the ptrace BTS extension.
+ * This field actually belongs to the ptracer task.
+ */
+ struct bts_tracer *bts;
+ /*
+ * The buffer to hold the BTS data.
+ */
+ void *bts_buffer;
+#endif /* CONFIG_X86_PTRACE_BTS */
+
/* PID/PID hash table linkage. */
struct pid_link pids[PIDTYPE_MAX];
struct list_head thread_group;
@@ -1356,6 +1380,23 @@ struct task_struct {
unsigned long default_timer_slack_ns;
struct list_head *scm_work_list;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ /* Index of current stored address in ret_stack */
+ int curr_ret_stack;
+ /* Stack of return addresses for return function tracing */
+ struct ftrace_ret_stack *ret_stack;
+ /*
+ * Number of functions that haven't been traced
+ * because of depth overrun.
+ */
+ atomic_t trace_overrun;
+ /* Pause for the tracing */
+ atomic_t tracing_graph_pause;
+#endif
+#ifdef CONFIG_TRACING
+ /* state flags for use by tracers */
+ unsigned long trace;
+#endif
};
/*
@@ -1594,12 +1635,12 @@ extern cputime_t task_gtime(struct task_struct *p);
#ifdef CONFIG_SMP
extern int set_cpus_allowed_ptr(struct task_struct *p,
- const cpumask_t *new_mask);
+ const struct cpumask *new_mask);
#else
static inline int set_cpus_allowed_ptr(struct task_struct *p,
- const cpumask_t *new_mask)
+ const struct cpumask *new_mask)
{
- if (!cpu_isset(0, *new_mask))
+ if (!cpumask_test_cpu(0, new_mask))
return -EINVAL;
return 0;
}
@@ -2212,8 +2253,8 @@ __trace_special(void *__tr, void *__data,
}
#endif
-extern long sched_setaffinity(pid_t pid, const cpumask_t *new_mask);
-extern long sched_getaffinity(pid_t pid, cpumask_t *mask);
+extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
+extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
extern int sched_mc_power_savings, sched_smt_power_savings;
@@ -2224,6 +2265,7 @@ extern void normalize_rt_tasks(void);
extern struct task_group init_task_group;
#ifdef CONFIG_USER_SCHED
extern struct task_group root_task_group;
+extern void set_tg_uid(struct user_struct *user);
#endif
extern struct task_group *sched_create_group(struct task_group *parent);
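
The sched_group and sched_domain hunks above replace fixed cpumask_t members with flexible array tails sized at allocation time, exposed through sched_group_cpus()/sched_domain_span(). A minimal sketch of that allocation pattern (group_alloc and struct sched_group_like are invented for illustration):

    #include <stdio.h>
    #include <stdlib.h>

    #define BITS_PER_LONG (8 * sizeof(long))

    struct sched_group_like {          /* stand-in for struct sched_group */
        unsigned int group_weight;
        unsigned long cpumask[];       /* sized when allocated */
    };

    /* Allocate the struct plus just enough tail for nbits CPUs. */
    static struct sched_group_like *group_alloc(unsigned nbits)
    {
        size_t words = (nbits + BITS_PER_LONG - 1) / BITS_PER_LONG;

        return calloc(1, sizeof(struct sched_group_like) +
                         words * sizeof(long));
    }

    /* Accessor in the style of sched_group_cpus(). */
    static unsigned long *group_cpus(struct sched_group_like *sg)
    {
        return sg->cpumask;
    }

    int main(void)
    {
        struct sched_group_like *sg = group_alloc(128);

        group_cpus(sg)[0] |= 1UL << 3;      /* mark CPU 3 in the mask */
        printf("cpu3 set: %lu\n", (group_cpus(sg)[0] >> 3) & 1);
        free(sg);
        return 0;
    }

The win over a fixed cpumask_t is that kernels built for thousands of CPUs no longer embed a worst-case mask in every group and domain.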
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index dc50bcc282a8..b3dfa72f13b9 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -34,6 +34,7 @@ struct seq_operations {
#define SEQ_SKIP 1
+char *mangle_path(char *s, char *p, char *esc);
int seq_open(struct file *, const struct seq_operations *);
ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
loff_t seq_lseek(struct file *, loff_t, int);
diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h
index b106fd8e0d5c..1a8cecc4f38c 100644
--- a/include/linux/stacktrace.h
+++ b/include/linux/stacktrace.h
@@ -15,9 +15,17 @@ extern void save_stack_trace_tsk(struct task_struct *tsk,
struct stack_trace *trace);
extern void print_stack_trace(struct stack_trace *trace, int spaces);
+
+#ifdef CONFIG_USER_STACKTRACE_SUPPORT
+extern void save_stack_trace_user(struct stack_trace *trace);
+#else
+# define save_stack_trace_user(trace) do { } while (0)
+#endif
+
#else
# define save_stack_trace(trace) do { } while (0)
# define save_stack_trace_tsk(tsk, trace) do { } while (0)
+# define save_stack_trace_user(trace) do { } while (0)
# define print_stack_trace(trace, spaces) do { } while (0)
#endif
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 117f1b7405cf..0c5b5ac36d8e 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -49,7 +49,7 @@
for_each_online_node(node) \
if (nr_cpus_node(node))
-void arch_update_cpu_topology(void);
+int arch_update_cpu_topology(void);
/* Conform to ACPI 2.0 SLIT distance definitions */
#define LOCAL_DISTANCE 10
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index c5bb39c7a770..757005458366 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -24,8 +24,12 @@ struct tracepoint {
const char *name; /* Tracepoint name */
int state; /* State. */
void **funcs;
-} __attribute__((aligned(8)));
-
+} __attribute__((aligned(32))); /*
+ * Aligned on 32 bytes because it is
+ * globally visible and gcc happily
+ * aligns these on the structure size.
+ * Keep in sync with vmlinux.lds.h.
+ */
#define TPPROTO(args...) args
#define TPARGS(args...) args
@@ -40,14 +44,14 @@ struct tracepoint {
do { \
void **it_func; \
\
- rcu_read_lock_sched(); \
+ rcu_read_lock_sched_notrace(); \
it_func = rcu_dereference((tp)->funcs); \
if (it_func) { \
do { \
((void(*)(proto))(*it_func))(args); \
} while (*(++it_func)); \
} \
- rcu_read_unlock_sched(); \
+ rcu_read_unlock_sched_notrace(); \
} while (0)
/*
@@ -55,35 +59,40 @@ struct tracepoint {
* not add unwanted padding between the beginning of the section and the
* structure. Force alignment to the same alignment as the section start.
*/
-#define DEFINE_TRACE(name, proto, args) \
+#define DECLARE_TRACE(name, proto, args) \
+ extern struct tracepoint __tracepoint_##name; \
static inline void trace_##name(proto) \
{ \
- static const char __tpstrtab_##name[] \
- __attribute__((section("__tracepoints_strings"))) \
- = #name ":" #proto; \
- static struct tracepoint __tracepoint_##name \
- __attribute__((section("__tracepoints"), aligned(8))) = \
- { __tpstrtab_##name, 0, NULL }; \
if (unlikely(__tracepoint_##name.state)) \
__DO_TRACE(&__tracepoint_##name, \
TPPROTO(proto), TPARGS(args)); \
} \
static inline int register_trace_##name(void (*probe)(proto)) \
{ \
- return tracepoint_probe_register(#name ":" #proto, \
- (void *)probe); \
+ return tracepoint_probe_register(#name, (void *)probe); \
} \
- static inline void unregister_trace_##name(void (*probe)(proto))\
+ static inline int unregister_trace_##name(void (*probe)(proto)) \
{ \
- tracepoint_probe_unregister(#name ":" #proto, \
- (void *)probe); \
+ return tracepoint_probe_unregister(#name, (void *)probe);\
}
+#define DEFINE_TRACE(name) \
+ static const char __tpstrtab_##name[] \
+ __attribute__((section("__tracepoints_strings"))) = #name; \
+ struct tracepoint __tracepoint_##name \
+ __attribute__((section("__tracepoints"), aligned(32))) = \
+ { __tpstrtab_##name, 0, NULL }
+
+#define EXPORT_TRACEPOINT_SYMBOL_GPL(name) \
+ EXPORT_SYMBOL_GPL(__tracepoint_##name)
+#define EXPORT_TRACEPOINT_SYMBOL(name) \
+ EXPORT_SYMBOL(__tracepoint_##name)
+
extern void tracepoint_update_probe_range(struct tracepoint *begin,
struct tracepoint *end);
#else /* !CONFIG_TRACEPOINTS */
-#define DEFINE_TRACE(name, proto, args) \
+#define DECLARE_TRACE(name, proto, args) \
static inline void _do_trace_##name(struct tracepoint *tp, proto) \
{ } \
static inline void trace_##name(proto) \
@@ -92,8 +101,14 @@ extern void tracepoint_update_probe_range(struct tracepoint *begin,
{ \
return -ENOSYS; \
} \
- static inline void unregister_trace_##name(void (*probe)(proto))\
- { }
+ static inline int unregister_trace_##name(void (*probe)(proto)) \
+ { \
+ return -ENOSYS; \
+ }
+
+#define DEFINE_TRACE(name)
+#define EXPORT_TRACEPOINT_SYMBOL_GPL(name)
+#define EXPORT_TRACEPOINT_SYMBOL(name)
static inline void tracepoint_update_probe_range(struct tracepoint *begin,
struct tracepoint *end)
@@ -112,6 +127,10 @@ extern int tracepoint_probe_register(const char *name, void *probe);
*/
extern int tracepoint_probe_unregister(const char *name, void *probe);
+extern int tracepoint_probe_register_noupdate(const char *name, void *probe);
+extern int tracepoint_probe_unregister_noupdate(const char *name, void *probe);
+extern void tracepoint_probe_update_all(void);
+
struct tracepoint_iter {
struct module *module;
struct tracepoint *tracepoint;
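
The DECLARE_TRACE/DEFINE_TRACE split above is the classic one-definition pattern: the header declares an extern tracepoint plus a cheap state-gated inline, and exactly one compilation unit instantiates the object. A compressed single-file sketch of that dispatch (my_event and probe_fn are made-up names; the kernel version additionally handles probe arrays, RCU, and registration by name):

    #include <stdio.h>

    /* What DEFINE_TRACE instantiates: name, enable flag, probe. */
    struct tracepoint_like {
        const char *name;
        int state;              /* 0 until a probe is registered */
        void (*func)(int);      /* single probe, for simplicity */
    };

    /* DEFINE_TRACE(my_event) analogue: one definition, one symbol. */
    static struct tracepoint_like __tracepoint_my_event = {
        "my_event", 0, NULL
    };

    /* DECLARE_TRACE analogue: the call site is a cheap state check. */
    static inline void trace_my_event(int arg)
    {
        if (__tracepoint_my_event.state)
            __tracepoint_my_event.func(arg);
    }

    static int register_trace_my_event(void (*probe)(int))
    {
        __tracepoint_my_event.func = probe;
        __tracepoint_my_event.state = 1;
        return 0;
    }

    static void probe_fn(int arg)       /* hypothetical probe */
    {
        printf("my_event fired: %d\n", arg);
    }

    int main(void)
    {
        trace_my_event(1);                  /* disabled: nothing happens */
        register_trace_my_event(probe_fn);
        trace_my_event(2);                  /* enabled: probe runs */
        return 0;
    }

This is also why trace/sched.h below flips from DEFINE_TRACE to DECLARE_TRACE: the header may be included everywhere, while each DEFINE_TRACE(name) now lives in a single .c file.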
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 3b8121d4e36f..eaec37c9d83d 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -325,7 +325,7 @@ extern struct class *tty_class;
* go away
*/
-extern inline struct tty_struct *tty_kref_get(struct tty_struct *tty)
+static inline struct tty_struct *tty_kref_get(struct tty_struct *tty)
{
if (tty)
kref_get(&tty->kref);
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index fec6decfb983..6b58367d145e 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -78,7 +78,7 @@ static inline unsigned long __copy_from_user_nocache(void *to,
\
set_fs(KERNEL_DS); \
pagefault_disable(); \
- ret = __get_user(retval, (__force typeof(retval) __user *)(addr)); \
+ ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
pagefault_enable(); \
set_fs(old_fs); \
ret; \
diff --git a/include/trace/block.h b/include/trace/block.h
new file mode 100644
index 000000000000..25c6a1fd5b77
--- /dev/null
+++ b/include/trace/block.h
@@ -0,0 +1,76 @@
+#ifndef _TRACE_BLOCK_H
+#define _TRACE_BLOCK_H
+
+#include <linux/blkdev.h>
+#include <linux/tracepoint.h>
+
+DECLARE_TRACE(block_rq_abort,
+ TPPROTO(struct request_queue *q, struct request *rq),
+ TPARGS(q, rq));
+
+DECLARE_TRACE(block_rq_insert,
+ TPPROTO(struct request_queue *q, struct request *rq),
+ TPARGS(q, rq));
+
+DECLARE_TRACE(block_rq_issue,
+ TPPROTO(struct request_queue *q, struct request *rq),
+ TPARGS(q, rq));
+
+DECLARE_TRACE(block_rq_requeue,
+ TPPROTO(struct request_queue *q, struct request *rq),
+ TPARGS(q, rq));
+
+DECLARE_TRACE(block_rq_complete,
+ TPPROTO(struct request_queue *q, struct request *rq),
+ TPARGS(q, rq));
+
+DECLARE_TRACE(block_bio_bounce,
+ TPPROTO(struct request_queue *q, struct bio *bio),
+ TPARGS(q, bio));
+
+DECLARE_TRACE(block_bio_complete,
+ TPPROTO(struct request_queue *q, struct bio *bio),
+ TPARGS(q, bio));
+
+DECLARE_TRACE(block_bio_backmerge,
+ TPPROTO(struct request_queue *q, struct bio *bio),
+ TPARGS(q, bio));
+
+DECLARE_TRACE(block_bio_frontmerge,
+ TPPROTO(struct request_queue *q, struct bio *bio),
+ TPARGS(q, bio));
+
+DECLARE_TRACE(block_bio_queue,
+ TPPROTO(struct request_queue *q, struct bio *bio),
+ TPARGS(q, bio));
+
+DECLARE_TRACE(block_getrq,
+ TPPROTO(struct request_queue *q, struct bio *bio, int rw),
+ TPARGS(q, bio, rw));
+
+DECLARE_TRACE(block_sleeprq,
+ TPPROTO(struct request_queue *q, struct bio *bio, int rw),
+ TPARGS(q, bio, rw));
+
+DECLARE_TRACE(block_plug,
+ TPPROTO(struct request_queue *q),
+ TPARGS(q));
+
+DECLARE_TRACE(block_unplug_timer,
+ TPPROTO(struct request_queue *q),
+ TPARGS(q));
+
+DECLARE_TRACE(block_unplug_io,
+ TPPROTO(struct request_queue *q),
+ TPARGS(q));
+
+DECLARE_TRACE(block_split,
+ TPPROTO(struct request_queue *q, struct bio *bio, unsigned int pdu),
+ TPARGS(q, bio, pdu));
+
+DECLARE_TRACE(block_remap,
+ TPPROTO(struct request_queue *q, struct bio *bio, dev_t dev,
+ sector_t from, sector_t to),
+ TPARGS(q, bio, dev, from, to));
+
+#endif
diff --git a/include/trace/boot.h b/include/trace/boot.h
new file mode 100644
index 000000000000..088ea089e31d
--- /dev/null
+++ b/include/trace/boot.h
@@ -0,0 +1,60 @@
+#ifndef _LINUX_TRACE_BOOT_H
+#define _LINUX_TRACE_BOOT_H
+
+#include <linux/module.h>
+#include <linux/kallsyms.h>
+#include <linux/init.h>
+
+/*
+ * Structure which defines the trace of an initcall
+ * while it is called.
+ * You don't have to fill the func field since it is
+ * only used internally by the tracer.
+ */
+struct boot_trace_call {
+ pid_t caller;
+ char func[KSYM_SYMBOL_LEN];
+};
+
+/*
+ * Structure which defines the trace of an initcall
+ * while it returns.
+ */
+struct boot_trace_ret {
+ char func[KSYM_SYMBOL_LEN];
+ int result;
+ unsigned long long duration; /* nsecs */
+};
+
+#ifdef CONFIG_BOOT_TRACER
+/* Append the traces on the ring-buffer */
+extern void trace_boot_call(struct boot_trace_call *bt, initcall_t fn);
+extern void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn);
+
+/* Tells the tracer that smp_pre_initcall is finished,
+ * so we can start the tracing.
+ */
+extern void start_boot_trace(void);
+
+/* Resume the tracing of other necessary events
+ * such as sched switches
+ */
+extern void enable_boot_trace(void);
+
+/* Suspend this tracing. Actually, only the sched_switch tracing has
+ * to be suspended. Initcalls don't need it.
+ */
+extern void disable_boot_trace(void);
+#else
+static inline
+void trace_boot_call(struct boot_trace_call *bt, initcall_t fn) { }
+
+static inline
+void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn) { }
+
+static inline void start_boot_trace(void) { }
+static inline void enable_boot_trace(void) { }
+static inline void disable_boot_trace(void) { }
+#endif /* CONFIG_BOOT_TRACER */
+
+#endif /* __LINUX_TRACE_BOOT_H */
diff --git a/include/trace/sched.h b/include/trace/sched.h
index ad47369d01b5..9b2854abf7e2 100644
--- a/include/trace/sched.h
+++ b/include/trace/sched.h
@@ -4,52 +4,52 @@
#include <linux/sched.h>
#include <linux/tracepoint.h>
-DEFINE_TRACE(sched_kthread_stop,
+DECLARE_TRACE(sched_kthread_stop,
TPPROTO(struct task_struct *t),
TPARGS(t));
-DEFINE_TRACE(sched_kthread_stop_ret,
+DECLARE_TRACE(sched_kthread_stop_ret,
TPPROTO(int ret),
TPARGS(ret));
-DEFINE_TRACE(sched_wait_task,
+DECLARE_TRACE(sched_wait_task,
TPPROTO(struct rq *rq, struct task_struct *p),
TPARGS(rq, p));
-DEFINE_TRACE(sched_wakeup,
+DECLARE_TRACE(sched_wakeup,
TPPROTO(struct rq *rq, struct task_struct *p),
TPARGS(rq, p));
-DEFINE_TRACE(sched_wakeup_new,
+DECLARE_TRACE(sched_wakeup_new,
TPPROTO(struct rq *rq, struct task_struct *p),
TPARGS(rq, p));
-DEFINE_TRACE(sched_switch,
+DECLARE_TRACE(sched_switch,
TPPROTO(struct rq *rq, struct task_struct *prev,
struct task_struct *next),
TPARGS(rq, prev, next));
-DEFINE_TRACE(sched_migrate_task,
+DECLARE_TRACE(sched_migrate_task,
TPPROTO(struct rq *rq, struct task_struct *p, int dest_cpu),
TPARGS(rq, p, dest_cpu));
-DEFINE_TRACE(sched_process_free,
+DECLARE_TRACE(sched_process_free,
TPPROTO(struct task_struct *p),
TPARGS(p));
-DEFINE_TRACE(sched_process_exit,
+DECLARE_TRACE(sched_process_exit,
TPPROTO(struct task_struct *p),
TPARGS(p));
-DEFINE_TRACE(sched_process_wait,
+DECLARE_TRACE(sched_process_wait,
TPPROTO(struct pid *pid),
TPARGS(pid));
-DEFINE_TRACE(sched_process_fork,
+DECLARE_TRACE(sched_process_fork,
TPPROTO(struct task_struct *parent, struct task_struct *child),
TPARGS(parent, child));
-DEFINE_TRACE(sched_signal_send,
+DECLARE_TRACE(sched_signal_send,
TPPROTO(int sig, struct task_struct *p),
TPARGS(sig, p));