author    Grant Likely <grant.likely@secretlab.ca>  2010-07-24 09:49:13 -0600
committer Grant Likely <grant.likely@secretlab.ca>  2010-07-24 09:49:13 -0600
commit    4e4f62bf7396fca48efe61513640ee399a6046e3 (patch)
tree      42a503af02d9806bcc05e5fcc2cd53f9bd45b0c2 /arch/mips/kernel/ftrace.c
parent    9e3288dc9a94fab5ea87db42177d3a9e0345a614 (diff)
parent    b37fa16e78d6f9790462b3181602a26b5af36260 (diff)
Merge commit 'v2.6.35-rc6' into devicetree/next
Conflicts:
	arch/sparc/kernel/prom_64.c
Diffstat (limited to 'arch/mips/kernel/ftrace.c')
-rw-r--r--  arch/mips/kernel/ftrace.c | 184
1 file changed, 112 insertions(+), 72 deletions(-)
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index e9e64e0ff7aa..5a84a1f11231 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -2,7 +2,7 @@
* Code for replacing ftrace calls with jumps.
*
* Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
- * Copyright (C) 2009 DSLab, Lanzhou University, China
+ * Copyright (C) 2009, 2010 DSLab, Lanzhou University, China
* Author: Wu Zhangjin <wuzhangjin@gmail.com>
*
* Thanks goes to Steven Rostedt for writing the original x86 version.
@@ -12,18 +12,62 @@
#include <linux/init.h>
#include <linux/ftrace.h>
-#include <asm/cacheflush.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
+#include <asm/cacheflush.h>
+#include <asm/uasm.h>
+
+/*
+ * If the Instruction Pointer is in module space (0xc0000000), return true;
+ * otherwise, it is in kernel space (0x80000000), return false.
+ *
+ * FIXME: This will not work when the kernel space and module space are the
+ * same. If they are the same, we need to modify scripts/recordmcount.pl,
+ * ftrace_make_nop/call() and the other related parts to ensure the
+ * enabling/disabling of the calling site to _mcount is right for both kernel
+ * and module.
+ */
+
+static inline int in_module(unsigned long ip)
+{
+ return ip & 0x40000000;
+}
#ifdef CONFIG_DYNAMIC_FTRACE
#define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */
#define ADDR_MASK 0x03ffffff /* op_code|addr : 31...26|25 ....0 */
-#define jump_insn_encode(op_code, addr) \
- ((unsigned int)((op_code) | (((addr) >> 2) & ADDR_MASK)))
-static unsigned int ftrace_nop = 0x00000000;
+#define INSN_B_1F_4 0x10000004 /* b 1f; offset = 4 */
+#define INSN_B_1F_5 0x10000005 /* b 1f; offset = 5 */
+#define INSN_NOP 0x00000000 /* nop */
+#define INSN_JAL(addr) \
+ ((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))
+
+static unsigned int insn_jal_ftrace_caller __read_mostly;
+static unsigned int insn_lui_v1_hi16_mcount __read_mostly;
+static unsigned int insn_j_ftrace_graph_caller __maybe_unused __read_mostly;
+
+static inline void ftrace_dyn_arch_init_insns(void)
+{
+ u32 *buf;
+ unsigned int v1;
+
+ /* lui v1, hi16_mcount */
+ v1 = 3;
+ buf = (u32 *)&insn_lui_v1_hi16_mcount;
+ UASM_i_LA_mostly(&buf, v1, MCOUNT_ADDR);
+
+ /* jal (ftrace_caller + 8), jump over the first two instructions */
+ buf = (u32 *)&insn_jal_ftrace_caller;
+ uasm_i_jal(&buf, (FTRACE_ADDR + 8));
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ /* j ftrace_graph_caller */
+ buf = (u32 *)&insn_j_ftrace_graph_caller;
+ uasm_i_j(&buf, (unsigned long)ftrace_graph_caller);
+#endif
+}
static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
{
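For reference, INSN_JAL() above packs a word-aligned target address into the low 26 bits of a MIPS j-type instruction, and in_module() keys off bit 30 of the address to separate the 0xc0000000 module region from the 0x80000000 kernel region. A minimal user-space sketch of both, using purely illustrative addresses rather than anything taken from this patch:

#include <stdio.h>

#define JAL       0x0c000000u  /* jump & link: opcode in bits 31..26 */
#define ADDR_MASK 0x03ffffffu  /* 26-bit word-address field */

/* Same arithmetic as INSN_JAL(): drop the two low bits, keep 26 bits. */
static unsigned int insn_jal(unsigned long target)
{
	return JAL | (unsigned int)((target >> 2) & ADDR_MASK);
}

/* Same test as in_module(): bit 30 distinguishes 0xc... from 0x8... */
static int in_module(unsigned long ip)
{
	return (ip & 0x40000000) != 0;
}

int main(void)
{
	unsigned long kernel_ip = 0x80123450UL;  /* illustrative kernel address */
	unsigned long module_ip = 0xc0045678UL;  /* illustrative module address */

	printf("jal 0x%lx encodes as 0x%08x\n", kernel_ip, insn_jal(kernel_ip));
	printf("0x%lx in module? %d\n", kernel_ip, in_module(kernel_ip));
	printf("0x%lx in module? %d\n", module_ip, in_module(module_ip));
	return 0;
}

Note that jal keeps the upper four bits of the PC, so it can only reach targets inside the same 256 MB segment; that is why module code built with -mlong-calls loads the full _mcount address with lui/addiu and calls through jalr instead, as handled below.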
@@ -40,67 +84,56 @@ static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
return 0;
}
-static int lui_v1;
-static int jal_mcount;
-
int ftrace_make_nop(struct module *mod,
struct dyn_ftrace *rec, unsigned long addr)
{
unsigned int new;
- int faulted;
unsigned long ip = rec->ip;
- /* We have compiled module with -mlong-calls, but compiled the kernel
- * without it, we need to cope with them respectively. */
- if (ip & 0x40000000) {
- /* record it for ftrace_make_call */
- if (lui_v1 == 0) {
- /* lui_v1 = *(unsigned int *)ip; */
- safe_load_code(lui_v1, ip, faulted);
-
- if (unlikely(faulted))
- return -EFAULT;
- }
-
- /* lui v1, hi_16bit_of_mcount --> b 1f (0x10000004)
+ /*
* The module is compiled with -mlong-calls, but the kernel is not, so we
* need to handle the two cases separately.
+ */
+ if (in_module(ip)) {
+#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
+ /*
+ * lui v1, hi_16bit_of_mcount --> b 1f (0x10000005)
+ * addiu v1, v1, low_16bit_of_mcount
+ * move at, ra
+ * move $12, ra_address
+ * jalr v1
+ * sub sp, sp, 8
+ * 1: offset = 5 instructions
+ */
+ new = INSN_B_1F_5;
+#else
+ /*
+ * lui v1, hi_16bit_of_mcount --> b 1f (0x10000004)
* addiu v1, v1, low_16bit_of_mcount
* move at, ra
* jalr v1
- * nop
- * 1f: (ip + 12)
+ * nop | move $12, ra_address | sub sp, sp, 8
+ * 1: offset = 4 instructions
*/
- new = 0x10000004;
+ new = INSN_B_1F_4;
+#endif
} else {
- /* record/calculate it for ftrace_make_call */
- if (jal_mcount == 0) {
- /* We can record it directly like this:
- * jal_mcount = *(unsigned int *)ip;
- * Herein, jump over the first two nop instructions */
- jal_mcount = jump_insn_encode(JAL, (MCOUNT_ADDR + 8));
- }
-
- /* move at, ra
- * jalr v1 --> nop
+ /*
+ * move at, ra
+ * jal _mcount --> nop
*/
- new = ftrace_nop;
+ new = INSN_NOP;
}
return ftrace_modify_code(ip, new);
}
-static int modified; /* initialized as 0 by default */
-
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
unsigned int new;
unsigned long ip = rec->ip;
- /* We just need to remove the "b ftrace_stub" at the fist time! */
- if (modified == 0) {
- modified = 1;
- ftrace_modify_code(addr, ftrace_nop);
- }
/* ip, module: 0xc0000000, kernel: 0x80000000 */
- new = (ip & 0x40000000) ? lui_v1 : jal_mcount;
+ new = in_module(ip) ? insn_lui_v1_hi16_mcount : insn_jal_ftrace_caller;
return ftrace_modify_code(ip, new);
}
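The INSN_B_1F_4 / INSN_B_1F_5 values used by ftrace_make_nop() above are plain MIPS "b" instructions (beq zero, zero, offset) whose 16-bit immediate is a signed word offset added to the address of the delay slot, so an immediate of 4 or 5 lands on the "1:" label shown in the comments. A small sketch of that encoding (user-space, illustrative only):

#include <stdio.h>

#define OP_B 0x10000000u  /* beq zero, zero, offset == unconditional b */

/*
 * The 16-bit immediate of a MIPS branch is a signed word offset that is
 * added to the address of the delay slot (branch address + 4).
 */
static unsigned int insn_b(short word_offset)
{
	return OP_B | ((unsigned int)word_offset & 0xffffu);
}

int main(void)
{
	/* These reproduce INSN_B_1F_4 and INSN_B_1F_5 from the patch above. */
	printf("b over 4 words: 0x%08x\n", insn_b(4));
	printf("b over 5 words: 0x%08x\n", insn_b(5));
	return 0;
}

Only the first instruction of the call sequence is rewritten either way, so patching remains a single aligned 32-bit store in ftrace_modify_code().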
@@ -111,44 +144,48 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
{
unsigned int new;
- new = jump_insn_encode(JAL, (unsigned long)func);
+ new = INSN_JAL((unsigned long)func);
return ftrace_modify_code(FTRACE_CALL_IP, new);
}
int __init ftrace_dyn_arch_init(void *data)
{
+ /* Encode the instructions when booting */
+ ftrace_dyn_arch_init_insns();
+
+ /* Remove "b ftrace_stub" to ensure ftrace_caller() is executed */
+ ftrace_modify_code(MCOUNT_ADDR, INSN_NOP);
+
/* The return code is returned via data */
*(unsigned long *)data = 0;
return 0;
}
-#endif /* CONFIG_DYNAMIC_FTRACE */
+#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);
-#define JMP 0x08000000 /* jump to target directly */
-#define CALL_FTRACE_GRAPH_CALLER \
- jump_insn_encode(JMP, (unsigned long)(&ftrace_graph_caller))
#define FTRACE_GRAPH_CALL_IP ((unsigned long)(&ftrace_graph_call))
int ftrace_enable_ftrace_graph_caller(void)
{
return ftrace_modify_code(FTRACE_GRAPH_CALL_IP,
- CALL_FTRACE_GRAPH_CALLER);
+ insn_j_ftrace_graph_caller);
}
int ftrace_disable_ftrace_graph_caller(void)
{
- return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, ftrace_nop);
+ return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, INSN_NOP);
}
-#endif /* !CONFIG_DYNAMIC_FTRACE */
+#endif /* CONFIG_DYNAMIC_FTRACE */
#ifndef KBUILD_MCOUNT_RA_ADDRESS
+
#define S_RA_SP (0xafbf << 16) /* s{d,w} ra, offset(sp) */
#define S_R_SP (0xafb0 << 16) /* s{d,w} R, offset(sp) */
#define OFFSET_MASK 0xffff /* stack offset range: 0 ~ PT_SIZE */
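S_RA_SP and S_R_SP above are match patterns for MIPS store instructions: 0xafbf is opcode sw (0x2b), base register sp ($29), target register ra ($31), while 0xafb0 matches a sw of any register $16..$31 to the stack; the 16-bit stack offset sits in the low half of the word. ftrace_get_parent_addr() below scans backwards for such a store to find where the caller saved ra. A sketch of the kind of mask test involved, on a hand-assembled "sw ra, 28(sp)" (the offset 28 is illustrative, and a 64-bit kernel would use sd rather than the sw encoding shown here):

#include <stdio.h>

#define S_RA_SP     (0xafbfu << 16)  /* sw ra, 0(sp): opcode sw, base sp, rt ra */
#define OFFSET_MASK 0xffffu          /* low 16 bits carry the stack offset */

int main(void)
{
	unsigned int code = S_RA_SP | 28;  /* hand-assembled "sw ra, 28(sp)" */

	/* Recognise a save of ra to the stack and extract its frame offset. */
	if ((code & S_RA_SP) == S_RA_SP)
		printf("ra is saved at sp + %u\n", code & OFFSET_MASK);
	else
		printf("not a 'sw ra, offset(sp)' store\n");
	return 0;
}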
@@ -162,17 +199,17 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr,
unsigned int code;
int faulted;
- /* in module or kernel? */
- if (self_addr & 0x40000000) {
- /* module: move to the instruction "lui v1, HI_16BIT_OF_MCOUNT" */
- ip = self_addr - 20;
- } else {
- /* kernel: move to the instruction "move ra, at" */
- ip = self_addr - 12;
- }
+ /*
+ * For module, move the ip from calling site of mcount to the
+ * instruction "lui v1, hi_16bit_of_mcount"(offset is 20), but for
+ * kernel, move to the instruction "move ra, at"(offset is 12)
+ */
+ ip = self_addr - (in_module(self_addr) ? 20 : 12);
- /* search the text until finding the non-store instruction or "s{d,w}
- * ra, offset(sp)" instruction */
+ /*
+ * search the text until finding the non-store instruction or "s{d,w}
+ * ra, offset(sp)" instruction
+ */
do {
ip -= 4;
@@ -181,10 +218,11 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr,
if (unlikely(faulted))
return 0;
-
- /* If we hit the non-store instruction before finding where the
+ /*
+ * If we hit the non-store instruction before finding where the
* ra is stored, then this is a leaf function and it does not
- * store the ra on the stack. */
+ * store the ra on the stack
+ */
if ((code & S_R_SP) != S_R_SP)
return parent_addr;
@@ -202,7 +240,7 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr,
return 0;
}
-#endif
+#endif /* !KBUILD_MCOUNT_RA_ADDRESS */
/*
* Hook the return address and push it in the stack of return addrs
@@ -220,7 +258,8 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
if (unlikely(atomic_read(&current->tracing_graph_pause)))
return;
- /* "parent" is the stack address saved the return address of the caller
+ /*
* "parent" is the stack address where the return address of the caller
* of _mcount is saved.
*
* if the gcc < 4.5, a leaf function does not save the return address
@@ -242,10 +281,11 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
goto out;
#ifndef KBUILD_MCOUNT_RA_ADDRESS
parent = (unsigned long *)ftrace_get_parent_addr(self_addr, old,
- (unsigned long)parent,
- fp);
- /* If fails when getting the stack address of the non-leaf function's
- * ra, stop function graph tracer and return */
+ (unsigned long)parent, fp);
+ /*
+ * If getting the stack address of the non-leaf function's ra fails,
+ * stop the function graph tracer and return
+ */
if (parent == 0)
goto out;
#endif
@@ -272,4 +312,4 @@ out:
ftrace_graph_stop();
WARN_ON(1);
}
-#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
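Taken together, prepare_ftrace_return() follows the usual function-graph pattern: read the caller's return address out of the slot that "parent" points to (or, without KBUILD_MCOUNT_RA_ADDRESS, out of the stack slot located by ftrace_get_parent_addr()), record it, and overwrite the slot with the tracer's return trampoline. A compressed user-space sketch of that pattern; the names and the bookkeeping structure here are illustrative and only loosely mirror the kernel's helpers:

#include <stdio.h>

/* Stand-in for the kernel's return trampoline. */
static void return_to_handler(void)
{
}

struct ret_entry {
	unsigned long ret;   /* original return address of the caller */
	unsigned long func;  /* traced function (self_addr) */
};

/* Record the original return address; returns 0 on success. */
static int push_return_trace(struct ret_entry *e, unsigned long old,
			     unsigned long func)
{
	e->ret = old;
	e->func = func;
	return 0;
}

/*
 * Core of the hook: read the caller's return address from the slot that
 * "parent" points to, save it, then divert the slot to the trampoline so
 * the traced function returns through the tracer first.
 */
static void hook_return_address(unsigned long *parent, unsigned long self_addr,
				struct ret_entry *e)
{
	unsigned long old = *parent;

	if (push_return_trace(e, old, self_addr))
		return;  /* bookkeeping failed: leave the return path untouched */

	*parent = (unsigned long)&return_to_handler;
}

int main(void)
{
	unsigned long slot = 0x80100200UL;  /* pretend saved return address */
	struct ret_entry e;

	hook_return_address(&slot, 0x80200300UL, &e);
	printf("saved ret 0x%lx, slot now holds 0x%lx\n", e.ret, slot);
	return 0;
}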