path: root/arch/blackfin/kernel/ftrace-entry.S
/*
 * mcount and friends -- ftrace stuff
 *
 * Copyright (C) 2009-2010 Analog Devices Inc.
 * Licensed under the GPL-2 or later.
 */

#include <linux/linkage.h>
#include <asm/ftrace.h>

.text

#ifdef CONFIG_DYNAMIC_FTRACE

/* A simple stub so the kernel can boot until runtime patching has
 * converted every compiled-in call to this into a nop.  After that
 * it is unused.
 */
ENTRY(__mcount)
# if ANOMALY_05000371
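	/* pad so there are at least three instructions between the call
	 * that set RETS and the RTS below (see the anomaly 05000371 note
	 * near the end of this file)
	 */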
	nop; nop; nop; nop;
# endif
	rts;
ENDPROC(__mcount)
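
/* For reference, the runtime patching is driven by the arch hooks in
 * arch/blackfin/kernel/ftrace.c.  A sketch of the generic contract, not
 * code from this file:
 *
 *	// turn a compiled-in call to __mcount into a nop, or point a
 *	// patched site at addr (i.e. _ftrace_caller) when tracing is on
 *	int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
 *			    unsigned long addr);
 *	int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
 */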

/* GCC will have called us before setting up the function prologue, so we
 * can clobber the normal scratch registers, but we need to make sure to
 * save/restore the registers used for argument passing (R0-R2) in case
 * the profiled function is using them.  Of the data registers, R3 is the
 * only one we can blow away; of the pointer registers, we have P0-P2.
 *
 * Upon entry, RETS points back into the top of the current profiled
 * function (just past its call to mcount).  And since GCC pushed the
 * previous RETS for us, the previous function's return address will be
 * waiting on the stack.  mmmm pie.
 */
ENTRY(_ftrace_caller)
	/* save first/second/third function arg and the return register */
	[--sp] = r2;
	[--sp] = r0;
	[--sp] = r1;
	[--sp] = rets;
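
	/* stack layout after the saves above:
	 *  [sp +  0] rets (return address into the profiled function)
	 *  [sp +  4] r1
	 *  [sp +  8] r0
	 *  [sp + 12] r2
	 *  [sp + 16] the previous RETS that GCC pushed for us
	 */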

	/* function_trace_call(unsigned long ip, unsigned long parent_ip):
	 *  ip: the call site in the profiled function ...
	 *  parent_ip: ... and the function that called it
	 * the ip itself needs adjusting back over the mcount call
	 */
	r0 = rets;
	r1 = [sp + 16];	/* skip the 4 local regs on stack */
	r0 += -MCOUNT_INSN_SIZE;
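
	/* a C-side tracer consuming these would look roughly like this
	 * (a sketch; my_tracer/my_ops are hypothetical names, while
	 * register_ftrace_function() is the generic kernel API):
	 *
	 *	static void my_tracer(unsigned long ip, unsigned long parent_ip)
	 *	{
	 *		// ip: address of the mcount call site
	 *		// parent_ip: return address into the caller
	 *	}
	 *	static struct ftrace_ops my_ops = { .func = my_tracer };
	 *	register_ftrace_function(&my_ops);
	 */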

.globl _ftrace_call
_ftrace_call:
	call _ftrace_stub

# ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl _ftrace_graph_call
_ftrace_graph_call:
	nop;	/* jump _ftrace_graph_caller; */
# endif
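
	/* _ftrace_call and _ftrace_graph_call above are the live patch
	 * sites: when tracing is enabled, the ftrace core rewrites them at
	 * runtime (roughly via the arch ftrace_update_ftrace_func() hook)
	 * so the stub call/nop becomes a call to the active handler
	 */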

	/* restore state and get out of dodge */
.Lfinish_trace:
	rets = [sp++];
	r1 = [sp++];
	r0 = [sp++];
	r2 = [sp++];

.globl _ftrace_stub
_ftrace_stub:
	rts;
ENDPROC(_ftrace_caller)

#else

/* See the comment block above _ftrace_caller */
ENTRY(__mcount)
	/* save the third function arg early since we clobber R2 in the
	 * tests below
	 */
	[--sp] = r2;

	/* load the function pointer to the tracer */
	p0.l = _ftrace_trace_function;
	p0.h = _ftrace_trace_function;
	r3 = [p0];

	/* micro optimization: skip the call entirely if the registered
	 * tracer is just the stub
	 */
	r2.l = _ftrace_stub;
	r2.h = _ftrace_stub;
	cc = r2 == r3;
	if ! cc jump .Ldo_trace;

# ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* if the ftrace_graph_return function pointer is not set to
	 * the ftrace_stub entry, call prepare_ftrace_return().
	 */
	p0.l = _ftrace_graph_return;
	p0.h = _ftrace_graph_return;
	r3 = [p0];
	cc = r2 == r3;
	if ! cc jump _ftrace_graph_caller;

	/* similarly, if the ftrace_graph_entry function pointer is not
	 * set to the ftrace_graph_entry_stub entry, call
	 * prepare_ftrace_return().
	 */
	p0.l = _ftrace_graph_entry;
	p0.h = _ftrace_graph_entry;
	r2.l = _ftrace_graph_entry_stub;
	r2.h = _ftrace_graph_entry_stub;
	r3 = [p0];
	cc = r2 == r3;
	if ! cc jump _ftrace_graph_caller;
# endif

	r2 = [sp++];
	rts;
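
	/* the dispatch above, as a C sketch (not generated code):
	 *
	 *	if (ftrace_trace_function != ftrace_stub)
	 *		goto do_trace;
	 * #ifdef CONFIG_FUNCTION_GRAPH_TRACER
	 *	if (ftrace_graph_return != ftrace_stub ||
	 *	    ftrace_graph_entry != ftrace_graph_entry_stub)
	 *		return ftrace_graph_caller();
	 * #endif
	 *	return;
	 */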

.Ldo_trace:

	/* save first/second function arg and the return register */
	[--sp] = r0;
	[--sp] = r1;
	[--sp] = rets;

	/* set up the tracer function pointer */
	p0 = r3;

	/* function_trace_call(unsigned long ip, unsigned long parent_ip):
	 *  ip: the call site in the profiled function ...
	 *  parent_ip: ... and the function that called it
	 * the ip itself needs adjusting back over the mcount call
	 */
	r0 = rets;
	r1 = [sp + 16];	/* skip the 4 local regs on stack */
	r0 += -MCOUNT_INSN_SIZE;

	/* call the tracer */
	call (p0);

	/* restore state and get out of dodge */
.Lfinish_trace:
	rets = [sp++];
	r1 = [sp++];
	r0 = [sp++];
	r2 = [sp++];

.globl _ftrace_stub
_ftrace_stub:
	rts;
ENDPROC(__mcount)

#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* The prepare_ftrace_return() function is similar to the trace function
 * except that it takes a pointer to the location of the frompc.  This
 * lets prepare_ftrace_return() temporarily hijack that slot so the
 * traced function returns through the graph handler.
 */
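
/* For reference, the C side (arch/blackfin/kernel/ftrace.c) has roughly
 * this shape -- a sketch of the contract, not a verbatim copy:
 *
 *	void prepare_ftrace_return(unsigned long *parent,
 *				   unsigned long self_addr,
 *				   unsigned long frame_pointer)
 *	{
 *		// stash the real *parent, then overwrite it with
 *		// return_to_handler so the return is routed through us
 *	}
 */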
ENTRY(_ftrace_graph_caller)
# ifndef CONFIG_DYNAMIC_FTRACE
	/* save first/second function arg and the return register */
	[--sp] = r0;
	[--sp] = r1;
	[--sp] = rets;

	/* prepare_ftrace_return(parent, self_addr, frame_pointer) */
	r0 = sp;	/* unsigned long *parent */
	r1 = rets;	/* unsigned long self_addr */
# else
	r0 = sp;	/* unsigned long *parent */
	r1 = [sp];	/* unsigned long self_addr */
# endif
# ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
	r2 = fp;	/* unsigned long frame_pointer */
# endif
	r0 += 16;	/* skip the 4 local regs on stack */
	r1 += -MCOUNT_INSN_SIZE;
	call _prepare_ftrace_return;

	jump .Lfinish_trace;
ENDPROC(_ftrace_graph_caller)

/* Undo the rewrite caused by ftrace_graph_caller().  The common function
 * ftrace_return_to_handler() will return the original rets so we can
 * restore it and be on our way.
 */
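
/* A sketch of the generic helper's contract (from the common graph
 * tracer code, not this file):
 *
 *	unsigned long ftrace_return_to_handler(unsigned long frame_pointer);
 *
 * It runs the registered return hook and hands back the original return
 * address; frame_pointer only matters when
 * CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST is set.
 */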
ENTRY(_return_to_handler)
	/* preserve the traced function's return values (R0/R1) and P0
	 * across the C call below
	 */
	[--sp] = p0;
	[--sp] = r0;
	[--sp] = r1;

	/* get original return address */
# ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
	r0 = fp;	/* frame_pointer, for the sanity test; Blackfin
			 * is sane, so this is normally compiled out */
# endif
	call _ftrace_return_to_handler;
	rets = r0;

	/* anomaly 05000371 - make sure we have at least three instructions
	 * between the write to RETS and the RTS; the restores below
	 * provide them
	 */
	r1 = [sp++];
	r0 = [sp++];
	p0 = [sp++];
	rts;
ENDPROC(_return_to_handler)
#endif