/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2009, 2010 DSLab, Lanzhou University, China
 * Author: Wu Zhangjin <wuzhangjin@gmail.com>
 *
 * Thanks go to Steven Rostedt for writing the original patch.
 */

#include <linux/uaccess.h>
#include <linux/init.h>
#include <linux/ftrace.h>

#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/uasm.h>

#include <asm-generic/sections.h>

#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
#define MCOUNT_OFFSET_INSNS 5
#else
#define MCOUNT_OFFSET_INSNS 4
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

/* Arch override because MIPS doesn't need to run this from stop_machine() */
void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}

#endif

/*
 * Check if the address is in kernel space.
 *
 * Clone of core_kernel_text() from kernel/extable.c, but without the
 * init_kernel_text() check: ftrace does not trace functions in init
 * sections.
 */
static inline int in_kernel_space(unsigned long ip)
{
	if (ip >= (unsigned long)_stext &&
	    ip <= (unsigned long)_etext)
		return 1;
	return 0;
}

#ifdef CONFIG_DYNAMIC_FTRACE

#define JAL 0x0c000000		/* jump & link: ip --> ra, jump to target */
#define ADDR_MASK 0x03ffffff	/* op_code|addr : 31...26|25 ....0 */
#define JUMP_RANGE_MASK ((1UL << 28) - 1)

#define INSN_NOP 0x00000000	/* nop */
#define INSN_JAL(addr)	\
	((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))
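
/*
 * Worked example (illustrative): patching in a call to a handler at
 * 0x80100000 gives
 *
 *	INSN_JAL(0x80100000)
 *	  = 0x0c000000 | ((0x80100000 >> 2) & 0x03ffffff)
 *	  = 0x0c040000
 *
 * jal keeps only 26 bits of the word-aligned target; the CPU supplies
 * the upper 4 bits from PC+4, so the target must lie in the same 256MB
 * segment as the call site. JUMP_RANGE_MASK enforces that below.
 */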

static unsigned int insn_jal_ftrace_caller __read_mostly;
static unsigned int insn_lui_v1_hi16_mcount __read_mostly;
static unsigned int insn_j_ftrace_graph_caller __maybe_unused __read_mostly;

static inline void ftrace_dyn_arch_init_insns(void)
{
	u32 *buf;
	unsigned int v1;

	/* lui v1, hi_16bit_of_mcount */
	v1 = 3;
	buf = (u32 *)&insn_lui_v1_hi16_mcount;
	UASM_i_LA_mostly(&buf, v1, MCOUNT_ADDR);

	/* jal (ftrace_caller + 8), jump over the first two instructions */
	buf = (u32 *)&insn_jal_ftrace_caller;
	uasm_i_jal(&buf, (FTRACE_ADDR + 8) & JUMP_RANGE_MASK);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* j ftrace_graph_caller */
	buf = (u32 *)&insn_j_ftrace_graph_caller;
	uasm_i_j(&buf, (unsigned long)ftrace_graph_caller & JUMP_RANGE_MASK);
#endif
}

static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
{
	int faulted;

	/* *(unsigned int *)ip = new_code; */
	safe_store_code(new_code, ip, faulted);

	if (unlikely(faulted))
		return -EFAULT;

	flush_icache_range(ip, ip + 8);

	return 0;
}

#ifndef CONFIG_64BIT
static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
				unsigned int new_code2)
{
	int faulted;

	safe_store_code(new_code1, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;
	ip += 4;
	safe_store_code(new_code2, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;
	ip -= 4;
	/*
	 * Flush both modified words; flushing from the advanced ip would
	 * leave the first patched instruction stale in the icache.
	 */
	flush_icache_range(ip, ip + 8);
	return 0;
}
#endif

/*
 * The details about the calling site of mcount on MIPS:
 *
 * 1. For kernel:
 *
 * move at, ra
 * jal _mcount		--> nop
 *
 * 2. For modules:
 *
 * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT
 *
 * lui v1, hi_16bit_of_mcount	      --> b 1f (0x10000005)
 * addiu v1, v1, low_16bit_of_mcount
 * move at, ra
 * move $12, ra_address
 * jalr v1
 *  sub sp, sp, 8
 *				      1: offset = 5 instructions
 *
 * 2.2 For the other situations
 *
 * lui v1, hi_16bit_of_mcount	      --> b 1f (0x10000004)
 * addiu v1, v1, low_16bit_of_mcount
 * move at, ra
 * jalr v1
 *  nop | move $12, ra_address | sub sp, sp, 8
 *				      1: offset = 4 instructions
 */

#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)
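
/*
 * Note: 0x10000000 is "beq zero, zero, offset", i.e. an unconditional
 * branch. The low 16 bits hold the offset in instructions, counted from
 * the slot after the branch, so ORing in MCOUNT_OFFSET_INSNS (4 or 5)
 * makes the patched "b 1f" skip the rest of the module call site shown
 * above.
 */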

int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int new;
	unsigned long ip = rec->ip;

	/*
	 * If ip is in kernel space, no long call is needed; otherwise a
	 * long call is.
	 */
	new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F;
#ifdef CONFIG_64BIT
	return ftrace_modify_code(ip, new);
#else
	/*
	 * On 32 bit MIPS platforms, gcc adds a stack adjust
	 * instruction in the delay slot after the branch to
	 * mcount and expects mcount to restore the sp on return.
	 * This is based on a legacy API and does nothing but
	 * waste instructions so it's being removed at runtime.
	 */
	return ftrace_modify_code_2(ip, new, INSN_NOP);
#endif
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int new;
	unsigned long ip = rec->ip;

	new = in_kernel_space(ip) ? insn_jal_ftrace_caller :
		insn_lui_v1_hi16_mcount;

	return ftrace_modify_code(ip, new);
}

#define FTRACE_CALL_IP ((unsigned long)(&ftrace_call))
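
/*
 * ftrace_call is the patchable call site inside ftrace_caller (see
 * arch/mips/kernel/mcount.S); ftrace_update_ftrace_func() below
 * repoints it at the current tracer. Since INSN_JAL() encodes only 26
 * bits of the target, func must lie in the same 256MB segment as the
 * call site, which holds for kernel text.
 */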

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned int new;

	new = INSN_JAL((unsigned long)func);

	return ftrace_modify_code(FTRACE_CALL_IP, new);
}

int __init ftrace_dyn_arch_init(void *data)
{
	/* Encode the instructions when booting */
	ftrace_dyn_arch_init_insns();

	/* Remove "b ftrace_stub" to ensure ftrace_caller() is executed */
	ftrace_modify_code(MCOUNT_ADDR, INSN_NOP);

	/* The return code is returned via data */
	*(unsigned long *)data = 0;

	return 0;
}
#endif	/* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

extern void ftrace_graph_call(void);
#define FTRACE_GRAPH_CALL_IP ((unsigned long)(&ftrace_graph_call))

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP,
			insn_j_ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, INSN_NOP);
}
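
/*
 * The two helpers above toggle the ftrace_graph_call site inside
 * ftrace_caller between "j ftrace_graph_caller" and a nop, switching
 * the function graph entry hook on and off at runtime.
 */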

#endif	/* CONFIG_DYNAMIC_FTRACE */

#ifndef KBUILD_MCOUNT_RA_ADDRESS

#define S_RA_SP	(0xafbf << 16)	/* s{d,w} ra, offset(sp) */
#define S_R_SP	(0xafb0 << 16)	/* s{d,w} R, offset(sp) */
#define OFFSET_MASK	0xffff	/* stack offset range: 0 ~ PT_SIZE */
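
/*
 * Worked example (illustrative): "sw ra, 28(sp)" encodes as
 *
 *	0xafbf001c = sw(0x2b << 26) | sp(29 << 21) | ra(31 << 16) | 28
 *
 * so (code & S_RA_SP) == S_RA_SP matches a store of ra relative to sp,
 * and (code & OFFSET_MASK) extracts the offset of the stack slot.
 */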

unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
		old_parent_ra, unsigned long parent_ra_addr, unsigned long fp)
{
	unsigned long sp, ip, tmp;
	unsigned int code;
	int faulted;

	/*
	 * For a module, move ip back past the call-site instruction
	 * "lui v1, hi_16bit_of_mcount" (offset 24); for the kernel, move
	 * back past "move ra, at" (offset 16).
	 */
	ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24);

	/*
	 * Scan the text backwards until we find either a non-store
	 * instruction or the "s{d,w} ra, offset(sp)" instruction.
	 */
	do {
		/* get the code at "ip": code = *(unsigned int *)ip; */
		safe_load_code(code, ip, faulted);

		if (unlikely(faulted))
			return 0;

		/*
		 * If we hit a non-store instruction before finding where
		 * ra is stored, this is a leaf function and it does not
		 * store ra on the stack.
		 */
		if ((code & S_R_SP) != S_R_SP)
			return parent_ra_addr;

		/* Move to the previous instruction (we scan backwards) */
		ip -= 4;
	} while ((code & S_RA_SP) != S_RA_SP);

	sp = fp + (code & OFFSET_MASK);

	/* tmp = *(unsigned long *)sp; */
	safe_load_stack(tmp, sp, faulted);
	if (unlikely(faulted))
		return 0;

	if (tmp == old_parent_ra)
		return sp;
	return 0;
}

#endif	/* !KBUILD_MCOUNT_RA_ADDRESS */

/*
 * Hook the return address and push it on the stack of return addresses
 * in the current thread_info.
 */
void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
			   unsigned long fp)
{
	unsigned long old_parent_ra;
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long)
	    &return_to_handler;
	int faulted, insns;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * "parent_ra_addr" is the stack address at which the return
	 * address of _mcount's caller has been saved.
	 *
	 * With gcc < 4.5, a leaf function does not save its return address
	 * on the stack, so _mcount "emulates" a slot in its own stack
	 * space, which we can hijack directly. A non-leaf function saves
	 * the return address in its own stack frame, so the real slot has
	 * to be located first; ftrace_get_parent_ra_addr() does that.
	 *
	 * With gcc >= 4.5 and the new -mmcount-ra-address option, a
	 * non-leaf function passes the location of the saved return
	 * address to us in $12, and a leaf function passes zero in $12
	 * since no return address is saved on the stack. The two cases
	 * are distinguished at build time via KBUILD_MCOUNT_RA_ADDRESS.
	 */

	/* old_parent_ra = *parent_ra_addr; */
	safe_load_stack(old_parent_ra, parent_ra_addr, faulted);
	if (unlikely(faulted))
		goto out;
#ifndef KBUILD_MCOUNT_RA_ADDRESS
	parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra,
			old_parent_ra, (unsigned long)parent_ra_addr, fp);
	/*
	 * If we failed to locate the stack slot holding the non-leaf
	 * function's ra, stop the function graph tracer rather than hook
	 * a bogus address.
	 */
	if (parent_ra_addr == 0)
		goto out;
#endif
	/* *parent_ra_addr = return_hooker; */
	safe_store_stack(return_hooker, parent_ra_addr, faulted);
	if (unlikely(faulted))
		goto out;

	if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp,
				     NULL) == -EBUSY) {
		*parent_ra_addr = old_parent_ra;
		return;
	}

	/*
	 * Get the recorded ip of the current mcount calling site in the
	 * __mcount_loc section, which will be used to filter the function
	 * entries configured through the tracing/set_graph_function
	 * interface.
	 */
	insns = in_kernel_space(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
	trace.func = self_ra - (MCOUNT_INSN_SIZE * insns);

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		*parent_ra_addr = old_parent_ra;
	}
	return;
out:
	ftrace_graph_stop();
	WARN_ON(1);
}
#endif	/* CONFIG_FUNCTION_GRAPH_TRACER */