1
2
3
4
5
6
7
8
9
10
11#include <linux/uaccess.h>
12#include <linux/init.h>
13#include <linux/ftrace.h>
14#include <linux/syscalls.h>
15
16#include <asm/asm.h>
17#include <asm/asm-offsets.h>
18#include <asm/cacheflush.h>
19#include <asm/syscall.h>
20#include <asm/uasm.h>
21#include <asm/unistd.h>
22
23#include <asm-generic/sections.h>
24
25#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
26#define MCOUNT_OFFSET_INSNS 5
27#else
28#define MCOUNT_OFFSET_INSNS 4
29#endif
30
31#ifdef CONFIG_DYNAMIC_FTRACE
32
33
/*
 * Arch hook used by the ftrace core to apply a batch of code
 * modifications; simply forwards @command to the generic modifier.
 */
void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}
38
39#endif
40
41
42
43
44
45
46
47static inline int in_kernel_space(unsigned long ip)
48{
49 if (ip >= (unsigned long)_stext &&
50 ip <= (unsigned long)_etext)
51 return 1;
52 return 0;
53}
54
55#ifdef CONFIG_DYNAMIC_FTRACE
56
#define JAL 0x0c000000		/* jump and link: 31 <- PC + 8, PC <- target */
#define ADDR_MASK 0x03ffffff	/* low 26 bits of a j/jal target (op_code|addr) */
#define JUMP_RANGE_MASK ((1UL << 28) - 1)	/* j/jal reach within a 256MB segment */

#define INSN_NOP 0x00000000	/* nop */
#define INSN_JAL(addr)	\
	((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))

/* Pre-encoded instruction templates, filled in once at boot. */
static unsigned int insn_jal_ftrace_caller __read_mostly;
static unsigned int insn_la_mcount[2] __read_mostly;
static unsigned int insn_j_ftrace_graph_caller __maybe_unused __read_mostly;

/*
 * Encode, via uasm, the instructions later patched into call sites:
 * the long-call "la v1, _mcount" pair (used for module text, which may
 * be out of jal range), "jal ftrace_caller+8" (kernel text), and
 * "j ftrace_graph_caller" for the graph tracer.
 */
static inline void ftrace_dyn_arch_init_insns(void)
{
	u32 *buf;
	unsigned int v1;

	/* la v1, _mcount — v1 is MIPS register $3 */
	v1 = 3;
	buf = (u32 *)&insn_la_mcount[0];
	UASM_i_LA(&buf, v1, MCOUNT_ADDR);

	/* jal (ftrace_caller + 8) — the +8 presumably skips the caller's
	 * first two setup instructions; TODO confirm against mcount.S */
	buf = (u32 *)&insn_jal_ftrace_caller;
	uasm_i_jal(&buf, (FTRACE_ADDR + 8) & JUMP_RANGE_MASK);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* j ftrace_graph_caller */
	buf = (u32 *)&insn_j_ftrace_graph_caller;
	uasm_i_j(&buf, (unsigned long)ftrace_graph_caller & JUMP_RANGE_MASK);
#endif
}
89
/*
 * Patch one instruction word at @ip with @new_code and flush the icache.
 * Returns 0 on success, -EFAULT if the text store faulted.
 */
static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
{
	int faulted;
	mm_segment_t old_fs;

	/* *(unsigned int *)ip = new_code; (fault-tolerant store) */
	safe_store_code(new_code, ip, faulted);

	if (unlikely(faulted))
		return -EFAULT;

	/* Flush under KERNEL_DS so the kernel-text range passes checks. */
	old_fs = get_fs();
	set_fs(get_ds());
	flush_icache_range(ip, ip + 8);
	set_fs(old_fs);

	return 0;
}
108
109#ifndef CONFIG_64BIT
/*
 * Patch two consecutive instruction words at @ip (32-bit only), writing
 * the first word before the second, then flush both from the icache.
 * Returns 0 on success, -EFAULT if either store faulted.
 */
static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
				unsigned int new_code2)
{
	int faulted;
	mm_segment_t old_fs;

	safe_store_code(new_code1, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;

	/* second word (the delay slot of the call site) */
	ip += 4;
	safe_store_code(new_code2, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;

	/* flush both patched words */
	ip -= 4;
	old_fs = get_fs();
	set_fs(get_ds());
	flush_icache_range(ip, ip + 8);
	set_fs(old_fs);

	return 0;
}
133
/*
 * Like ftrace_modify_code_2() but writes the words in Reverse order:
 * the second word is stored before the first.  Presumably this makes
 * enabling a call site safe against concurrent execution (the delay-slot
 * word is in place before the branch becomes live) — see ftrace_make_call().
 * Returns 0 on success, -EFAULT if either store faulted.
 */
static int ftrace_modify_code_2r(unsigned long ip, unsigned int new_code1,
				 unsigned int new_code2)
{
	int faulted;
	mm_segment_t old_fs;

	/* second word first */
	ip += 4;
	safe_store_code(new_code2, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;

	/* then the first word */
	ip -= 4;
	safe_store_code(new_code1, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;

	/* flush both patched words */
	old_fs = get_fs();
	set_fs(get_ds());
	flush_icache_range(ip, ip + 8);
	set_fs(old_fs);

	return 0;
}
157#endif
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
/* "b 1f": branch forward over the MCOUNT_OFFSET_INSNS-word call sequence */
#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)

/*
 * Disable the mcount call site at rec->ip.
 * Returns 0 on success or -EFAULT from the code patcher.
 */
int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int new;
	unsigned long ip = rec->ip;

	/*
	 * Kernel text uses a single jal, so a plain nop suffices; module
	 * text uses the multi-word long-call sequence, so branch over it
	 * instead (INSN_B_1F).
	 */
	new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F;
#ifdef CONFIG_64BIT
	return ftrace_modify_code(ip, new);
#else
	/*
	 * On 32-bit the call site is two words wide (cf. the
	 * MCOUNT_OFFSET_INSNS 5-vs-4 split above); the second word is
	 * cleared to a nop as well.
	 */
	return ftrace_modify_code_2(ip, new, INSN_NOP);
#endif
}
215
216int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
217{
218 unsigned int new;
219 unsigned long ip = rec->ip;
220
221 new = in_kernel_space(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0];
222
223#ifdef CONFIG_64BIT
224 return ftrace_modify_code(ip, new);
225#else
226 return ftrace_modify_code_2r(ip, new, in_kernel_space(ip) ?
227 INSN_NOP : insn_la_mcount[1]);
228#endif
229}
230
#define FTRACE_CALL_IP ((unsigned long)(&ftrace_call))

/*
 * Retarget the call instruction at the ftrace_call site inside
 * ftrace_caller so it jumps to @func.
 */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned int new;

	/* encode "jal func" */
	new = INSN_JAL((unsigned long)func);

	return ftrace_modify_code(FTRACE_CALL_IP, new);
}
241
242int __init ftrace_dyn_arch_init(void)
243{
244
245 ftrace_dyn_arch_init_insns();
246
247
248 ftrace_modify_code(MCOUNT_ADDR, INSN_NOP);
249
250 return 0;
251}
252#endif
253
254#ifdef CONFIG_FUNCTION_GRAPH_TRACER
255
256#ifdef CONFIG_DYNAMIC_FTRACE
257
258extern void ftrace_graph_call(void);
259#define FTRACE_GRAPH_CALL_IP ((unsigned long)(&ftrace_graph_call))
260
/* Patch the ftrace_graph_call site to jump to ftrace_graph_caller. */
int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP,
			insn_j_ftrace_graph_caller);
}
266
/* Restore the ftrace_graph_call site to a nop, disabling graph entry. */
int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, INSN_NOP);
}
271
272#endif
273
274#ifndef KBUILD_MCOUNT_RA_ADDRESS
275
/* Prologue-scan encodings: "s{d,w} ra, offset(sp)" and any "s{d,w} Rn, offset(sp)" */
#define S_RA_SP (0xafbf << 16)
#define S_R_SP (0xafb0 << 16)
#define OFFSET_MASK 0xffff	/* low 16 bits hold the stack offset */

/*
 * Without KBUILD_MCOUNT_RA_ADDRESS the compiler does not tell us where
 * the caller's ra was saved, so scan the function prologue backwards
 * from @self_ra to find the "s{d,w} ra, offset(sp)" store and derive
 * the stack slot from @fp.  Returns the slot address, the original
 * @parent_ra_addr for a leaf function, or 0 on any fault/mismatch.
 */
unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
		old_parent_ra, unsigned long parent_ra_addr, unsigned long fp)
{
	unsigned long sp, ip, tmp;
	unsigned int code;
	int faulted;

	/*
	 * Start the scan just before the mcount call: 16 bytes back for
	 * kernel text, 24 for the longer module call sequence (cf. the
	 * in_kernel_space() offsets used elsewhere in this file).
	 */
	ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24);

	/*
	 * Walk backwards until we see either a non-store instruction or
	 * the "s{d,w} ra, offset(sp)" store.
	 */
	do {
		/* code = *(unsigned int *)ip; (fault-tolerant load) */
		safe_load_code(code, ip, faulted);

		if (unlikely(faulted))
			return 0;

		/*
		 * A non-store before the ra store means this is a leaf
		 * function that never saved ra on the stack: the address
		 * we were given is already the right one.
		 */
		if ((code & S_R_SP) != S_R_SP)
			return parent_ra_addr;

		/* previous instruction */
		ip -= 4;
	} while ((code & S_RA_SP) != S_RA_SP);

	/* stack slot of the saved ra: frame pointer + encoded offset */
	sp = fp + (code & OFFSET_MASK);

	/* tmp = *(unsigned long *)sp; (fault-tolerant load) */
	safe_load_stack(tmp, sp, faulted);
	if (unlikely(faulted))
		return 0;

	/* Sanity check: the slot must still hold the expected ra. */
	if (tmp == old_parent_ra)
		return sp;
	return 0;
}
327
328#endif
329
330
331
332
333
334void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
335 unsigned long fp)
336{
337 unsigned long old_parent_ra;
338 struct ftrace_graph_ent trace;
339 unsigned long return_hooker = (unsigned long)
340 &return_to_handler;
341 int faulted, insns;
342
343 if (unlikely(ftrace_graph_is_dead()))
344 return;
345
346 if (unlikely(atomic_read(¤t->tracing_graph_pause)))
347 return;
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367 safe_load_stack(old_parent_ra, parent_ra_addr, faulted);
368 if (unlikely(faulted))
369 goto out;
370#ifndef KBUILD_MCOUNT_RA_ADDRESS
371 parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra,
372 old_parent_ra, (unsigned long)parent_ra_addr, fp);
373
374
375
376
377 if (parent_ra_addr == 0)
378 goto out;
379#endif
380
381 safe_store_stack(return_hooker, parent_ra_addr, faulted);
382 if (unlikely(faulted))
383 goto out;
384
385 if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp)
386 == -EBUSY) {
387 *parent_ra_addr = old_parent_ra;
388 return;
389 }
390
391
392
393
394
395
396
397 insns = in_kernel_space(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
398 trace.func = self_ra - (MCOUNT_INSN_SIZE * insns);
399
400
401 if (!ftrace_graph_entry(&trace)) {
402 current->curr_ret_stack--;
403 *parent_ra_addr = old_parent_ra;
404 }
405 return;
406out:
407 ftrace_graph_stop();
408 WARN_ON(1);
409}
410#endif
411
412#ifdef CONFIG_FTRACE_SYSCALLS
413
414#ifdef CONFIG_32BIT
/* 32-bit kernel: map an O32 syscall number to its handler address. */
unsigned long __init arch_syscall_addr(int nr)
{
	return (unsigned long)sys_call_table[nr - __NR_O32_Linux];
}
419#endif
420
421#ifdef CONFIG_64BIT
422
/*
 * 64-bit kernel: a syscall number may fall in the n32, n64 or o32 ABI
 * range; return the handler from the matching table, or sys_ni_syscall
 * when @nr matches none of the compiled-in ABIs.
 */
unsigned long __init arch_syscall_addr(int nr)
{
#ifdef CONFIG_MIPS32_N32
	if (nr >= __NR_N32_Linux && nr <= __NR_N32_Linux + __NR_N32_Linux_syscalls)
		return (unsigned long)sysn32_call_table[nr - __NR_N32_Linux];
#endif
	if (nr >= __NR_64_Linux && nr <= __NR_64_Linux + __NR_64_Linux_syscalls)
		return (unsigned long)sys_call_table[nr - __NR_64_Linux];
#ifdef CONFIG_MIPS32_O32
	if (nr >= __NR_O32_Linux && nr <= __NR_O32_Linux + __NR_O32_Linux_syscalls)
		return (unsigned long)sys32_call_table[nr - __NR_O32_Linux];
#endif

	return (unsigned long) &sys_ni_syscall;
}
438#endif
439
440#endif
441