// SPDX-License-Identifier: GPL-2.0
/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/sched/task.h>
#include <linux/kallsyms.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/tracefs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/bsearch.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>
#include <linux/kprobes.h>

#include <trace/events/sched.h>

#include <asm/sections.h>
#include <asm/setup.h>

#include "ftrace_internal.h"
#include "trace_output.h"
#include "trace_stat.h"

#define FTRACE_INVALID_FUNCTION	"__ftrace_invalid_address__"

#define FTRACE_WARN_ON(cond)			\
	({					\
		int ___r = cond;		\
		if (WARN_ON(___r))		\
			ftrace_kill();		\
		___r;				\
	})

#define FTRACE_WARN_ON_ONCE(cond)		\
	({					\
		int ___r = cond;		\
		if (WARN_ON_ONCE(___r))		\
			ftrace_kill();		\
		___r;				\
	})
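
/*
 * Illustrative usage note (not part of the original file): both macros
 * are GNU C statement expressions, so they evaluate to the tested
 * condition and can be used inline:
 *
 *	if (FTRACE_WARN_ON(!ops))
 *		return;
 *
 * A failing check both emits a warning and calls ftrace_kill(), which
 * permanently disables ftrace, since continuing to patch kernel text
 * after an inconsistency has been detected would be unsafe.
 */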

#define FTRACE_HASH_DEFAULT_BITS 10
#define FTRACE_HASH_MAX_BITS 12

#ifdef CONFIG_DYNAMIC_FTRACE
#define INIT_OPS_HASH(opsname)	\
	.func_hash		= &opsname.local_hash,			\
	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
#else
#define INIT_OPS_HASH(opsname)
#endif

enum {
	FTRACE_MODIFY_ENABLE_FL		= (1 << 0),
	FTRACE_MODIFY_MAY_SLEEP_FL	= (1 << 1),
};

struct ftrace_ops ftrace_list_end __read_mostly = {
	.func		= ftrace_stub,
	.flags		= FTRACE_OPS_FL_STUB,
	INIT_OPS_HASH(ftrace_list_end)
};

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int __maybe_unused last_ftrace_enabled;

/* Current function tracing op */
struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
/* What to set function_trace_op to */
static struct ftrace_ops *set_function_trace_op;

static bool ftrace_pids_enabled(struct ftrace_ops *ops)
{
	struct trace_array *tr;

	if (!(ops->flags & FTRACE_OPS_FL_PID) || !ops->private)
		return false;

	tr = ops->private;

	return tr->function_pids != NULL || tr->function_no_pids != NULL;
}

static void ftrace_update_trampoline(struct ftrace_ops *ops);

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

DEFINE_MUTEX(ftrace_lock);

struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
struct ftrace_ops global_ops;

/* Defined by vmlinux.lds.h, see the comment above arch_ftrace_ops_list_func for details */
void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs);

static inline void ftrace_ops_init(struct ftrace_ops *ops)
{
#ifdef CONFIG_DYNAMIC_FTRACE
	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
		mutex_init(&ops->local_hash.regex_lock);
		ops->func_hash = &ops->local_hash;
		ops->flags |= FTRACE_OPS_FL_INITIALIZED;
	}
#endif
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
			    struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = op->private;
	int pid;

	if (tr) {
		pid = this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid);
		if (pid == FTRACE_PID_IGNORE)
			return;
		if (pid != FTRACE_PID_TRACE &&
		    pid != current->pid)
			return;
	}

	op->saved_func(ip, parent_ip, op, fregs);
}

static void ftrace_sync_ipi(void *data)
{
	/* Probably not needed, but do it anyway */
	smp_rmb();
}

static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
{
	/*
	 * If this is a dynamic or RCU ops, or we force list func,
	 * then it needs to call the list anyway.
	 */
	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_RCU) ||
	    FTRACE_FORCE_LIST_FUNC)
		return ftrace_ops_list_func;

	return ftrace_ops_get_func(ops);
}

static void update_ftrace_function(void)
{
	ftrace_func_t func;

	/*
	 * Prepare the ftrace_ops that the arch callback will use.
	 * If there's only one ftrace_ops registered, the ftrace_ops_list
	 * will point to the ops we want.
	 */
	set_function_trace_op = rcu_dereference_protected(ftrace_ops_list,
						lockdep_is_held(&ftrace_lock));

	/* If there's no ftrace_ops registered, just call the stub function */
	if (set_function_trace_op == &ftrace_list_end) {
		func = ftrace_stub;

	/*
	 * If we are at the end of the list and this ops is
	 * recursion safe and not dynamic and the arch supports passing ops,
	 * then have the mcount trampoline call the function directly.
	 */
	} else if (rcu_dereference_protected(ftrace_ops_list->next,
			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
		func = ftrace_ops_get_list_func(ftrace_ops_list);

	} else {
		/* Just use the default ftrace_ops */
		set_function_trace_op = &ftrace_list_end;
		func = ftrace_ops_list_func;
	}

	update_function_graph_func();

	/* If there's no change, then do nothing more here */
	if (ftrace_trace_function == func)
		return;

	/*
	 * If we are using the list function, it doesn't care
	 * about the function_trace_ops.
	 */
	if (func == ftrace_ops_list_func) {
		ftrace_trace_function = func;
		/*
		 * Don't even bother setting function_trace_ops,
		 * it would be racy to do so anyway.
		 */
		return;
	}

#ifndef CONFIG_DYNAMIC_FTRACE
	/*
	 * For static tracing, we need to be a bit more careful.
	 * The function change takes affect immediately. Thus,
	 * we need to coordinate the setting of the function_trace_ops
	 * with the setting of the ftrace_trace_function.
	 *
	 * Set the function to the list ops, which will call the
	 * function we want, albeit indirectly, but it handles the
	 * ftrace_ops and doesn't depend on function_trace_op.
	 */
	ftrace_trace_function = ftrace_ops_list_func;
	/*
	 * Make sure all CPUs see this. Yes this is slow, but static
	 * tracing is slow and nasty to have enabled.
	 */
	synchronize_rcu_tasks_rude();
	/* Now all cpus are using the list ops. */
	function_trace_op = set_function_trace_op;
	/* Make sure the function_trace_op is visible on all CPUs */
	smp_wmb();
	/* Nasty way to force a rmb on all cpus */
	smp_call_function(ftrace_sync_ipi, NULL, 1);
	/* OK, we are all set to update the ftrace_trace_function now! */
#endif /* !CONFIG_DYNAMIC_FTRACE */

	ftrace_trace_function = func;
}

static void add_ftrace_ops(struct ftrace_ops __rcu **list,
			   struct ftrace_ops *ops)
{
	rcu_assign_pointer(ops->next, *list);

	/*
	 * We are entering ops into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_ops_list.
	 */
	rcu_assign_pointer(*list, ops);
}

static int remove_ftrace_ops(struct ftrace_ops __rcu **list,
			     struct ftrace_ops *ops)
{
	struct ftrace_ops **p;

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (rcu_dereference_protected(*list,
			lockdep_is_held(&ftrace_lock)) == ops &&
	    rcu_dereference_protected(ops->next,
			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
		*list = &ftrace_list_end;
		return 0;
	}

	for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops)
		return -1;

	*p = (*p)->next;
	return 0;
}

static void ftrace_update_trampoline(struct ftrace_ops *ops);

int __register_ftrace_function(struct ftrace_ops *ops)
{
	if (ops->flags & FTRACE_OPS_FL_DELETED)
		return -EINVAL;

	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
		return -EBUSY;

#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
	 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
	 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
	 */
	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
	    !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
		return -EINVAL;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
		ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
#endif
	if (!ftrace_enabled && (ops->flags & FTRACE_OPS_FL_PERMANENT))
		return -EBUSY;

	if (!is_kernel_core_data((unsigned long)ops))
		ops->flags |= FTRACE_OPS_FL_DYNAMIC;

	add_ftrace_ops(&ftrace_ops_list, ops);

	/* Always save the function, and reset it when unregistering */
	ops->saved_func = ops->func;

	if (ftrace_pids_enabled(ops))
		ops->func = ftrace_pid_func;

	ftrace_update_trampoline(ops);

	if (ftrace_enabled)
		update_ftrace_function();

	return 0;
}

int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
		return -EBUSY;

	ret = remove_ftrace_ops(&ftrace_ops_list, ops);

	if (ret < 0)
		return ret;

	if (ftrace_enabled)
		update_ftrace_function();

	ops->func = ops->saved_func;

	return 0;
}

static void ftrace_update_pid_func(void)
{
	struct ftrace_ops *op;

	/* Only do something if we are tracing something */
	if (ftrace_trace_function == ftrace_stub)
		return;

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (op->flags & FTRACE_OPS_FL_PID) {
			op->func = ftrace_pids_enabled(op) ?
				ftrace_pid_func : op->saved_func;
			ftrace_update_trampoline(op);
		}
	} while_for_each_ftrace_op(op);

	update_ftrace_function();
}

#ifdef CONFIG_FUNCTION_PROFILER
struct ftrace_profile {
	struct hlist_node		node;
	unsigned long			ip;
	unsigned long			counter;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	unsigned long long		time;
	unsigned long long		time_squared;
#endif
};

struct ftrace_profile_page {
	struct ftrace_profile_page	*next;
	unsigned long			index;
	struct ftrace_profile		records[];
};

struct ftrace_profile_stat {
	atomic_t			disabled;
	struct hlist_head		*hash;
	struct ftrace_profile_page	*pages;
	struct ftrace_profile_page	*start;
	struct tracer_stat		stat;
};

#define PROFILE_RECORDS_SIZE						\
	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

#define PROFILES_PER_PAGE						\
	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
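
/*
 * Worked example (illustrative, not from the original source), assuming
 * 4096-byte pages and 64-bit longs: offsetof(struct ftrace_profile_page,
 * records) is 16, so PROFILE_RECORDS_SIZE is 4080 bytes.  Without the
 * graph tracer a record is 32 bytes (16-byte hlist_node + ip + counter),
 * giving 4080 / 32 = 127 records per page; with the graph tracer's two
 * extra u64 fields a record is 48 bytes, giving 85 records per page.
 */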

static int ftrace_profile_enabled __read_mostly;

/* ftrace_profile_lock is held while the profiler is enabled or disabled */
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);

#define FTRACE_PROFILE_HASH_BITS 10
#define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)

static void *
function_stat_next(void *v, int idx)
{
	struct ftrace_profile *rec = v;
	struct ftrace_profile_page *pg;

	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

 again:
	if (idx != 0)
		rec++;

	if ((void *)rec >= (void *)&pg->records[pg->index]) {
		pg = pg->next;
		if (!pg)
			return NULL;
		rec = &pg->records[0];
		if (!rec->counter)
			goto again;
	}

	return rec;
}

static void *function_stat_start(struct tracer_stat *trace)
{
	struct ftrace_profile_stat *stat =
		container_of(trace, struct ftrace_profile_stat, stat);

	if (!stat || !stat->start)
		return NULL;

	return function_stat_next(&stat->start->records[0], 0);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* function graph compares on total time */
static int function_stat_cmp(const void *p1, const void *p2)
{
	const struct ftrace_profile *a = p1;
	const struct ftrace_profile *b = p2;

	if (a->time < b->time)
		return -1;
	if (a->time > b->time)
		return 1;
	else
		return 0;
}
#else
/* not function graph compares against hits */
static int function_stat_cmp(const void *p1, const void *p2)
{
	const struct ftrace_profile *a = p1;
	const struct ftrace_profile *b = p2;

	if (a->counter < b->counter)
		return -1;
	if (a->counter > b->counter)
		return 1;
	else
		return 0;
}
#endif

static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_puts(m, "  Function                               "
		    "Hit    Time            Avg             s^2\n"
		    "  --------                               "
		    "---    ----            ---             ---\n");
#else
	seq_puts(m, "  Function                               Hit\n"
		    "  --------                               ---\n");
#endif
	return 0;
}

static int function_stat_show(struct seq_file *m, void *v)
{
	struct ftrace_profile *rec = v;
	char str[KSYM_SYMBOL_LEN];
	int ret = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	static struct trace_seq s;
	unsigned long long avg;
	unsigned long long stddev;
#endif
	mutex_lock(&ftrace_profile_lock);

	/* we raced with function_profile_reset() */
	if (unlikely(rec->counter == 0)) {
		ret = -EBUSY;
		goto out;
	}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	avg = div64_ul(rec->time, rec->counter);
	if (tracing_thresh && (avg < tracing_thresh))
		goto out;
#endif

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_puts(m, "    ");

	/* Sample standard deviation (s^2) */
	if (rec->counter <= 1)
		stddev = 0;
	else {
		/*
		 * Apply Welford's method:
		 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
		 */
		stddev = rec->counter * rec->time_squared -
			 rec->time * rec->time;

		/*
		 * Divide only by 1000 for ns^2 -> us^2 conversion.
		 * trace_print_graph_duration will divide by 1000 again.
		 */
		stddev = div64_ul(stddev,
				  rec->counter * (rec->counter - 1) * 1000);
	}

	trace_seq_init(&s);
	trace_print_graph_duration(rec->time, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(avg, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(stddev, &s);
	trace_print_seq(m, &s);
#endif
	seq_putc(m, '\n');
out:
	mutex_unlock(&ftrace_profile_lock);

	return ret;
}
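
/*
 * Illustrative derivation (not from the original source): the sample
 * variance is s^2 = Sum((x_i - mean)^2) / (n - 1).  Expanding the square
 * and substituting mean = Sum(x_i) / n gives
 *
 *	s^2 = (n * Sum(x_i^2) - (Sum(x_i))^2) / (n * (n - 1))
 *
 * which is what the code above computes from the running accumulators
 * rec->time (Sum x_i) and rec->time_squared (Sum x_i^2), without having
 * to store the individual samples.
 */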

static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;

	pg = stat->pages = stat->start;

	while (pg) {
		memset(pg->records, 0, PROFILE_RECORDS_SIZE);
		pg->index = 0;
		pg = pg->next;
	}

	memset(stat->hash, 0,
	       FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
}

static int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;
	int functions;
	int pages;
	int i;

	/* If we already allocated, do nothing */
	if (stat->pages)
		return 0;

	stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
	if (!stat->pages)
		return -ENOMEM;

#ifdef CONFIG_DYNAMIC_FTRACE
	functions = ftrace_update_tot_cnt;
#else
	/*
	 * We do not know the number of functions that exist because
	 * dynamic tracing is what counts them. With past experience
	 * we have around 20K functions. That should be more than enough.
	 * It is highly unlikely we will execute every function in
	 * the kernel.
	 */
	functions = 20000;
#endif

	pg = stat->start = stat->pages;

	pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);

	for (i = 1; i < pages; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
		if (!pg->next)
			goto out_free;
		pg = pg->next;
	}

	return 0;

 out_free:
	pg = stat->start;
	while (pg) {
		unsigned long tmp = (unsigned long)pg;

		pg = pg->next;
		free_page(tmp);
	}

	stat->pages = NULL;
	stat->start = NULL;

	return -ENOMEM;
}

static int ftrace_profile_init_cpu(int cpu)
{
	struct ftrace_profile_stat *stat;
	int size;

	stat = &per_cpu(ftrace_profile_stats, cpu);

	if (stat->hash) {
		/* If the profile is already created, simply reset it */
		ftrace_profile_reset(stat);
		return 0;
	}

	/*
	 * We are profiling all functions, but usually only a few thousand
	 * functions are hit. We'll make a hash of 1024 items.
	 */
	size = FTRACE_PROFILE_HASH_SIZE;

	stat->hash = kcalloc(size, sizeof(struct hlist_head), GFP_KERNEL);

	if (!stat->hash)
		return -ENOMEM;

	/* Preallocate the function profiling pages */
	if (ftrace_profile_pages_init(stat) < 0) {
		kfree(stat->hash);
		stat->hash = NULL;
		return -ENOMEM;
	}

	return 0;
}

static int ftrace_profile_init(void)
{
	int cpu;
	int ret = 0;

	for_each_possible_cpu(cpu) {
		ret = ftrace_profile_init_cpu(cpu);
		if (ret)
			break;
	}

	return ret;
}

/* interrupts must be disabled */
static struct ftrace_profile *
ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec;
	struct hlist_head *hhd;
	unsigned long key;

	key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
	hhd = &stat->hash[key];

	if (hlist_empty(hhd))
		return NULL;

	hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
		if (rec->ip == ip)
			return rec;
	}

	return NULL;
}

static void ftrace_add_profile(struct ftrace_profile_stat *stat,
			       struct ftrace_profile *rec)
{
	unsigned long key;

	key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
	hlist_add_head_rcu(&rec->node, &stat->hash[key]);
}

/*
 * The memory is already allocated, this simply finds a new record to use.
 */
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec = NULL;

	/* prevent recursion (from NMIs) */
	if (atomic_inc_return(&stat->disabled) != 1)
		goto out;

	/*
	 * Try to find the function again since an NMI
	 * could have added it
	 */
	rec = ftrace_find_profiled_func(stat, ip);
	if (rec)
		goto out;

	if (stat->pages->index == PROFILES_PER_PAGE) {
		if (!stat->pages->next)
			goto out;
		stat->pages = stat->pages->next;
	}

	rec = &stat->pages->records[stat->pages->index++];
	rec->ip = ip;
	ftrace_add_profile(stat, rec);

 out:
	atomic_dec(&stat->disabled);

	return rec;
}
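
/*
 * Illustrative note (not from the original source): stat->disabled acts
 * as a single-owner latch rather than a lock.  atomic_inc_return() == 1
 * means nothing else on this CPU (for example an NMI that fired in the
 * middle of this path) is currently allocating; a nested caller sees a
 * value greater than one and simply bails out, dropping that one sample
 * instead of corrupting the page list.
 */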

static void
function_profile_call(unsigned long ip, unsigned long parent_ip,
		      struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
	struct ftrace_profile_stat *stat;
	struct ftrace_profile *rec;
	unsigned long flags;

	if (!ftrace_profile_enabled)
		return;

	local_irq_save(flags);

	stat = this_cpu_ptr(&ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	rec = ftrace_find_profiled_func(stat, ip);
	if (!rec) {
		rec = ftrace_profile_alloc(stat, ip);
		if (!rec)
			goto out;
	}

	rec->counter++;
 out:
	local_irq_restore(flags);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static bool fgraph_graph_time = true;

void ftrace_graph_graph_time_control(bool enable)
{
	fgraph_graph_time = enable;
}

static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
	struct ftrace_ret_stack *ret_stack;

	function_profile_call(trace->func, 0, NULL, NULL);

	/* If function graph is shutting down, ret_stack can be NULL */
	if (!current->ret_stack)
		return 0;

	ret_stack = ftrace_graph_get_ret_stack(current, 0);
	if (ret_stack)
		ret_stack->subtime = 0;

	return 1;
}

static void profile_graph_return(struct ftrace_graph_ret *trace)
{
	struct ftrace_ret_stack *ret_stack;
	struct ftrace_profile_stat *stat;
	unsigned long long calltime;
	struct ftrace_profile *rec;
	unsigned long flags;

	local_irq_save(flags);
	stat = this_cpu_ptr(&ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	/* If the calltime was zero'd ignore it */
	if (!trace->calltime)
		goto out;

	calltime = trace->rettime - trace->calltime;

	if (!fgraph_graph_time) {

		/* Append this call time to the parent time to subtract */
		ret_stack = ftrace_graph_get_ret_stack(current, 1);
		if (ret_stack)
			ret_stack->subtime += calltime;

		ret_stack = ftrace_graph_get_ret_stack(current, 0);
		if (ret_stack && ret_stack->subtime < calltime)
			calltime -= ret_stack->subtime;
		else
			calltime = 0;
	}

	rec = ftrace_find_profiled_func(stat, trace->func);
	if (rec) {
		rec->time += calltime;
		rec->time_squared += calltime * calltime;
	}

 out:
	local_irq_restore(flags);
}

static struct fgraph_ops fprofiler_ops = {
	.entryfunc = &profile_graph_entry,
	.retfunc = &profile_graph_return,
};

static int register_ftrace_profiler(void)
{
	return register_ftrace_graph(&fprofiler_ops);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_graph(&fprofiler_ops);
}
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly = {
	.func		= function_profile_call,
	.flags		= FTRACE_OPS_FL_INITIALIZED,
	INIT_OPS_HASH(ftrace_profile_ops)
};

static int register_ftrace_profiler(void)
{
	return register_ftrace_function(&ftrace_profile_ops);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_function(&ftrace_profile_ops);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	val = !!val;

	mutex_lock(&ftrace_profile_lock);
	if (ftrace_profile_enabled ^ val) {
		if (val) {
			ret = ftrace_profile_init();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}

			ret = register_ftrace_profiler();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}
			ftrace_profile_enabled = 1;
		} else {
			ftrace_profile_enabled = 0;
			/*
			 * unregister_ftrace_profiler calls stop_machine
			 * so this acts like a synchronize_rcu.
			 */
			unregister_ftrace_profiler();
		}
	}
 out:
	mutex_unlock(&ftrace_profile_lock);

	*ppos += cnt;

	return cnt;
}

static ssize_t
ftrace_profile_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	char buf[64];		/* big enough to hold a number */
	int r;

	r = sprintf(buf, "%u\n", ftrace_profile_enabled);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations ftrace_profile_fops = {
	.open		= tracing_open_generic,
	.read		= ftrace_profile_read,
	.write		= ftrace_profile_write,
	.llseek		= default_llseek,
};

/* used to initialize the real stat files */
static struct tracer_stat function_stats __initdata = {
	.name		= "functions",
	.stat_start	= function_stat_start,
	.stat_next	= function_stat_next,
	.stat_cmp	= function_stat_cmp,
	.stat_headers	= function_stat_headers,
	.stat_show	= function_stat_show
};

static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
	struct ftrace_profile_stat *stat;
	char *name;
	int ret;
	int cpu;

	for_each_possible_cpu(cpu) {
		stat = &per_cpu(ftrace_profile_stats, cpu);

		name = kasprintf(GFP_KERNEL, "function%d", cpu);
		if (!name) {
			/*
			 * The files created are permanent, if something happens
			 * we still do not free memory.
			 */
			WARN(1,
			     "Could not allocate stat file for cpu %d\n",
			     cpu);
			return;
		}
		stat->stat = function_stats;
		stat->stat.name = name;
		ret = register_stat_tracer(&stat->stat);
		if (ret) {
			WARN(1,
			     "Could not register function stat for cpu %d\n",
			     cpu);
			kfree(name);
			return;
		}
	}

	trace_create_file("function_profile_enabled",
			  TRACE_MODE_WRITE, d_tracer, NULL,
			  &ftrace_profile_fops);
}

#else /* CONFIG_FUNCTION_PROFILER */
static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
}
#endif /* CONFIG_FUNCTION_PROFILER */

#ifdef CONFIG_DYNAMIC_FTRACE

static struct ftrace_ops *removed_ops;

/*
 * Set when doing a global update, like enabling all recs in the
 * filter_hash or updating to a new function.
 */
static bool update_all_ops;

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

struct ftrace_func_probe {
	struct ftrace_probe_ops	*probe_ops;
	struct ftrace_ops	ops;
	struct trace_array	*tr;
	struct list_head	list;
	void			*data;
	int			ref;
};

/*
 * We make these constant because no one should touch them,
 * but they are used as the default "empty hash", to avoid allocating
 * it all the time. These are in a read only section such that if
 * anyone does try to modify it, it will cause an exception.
 */
static const struct hlist_head empty_buckets[1];
static const struct ftrace_hash empty_hash = {
	.buckets = (struct hlist_head *)empty_buckets,
};
#define EMPTY_HASH	((struct ftrace_hash *)&empty_hash)

struct ftrace_ops global_ops = {
	.func				= ftrace_stub,
	.local_hash.notrace_hash	= EMPTY_HASH,
	.local_hash.filter_hash		= EMPTY_HASH,
	INIT_OPS_HASH(global_ops)
	.flags				= FTRACE_OPS_FL_INITIALIZED |
					  FTRACE_OPS_FL_PID,
};

/*
 * Used by the stack unwinder to know about dynamic ftrace trampolines.
 */
struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr)
{
	struct ftrace_ops *op = NULL;

	/*
	 * Some of the ops may be dynamically allocated,
	 * they are freed after a synchronize_rcu().
	 */
	preempt_disable_notrace();

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/*
		 * This is to check for dynamically allocated trampolines.
		 * Trampolines that are in kernel text will have
		 * core_kernel_text() return true.
		 */
		if (op->trampoline && op->trampoline_size)
			if (addr >= op->trampoline &&
			    addr < op->trampoline + op->trampoline_size) {
				preempt_enable_notrace();
				return op;
			}
	} while_for_each_ftrace_op(op);
	preempt_enable_notrace();

	return NULL;
}

/*
 * This is used by __kernel_text_address() to return true if the
 * address is on a dynamically allocated trampoline that would
 * not return true for either core_kernel_text() or
 * is_module_text_address().
 */
bool is_ftrace_trampoline(unsigned long addr)
{
	return ftrace_ops_trampoline(addr) != NULL;
}

struct ftrace_page {
	struct ftrace_page	*next;
	struct dyn_ftrace	*records;
	int			index;
	int			order;
};

#define ENTRY_SIZE sizeof(struct dyn_ftrace)
#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static __always_inline unsigned long
ftrace_hash_key(struct ftrace_hash *hash, unsigned long ip)
{
	if (hash->size_bits > 0)
		return hash_long(ip, hash->size_bits);

	return 0;
}

/* Only use this function if ftrace_hash_empty() has already been tested */
static __always_inline struct ftrace_func_entry *
__ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
	unsigned long key;
	struct ftrace_func_entry *entry;
	struct hlist_head *hhd;

	key = ftrace_hash_key(hash, ip);
	hhd = &hash->buckets[key];

	hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
		if (entry->ip == ip)
			return entry;
	}
	return NULL;
}

/**
 * ftrace_lookup_ip - Test to see if an ip exists in an ftrace_hash
 * @hash: The hash to look at
 * @ip: The instruction pointer to test
 *
 * Search a given @hash to see if a given instruction pointer (@ip)
 * exists in it.
 *
 * Returns the entry that holds the @ip if found. NULL otherwise.
 */
struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
	if (ftrace_hash_empty(hash))
		return NULL;

	return __ftrace_lookup_ip(hash, ip);
}
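
/*
 * Illustrative usage (not from the original source):
 *
 *	struct ftrace_func_entry *entry;
 *
 *	entry = ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip);
 *	if (entry)
 *		...	(rec->ip is present in the filter hash)
 *
 * Note that an empty hash matches nothing here; callers wanting the
 * "empty filter matches everything" semantic must test
 * ftrace_hash_empty() themselves, as hash_contains_ip() does below.
 */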

static void __add_hash_entry(struct ftrace_hash *hash,
			     struct ftrace_func_entry *entry)
{
	struct hlist_head *hhd;
	unsigned long key;

	key = ftrace_hash_key(hash, entry->ip);
	hhd = &hash->buckets[key];
	hlist_add_head(&entry->hlist, hhd);
	hash->count++;
}

static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
{
	struct ftrace_func_entry *entry;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->ip = ip;
	__add_hash_entry(hash, entry);

	return 0;
}

static void
free_hash_entry(struct ftrace_hash *hash,
		struct ftrace_func_entry *entry)
{
	hlist_del(&entry->hlist);
	kfree(entry);
	hash->count--;
}

static void
remove_hash_entry(struct ftrace_hash *hash,
		  struct ftrace_func_entry *entry)
{
	hlist_del_rcu(&entry->hlist);
	hash->count--;
}

static void ftrace_hash_clear(struct ftrace_hash *hash)
{
	struct hlist_head *hhd;
	struct hlist_node *tn;
	struct ftrace_func_entry *entry;
	int size = 1 << hash->size_bits;
	int i;

	if (!hash->count)
		return;

	for (i = 0; i < size; i++) {
		hhd = &hash->buckets[i];
		hlist_for_each_entry_safe(entry, tn, hhd, hlist)
			free_hash_entry(hash, entry);
	}
	FTRACE_WARN_ON(hash->count);
}

static void free_ftrace_mod(struct ftrace_mod_load *ftrace_mod)
{
	list_del(&ftrace_mod->list);
	kfree(ftrace_mod->module);
	kfree(ftrace_mod->func);
	kfree(ftrace_mod);
}

static void clear_ftrace_mod_list(struct list_head *head)
{
	struct ftrace_mod_load *p, *n;

	if (!head)
		return;

	mutex_lock(&ftrace_lock);
	list_for_each_entry_safe(p, n, head, list)
		free_ftrace_mod(p);
	mutex_unlock(&ftrace_lock);
}

static void free_ftrace_hash(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	ftrace_hash_clear(hash);
	kfree(hash->buckets);
	kfree(hash);
}

static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
{
	struct ftrace_hash *hash;

	hash = container_of(rcu, struct ftrace_hash, rcu);
	free_ftrace_hash(hash);
}

static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	call_rcu(&hash->rcu, __free_ftrace_hash_rcu);
}

void ftrace_free_filter(struct ftrace_ops *ops)
{
	ftrace_ops_init(ops);
	free_ftrace_hash(ops->func_hash->filter_hash);
	free_ftrace_hash(ops->func_hash->notrace_hash);
}

static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
{
	struct ftrace_hash *hash;
	int size;

	hash = kzalloc(sizeof(*hash), GFP_KERNEL);
	if (!hash)
		return NULL;

	size = 1 << size_bits;
	hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);

	if (!hash->buckets) {
		kfree(hash);
		return NULL;
	}

	hash->size_bits = size_bits;

	return hash;
}
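
/*
 * Illustrative usage (not from the original source); a caller builds a
 * hash with 2^size_bits buckets and fills it one ip at a time:
 *
 *	struct ftrace_hash *hash;
 *
 *	hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
 *	if (!hash)
 *		return -ENOMEM;
 *	if (add_hash_entry(hash, ip) < 0) {
 *		free_ftrace_hash(hash);
 *		return -ENOMEM;
 *	}
 *
 * free_ftrace_hash() walks every bucket and frees the entries, so the
 * hash owns its ftrace_func_entry allocations.
 */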

/* Used to save filters on functions for modules not loaded yet */
static int ftrace_add_mod(struct trace_array *tr,
			  const char *func, const char *module,
			  int enable)
{
	struct ftrace_mod_load *ftrace_mod;
	struct list_head *mod_head = enable ? &tr->mod_trace : &tr->mod_notrace;

	ftrace_mod = kzalloc(sizeof(*ftrace_mod), GFP_KERNEL);
	if (!ftrace_mod)
		return -ENOMEM;

	ftrace_mod->func = kstrdup(func, GFP_KERNEL);
	ftrace_mod->module = kstrdup(module, GFP_KERNEL);
	ftrace_mod->enable = enable;

	if (!ftrace_mod->func || !ftrace_mod->module)
		goto out_free;

	list_add(&ftrace_mod->list, mod_head);

	return 0;

 out_free:
	free_ftrace_mod(ftrace_mod);

	return -ENOMEM;
}

static struct ftrace_hash *
alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
{
	struct ftrace_func_entry *entry;
	struct ftrace_hash *new_hash;
	int size;
	int ret;
	int i;

	new_hash = alloc_ftrace_hash(size_bits);
	if (!new_hash)
		return NULL;

	if (hash)
		new_hash->flags = hash->flags;

	/* Empty hash? */
	if (ftrace_hash_empty(hash))
		return new_hash;

	size = 1 << hash->size_bits;
	for (i = 0; i < size; i++) {
		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
			ret = add_hash_entry(new_hash, entry->ip);
			if (ret < 0)
				goto free_hash;
		}
	}

	FTRACE_WARN_ON(new_hash->count != hash->count);

	return new_hash;

 free_hash:
	free_ftrace_hash(new_hash);
	return NULL;
}

static void
ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash);
static void
ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);

static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
				       struct ftrace_hash *new_hash);

static struct ftrace_hash *dup_hash(struct ftrace_hash *src, int size)
{
	struct ftrace_func_entry *entry;
	struct ftrace_hash *new_hash;
	struct hlist_head *hhd;
	struct hlist_node *tn;
	int bits = 0;
	int i;

	/*
	 * Use around half the size (max bit of it), but
	 * a minimum of 2 is fine (as size of 0 or 1 both give 1 for bits).
	 */
	bits = fls(size / 2);

	/* Don't allocate too much */
	if (bits > FTRACE_HASH_MAX_BITS)
		bits = FTRACE_HASH_MAX_BITS;

	new_hash = alloc_ftrace_hash(bits);
	if (!new_hash)
		return NULL;

	new_hash->flags = src->flags;

	size = 1 << src->size_bits;
	for (i = 0; i < size; i++) {
		hhd = &src->buckets[i];
		hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
			remove_hash_entry(src, entry);
			__add_hash_entry(new_hash, entry);
		}
	}
	return new_hash;
}

static struct ftrace_hash *
__ftrace_hash_move(struct ftrace_hash *src)
{
	int size = src->count;

	/*
	 * If the new source is empty, just return the empty_hash.
	 */
	if (ftrace_hash_empty(src))
		return EMPTY_HASH;

	return dup_hash(src, size);
}

static int
ftrace_hash_move(struct ftrace_ops *ops, int enable,
		 struct ftrace_hash **dst, struct ftrace_hash *src)
{
	struct ftrace_hash *new_hash;
	int ret;

	/* Reject setting notrace hash on IPMODIFY ftrace_ops */
	if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable)
		return -EINVAL;

	new_hash = __ftrace_hash_move(src);
	if (!new_hash)
		return -ENOMEM;

	/* Make sure this can be applied if it is IPMODIFY ftrace_ops */
	if (enable) {
		/* IPMODIFY should be updated only when filter_hash updating */
		ret = ftrace_hash_ipmodify_update(ops, new_hash);
		if (ret < 0) {
			free_ftrace_hash(new_hash);
			return ret;
		}
	}

	/*
	 * Remove the current set, update the hash and add
	 * them back.
	 */
	ftrace_hash_rec_disable_modify(ops, enable);

	rcu_assign_pointer(*dst, new_hash);

	ftrace_hash_rec_enable_modify(ops, enable);

	return 0;
}

static bool hash_contains_ip(unsigned long ip,
			     struct ftrace_ops_hash *hash)
{
	/*
	 * The function record is a match if it exists in the filter
	 * hash or the notrace hash. Note, an empty hash is considered
	 * a match for the filter hash, but an empty
	 * notrace hash is considered not in the notrace hash.
	 */
	return (ftrace_hash_empty(hash->filter_hash) ||
		__ftrace_lookup_ip(hash->filter_hash, ip)) &&
		(ftrace_hash_empty(hash->notrace_hash) ||
		 !__ftrace_lookup_ip(hash->notrace_hash, ip));
}
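
/*
 * Illustrative truth table for hash_contains_ip() (not from the
 * original source); "yes" means the location is traced:
 *
 *	filter_hash	notrace_hash	result
 *	empty		empty		yes (trace everything)
 *	empty		contains ip	no
 *	contains ip	empty		yes
 *	lacks ip	empty		no
 *	contains ip	contains ip	no (notrace wins)
 */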

/*
 * Test the hashes for this ops to see if we want to call
 * the ops->func or not.
 *
 * It's a match if the ip is in the ops->filter_hash or
 * the filter_hash does not exist or is empty,
 *  AND
 * the ip is not in the ops->notrace_hash.
 *
 * This needs to be called with preemption disabled as
 * the hashes are freed with call_rcu().
 */
int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
{
	struct ftrace_ops_hash hash;
	int ret;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * There's a small race when adding ops that the ftrace handler
	 * that wants regs, may be called without them. We can not
	 * allow that handler to be called if regs is NULL.
	 */
	if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
		return 0;
#endif

	rcu_assign_pointer(hash.filter_hash, ops->func_hash->filter_hash);
	rcu_assign_pointer(hash.notrace_hash, ops->func_hash->notrace_hash);

	if (hash_contains_ip(ip, &hash))
		ret = 1;
	else
		ret = 0;

	return ret;
}

/*
 * This is a double for. Do not use 'break' to break out of the loop,
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)					\
	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
		int _____i;						\
		for (_____i = 0; _____i < pg->index; _____i++) {	\
			rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()		\
		}				\
	}
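
/*
 * Illustrative usage (not from the original source), matching how these
 * macros are used throughout this file:
 *
 *	struct ftrace_page *pg;
 *	struct dyn_ftrace *rec;
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		if (rec->flags & FTRACE_FL_DISABLED)
 *			continue;
 *		...
 *	} while_for_each_ftrace_rec();
 */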

static int ftrace_cmp_recs(const void *a, const void *b)
{
	const struct dyn_ftrace *key = a;
	const struct dyn_ftrace *rec = b;

	if (key->flags < rec->ip)
		return -1;
	if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
		return 1;
	return 0;
}
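
/*
 * Illustrative note (not from the original source): lookup_rec() below
 * reuses struct dyn_ftrace as a search key, with the start of the range
 * in key.ip and the end in key.flags.  ftrace_cmp_recs() therefore
 * compares the interval [key.ip, key.flags] against the record interval
 * [rec->ip, rec->ip + MCOUNT_INSN_SIZE), so bsearch() returns any
 * record whose mcount/fentry call site overlaps the requested range.
 */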

static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec = NULL;
	struct dyn_ftrace key;

	key.ip = start;
	key.flags = end;	/* overload flags, as it is unsigned long */

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		if (end < pg->records[0].ip ||
		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
			continue;
		rec = bsearch(&key, pg->records, pg->index,
			      sizeof(struct dyn_ftrace),
			      ftrace_cmp_recs);
		if (rec)
			break;
	}
	return rec;
}

/**
 * ftrace_location_range - return the first address of a traced location
 *	if it touches the given ip range
 * @start: start of range to search.
 * @end: end of range to search (inclusive). @end points to the last byte
 *	to check.
 *
 * Returns rec->ip if the related ftrace location is at least partly within
 * the given address range. That is, the first address of the instruction
 * that is either a NOP or call to the function tracer. It checks the ftrace
 * internal tables to determine if the address belongs or not.
 */
unsigned long ftrace_location_range(unsigned long start, unsigned long end)
{
	struct dyn_ftrace *rec;

	rec = lookup_rec(start, end);
	if (rec)
		return rec->ip;

	return 0;
}

/**
 * ftrace_location - return the ftrace location
 * @ip: the instruction pointer to check
 *
 * If @ip matches the ftrace location, return @ip.
 * If @ip matches sym+0, return sym's ftrace location.
 * Otherwise, return 0.
 */
unsigned long ftrace_location(unsigned long ip)
{
	struct dyn_ftrace *rec;
	unsigned long offset;
	unsigned long size;

	rec = lookup_rec(ip, ip);
	if (!rec) {
		if (!kallsyms_lookup_size_offset(ip, &size, &offset))
			goto out;

		/* map sym+0 to __fentry__ */
		if (!offset)
			rec = lookup_rec(ip, ip + size - 1);
	}

	if (rec)
		return rec->ip;

out:
	return 0;
}

/**
 * ftrace_text_reserved - return true if range contains an ftrace location
 * @start: start of range to search
 * @end: end of range to search (inclusive). @end points to the last byte to check.
 *
 * Returns 1 if @start and @end contains a ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
int ftrace_text_reserved(const void *start, const void *end)
{
	unsigned long ret;

	ret = ftrace_location_range((unsigned long)start,
				    (unsigned long)end);

	return (int)!!ret;
}

/* Test if ops registered to this rec needs regs */
static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
{
	struct ftrace_ops *ops;
	bool keep_regs = false;

	for (ops = ftrace_ops_list;
	     ops != &ftrace_list_end; ops = ops->next) {
		/* pass rec in as regs to have non-NULL val */
		if (ftrace_ops_test(ops, rec->ip, rec)) {
			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
				keep_regs = true;
				break;
			}
		}
	}

	return keep_regs;
}

static struct ftrace_ops *
ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
static struct ftrace_ops *
ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude);
static struct ftrace_ops *
ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);

static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
				     int filter_hash,
				     bool inc)
{
	struct ftrace_hash *hash;
	struct ftrace_hash *other_hash;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	bool update = false;
	int count = 0;
	int all = false;

	/* Only update if the ops has been registered */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return false;

	/*
	 * In the filter_hash case:
	 *   If the count is zero, we update all records.
	 *   Otherwise we just update the items in the hash.
	 *
	 * In the notrace_hash case:
	 *   We enable the update in the hash.
	 *   As disabling notrace means enabling the tracing,
	 *   and enabling notrace means disabling, the inc variable
	 *   gets inversed.
	 */
	if (filter_hash) {
		hash = ops->func_hash->filter_hash;
		other_hash = ops->func_hash->notrace_hash;
		if (ftrace_hash_empty(hash))
			all = true;
	} else {
		inc = !inc;
		hash = ops->func_hash->notrace_hash;
		other_hash = ops->func_hash->filter_hash;
		/*
		 * If the notrace hash has no items,
		 * then there's nothing to do.
		 */
		if (ftrace_hash_empty(hash))
			return false;
	}

	do_for_each_ftrace_rec(pg, rec) {
		int in_other_hash = 0;
		int in_hash = 0;
		int match = 0;

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

		if (all) {
			/*
			 * Only the filter_hash affects all records.
			 * Update if the record is not in the notrace hash.
			 */
			if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
				match = 1;
		} else {
			in_hash = !!ftrace_lookup_ip(hash, rec->ip);
			in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);

			/*
			 * If filter_hash is set, we want to match all functions
			 * that are in the hash but not in the other hash.
			 *
			 * If filter_hash is not set, then we are decrementing.
			 * That means we match anything that is in the hash
			 * and also in the other_hash. That is, we need to turn
			 * off functions in the other hash because they are disabled
			 * by this hash.
			 */
			if (filter_hash && in_hash && !in_other_hash)
				match = 1;
			else if (!filter_hash && in_hash &&
				 (in_other_hash || ftrace_hash_empty(other_hash)))
				match = 1;
		}
		if (!match)
			continue;

		if (inc) {
			rec->flags++;
			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
				return false;

			if (ops->flags & FTRACE_OPS_FL_DIRECT)
				rec->flags |= FTRACE_FL_DIRECT;

			/*
			 * If there's only a single callback registered to a
			 * function, and the ops has a trampoline registered
			 * for it, then we can call it directly.
			 */
			if (ftrace_rec_count(rec) == 1 && ops->trampoline)
				rec->flags |= FTRACE_FL_TRAMP;
			else
				/*
				 * If we are adding another function callback
				 * to this function, and the previous had a
				 * custom trampoline in use, then we need to go
				 * back to the default trampoline.
				 */
				rec->flags &= ~FTRACE_FL_TRAMP;

			/*
			 * If any ops wants regs saved for this function
			 * then all ops will get saved regs.
			 */
			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
				rec->flags |= FTRACE_FL_REGS;
		} else {
			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
				return false;
			rec->flags--;

			/*
			 * Only the internal direct_ops should have the
			 * DIRECT flag set. Thus, if it is removing a
			 * function, then that function should no longer
			 * be direct.
			 */
			if (ops->flags & FTRACE_OPS_FL_DIRECT)
				rec->flags &= ~FTRACE_FL_DIRECT;

			/*
			 * If the rec had REGS enabled and the ops that is
			 * being removed had REGS set, then see if there is
			 * still any ops for this record that wants regs.
			 * If not, we can stop recording them.
			 */
			if (ftrace_rec_count(rec) > 0 &&
			    rec->flags & FTRACE_FL_REGS &&
			    ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
				if (!test_rec_ops_needs_regs(rec))
					rec->flags &= ~FTRACE_FL_REGS;
			}

			/*
			 * The TRAMP needs to be set only if rec count
			 * is decremented to one, and the ops that is
			 * left has a trampoline. As TRAMP can only be
			 * enabled if there is only a single ops attached
			 * to it.
			 */
			if (ftrace_rec_count(rec) == 1 &&
			    ftrace_find_tramp_ops_any_other(rec, ops))
				rec->flags |= FTRACE_FL_TRAMP;
			else
				rec->flags &= ~FTRACE_FL_TRAMP;

			/*
			 * flags will be cleared in ftrace_check_record()
			 * if rec count is zero.
			 */
		}
		count++;

		/* Must match FTRACE_UPDATE_CALLS in ftrace_modify_all_code() */
		update |= ftrace_test_record(rec, true) != FTRACE_UPDATE_IGNORE;

		/* Shortcut, if we handled all records, we are done. */
		if (!all && count == hash->count)
			return update;
	} while_for_each_ftrace_rec();

	return update;
}

static bool ftrace_hash_rec_disable(struct ftrace_ops *ops,
				    int filter_hash)
{
	return __ftrace_hash_rec_update(ops, filter_hash, 0);
}

static bool ftrace_hash_rec_enable(struct ftrace_ops *ops,
				   int filter_hash)
{
	return __ftrace_hash_rec_update(ops, filter_hash, 1);
}

static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops,
					  int filter_hash, int inc)
{
	struct ftrace_ops *op;

	__ftrace_hash_rec_update(ops, filter_hash, inc);

	if (ops->func_hash != &global_ops.local_hash)
		return;

	/*
	 * If the ops shares the global_ops hash, then we need to update
	 * all ops that are enabled and share this hash.
	 */
	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/* Already done */
		if (op == ops)
			continue;
		if (op->func_hash == &global_ops.local_hash)
			__ftrace_hash_rec_update(op, filter_hash, inc);
	} while_for_each_ftrace_op(op);
}

static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops,
					   int filter_hash)
{
	ftrace_hash_rec_update_modify(ops, filter_hash, 0);
}

static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
					  int filter_hash)
{
	ftrace_hash_rec_update_modify(ops, filter_hash, 1);
}

/*
 * Try to update IPMODIFY flag on each ftrace_rec. Return 0 if it is OK
 * or no-needed to update, -EBUSY if it detects a conflict of the flag
 * on a ftrace_rec, and -EINVAL if the new_hash tries to trace all recs.
 * Note that old_hash and new_hash have below meanings:
 *  - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected)
 *  - If the hash is EMPTY_HASH, it hits nothing
 *  - Anything else hits the recs which match the hash entries.
 */
static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
					 struct ftrace_hash *old_hash,
					 struct ftrace_hash *new_hash)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec, *end = NULL;
	int in_old, in_new;

	/* Only update if the ops has been registered */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return 0;

	if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
		return 0;

	/*
	 * Since the IPMODIFY is a very address sensitive action, we do not
	 * allow ftrace_ops to set all functions to new hash.
	 */
	if (!new_hash || !old_hash)
		return -EINVAL;

	/* Update rec->flags */
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

		/* We need to update only differences of filter_hash */
		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
		if (in_old == in_new)
			continue;

		if (in_new) {
			/* New entries must ensure no others are using it */
			if (rec->flags & FTRACE_FL_IPMODIFY)
				goto rollback;
			rec->flags |= FTRACE_FL_IPMODIFY;
		} else /* Removed entry */
			rec->flags &= ~FTRACE_FL_IPMODIFY;
	} while_for_each_ftrace_rec();

	return 0;

rollback:
	end = rec;

	/* Roll back what we did above */
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

		if (rec == end)
			goto err_out;

		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
		if (in_old == in_new)
			continue;

		if (in_new)
			rec->flags &= ~FTRACE_FL_IPMODIFY;
		else
			rec->flags |= FTRACE_FL_IPMODIFY;
	} while_for_each_ftrace_rec();

err_out:
	return -EBUSY;
}

static int ftrace_hash_ipmodify_enable(struct ftrace_ops *ops)
{
	struct ftrace_hash *hash = ops->func_hash->filter_hash;

	if (ftrace_hash_empty(hash))
		hash = NULL;

	return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash);
}

/* Disabling always succeeds */
static void ftrace_hash_ipmodify_disable(struct ftrace_ops *ops)
{
	struct ftrace_hash *hash = ops->func_hash->filter_hash;

	if (ftrace_hash_empty(hash))
		hash = NULL;

	__ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH);
}

static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
				       struct ftrace_hash *new_hash)
{
	struct ftrace_hash *old_hash = ops->func_hash->filter_hash;

	if (ftrace_hash_empty(old_hash))
		old_hash = NULL;

	if (ftrace_hash_empty(new_hash))
		new_hash = NULL;

	return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash);
}

static void print_ip_ins(const char *fmt, const unsigned char *p)
{
	char ins[MCOUNT_INSN_SIZE];
	int i;

	if (copy_from_kernel_nofault(ins, p, MCOUNT_INSN_SIZE)) {
		printk(KERN_CONT "%s[FAULT] %px\n", fmt, p);
		return;
	}

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", ins[i]);
}

enum ftrace_bug_type ftrace_bug_type;
const void *ftrace_expected;

static void print_bug_type(void)
{
	switch (ftrace_bug_type) {
	case FTRACE_BUG_UNKNOWN:
		break;
	case FTRACE_BUG_INIT:
		pr_info("Initializing ftrace call sites\n");
		break;
	case FTRACE_BUG_NOP:
		pr_info("Setting ftrace call site to NOP\n");
		break;
	case FTRACE_BUG_CALL:
		pr_info("Setting ftrace call site to call ftrace function\n");
		break;
	case FTRACE_BUG_UPDATE:
		pr_info("Updating ftrace call site to call a different ftrace function\n");
		break;
	}
}

/**
 * ftrace_bug - report and shutdown function tracer
 * @failed: The failed type (EFAULT, EINVAL, EPERM)
 * @rec: The record that failed
 *
 * The arch code that enables or disables the function tracing
 * can call ftrace_bug() when it has detected a problem in
 * modifying the code. @failed should be one of either:
 * EFAULT - if the problem happens on reading the @ip address
 * EINVAL - if what is read at @ip is not what was expected
 * EPERM - if the problem happens on writing to the @ip address
 */
void ftrace_bug(int failed, struct dyn_ftrace *rec)
{
	unsigned long ip = rec ? rec->ip : 0;

	pr_info("------------[ ftrace bug ]------------\n");

	switch (failed) {
	case -EFAULT:
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(KERN_INFO, ip);
		break;
	case -EINVAL:
		pr_info("ftrace failed to modify ");
		print_ip_sym(KERN_INFO, ip);
		print_ip_ins(" actual:   ", (unsigned char *)ip);
		pr_cont("\n");
		if (ftrace_expected) {
			print_ip_ins(" expected: ", ftrace_expected);
			pr_cont("\n");
		}
		break;
	case -EPERM:
		pr_info("ftrace faulted on writing ");
		print_ip_sym(KERN_INFO, ip);
		break;
	default:
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(KERN_INFO, ip);
	}
	print_bug_type();
	if (rec) {
		struct ftrace_ops *ops = NULL;

		pr_info("ftrace record flags: %lx\n", rec->flags);
		pr_cont(" (%ld)%s", ftrace_rec_count(rec),
			rec->flags & FTRACE_FL_REGS ? " R" : "  ");
		if (rec->flags & FTRACE_FL_TRAMP_EN) {
			ops = ftrace_find_tramp_ops_any(rec);
			if (ops) {
				do {
					pr_cont("\ttramp: %pS (%pS)",
						(void *)ops->trampoline,
						(void *)ops->func);
					ops = ftrace_find_tramp_ops_next(rec, ops);
				} while (ops);
			} else
				pr_cont("\ttramp: ERROR!");

		}
		ip = ftrace_get_addr_curr(rec);
		pr_cont("\n expected tramp: %lx\n", ip);
	}

	FTRACE_WARN_ON_ONCE(1);
}

static int ftrace_check_record(struct dyn_ftrace *rec, bool enable, bool update)
{
	unsigned long flag = 0UL;

	ftrace_bug_type = FTRACE_BUG_UNKNOWN;

	if (rec->flags & FTRACE_FL_DISABLED)
		return FTRACE_UPDATE_IGNORE;

	/*
	 * If we are updating calls:
	 *
	 *   If the record has a ref count, then we need to enable it
	 *   because someone is using it.
	 *
	 *   Otherwise we make sure its disabled.
	 *
	 * If we are disabling calls, then disable all records that
	 * are enabled.
	 */
	if (enable && ftrace_rec_count(rec))
		flag = FTRACE_FL_ENABLED;

	/*
	 * If enabling and the REGS flag does not match the REGS_EN, or
	 * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore
	 * this record. Set flags to fail the compare against ENABLED.
	 * Same for direct calls.
	 */
	if (flag) {
		if (!(rec->flags & FTRACE_FL_REGS) !=
		    !(rec->flags & FTRACE_FL_REGS_EN))
			flag |= FTRACE_FL_REGS;

		if (!(rec->flags & FTRACE_FL_TRAMP) !=
		    !(rec->flags & FTRACE_FL_TRAMP_EN))
			flag |= FTRACE_FL_TRAMP;

		/*
		 * Direct calls are special, as count matters.
		 * We must test the record for direct, if the
		 * DIRECT and DIRECT_EN do not match, but only
		 * if the count is 1. That's because, if the
		 * count is something other than one, we do not
		 * want the direct enabled (it will be done via the
		 * direct helper). But if DIRECT_EN is set, and
		 * the count is not one, we need to clear it.
		 */
		if (ftrace_rec_count(rec) == 1) {
			if (!(rec->flags & FTRACE_FL_DIRECT) !=
			    !(rec->flags & FTRACE_FL_DIRECT_EN))
				flag |= FTRACE_FL_DIRECT;
		} else if (rec->flags & FTRACE_FL_DIRECT_EN) {
			flag |= FTRACE_FL_DIRECT;
		}
	}

	/* If the state of this record hasn't changed, then do nothing */
	if ((rec->flags & FTRACE_FL_ENABLED) == flag)
		return FTRACE_UPDATE_IGNORE;

	if (flag) {
		/* Save off if rec is being enabled (for return value) */
		flag ^= rec->flags & FTRACE_FL_ENABLED;

		if (update) {
			rec->flags |= FTRACE_FL_ENABLED;
			if (flag & FTRACE_FL_REGS) {
				if (rec->flags & FTRACE_FL_REGS)
					rec->flags |= FTRACE_FL_REGS_EN;
				else
					rec->flags &= ~FTRACE_FL_REGS_EN;
			}
			if (flag & FTRACE_FL_TRAMP) {
				if (rec->flags & FTRACE_FL_TRAMP)
					rec->flags |= FTRACE_FL_TRAMP_EN;
				else
					rec->flags &= ~FTRACE_FL_TRAMP_EN;
			}

			if (flag & FTRACE_FL_DIRECT) {
				/*
				 * If there's only one user (direct_ops helper)
				 * then we can call the direct function
				 * directly (no ftrace trampoline).
				 */
				if (ftrace_rec_count(rec) == 1) {
					if (rec->flags & FTRACE_FL_DIRECT)
						rec->flags |= FTRACE_FL_DIRECT_EN;
					else
						rec->flags &= ~FTRACE_FL_DIRECT_EN;
				} else {
					/*
					 * Can only call directly if there's
					 * only one callback to the
					 * function.
					 */
					rec->flags &= ~FTRACE_FL_DIRECT_EN;
				}
			}
		}

		/*
		 * If this record is being updated from a nop, then
		 *   return UPDATE_MAKE_CALL.
		 * Otherwise,
		 *   return UPDATE_MODIFY_CALL to tell the caller to convert
		 *   from the save regs, to a non-save regs function or
		 *   vice versa, or from a trampoline call.
		 */
		if (flag & FTRACE_FL_ENABLED) {
			ftrace_bug_type = FTRACE_BUG_CALL;
			return FTRACE_UPDATE_MAKE_CALL;
		}

		ftrace_bug_type = FTRACE_BUG_UPDATE;
		return FTRACE_UPDATE_MODIFY_CALL;
	}

	if (update) {
		/* If there's no more users, clear all flags */
		if (!ftrace_rec_count(rec))
			rec->flags = 0;
		else
			/*
			 * Just disable the record, but keep the ops TRAMP
			 * and REGS states. The _EN flags must be disabled though.
			 */
			rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN |
					FTRACE_FL_REGS_EN | FTRACE_FL_DIRECT_EN);
	}

	ftrace_bug_type = FTRACE_BUG_NOP;
	return FTRACE_UPDATE_MAKE_NOP;
}

/**
 * ftrace_update_record - set a record that now is tracing or not
 * @rec: the record to update
 * @enable: set to true if the record is tracing, false to force disable
 *
 * The records that represent all functions that can be traced need
 * to be updated when tracing has been enabled.
 */
int ftrace_update_record(struct dyn_ftrace *rec, bool enable)
{
	return ftrace_check_record(rec, enable, true);
}

/**
 * ftrace_test_record - check if the record has been enabled or not
 * @rec: the record to test
 * @enable: set to true to check if enabled, false if it is disabled
 *
 * The arch code may need to test if a record is already set to
 * tracing to determine how to modify the function code that it
 * represents.
 */
int ftrace_test_record(struct dyn_ftrace *rec, bool enable)
{
	return ftrace_check_record(rec, enable, false);
}

static struct ftrace_ops *
ftrace_find_tramp_ops_any(struct dyn_ftrace *rec)
{
	struct ftrace_ops *op;
	unsigned long ip = rec->ip;

	do_for_each_ftrace_op(op, ftrace_ops_list) {

		if (!op->trampoline)
			continue;

		if (hash_contains_ip(ip, op->func_hash))
			return op;
	} while_for_each_ftrace_op(op);

	return NULL;
}

static struct ftrace_ops *
ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude)
{
	struct ftrace_ops *op;
	unsigned long ip = rec->ip;

	do_for_each_ftrace_op(op, ftrace_ops_list) {

		if (op == op_exclude || !op->trampoline)
			continue;

		if (hash_contains_ip(ip, op->func_hash))
			return op;
	} while_for_each_ftrace_op(op);

	return NULL;
}

static struct ftrace_ops *
ftrace_find_tramp_ops_next(struct dyn_ftrace *rec,
			   struct ftrace_ops *op)
{
	unsigned long ip = rec->ip;

	while_for_each_ftrace_op(op) {

		if (!op->trampoline)
			continue;

		if (hash_contains_ip(ip, op->func_hash))
			return op;
	}

	return NULL;
}

static struct ftrace_ops *
ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
{
	struct ftrace_ops *op;
	unsigned long ip = rec->ip;

	/*
	 * Need to check removed ops first.
	 * If they are being removed, and this rec has a tramp,
	 * and this rec is in the ops list, then it would be the
	 * one with the tramp.
	 */
	if (removed_ops) {
		if (hash_contains_ip(ip, &removed_ops->old_hash))
			return removed_ops;
	}

	/*
	 * Need to find the current trampoline for a rec.
	 * Now, a trampoline is only attached to a rec if there
	 * was a single 'ops' attached to it. But this can be called
	 * when we are adding another op to the rec or removing the
	 * current one. Thus, if the op is being added, we can
	 * ignore it because it hasn't attached itself to the rec
	 * yet.
	 *
	 * If an ops is being modified (hooking to different functions)
	 * then we don't care about the new functions that are being
	 * added, just the old ones (that are probably being removed).
	 *
	 * If we are adding an ops to a function that already is using
	 * a trampoline, it needs to be removed (trampolines are only
	 * for single ops connected), then an ops that is not being
	 * modified also needs to be checked.
	 */
	do_for_each_ftrace_op(op, ftrace_ops_list) {

		if (!op->trampoline)
			continue;

		/*
		 * If the ops is being added, it hasn't gotten to
		 * the point to be removed from this tree yet.
		 */
		if (op->flags & FTRACE_OPS_FL_ADDING)
			continue;

		/*
		 * If the ops is being modified and is in the old
		 * hash, then it is probably being removed from this
		 * function.
		 */
		if ((op->flags & FTRACE_OPS_FL_MODIFYING) &&
		    hash_contains_ip(ip, &op->old_hash))
			return op;
		/*
		 * If the ops is not being added or modified, and it's
		 * in its normal filter hash, then this must be the one
		 * we want!
		 */
		if (!(op->flags & FTRACE_OPS_FL_MODIFYING) &&
		    hash_contains_ip(ip, op->func_hash))
			return op;

	} while_for_each_ftrace_op(op);

	return NULL;
}

static struct ftrace_ops *
ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
{
	struct ftrace_ops *op;
	unsigned long ip = rec->ip;

	do_for_each_ftrace_op(op, ftrace_ops_list) {

		if (hash_contains_ip(ip, op->func_hash))
			return op;
	} while_for_each_ftrace_op(op);

	return NULL;
}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS

static struct ftrace_hash *direct_functions = EMPTY_HASH;
static DEFINE_MUTEX(direct_mutex);
int ftrace_direct_func_count;

/*
 * Search the direct_functions hash to see if the given instruction pointer
 * has a direct caller attached to it.
 */
unsigned long ftrace_find_rec_direct(unsigned long ip)
{
	struct ftrace_func_entry *entry;

	entry = __ftrace_lookup_ip(direct_functions, ip);
	if (!entry)
		return 0;

	return entry->direct;
}

static struct ftrace_func_entry*
ftrace_add_rec_direct(unsigned long ip, unsigned long addr,
		      struct ftrace_hash **free_hash)
{
	struct ftrace_func_entry *entry;

	if (ftrace_hash_empty(direct_functions) ||
	    direct_functions->count > 2 * (1 << direct_functions->size_bits)) {
		struct ftrace_hash *new_hash;
		int size = ftrace_hash_empty(direct_functions) ? 0 :
			direct_functions->count + 1;

		if (size < 32)
			size = 32;

		new_hash = dup_hash(direct_functions, size);
		if (!new_hash)
			return NULL;

		*free_hash = direct_functions;
		direct_functions = new_hash;
	}

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;

	entry->ip = ip;
	entry->direct = addr;
	__add_hash_entry(direct_functions, entry);
	return entry;
}

static void call_direct_funcs(unsigned long ip, unsigned long pip,
			      struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
	struct pt_regs *regs = ftrace_get_regs(fregs);
	unsigned long addr;

	addr = ftrace_find_rec_direct(ip);
	if (!addr)
		return;

	arch_ftrace_set_direct_caller(regs, addr);
}

struct ftrace_ops direct_ops = {
	.func		= call_direct_funcs,
	.flags		= FTRACE_OPS_FL_IPMODIFY
			  | FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_REGS
			  | FTRACE_OPS_FL_PERMANENT,
	/*
	 * By declaring the main trampoline as this trampoline
	 * it will never have one allocated for it. Allocated
	 * trampolines should not call direct functions.
	 * The direct_ops should only be called by the builtin
	 * ftrace_regs_caller trampoline.
	 */
	.trampoline	= FTRACE_REGS_ADDR,
};
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */

/**
 * ftrace_get_addr_new - Get the call address to set to
 * @rec:  The ftrace record descriptor
 *
 * If the record has the FTRACE_FL_REGS set, that means that it
 * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
 * is not set, then it wants to convert to the normal callback.
 *
 * Returns the address of the trampoline to set to
 */
unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
{
	struct ftrace_ops *ops;
	unsigned long addr;

	if ((rec->flags & FTRACE_FL_DIRECT) &&
	    (ftrace_rec_count(rec) == 1)) {
		addr = ftrace_find_rec_direct(rec->ip);
		if (addr)
			return addr;
		WARN_ON_ONCE(1);
	}

	/* Trampolines take precedence over direct */
	if (rec->flags & FTRACE_FL_TRAMP) {
		ops = ftrace_find_tramp_ops_new(rec);
		if (FTRACE_WARN_ON(!ops || !ops->trampoline)) {
			pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n",
				(void *)rec->ip, (void *)rec->ip, rec->flags);
			/* Ftrace is shutting down, return anything */
			return (unsigned long)FTRACE_ADDR;
		}
		return ops->trampoline;
	}

	if (rec->flags & FTRACE_FL_REGS)
		return (unsigned long)FTRACE_REGS_ADDR;
	else
		return (unsigned long)FTRACE_ADDR;
}

/**
 * ftrace_get_addr_curr - Get the call address that is already there
 * @rec:  The ftrace record descriptor
 *
 * The FTRACE_FL_REGS_EN is set when the record already points to
 * a function that saves all the regs. Basically the '_EN' version
 * represents the current state of the function.
 *
 * Returns the address of the trampoline that is currently being called
 */
unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
{
	struct ftrace_ops *ops;
	unsigned long addr;

	/* Direct calls take precedence over trampolines */
	if (rec->flags & FTRACE_FL_DIRECT_EN) {
		addr = ftrace_find_rec_direct(rec->ip);
		if (addr)
			return addr;
		WARN_ON_ONCE(1);
	}

	/* Trampolines take precedence over direct */
	if (rec->flags & FTRACE_FL_TRAMP_EN) {
		ops = ftrace_find_tramp_ops_curr(rec);
		if (FTRACE_WARN_ON(!ops)) {
			pr_warn("Bad trampoline accounting at: %p (%pS)\n",
				(void *)rec->ip, (void *)rec->ip);
			/* Ftrace is shutting down, return anything */
			return (unsigned long)FTRACE_ADDR;
		}
		return ops->trampoline;
	}

	if (rec->flags & FTRACE_FL_REGS_EN)
		return (unsigned long)FTRACE_REGS_ADDR;
	else
		return (unsigned long)FTRACE_ADDR;
}

static int
__ftrace_replace_code(struct dyn_ftrace *rec, bool enable)
{
	unsigned long ftrace_old_addr;
	unsigned long ftrace_addr;
	int ret;

	ftrace_addr = ftrace_get_addr_new(rec);

	/* This needs to be done before we call ftrace_update_record() */
	ftrace_old_addr = ftrace_get_addr_curr(rec);

	ret = ftrace_update_record(rec, enable);

	ftrace_bug_type = FTRACE_BUG_UNKNOWN;

	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
		return 0;

	case FTRACE_UPDATE_MAKE_CALL:
		ftrace_bug_type = FTRACE_BUG_CALL;
		return ftrace_make_call(rec, ftrace_addr);

	case FTRACE_UPDATE_MAKE_NOP:
		ftrace_bug_type = FTRACE_BUG_NOP;
		return ftrace_make_nop(NULL, rec, ftrace_old_addr);

	case FTRACE_UPDATE_MODIFY_CALL:
		ftrace_bug_type = FTRACE_BUG_UPDATE;
		return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
	}

	return -1; /* unknown ftrace bug */
}

void __weak ftrace_replace_code(int mod_flags)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	bool enable = mod_flags & FTRACE_MODIFY_ENABLE_FL;
	int schedulable = mod_flags & FTRACE_MODIFY_MAY_SLEEP_FL;
	int failed;

	if (unlikely(ftrace_disabled))
		return;

	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

		failed = __ftrace_replace_code(rec, enable);
		if (failed) {
			ftrace_bug(failed, rec);
			/* Stop processing */
			return;
		}
		if (schedulable)
			cond_resched();
	} while_for_each_ftrace_rec();
}

struct ftrace_rec_iter {
	struct ftrace_page	*pg;
	int			index;
};

/**
 * ftrace_rec_iter_start - start up iterating over traced functions
 *
 * Returns an iterator handle that is used to iterate over all
 * the records that represent address locations where functions
 * are traced.
 *
 * May return NULL if no records are available.
 */
struct ftrace_rec_iter *ftrace_rec_iter_start(void)
{
	/*
	 * We only use a single iterator.
	 * Protected by the ftrace_lock mutex.
	 */
	static struct ftrace_rec_iter ftrace_rec_iter;
	struct ftrace_rec_iter *iter = &ftrace_rec_iter;

	iter->pg = ftrace_pages_start;
	iter->index = 0;

	/* Could have empty pages */
	while (iter->pg && !iter->pg->index)
		iter->pg = iter->pg->next;

	if (!iter->pg)
		return NULL;

	return iter;
}

/**
 * ftrace_rec_iter_next - get the next record to process.
 * @iter: The handle to the iterator.
 *
 * Returns the next iterator after the given iterator @iter.
 */
struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
{
	iter->index++;

	if (iter->index >= iter->pg->index) {
		iter->pg = iter->pg->next;
		iter->index = 0;

		/* Could have empty pages */
		while (iter->pg && !iter->pg->index)
			iter->pg = iter->pg->next;
	}

	if (!iter->pg)
		return NULL;

	return iter;
}

/**
 * ftrace_rec_iter_record - get the record at the iterator location
 * @iter: The current iterator location
 *
 * Returns the record that the current @iter is at.
 */
struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
{
	return &iter->pg->records[iter->index];
}
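
/*
 * Illustrative usage (not from the original source); arch code can walk
 * every patchable call site with:
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for (iter = ftrace_rec_iter_start(); iter;
 *	     iter = ftrace_rec_iter_next(iter)) {
 *		rec = ftrace_rec_iter_record(iter);
 *		...
 *	}
 *
 * ftrace_lock must be held, as the single static iterator is shared.
 */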

static int
ftrace_nop_initialize(struct module *mod, struct dyn_ftrace *rec)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return 0;

	ret = ftrace_init_nop(mod, rec);
	if (ret) {
		ftrace_bug_type = FTRACE_BUG_INIT;
		ftrace_bug(ret, rec);
		return 0;
	}
	return 1;
}

/*
 * archs can override this function if they must do something
 * before the modifying code is performed.
 */
void __weak ftrace_arch_code_modify_prepare(void)
{
}

/*
 * archs can override this function if they must do something
 * after the modifying code is performed.
 */
void __weak ftrace_arch_code_modify_post_process(void)
{
}

void ftrace_modify_all_code(int command)
{
	int update = command & FTRACE_UPDATE_TRACE_FUNC;
	int mod_flags = 0;
	int err = 0;

	if (command & FTRACE_MAY_SLEEP)
		mod_flags = FTRACE_MODIFY_MAY_SLEEP_FL;

	/*
	 * If the ftrace_caller calls a ftrace_ops func directly,
	 * we need to make sure that it only traces functions it
	 * expects to trace. When doing the switch of functions,
	 * we need to update to the ftrace_ops_list_func first
	 * before the transition between old and new calls are set,
	 * as the ftrace_ops_list_func will check the ops hashes
	 * to make sure the ops are having the right functions
	 * traced.
	 */
	if (update) {
		err = ftrace_update_ftrace_func(ftrace_ops_list_func);
		if (FTRACE_WARN_ON(err))
			return;
	}

	if (command & FTRACE_UPDATE_CALLS)
		ftrace_replace_code(mod_flags | FTRACE_MODIFY_ENABLE_FL);
	else if (command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(mod_flags);

	if (update && ftrace_trace_function != ftrace_ops_list_func) {
		function_trace_op = set_function_trace_op;
		smp_wmb();
		/* If irqs are disabled, we are in stop machine */
		if (!irqs_disabled())
			smp_call_function(ftrace_sync_ipi, NULL, 1);
		err = ftrace_update_ftrace_func(ftrace_trace_function);
		if (FTRACE_WARN_ON(err))
			return;
	}

	if (command & FTRACE_START_FUNC_RET)
		err = ftrace_enable_ftrace_graph_caller();
	else if (command & FTRACE_STOP_FUNC_RET)
		err = ftrace_disable_ftrace_graph_caller();
	FTRACE_WARN_ON(err);
}

static int __ftrace_modify_code(void *data)
{
	int *command = data;

	ftrace_modify_all_code(*command);

	return 0;
}

/**
 * ftrace_run_stop_machine - go back to the stop machine method
 * @command: The command to tell ftrace what to do
 *
 * If an arch needs to call stop_machine() to modify code,
 * it should be called with this function.
 */
void ftrace_run_stop_machine(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}

/**
 * arch_ftrace_update_code - modify the code to trace or not trace
 * @command: The command that needs to be done
 *
 * Archs can override this function if it does not need to
 * run stop_machine() to modify code.
 */
void __weak arch_ftrace_update_code(int command)
{
	ftrace_run_stop_machine(command);
}

static void ftrace_run_update_code(int command)
{
	ftrace_arch_code_modify_prepare();

	/*
	 * By default we use stop_machine() to modify the code.
	 * But archs can do what ever they want as long as it
	 * is safe. The stop_machine() is the safest, but also
	 * produces the most overhead.
	 */
	arch_ftrace_update_code(command);

	ftrace_arch_code_modify_post_process();
}

static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
				   struct ftrace_ops_hash *old_hash)
{
	ops->flags |= FTRACE_OPS_FL_MODIFYING;
	ops->old_hash.filter_hash = old_hash->filter_hash;
	ops->old_hash.notrace_hash = old_hash->notrace_hash;
	ftrace_run_update_code(command);
	ops->old_hash.filter_hash = NULL;
	ops->old_hash.notrace_hash = NULL;
	ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
}

static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;

void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops)
{
}

/* List of trace_ops that have allocated trampolines */
static LIST_HEAD(ftrace_ops_trampoline_list);

static void ftrace_add_trampoline_to_kallsyms(struct ftrace_ops *ops)
{
	lockdep_assert_held(&ftrace_lock);
	list_add_rcu(&ops->list, &ftrace_ops_trampoline_list);
}

static void ftrace_remove_trampoline_from_kallsyms(struct ftrace_ops *ops)
{
	lockdep_assert_held(&ftrace_lock);
	list_del_rcu(&ops->list);
	synchronize_rcu();
}

/*
 * "__builtin__ftrace" is used as a module name in /proc/kallsyms for symbols
 * for pages allocated for ftrace purposes, even though "__builtin__ftrace"
 * is not a module.
 */
#define FTRACE_TRAMPOLINE_MOD "__builtin__ftrace"
#define FTRACE_TRAMPOLINE_SYM "ftrace_trampoline"

static void ftrace_trampoline_free(struct ftrace_ops *ops)
{
	if (ops && (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP) &&
	    ops->trampoline) {
		/*
		 * Record the text poke event before the ksymbol unregister
		 * event.
		 */
		perf_event_text_poke((void *)ops->trampoline,
				     (void *)ops->trampoline,
				     ops->trampoline_size, NULL, 0);
		perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL,
				   ops->trampoline, ops->trampoline_size,
				   true, FTRACE_TRAMPOLINE_SYM);
		/* Remove from kallsyms after the perf events */
		ftrace_remove_trampoline_from_kallsyms(ops);
	}

	arch_ftrace_trampoline_free(ops);
}

static void ftrace_startup_enable(int command)
{
	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}

static void ftrace_startup_all(int command)
{
	update_all_ops = true;
	ftrace_startup_enable(command);
	update_all_ops = false;
}

int ftrace_startup(struct ftrace_ops *ops, int command)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	ret = __register_ftrace_function(ops);
	if (ret)
		return ret;

	ftrace_start_up++;

	/*
	 * Note that ftrace probes uses this to start up
	 * and modify functions it will probe. But we still
	 * set the ADDING flag for modification, as probes
	 * do not have trampolines. If they add them in the
	 * future, then the probes will need to distinguish
	 * between adding and updating probes.
	 */
	ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING;

	ret = ftrace_hash_ipmodify_enable(ops);
	if (ret < 0) {
		/* Rollback registration process */
		__unregister_ftrace_function(ops);
		ftrace_start_up--;
		ops->flags &= ~FTRACE_OPS_FL_ENABLED;
		if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
			ftrace_trampoline_free(ops);
		return ret;
	}

	if (ftrace_hash_rec_enable(ops, 1))
		command |= FTRACE_UPDATE_CALLS;

	ftrace_startup_enable(command);

	ops->flags &= ~FTRACE_OPS_FL_ADDING;

	return 0;
}

int ftrace_shutdown(struct ftrace_ops *ops, int command)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	ret = __unregister_ftrace_function(ops);
	if (ret)
		return ret;

	ftrace_start_up--;
	/*
	 * Just warn in case of unbalance; no need to kill ftrace, it's not
	 * critical but the ftrace_call callers may never be nopped again
	 * after further ftrace uses.
	 */
	WARN_ON_ONCE(ftrace_start_up < 0);

	/* Disabling ipmodify never fails */
	ftrace_hash_ipmodify_disable(ops);

	if (ftrace_hash_rec_disable(ops, 1))
		command |= FTRACE_UPDATE_CALLS;

	ops->flags &= ~FTRACE_OPS_FL_ENABLED;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled) {
		/*
		 * If these are dynamic or per_cpu ops, they still
		 * need their data freed. Since, function tracing is
		 * not currently active, we can just free them
		 * without synchronizing all CPUs.
		 */
		if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
			goto free_ops;

		return 0;
	}

	/*
	 * If the ops uses a trampoline, then it needs to be
	 * tested first on update.
	 */
	ops->flags |= FTRACE_OPS_FL_REMOVING;
	removed_ops = ops;

	/* The trampoline logic checks the old hashes */
	ops->old_hash.filter_hash = ops->func_hash->filter_hash;
	ops->old_hash.notrace_hash = ops->func_hash->notrace_hash;

	ftrace_run_update_code(command);

	/*
	 * If there's no more ops registered with ftrace, run a
	 * sanity check to make sure all rec flags are cleared.
	 */
	if (rcu_dereference_protected(ftrace_ops_list,
			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
		struct ftrace_page *pg;
		struct dyn_ftrace *rec;

		do_for_each_ftrace_rec(pg, rec) {
			if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_FL_DISABLED))
				pr_warn("  %pS flags:%lx\n",
					(void *)rec->ip, rec->flags);
		} while_for_each_ftrace_rec();
	}

	ops->old_hash.filter_hash = NULL;
	ops->old_hash.notrace_hash = NULL;

	removed_ops = NULL;
	ops->flags &= ~FTRACE_OPS_FL_REMOVING;

	/*
	 * Dynamic ops may be freed, we must make sure that all
	 * callers are done before leaving this function.
	 * The same goes for freeing the per_cpu data of the per_cpu
	 * ops.
	 */
	if (ops->flags & FTRACE_OPS_FL_DYNAMIC) {
		/*
		 * We need to do a hard force of sched synchronization.
		 * This is because we use preempt_disable() to do RCU, but
		 * the function tracers can be called where RCU is not watching
		 * (like before user_exit()). We can not rely on the RCU
		 * infrastructure to do the synchronization, thus we must do it
		 * ourselves.
		 */
		synchronize_rcu_tasks_rude();

		/*
		 * When the kernel is preemptive, tasks can be preempted
		 * while on a ftrace trampoline. Just scheduling a task on
		 * a CPU is not good enough to flush them. Calling
		 * synchronize_rcu_tasks() will wait for those tasks to
		 * execute and either schedule voluntarily or enter user space.
		 */
		if (IS_ENABLED(CONFIG_PREEMPTION))
			synchronize_rcu_tasks();

 free_ops:
		ftrace_trampoline_free(ops);
	}

	return 0;
}

static u64		ftrace_update_time;
unsigned long		ftrace_update_tot_cnt;
unsigned long		ftrace_number_of_pages;
unsigned long		ftrace_number_of_groups;

static inline int ops_traces_mod(struct ftrace_ops *ops)
{
	/*
	 * Filter_hash being empty will default to trace module.
	 * But notrace hash requires a test of individual module functions.
	 */
	return ftrace_hash_empty(ops->func_hash->filter_hash) &&
		ftrace_hash_empty(ops->func_hash->notrace_hash);
}

/*
 * Check if the current ops references the given record.
 *
 * If the ops traces all functions, then it was already accounted for.
 * If the ops does not trace the current record function, skip it.
 * If the ops ignores the function via notrace filter, skip it.
 */
static inline bool
ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	/* If ops isn't enabled, ignore it */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return false;

	/* If ops traces all then it includes this function */
	if (ops_traces_mod(ops))
		return true;

	/* The function must be in the filter */
	if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
	    !__ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))
		return false;

	/* If in notrace hash, we ignore it too */
	if (ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
		return false;

	return true;
}

static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
{
	bool init_nop = ftrace_need_init_nop();
	struct ftrace_page *pg;
	struct dyn_ftrace *p;
	u64 start, stop;
	unsigned long update_cnt = 0;
	unsigned long rec_flags = 0;
	int i;

	start = ftrace_now(raw_smp_processor_id());

	/*
	 * When a module is loaded, its mcount locations are recorded
	 * here before the module is fully initialized. Mark the
	 * records as disabled so that nothing traces the module's
	 * functions while its text is still being set up; they are
	 * enabled later, once the module has finished loading, by
	 * ftrace_module_enable().
	 */
	if (mod)
		rec_flags |= FTRACE_FL_DISABLED;

	for (pg = new_pgs; pg; pg = pg->next) {

		for (i = 0; i < pg->index; i++) {

			/* If something went wrong, bail without enabling anything */
			if (unlikely(ftrace_disabled))
				return -1;

			p = &pg->records[i];
			p->flags = rec_flags;

			/*
			 * Do the initial record conversion from mcount jump
			 * to the NOP instructions.
			 */
			if (init_nop && !ftrace_nop_initialize(mod, p))
				break;

			update_cnt++;
		}
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += update_cnt;

	return 0;
}

static int ftrace_allocate_records(struct ftrace_page *pg, int count)
{
	int order;
	int pages;
	int cnt;

	if (WARN_ON(!count))
		return -EINVAL;

	/* We want to fill as much as possible, with no empty pages */
	pages = DIV_ROUND_UP(count, ENTRIES_PER_PAGE);
	order = fls(pages) - 1;

 again:
	pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);

	if (!pg->records) {
		/* if we can't allocate this size, try something smaller */
		if (!order)
			return -ENOMEM;
		order--;
		goto again;
	}

	ftrace_number_of_pages += 1 << order;
	ftrace_number_of_groups++;

	cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
	pg->order = order;

	if (cnt > count)
		cnt = count;

	return cnt;
}
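
/*
 * Worked example (added for illustration; the numbers assume a 4K
 * PAGE_SIZE and the ENTRY_SIZE/ENTRIES_PER_PAGE definitions earlier in
 * this file, with sizeof(struct dyn_ftrace) == 16 so ENTRIES_PER_PAGE
 * is 256): a request for count = 40000 records gives pages = 157, so
 * the first attempt is order = fls(157) - 1 = 7, i.e. a 128-page
 * (512K) block holding 32768 records; the remaining 7232 records are
 * picked up by the next ftrace_page in ftrace_allocate_pages() below.
 */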

static struct ftrace_page *
ftrace_allocate_pages(unsigned long num_to_init)
{
	struct ftrace_page *start_pg;
	struct ftrace_page *pg;
	int cnt;

	if (!num_to_init)
		return NULL;

	start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
	if (!pg)
		return NULL;

	/*
	 * Try to allocate as much as possible in one continuous
	 * location that fills in all of the space. We want to
	 * waste as little space as possible.
	 */
	for (;;) {
		cnt = ftrace_allocate_records(pg, num_to_init);
		if (cnt < 0)
			goto free_pages;

		num_to_init -= cnt;
		if (!num_to_init)
			break;

		pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
		if (!pg->next)
			goto free_pages;

		pg = pg->next;
	}

	return start_pg;

 free_pages:
	pg = start_pg;
	while (pg) {
		if (pg->records) {
			free_pages((unsigned long)pg->records, pg->order);
			ftrace_number_of_pages -= 1 << pg->order;
		}
		start_pg = pg->next;
		kfree(pg);
		pg = start_pg;
		ftrace_number_of_groups--;
	}
	pr_info("ftrace: FAILED to allocate memory for functions\n");
	return NULL;
}
3247
3248#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4)
3249
3250struct ftrace_iterator {
3251 loff_t pos;
3252 loff_t func_pos;
3253 loff_t mod_pos;
3254 struct ftrace_page *pg;
3255 struct dyn_ftrace *func;
3256 struct ftrace_func_probe *probe;
3257 struct ftrace_func_entry *probe_entry;
3258 struct trace_parser parser;
3259 struct ftrace_hash *hash;
3260 struct ftrace_ops *ops;
3261 struct trace_array *tr;
3262 struct list_head *mod_list;
3263 int pidx;
3264 int idx;
3265 unsigned flags;
3266};
3267
3268static void *
3269t_probe_next(struct seq_file *m, loff_t *pos)
3270{
3271 struct ftrace_iterator *iter = m->private;
3272 struct trace_array *tr = iter->ops->private;
3273 struct list_head *func_probes;
3274 struct ftrace_hash *hash;
3275 struct list_head *next;
3276 struct hlist_node *hnd = NULL;
3277 struct hlist_head *hhd;
3278 int size;
3279
3280 (*pos)++;
3281 iter->pos = *pos;
3282
3283 if (!tr)
3284 return NULL;
3285
3286 func_probes = &tr->func_probes;
3287 if (list_empty(func_probes))
3288 return NULL;
3289
3290 if (!iter->probe) {
3291 next = func_probes->next;
3292 iter->probe = list_entry(next, struct ftrace_func_probe, list);
3293 }
3294
3295 if (iter->probe_entry)
3296 hnd = &iter->probe_entry->hlist;
3297
3298 hash = iter->probe->ops.func_hash->filter_hash;
3299
3300
3301
3302
3303
3304 if (!hash || hash == EMPTY_HASH)
3305 return NULL;
3306
3307 size = 1 << hash->size_bits;
3308
3309 retry:
3310 if (iter->pidx >= size) {
3311 if (iter->probe->list.next == func_probes)
3312 return NULL;
3313 next = iter->probe->list.next;
3314 iter->probe = list_entry(next, struct ftrace_func_probe, list);
3315 hash = iter->probe->ops.func_hash->filter_hash;
3316 size = 1 << hash->size_bits;
3317 iter->pidx = 0;
3318 }
3319
3320 hhd = &hash->buckets[iter->pidx];
3321
3322 if (hlist_empty(hhd)) {
3323 iter->pidx++;
3324 hnd = NULL;
3325 goto retry;
3326 }
3327
3328 if (!hnd)
3329 hnd = hhd->first;
3330 else {
3331 hnd = hnd->next;
3332 if (!hnd) {
3333 iter->pidx++;
3334 goto retry;
3335 }
3336 }
3337
3338 if (WARN_ON_ONCE(!hnd))
3339 return NULL;
3340
3341 iter->probe_entry = hlist_entry(hnd, struct ftrace_func_entry, hlist);
3342
3343 return iter;
3344}
3345
3346static void *t_probe_start(struct seq_file *m, loff_t *pos)
3347{
3348 struct ftrace_iterator *iter = m->private;
3349 void *p = NULL;
3350 loff_t l;
3351
3352 if (!(iter->flags & FTRACE_ITER_DO_PROBES))
3353 return NULL;
3354
3355 if (iter->mod_pos > *pos)
3356 return NULL;
3357
3358 iter->probe = NULL;
3359 iter->probe_entry = NULL;
3360 iter->pidx = 0;
3361 for (l = 0; l <= (*pos - iter->mod_pos); ) {
3362 p = t_probe_next(m, &l);
3363 if (!p)
3364 break;
3365 }
3366 if (!p)
3367 return NULL;
3368
3369
3370 iter->flags |= FTRACE_ITER_PROBE;
3371
3372 return iter;
3373}
3374
3375static int
3376t_probe_show(struct seq_file *m, struct ftrace_iterator *iter)
3377{
3378 struct ftrace_func_entry *probe_entry;
3379 struct ftrace_probe_ops *probe_ops;
3380 struct ftrace_func_probe *probe;
3381
3382 probe = iter->probe;
3383 probe_entry = iter->probe_entry;
3384
3385 if (WARN_ON_ONCE(!probe || !probe_entry))
3386 return -EIO;
3387
3388 probe_ops = probe->probe_ops;
3389
3390 if (probe_ops->print)
3391 return probe_ops->print(m, probe_entry->ip, probe_ops, probe->data);
3392
3393 seq_printf(m, "%ps:%ps\n", (void *)probe_entry->ip,
3394 (void *)probe_ops->func);
3395
3396 return 0;
3397}
3398
3399static void *
3400t_mod_next(struct seq_file *m, loff_t *pos)
3401{
3402 struct ftrace_iterator *iter = m->private;
3403 struct trace_array *tr = iter->tr;
3404
3405 (*pos)++;
3406 iter->pos = *pos;
3407
3408 iter->mod_list = iter->mod_list->next;
3409
3410 if (iter->mod_list == &tr->mod_trace ||
3411 iter->mod_list == &tr->mod_notrace) {
3412 iter->flags &= ~FTRACE_ITER_MOD;
3413 return NULL;
3414 }
3415
3416 iter->mod_pos = *pos;
3417
3418 return iter;
3419}
3420
3421static void *t_mod_start(struct seq_file *m, loff_t *pos)
3422{
3423 struct ftrace_iterator *iter = m->private;
3424 void *p = NULL;
3425 loff_t l;
3426
3427 if (iter->func_pos > *pos)
3428 return NULL;
3429
3430 iter->mod_pos = iter->func_pos;
3431
3432
3433 if (!iter->tr)
3434 return NULL;
3435
3436 for (l = 0; l <= (*pos - iter->func_pos); ) {
3437 p = t_mod_next(m, &l);
3438 if (!p)
3439 break;
3440 }
3441 if (!p) {
3442 iter->flags &= ~FTRACE_ITER_MOD;
3443 return t_probe_start(m, pos);
3444 }
3445
3446
3447 iter->flags |= FTRACE_ITER_MOD;
3448
3449 return iter;
3450}
3451
3452static int
3453t_mod_show(struct seq_file *m, struct ftrace_iterator *iter)
3454{
3455 struct ftrace_mod_load *ftrace_mod;
3456 struct trace_array *tr = iter->tr;
3457
3458 if (WARN_ON_ONCE(!iter->mod_list) ||
3459 iter->mod_list == &tr->mod_trace ||
3460 iter->mod_list == &tr->mod_notrace)
3461 return -EIO;
3462
3463 ftrace_mod = list_entry(iter->mod_list, struct ftrace_mod_load, list);
3464
3465 if (ftrace_mod->func)
3466 seq_printf(m, "%s", ftrace_mod->func);
3467 else
3468 seq_putc(m, '*');
3469
3470 seq_printf(m, ":mod:%s\n", ftrace_mod->module);
3471
3472 return 0;
3473}
3474
3475static void *
3476t_func_next(struct seq_file *m, loff_t *pos)
3477{
3478 struct ftrace_iterator *iter = m->private;
3479 struct dyn_ftrace *rec = NULL;
3480
3481 (*pos)++;
3482
3483 retry:
3484 if (iter->idx >= iter->pg->index) {
3485 if (iter->pg->next) {
3486 iter->pg = iter->pg->next;
3487 iter->idx = 0;
3488 goto retry;
3489 }
3490 } else {
3491 rec = &iter->pg->records[iter->idx++];
3492 if (((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
3493 !ftrace_lookup_ip(iter->hash, rec->ip)) ||
3494
3495 ((iter->flags & FTRACE_ITER_ENABLED) &&
3496 !(rec->flags & FTRACE_FL_ENABLED))) {
3497
3498 rec = NULL;
3499 goto retry;
3500 }
3501 }
3502
3503 if (!rec)
3504 return NULL;
3505
3506 iter->pos = iter->func_pos = *pos;
3507 iter->func = rec;
3508
3509 return iter;
3510}
3511
3512static void *
3513t_next(struct seq_file *m, void *v, loff_t *pos)
3514{
3515 struct ftrace_iterator *iter = m->private;
3516 loff_t l = *pos;
3517 void *ret;
3518
3519 if (unlikely(ftrace_disabled))
3520 return NULL;
3521
3522 if (iter->flags & FTRACE_ITER_PROBE)
3523 return t_probe_next(m, pos);
3524
3525 if (iter->flags & FTRACE_ITER_MOD)
3526 return t_mod_next(m, pos);
3527
3528 if (iter->flags & FTRACE_ITER_PRINTALL) {
3529
3530 (*pos)++;
3531 return t_mod_start(m, &l);
3532 }
3533
3534 ret = t_func_next(m, pos);
3535
3536 if (!ret)
3537 return t_mod_start(m, &l);
3538
3539 return ret;
3540}
3541
3542static void reset_iter_read(struct ftrace_iterator *iter)
3543{
3544 iter->pos = 0;
3545 iter->func_pos = 0;
3546 iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_PROBE | FTRACE_ITER_MOD);
3547}
3548
3549static void *t_start(struct seq_file *m, loff_t *pos)
3550{
3551 struct ftrace_iterator *iter = m->private;
3552 void *p = NULL;
3553 loff_t l;
3554
3555 mutex_lock(&ftrace_lock);
3556
3557 if (unlikely(ftrace_disabled))
3558 return NULL;
3559
3560
3561
3562
3563 if (*pos < iter->pos)
3564 reset_iter_read(iter);
3565
3566
3567
3568
3569
3570
3571 if ((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
3572 ftrace_hash_empty(iter->hash)) {
3573 iter->func_pos = 1;
3574 if (*pos > 0)
3575 return t_mod_start(m, pos);
3576 iter->flags |= FTRACE_ITER_PRINTALL;
3577
3578 iter->flags &= ~FTRACE_ITER_PROBE;
3579 return iter;
3580 }
3581
3582 if (iter->flags & FTRACE_ITER_MOD)
3583 return t_mod_start(m, pos);
3584
3585
3586
3587
3588
3589
3590 iter->pg = ftrace_pages_start;
3591 iter->idx = 0;
3592 for (l = 0; l <= *pos; ) {
3593 p = t_func_next(m, &l);
3594 if (!p)
3595 break;
3596 }
3597
3598 if (!p)
3599 return t_mod_start(m, pos);
3600
3601 return iter;
3602}
3603
3604static void t_stop(struct seq_file *m, void *p)
3605{
3606 mutex_unlock(&ftrace_lock);
3607}
3608
3609void * __weak
3610arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
3611{
3612 return NULL;
3613}
3614
3615static void add_trampoline_func(struct seq_file *m, struct ftrace_ops *ops,
3616 struct dyn_ftrace *rec)
3617{
3618 void *ptr;
3619
3620 ptr = arch_ftrace_trampoline_func(ops, rec);
3621 if (ptr)
3622 seq_printf(m, " ->%pS", ptr);
3623}

#ifdef FTRACE_MCOUNT_MAX_OFFSET
/*
 * Weak functions can still have an mcount/fentry that is saved in
 * the __mcount_loc section. These can be detected by having a
 * symbol offset of greater than FTRACE_MCOUNT_MAX_OFFSET, as the
 * symbol found by kallsyms is not the function that the mcount/fentry
 * is part of. The offset is much greater than it would be normally.
 *
 * Keeping such stale records enabled would trace the wrong symbol,
 * so this test marks them as disabled.
 */
static int test_for_valid_rec(struct dyn_ftrace *rec)
{
	char str[KSYM_SYMBOL_LEN];
	unsigned long offset;
	const char *ret;

	ret = kallsyms_lookup(rec->ip, NULL, &offset, NULL, str);

	/* Weak functions can cause invalid addresses */
	if (!ret || offset > FTRACE_MCOUNT_MAX_OFFSET) {
		rec->flags |= FTRACE_FL_DISABLED;
		return 0;
	}
	return 1;
}

static struct workqueue_struct *ftrace_check_wq __initdata;
static struct work_struct ftrace_check_work __initdata;

/*
 * Scan all the mcount/fentry entries to see if any of them are
 * invalid (from weak functions) and mark those records disabled.
 */
static __init void ftrace_check_work_func(struct work_struct *work)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {
		test_for_valid_rec(rec);
	} while_for_each_ftrace_rec();
	mutex_unlock(&ftrace_lock);
}

static int __init ftrace_check_for_weak_functions(void)
{
	INIT_WORK(&ftrace_check_work, ftrace_check_work_func);

	ftrace_check_wq = alloc_workqueue("ftrace_check_wq", WQ_UNBOUND, 0);

	queue_work(ftrace_check_wq, &ftrace_check_work);
	return 0;
}

static int __init ftrace_check_sync(void)
{
	/* Make sure the ftrace_check work has finished */
	if (ftrace_check_wq)
		destroy_workqueue(ftrace_check_wq);
	return 0;
}

late_initcall_sync(ftrace_check_sync);
subsys_initcall(ftrace_check_for_weak_functions);

static int print_rec(struct seq_file *m, unsigned long ip)
{
	unsigned long offset;
	char str[KSYM_SYMBOL_LEN];
	char *modname;
	const char *ret;

	ret = kallsyms_lookup(ip, NULL, &offset, &modname, str);
	/* Weak functions can cause invalid addresses */
	if (!ret || offset > FTRACE_MCOUNT_MAX_OFFSET) {
		snprintf(str, KSYM_SYMBOL_LEN, "%s_%ld",
			 FTRACE_INVALID_FUNCTION, offset);
		ret = NULL;
	}

	seq_puts(m, str);
	if (modname)
		seq_printf(m, " [%s]", modname);
	return ret == NULL ? -1 : 0;
}
#else
static inline int test_for_valid_rec(struct dyn_ftrace *rec)
{
	return 1;
}

static inline int print_rec(struct seq_file *m, unsigned long ip)
{
	seq_printf(m, "%ps", (void *)ip);
	return 0;
}
#endif
3723
3724static int t_show(struct seq_file *m, void *v)
3725{
3726 struct ftrace_iterator *iter = m->private;
3727 struct dyn_ftrace *rec;
3728
3729 if (iter->flags & FTRACE_ITER_PROBE)
3730 return t_probe_show(m, iter);
3731
3732 if (iter->flags & FTRACE_ITER_MOD)
3733 return t_mod_show(m, iter);
3734
3735 if (iter->flags & FTRACE_ITER_PRINTALL) {
3736 if (iter->flags & FTRACE_ITER_NOTRACE)
3737 seq_puts(m, "#### no functions disabled ####\n");
3738 else
3739 seq_puts(m, "#### all functions enabled ####\n");
3740 return 0;
3741 }
3742
3743 rec = iter->func;
3744
3745 if (!rec)
3746 return 0;
3747
3748 if (print_rec(m, rec->ip)) {
3749
3750 WARN_ON_ONCE(!(rec->flags & FTRACE_FL_DISABLED));
3751 seq_putc(m, '\n');
3752 return 0;
3753 }
3754
3755 if (iter->flags & FTRACE_ITER_ENABLED) {
3756 struct ftrace_ops *ops;
3757
3758 seq_printf(m, " (%ld)%s%s%s",
3759 ftrace_rec_count(rec),
3760 rec->flags & FTRACE_FL_REGS ? " R" : " ",
3761 rec->flags & FTRACE_FL_IPMODIFY ? " I" : " ",
3762 rec->flags & FTRACE_FL_DIRECT ? " D" : " ");
3763 if (rec->flags & FTRACE_FL_TRAMP_EN) {
3764 ops = ftrace_find_tramp_ops_any(rec);
3765 if (ops) {
3766 do {
3767 seq_printf(m, "\ttramp: %pS (%pS)",
3768 (void *)ops->trampoline,
3769 (void *)ops->func);
3770 add_trampoline_func(m, ops, rec);
3771 ops = ftrace_find_tramp_ops_next(rec, ops);
3772 } while (ops);
3773 } else
3774 seq_puts(m, "\ttramp: ERROR!");
3775 } else {
3776 add_trampoline_func(m, NULL, rec);
3777 }
3778 if (rec->flags & FTRACE_FL_DIRECT) {
3779 unsigned long direct;
3780
3781 direct = ftrace_find_rec_direct(rec->ip);
3782 if (direct)
3783 seq_printf(m, "\n\tdirect-->%pS", (void *)direct);
3784 }
3785 }
3786
3787 seq_putc(m, '\n');
3788
3789 return 0;
3790}
3791
3792static const struct seq_operations show_ftrace_seq_ops = {
3793 .start = t_start,
3794 .next = t_next,
3795 .stop = t_stop,
3796 .show = t_show,
3797};
3798
3799static int
3800ftrace_avail_open(struct inode *inode, struct file *file)
3801{
3802 struct ftrace_iterator *iter;
3803 int ret;
3804
3805 ret = security_locked_down(LOCKDOWN_TRACEFS);
3806 if (ret)
3807 return ret;
3808
3809 if (unlikely(ftrace_disabled))
3810 return -ENODEV;
3811
3812 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
3813 if (!iter)
3814 return -ENOMEM;
3815
3816 iter->pg = ftrace_pages_start;
3817 iter->ops = &global_ops;
3818
3819 return 0;
3820}
3821
3822static int
3823ftrace_enabled_open(struct inode *inode, struct file *file)
3824{
3825 struct ftrace_iterator *iter;
3826
3827
3828
3829
3830
3831
3832
3833
3834
3835
3836 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
3837 if (!iter)
3838 return -ENOMEM;
3839
3840 iter->pg = ftrace_pages_start;
3841 iter->flags = FTRACE_ITER_ENABLED;
3842 iter->ops = &global_ops;
3843
3844 return 0;
3845}
3846
3847
3848
3849
3850
3851
3852
3853
3854
3855
3856
3857
3858
3859
3860
3861
3862
3863int
3864ftrace_regex_open(struct ftrace_ops *ops, int flag,
3865 struct inode *inode, struct file *file)
3866{
3867 struct ftrace_iterator *iter;
3868 struct ftrace_hash *hash;
3869 struct list_head *mod_head;
3870 struct trace_array *tr = ops->private;
3871 int ret = -ENOMEM;
3872
3873 ftrace_ops_init(ops);
3874
3875 if (unlikely(ftrace_disabled))
3876 return -ENODEV;
3877
3878 if (tracing_check_open_get_tr(tr))
3879 return -ENODEV;
3880
3881 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
3882 if (!iter)
3883 goto out;
3884
3885 if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX))
3886 goto out;
3887
3888 iter->ops = ops;
3889 iter->flags = flag;
3890 iter->tr = tr;
3891
3892 mutex_lock(&ops->func_hash->regex_lock);
3893
3894 if (flag & FTRACE_ITER_NOTRACE) {
3895 hash = ops->func_hash->notrace_hash;
3896 mod_head = tr ? &tr->mod_notrace : NULL;
3897 } else {
3898 hash = ops->func_hash->filter_hash;
3899 mod_head = tr ? &tr->mod_trace : NULL;
3900 }
3901
3902 iter->mod_list = mod_head;
3903
3904 if (file->f_mode & FMODE_WRITE) {
3905 const int size_bits = FTRACE_HASH_DEFAULT_BITS;
3906
3907 if (file->f_flags & O_TRUNC) {
3908 iter->hash = alloc_ftrace_hash(size_bits);
3909 clear_ftrace_mod_list(mod_head);
3910 } else {
3911 iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash);
3912 }
3913
3914 if (!iter->hash) {
3915 trace_parser_put(&iter->parser);
3916 goto out_unlock;
3917 }
3918 } else
3919 iter->hash = hash;
3920
3921 ret = 0;
3922
3923 if (file->f_mode & FMODE_READ) {
3924 iter->pg = ftrace_pages_start;
3925
3926 ret = seq_open(file, &show_ftrace_seq_ops);
3927 if (!ret) {
3928 struct seq_file *m = file->private_data;
3929 m->private = iter;
3930 } else {
3931
3932 free_ftrace_hash(iter->hash);
3933 trace_parser_put(&iter->parser);
3934 }
3935 } else
3936 file->private_data = iter;
3937
3938 out_unlock:
3939 mutex_unlock(&ops->func_hash->regex_lock);
3940
3941 out:
3942 if (ret) {
3943 kfree(iter);
3944 if (tr)
3945 trace_array_put(tr);
3946 }
3947
3948 return ret;
3949}
3950
3951static int
3952ftrace_filter_open(struct inode *inode, struct file *file)
3953{
3954 struct ftrace_ops *ops = inode->i_private;
3955
3956
3957 return ftrace_regex_open(ops,
3958 FTRACE_ITER_FILTER | FTRACE_ITER_DO_PROBES,
3959 inode, file);
3960}
3961
3962static int
3963ftrace_notrace_open(struct inode *inode, struct file *file)
3964{
3965 struct ftrace_ops *ops = inode->i_private;
3966
3967
3968 return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE,
3969 inode, file);
3970}

struct ftrace_glob {
	char *search;
	unsigned len;
	int type;
};

/*
 * If symbols in an architecture don't correspond exactly to the user-visible
 * name of what they represent, it is possible to define this function to
 * perform the necessary adjustments.
 */
char * __weak arch_ftrace_match_adjust(char *str, const char *search)
{
	return str;
}

static int ftrace_match(char *str, struct ftrace_glob *g)
{
	int matched = 0;
	int slen;

	str = arch_ftrace_match_adjust(str, g->search);

	switch (g->type) {
	case MATCH_FULL:
		if (strcmp(str, g->search) == 0)
			matched = 1;
		break;
	case MATCH_FRONT_ONLY:
		if (strncmp(str, g->search, g->len) == 0)
			matched = 1;
		break;
	case MATCH_MIDDLE_ONLY:
		if (strstr(str, g->search))
			matched = 1;
		break;
	case MATCH_END_ONLY:
		slen = strlen(str);
		if (slen >= g->len &&
		    memcmp(str + slen - g->len, g->search, g->len) == 0)
			matched = 1;
		break;
	case MATCH_GLOB:
		if (glob_match(g->search, str))
			matched = 1;
		break;
	}

	return matched;
}
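
/*
 * Illustrative mapping (added for clarity): filter_parse_regex() turns
 * a user pattern into one of the match types handled above, e.g.:
 *
 *	"sched_switch"	-> MATCH_FULL		(exact compare)
 *	"sys_*"		-> MATCH_FRONT_ONLY	(prefix "sys_")
 *	"*_lock"	-> MATCH_END_ONLY	(suffix "_lock")
 *	"*spin*"	-> MATCH_MIDDLE_ONLY	(substring "spin")
 *	"s?s_read"	-> MATCH_GLOB		(full glob_match())
 */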
4023
4024static int
4025enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int clear_filter)
4026{
4027 struct ftrace_func_entry *entry;
4028 int ret = 0;
4029
4030 entry = ftrace_lookup_ip(hash, rec->ip);
4031 if (clear_filter) {
4032
4033 if (!entry)
4034 return 0;
4035
4036 free_hash_entry(hash, entry);
4037 } else {
4038
4039 if (entry)
4040 return 0;
4041
4042 ret = add_hash_entry(hash, rec->ip);
4043 }
4044 return ret;
4045}
4046
4047static int
4048add_rec_by_index(struct ftrace_hash *hash, struct ftrace_glob *func_g,
4049 int clear_filter)
4050{
4051 long index = simple_strtoul(func_g->search, NULL, 0);
4052 struct ftrace_page *pg;
4053 struct dyn_ftrace *rec;
4054
4055
4056 if (--index < 0)
4057 return 0;
4058
4059 do_for_each_ftrace_rec(pg, rec) {
4060 if (pg->index <= index) {
4061 index -= pg->index;
4062
4063 break;
4064 }
4065 rec = &pg->records[index];
4066 enter_record(hash, rec, clear_filter);
4067 return 1;
4068 } while_for_each_ftrace_rec();
4069 return 0;
4070}
4071
4072#ifdef FTRACE_MCOUNT_MAX_OFFSET
4073static int lookup_ip(unsigned long ip, char **modname, char *str)
4074{
4075 unsigned long offset;
4076
4077 kallsyms_lookup(ip, NULL, &offset, modname, str);
4078 if (offset > FTRACE_MCOUNT_MAX_OFFSET)
4079 return -1;
4080 return 0;
4081}
4082#else
4083static int lookup_ip(unsigned long ip, char **modname, char *str)
4084{
4085 kallsyms_lookup(ip, NULL, NULL, modname, str);
4086 return 0;
4087}
4088#endif
4089
4090static int
4091ftrace_match_record(struct dyn_ftrace *rec, struct ftrace_glob *func_g,
4092 struct ftrace_glob *mod_g, int exclude_mod)
4093{
4094 char str[KSYM_SYMBOL_LEN];
4095 char *modname;
4096
4097 if (lookup_ip(rec->ip, &modname, str)) {
4098
4099 WARN_ON_ONCE(system_state == SYSTEM_RUNNING &&
4100 !(rec->flags & FTRACE_FL_DISABLED));
4101 return 0;
4102 }
4103
4104 if (mod_g) {
4105 int mod_matches = (modname) ? ftrace_match(modname, mod_g) : 0;
4106
4107
4108 if (!mod_g->len) {
4109
4110 if (!exclude_mod != !modname)
4111 goto func_match;
4112 return 0;
4113 }
4114
4115
4116
4117
4118
4119
4120
4121
4122 if (!mod_matches == !exclude_mod)
4123 return 0;
4124func_match:
4125
4126 if (!func_g->len)
4127 return 1;
4128 }
4129
4130 return ftrace_match(str, func_g);
4131}
4132
4133static int
4134match_records(struct ftrace_hash *hash, char *func, int len, char *mod)
4135{
4136 struct ftrace_page *pg;
4137 struct dyn_ftrace *rec;
4138 struct ftrace_glob func_g = { .type = MATCH_FULL };
4139 struct ftrace_glob mod_g = { .type = MATCH_FULL };
4140 struct ftrace_glob *mod_match = (mod) ? &mod_g : NULL;
4141 int exclude_mod = 0;
4142 int found = 0;
4143 int ret;
4144 int clear_filter = 0;
4145
4146 if (func) {
4147 func_g.type = filter_parse_regex(func, len, &func_g.search,
4148 &clear_filter);
4149 func_g.len = strlen(func_g.search);
4150 }
4151
4152 if (mod) {
4153 mod_g.type = filter_parse_regex(mod, strlen(mod),
4154 &mod_g.search, &exclude_mod);
4155 mod_g.len = strlen(mod_g.search);
4156 }
4157
4158 mutex_lock(&ftrace_lock);
4159
4160 if (unlikely(ftrace_disabled))
4161 goto out_unlock;
4162
4163 if (func_g.type == MATCH_INDEX) {
4164 found = add_rec_by_index(hash, &func_g, clear_filter);
4165 goto out_unlock;
4166 }
4167
4168 do_for_each_ftrace_rec(pg, rec) {
4169
4170 if (rec->flags & FTRACE_FL_DISABLED)
4171 continue;
4172
4173 if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) {
4174 ret = enter_record(hash, rec, clear_filter);
4175 if (ret < 0) {
4176 found = ret;
4177 goto out_unlock;
4178 }
4179 found = 1;
4180 }
4181 } while_for_each_ftrace_rec();
4182 out_unlock:
4183 mutex_unlock(&ftrace_lock);
4184
4185 return found;
4186}
4187
4188static int
4189ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
4190{
4191 return match_records(hash, buff, len, NULL);
4192}
4193
4194static void ftrace_ops_update_code(struct ftrace_ops *ops,
4195 struct ftrace_ops_hash *old_hash)
4196{
4197 struct ftrace_ops *op;
4198
4199 if (!ftrace_enabled)
4200 return;
4201
4202 if (ops->flags & FTRACE_OPS_FL_ENABLED) {
4203 ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
4204 return;
4205 }
4206
4207
4208
4209
4210
4211
4212 if (ops->func_hash != &global_ops.local_hash)
4213 return;
4214
4215 do_for_each_ftrace_op(op, ftrace_ops_list) {
4216 if (op->func_hash == &global_ops.local_hash &&
4217 op->flags & FTRACE_OPS_FL_ENABLED) {
4218 ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash);
4219
4220 return;
4221 }
4222 } while_for_each_ftrace_op(op);
4223}
4224
4225static int ftrace_hash_move_and_update_ops(struct ftrace_ops *ops,
4226 struct ftrace_hash **orig_hash,
4227 struct ftrace_hash *hash,
4228 int enable)
4229{
4230 struct ftrace_ops_hash old_hash_ops;
4231 struct ftrace_hash *old_hash;
4232 int ret;
4233
4234 old_hash = *orig_hash;
4235 old_hash_ops.filter_hash = ops->func_hash->filter_hash;
4236 old_hash_ops.notrace_hash = ops->func_hash->notrace_hash;
4237 ret = ftrace_hash_move(ops, enable, orig_hash, hash);
4238 if (!ret) {
4239 ftrace_ops_update_code(ops, &old_hash_ops);
4240 free_ftrace_hash_rcu(old_hash);
4241 }
4242 return ret;
4243}

static bool module_exists(const char *module)
{
	/* All modules have the symbol __this_module */
	static const char this_mod[] = "__this_module";
	char modname[MAX_PARAM_PREFIX_LEN + sizeof(this_mod) + 2];
	unsigned long val;
	int n;

	n = snprintf(modname, sizeof(modname), "%s:%s", module, this_mod);

	if (n > sizeof(modname) - 1)
		return false;

	val = module_kallsyms_lookup_name(modname);
	return val != 0;
}
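
/*
 * Example (added for illustration): module_exists("ext4") builds the
 * string "ext4:__this_module" and asks kallsyms for it; every loaded
 * module exports that symbol, so a nonzero address means the module
 * is currently loaded.
 */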
4261
4262static int cache_mod(struct trace_array *tr,
4263 const char *func, char *module, int enable)
4264{
4265 struct ftrace_mod_load *ftrace_mod, *n;
4266 struct list_head *head = enable ? &tr->mod_trace : &tr->mod_notrace;
4267 int ret;
4268
4269 mutex_lock(&ftrace_lock);
4270
4271
4272 if (func[0] == '!') {
4273 func++;
4274 ret = -EINVAL;
4275
4276
4277 list_for_each_entry_safe(ftrace_mod, n, head, list) {
4278 if (strcmp(ftrace_mod->module, module) != 0)
4279 continue;
4280
4281
4282 if (strcmp(func, "*") == 0 ||
4283 (ftrace_mod->func &&
4284 strcmp(ftrace_mod->func, func) == 0)) {
4285 ret = 0;
4286 free_ftrace_mod(ftrace_mod);
4287 continue;
4288 }
4289 }
4290 goto out;
4291 }
4292
4293 ret = -EINVAL;
4294
4295 if (module_exists(module))
4296 goto out;
4297
4298
4299 ret = ftrace_add_mod(tr, func, module, enable);
4300 out:
4301 mutex_unlock(&ftrace_lock);
4302
4303 return ret;
4304}
4305
4306static int
4307ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
4308 int reset, int enable);
4309
4310#ifdef CONFIG_MODULES
4311static void process_mod_list(struct list_head *head, struct ftrace_ops *ops,
4312 char *mod, bool enable)
4313{
4314 struct ftrace_mod_load *ftrace_mod, *n;
4315 struct ftrace_hash **orig_hash, *new_hash;
4316 LIST_HEAD(process_mods);
4317 char *func;
4318
4319 mutex_lock(&ops->func_hash->regex_lock);
4320
4321 if (enable)
4322 orig_hash = &ops->func_hash->filter_hash;
4323 else
4324 orig_hash = &ops->func_hash->notrace_hash;
4325
4326 new_hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS,
4327 *orig_hash);
4328 if (!new_hash)
4329 goto out;
4330
4331 mutex_lock(&ftrace_lock);
4332
4333 list_for_each_entry_safe(ftrace_mod, n, head, list) {
4334
4335 if (strcmp(ftrace_mod->module, mod) != 0)
4336 continue;
4337
4338 if (ftrace_mod->func)
4339 func = kstrdup(ftrace_mod->func, GFP_KERNEL);
4340 else
4341 func = kstrdup("*", GFP_KERNEL);
4342
4343 if (!func)
4344 continue;
4345
4346 list_move(&ftrace_mod->list, &process_mods);
4347
4348
4349 kfree(ftrace_mod->func);
4350 ftrace_mod->func = func;
4351 }
4352
4353 mutex_unlock(&ftrace_lock);
4354
4355 list_for_each_entry_safe(ftrace_mod, n, &process_mods, list) {
4356
4357 func = ftrace_mod->func;
4358
4359
4360 match_records(new_hash, func, strlen(func), mod);
4361 free_ftrace_mod(ftrace_mod);
4362 }
4363
4364 if (enable && list_empty(head))
4365 new_hash->flags &= ~FTRACE_HASH_FL_MOD;
4366
4367 mutex_lock(&ftrace_lock);
4368
4369 ftrace_hash_move_and_update_ops(ops, orig_hash,
4370 new_hash, enable);
4371 mutex_unlock(&ftrace_lock);
4372
4373 out:
4374 mutex_unlock(&ops->func_hash->regex_lock);
4375
4376 free_ftrace_hash(new_hash);
4377}
4378
4379static void process_cached_mods(const char *mod_name)
4380{
4381 struct trace_array *tr;
4382 char *mod;
4383
4384 mod = kstrdup(mod_name, GFP_KERNEL);
4385 if (!mod)
4386 return;
4387
4388 mutex_lock(&trace_types_lock);
4389 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
4390 if (!list_empty(&tr->mod_trace))
4391 process_mod_list(&tr->mod_trace, tr->ops, mod, true);
4392 if (!list_empty(&tr->mod_notrace))
4393 process_mod_list(&tr->mod_notrace, tr->ops, mod, false);
4394 }
4395 mutex_unlock(&trace_types_lock);
4396
4397 kfree(mod);
4398}
4399#endif
4400
4401
4402
4403
4404
4405
4406static int
4407ftrace_mod_callback(struct trace_array *tr, struct ftrace_hash *hash,
4408 char *func_orig, char *cmd, char *module, int enable)
4409{
4410 char *func;
4411 int ret;
4412
4413
4414 func = kstrdup(func_orig, GFP_KERNEL);
4415 if (!func)
4416 return -ENOMEM;
4417
4418
4419
4420
4421
4422
4423
4424
4425 ret = match_records(hash, func, strlen(func), module);
4426 kfree(func);
4427
4428 if (!ret)
4429 return cache_mod(tr, func_orig, module, enable);
4430 if (ret < 0)
4431 return ret;
4432 return 0;
4433}
4434
4435static struct ftrace_func_command ftrace_mod_cmd = {
4436 .name = "mod",
4437 .func = ftrace_mod_callback,
4438};
4439
4440static int __init ftrace_mod_cmd_init(void)
4441{
4442 return register_ftrace_command(&ftrace_mod_cmd);
4443}
4444core_initcall(ftrace_mod_cmd_init);
4445
4446static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
4447 struct ftrace_ops *op, struct ftrace_regs *fregs)
4448{
4449 struct ftrace_probe_ops *probe_ops;
4450 struct ftrace_func_probe *probe;
4451
4452 probe = container_of(op, struct ftrace_func_probe, ops);
4453 probe_ops = probe->probe_ops;
4454
4455
4456
4457
4458
4459
4460 preempt_disable_notrace();
4461 probe_ops->func(ip, parent_ip, probe->tr, probe_ops, probe->data);
4462 preempt_enable_notrace();
4463}

struct ftrace_func_map {
	struct ftrace_func_entry	entry;
	void				*data;
};

struct ftrace_func_mapper {
	struct ftrace_hash		hash;
};

/**
 * allocate_ftrace_func_mapper - allocate a new ftrace_func_mapper
 *
 * Returns a ftrace_func_mapper descriptor that can be used to map
 * ips to their data.
 */
struct ftrace_func_mapper *allocate_ftrace_func_mapper(void)
{
	struct ftrace_hash *hash;

	/*
	 * The mapper is simply a ftrace_hash, but since the entries
	 * in the hash are not ftrace_func_entry type, we define it
	 * as a separate structure.
	 */
	hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
	return (struct ftrace_func_mapper *)hash;
}

/**
 * ftrace_func_mapper_find_ip - Find some data mapped to an ip
 * @mapper: The mapper that has the ip maps
 * @ip: the instruction pointer to find the data for
 *
 * Returns the data mapped to @ip if found otherwise NULL. The return
 * is actually the address of the mapper data pointer. The address is
 * returned for use cases where the data is no bigger than a long, and
 * the user can use the data pointer as its data instead of having to
 * allocate more memory for the reference.
 */
void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
				  unsigned long ip)
{
	struct ftrace_func_entry *entry;
	struct ftrace_func_map *map;

	entry = ftrace_lookup_ip(&mapper->hash, ip);
	if (!entry)
		return NULL;

	map = (struct ftrace_func_map *)entry;
	return &map->data;
}

/**
 * ftrace_func_mapper_add_ip - Map some data to an ip
 * @mapper: The mapper that has the ip maps
 * @ip: The instruction pointer address to map @data to
 * @data: The data to map to @ip
 *
 * Returns 0 on success otherwise an error.
 */
int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
			      unsigned long ip, void *data)
{
	struct ftrace_func_entry *entry;
	struct ftrace_func_map *map;

	entry = ftrace_lookup_ip(&mapper->hash, ip);
	if (entry)
		return -EBUSY;

	map = kmalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	map->entry.ip = ip;
	map->data = data;

	__add_hash_entry(&mapper->hash, &map->entry);

	return 0;
}

/**
 * ftrace_func_mapper_remove_ip - Remove an ip from the mapping
 * @mapper: The mapper that has the ip maps
 * @ip: The instruction pointer address to remove the data from
 *
 * Returns the data if it is found, otherwise NULL.
 * Note, if the data pointer is used as the data itself, (see
 * ftrace_func_mapper_find_ip(), then the return value may be meaningless,
 * if the data pointer was set to zero.
 */
void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper,
				   unsigned long ip)
{
	struct ftrace_func_entry *entry;
	struct ftrace_func_map *map;
	void *data;

	entry = ftrace_lookup_ip(&mapper->hash, ip);
	if (!entry)
		return NULL;

	map = (struct ftrace_func_map *)entry;
	data = map->data;

	remove_hash_entry(&mapper->hash, entry);
	kfree(entry);

	return data;
}

/**
 * free_ftrace_func_mapper - free a mapping of ips and data
 * @mapper: The mapper that has the ip maps
 * @free_func: A function to be called on each data item.
 *
 * This is used to free the function mapper. The @free_func is
 * passed the data for each map entry to free its contents.
 */
void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
			     ftrace_mapper_func free_func)
{
	struct ftrace_func_entry *entry;
	struct ftrace_func_map *map;
	struct hlist_head *hhd;
	int size, i;

	if (!mapper)
		return;

	if (free_func && mapper->hash.count) {
		size = 1 << mapper->hash.size_bits;
		for (i = 0; i < size; i++) {
			hhd = &mapper->hash.buckets[i];
			hlist_for_each_entry(entry, hhd, hlist) {
				map = (struct ftrace_func_map *)entry;
				free_func(map);
			}
		}
	}
	free_ftrace_hash(&mapper->hash);
}
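
/*
 * Usage sketch (illustrative only; "my_data" is hypothetical): probe
 * implementations use the mapper to attach per-function state, e.g.:
 *
 *	struct ftrace_func_mapper *mapper = allocate_ftrace_func_mapper();
 *	struct my_data *d = kzalloc(sizeof(*d), GFP_KERNEL);
 *
 *	ftrace_func_mapper_add_ip(mapper, rec_ip, d);
 *	...
 *	void **pd = ftrace_func_mapper_find_ip(mapper, rec_ip);
 *	if (pd)
 *		d = *pd;
 *	...
 *	kfree(ftrace_func_mapper_remove_ip(mapper, rec_ip));
 */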
4608
4609static void release_probe(struct ftrace_func_probe *probe)
4610{
4611 struct ftrace_probe_ops *probe_ops;
4612
4613 mutex_lock(&ftrace_lock);
4614
4615 WARN_ON(probe->ref <= 0);
4616
4617
4618 probe->ref--;
4619
4620 if (!probe->ref) {
4621 probe_ops = probe->probe_ops;
4622
4623
4624
4625
4626 if (probe_ops->free)
4627 probe_ops->free(probe_ops, probe->tr, 0, probe->data);
4628 list_del(&probe->list);
4629 kfree(probe);
4630 }
4631 mutex_unlock(&ftrace_lock);
4632}
4633
4634static void acquire_probe_locked(struct ftrace_func_probe *probe)
4635{
4636
4637
4638
4639
4640 probe->ref++;
4641}
4642
4643int
4644register_ftrace_function_probe(char *glob, struct trace_array *tr,
4645 struct ftrace_probe_ops *probe_ops,
4646 void *data)
4647{
4648 struct ftrace_func_probe *probe = NULL, *iter;
4649 struct ftrace_func_entry *entry;
4650 struct ftrace_hash **orig_hash;
4651 struct ftrace_hash *old_hash;
4652 struct ftrace_hash *hash;
4653 int count = 0;
4654 int size;
4655 int ret;
4656 int i;
4657
4658 if (WARN_ON(!tr))
4659 return -EINVAL;
4660
4661
4662 if (WARN_ON(glob[0] == '!'))
4663 return -EINVAL;
4664
4665
4666 mutex_lock(&ftrace_lock);
4667
4668 list_for_each_entry(iter, &tr->func_probes, list) {
4669 if (iter->probe_ops == probe_ops) {
4670 probe = iter;
4671 break;
4672 }
4673 }
4674 if (!probe) {
4675 probe = kzalloc(sizeof(*probe), GFP_KERNEL);
4676 if (!probe) {
4677 mutex_unlock(&ftrace_lock);
4678 return -ENOMEM;
4679 }
4680 probe->probe_ops = probe_ops;
4681 probe->ops.func = function_trace_probe_call;
4682 probe->tr = tr;
4683 ftrace_ops_init(&probe->ops);
4684 list_add(&probe->list, &tr->func_probes);
4685 }
4686
4687 acquire_probe_locked(probe);
4688
4689 mutex_unlock(&ftrace_lock);
4690
4691
4692
4693
4694
4695 mutex_lock(&probe->ops.func_hash->regex_lock);
4696
4697 orig_hash = &probe->ops.func_hash->filter_hash;
4698 old_hash = *orig_hash;
4699 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
4700
4701 if (!hash) {
4702 ret = -ENOMEM;
4703 goto out;
4704 }
4705
4706 ret = ftrace_match_records(hash, glob, strlen(glob));
4707
4708
4709 if (!ret)
4710 ret = -EINVAL;
4711
4712 if (ret < 0)
4713 goto out;
4714
4715 size = 1 << hash->size_bits;
4716 for (i = 0; i < size; i++) {
4717 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
4718 if (ftrace_lookup_ip(old_hash, entry->ip))
4719 continue;
4720
4721
4722
4723
4724
4725 if (probe_ops->init) {
4726 ret = probe_ops->init(probe_ops, tr,
4727 entry->ip, data,
4728 &probe->data);
4729 if (ret < 0) {
4730 if (probe_ops->free && count)
4731 probe_ops->free(probe_ops, tr,
4732 0, probe->data);
4733 probe->data = NULL;
4734 goto out;
4735 }
4736 }
4737 count++;
4738 }
4739 }
4740
4741 mutex_lock(&ftrace_lock);
4742
4743 if (!count) {
4744
4745 ret = -EINVAL;
4746 goto out_unlock;
4747 }
4748
4749 ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
4750 hash, 1);
4751 if (ret < 0)
4752 goto err_unlock;
4753
4754
4755 probe->ref += count;
4756
4757 if (!(probe->ops.flags & FTRACE_OPS_FL_ENABLED))
4758 ret = ftrace_startup(&probe->ops, 0);
4759
4760 out_unlock:
4761 mutex_unlock(&ftrace_lock);
4762
4763 if (!ret)
4764 ret = count;
4765 out:
4766 mutex_unlock(&probe->ops.func_hash->regex_lock);
4767 free_ftrace_hash(hash);
4768
4769 release_probe(probe);
4770
4771 return ret;
4772
4773 err_unlock:
4774 if (!probe_ops->free || !count)
4775 goto out_unlock;
4776
4777
4778 for (i = 0; i < size; i++) {
4779 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
4780 if (ftrace_lookup_ip(old_hash, entry->ip))
4781 continue;
4782 probe_ops->free(probe_ops, tr, entry->ip, probe->data);
4783 }
4784 }
4785 goto out_unlock;
4786}
4787
4788int
4789unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
4790 struct ftrace_probe_ops *probe_ops)
4791{
4792 struct ftrace_func_probe *probe = NULL, *iter;
4793 struct ftrace_ops_hash old_hash_ops;
4794 struct ftrace_func_entry *entry;
4795 struct ftrace_glob func_g;
4796 struct ftrace_hash **orig_hash;
4797 struct ftrace_hash *old_hash;
4798 struct ftrace_hash *hash = NULL;
4799 struct hlist_node *tmp;
4800 struct hlist_head hhd;
4801 char str[KSYM_SYMBOL_LEN];
4802 int count = 0;
4803 int i, ret = -ENODEV;
4804 int size;
4805
4806 if (!glob || !strlen(glob) || !strcmp(glob, "*"))
4807 func_g.search = NULL;
4808 else {
4809 int not;
4810
4811 func_g.type = filter_parse_regex(glob, strlen(glob),
4812 &func_g.search, ¬);
4813 func_g.len = strlen(func_g.search);
4814
4815
4816 if (WARN_ON(not))
4817 return -EINVAL;
4818 }
4819
4820 mutex_lock(&ftrace_lock);
4821
4822 list_for_each_entry(iter, &tr->func_probes, list) {
4823 if (iter->probe_ops == probe_ops) {
4824 probe = iter;
4825 break;
4826 }
4827 }
4828 if (!probe)
4829 goto err_unlock_ftrace;
4830
4831 ret = -EINVAL;
4832 if (!(probe->ops.flags & FTRACE_OPS_FL_INITIALIZED))
4833 goto err_unlock_ftrace;
4834
4835 acquire_probe_locked(probe);
4836
4837 mutex_unlock(&ftrace_lock);
4838
4839 mutex_lock(&probe->ops.func_hash->regex_lock);
4840
4841 orig_hash = &probe->ops.func_hash->filter_hash;
4842 old_hash = *orig_hash;
4843
4844 if (ftrace_hash_empty(old_hash))
4845 goto out_unlock;
4846
4847 old_hash_ops.filter_hash = old_hash;
4848
4849 old_hash_ops.notrace_hash = NULL;
4850
4851 ret = -ENOMEM;
4852 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
4853 if (!hash)
4854 goto out_unlock;
4855
4856 INIT_HLIST_HEAD(&hhd);
4857
4858 size = 1 << hash->size_bits;
4859 for (i = 0; i < size; i++) {
4860 hlist_for_each_entry_safe(entry, tmp, &hash->buckets[i], hlist) {
4861
4862 if (func_g.search) {
4863 kallsyms_lookup(entry->ip, NULL, NULL,
4864 NULL, str);
4865 if (!ftrace_match(str, &func_g))
4866 continue;
4867 }
4868 count++;
4869 remove_hash_entry(hash, entry);
4870 hlist_add_head(&entry->hlist, &hhd);
4871 }
4872 }
4873
4874
4875 if (!count) {
4876 ret = -EINVAL;
4877 goto out_unlock;
4878 }
4879
4880 mutex_lock(&ftrace_lock);
4881
4882 WARN_ON(probe->ref < count);
4883
4884 probe->ref -= count;
4885
4886 if (ftrace_hash_empty(hash))
4887 ftrace_shutdown(&probe->ops, 0);
4888
4889 ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
4890 hash, 1);
4891
4892
4893 if (ftrace_enabled && !ftrace_hash_empty(hash))
4894 ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS,
4895 &old_hash_ops);
4896 synchronize_rcu();
4897
4898 hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) {
4899 hlist_del(&entry->hlist);
4900 if (probe_ops->free)
4901 probe_ops->free(probe_ops, tr, entry->ip, probe->data);
4902 kfree(entry);
4903 }
4904 mutex_unlock(&ftrace_lock);
4905
4906 out_unlock:
4907 mutex_unlock(&probe->ops.func_hash->regex_lock);
4908 free_ftrace_hash(hash);
4909
4910 release_probe(probe);
4911
4912 return ret;
4913
4914 err_unlock_ftrace:
4915 mutex_unlock(&ftrace_lock);
4916 return ret;
4917}
4918
4919void clear_ftrace_function_probes(struct trace_array *tr)
4920{
4921 struct ftrace_func_probe *probe, *n;
4922
4923 list_for_each_entry_safe(probe, n, &tr->func_probes, list)
4924 unregister_ftrace_function_probe_func(NULL, tr, probe->probe_ops);
4925}
4926
4927static LIST_HEAD(ftrace_commands);
4928static DEFINE_MUTEX(ftrace_cmd_mutex);
4929
4930
4931
4932
4933
4934__init int register_ftrace_command(struct ftrace_func_command *cmd)
4935{
4936 struct ftrace_func_command *p;
4937 int ret = 0;
4938
4939 mutex_lock(&ftrace_cmd_mutex);
4940 list_for_each_entry(p, &ftrace_commands, list) {
4941 if (strcmp(cmd->name, p->name) == 0) {
4942 ret = -EBUSY;
4943 goto out_unlock;
4944 }
4945 }
4946 list_add(&cmd->list, &ftrace_commands);
4947 out_unlock:
4948 mutex_unlock(&ftrace_cmd_mutex);
4949
4950 return ret;
4951}
4952
4953
4954
4955
4956
4957__init int unregister_ftrace_command(struct ftrace_func_command *cmd)
4958{
4959 struct ftrace_func_command *p, *n;
4960 int ret = -ENODEV;
4961
4962 mutex_lock(&ftrace_cmd_mutex);
4963 list_for_each_entry_safe(p, n, &ftrace_commands, list) {
4964 if (strcmp(cmd->name, p->name) == 0) {
4965 ret = 0;
4966 list_del_init(&p->list);
4967 goto out_unlock;
4968 }
4969 }
4970 out_unlock:
4971 mutex_unlock(&ftrace_cmd_mutex);
4972
4973 return ret;
4974}
4975
4976static int ftrace_process_regex(struct ftrace_iterator *iter,
4977 char *buff, int len, int enable)
4978{
4979 struct ftrace_hash *hash = iter->hash;
4980 struct trace_array *tr = iter->ops->private;
4981 char *func, *command, *next = buff;
4982 struct ftrace_func_command *p;
4983 int ret = -EINVAL;
4984
4985 func = strsep(&next, ":");
4986
4987 if (!next) {
4988 ret = ftrace_match_records(hash, func, len);
4989 if (!ret)
4990 ret = -EINVAL;
4991 if (ret < 0)
4992 return ret;
4993 return 0;
4994 }
4995
4996
4997
4998 command = strsep(&next, ":");
4999
5000 mutex_lock(&ftrace_cmd_mutex);
5001 list_for_each_entry(p, &ftrace_commands, list) {
5002 if (strcmp(p->name, command) == 0) {
5003 ret = p->func(tr, hash, func, command, next, enable);
5004 goto out_unlock;
5005 }
5006 }
5007 out_unlock:
5008 mutex_unlock(&ftrace_cmd_mutex);
5009
5010 return ret;
5011}
5012
5013static ssize_t
5014ftrace_regex_write(struct file *file, const char __user *ubuf,
5015 size_t cnt, loff_t *ppos, int enable)
5016{
5017 struct ftrace_iterator *iter;
5018 struct trace_parser *parser;
5019 ssize_t ret, read;
5020
5021 if (!cnt)
5022 return 0;
5023
5024 if (file->f_mode & FMODE_READ) {
5025 struct seq_file *m = file->private_data;
5026 iter = m->private;
5027 } else
5028 iter = file->private_data;
5029
5030 if (unlikely(ftrace_disabled))
5031 return -ENODEV;
5032
5033
5034
5035 parser = &iter->parser;
5036 read = trace_get_user(parser, ubuf, cnt, ppos);
5037
5038 if (read >= 0 && trace_parser_loaded(parser) &&
5039 !trace_parser_cont(parser)) {
5040 ret = ftrace_process_regex(iter, parser->buffer,
5041 parser->idx, enable);
5042 trace_parser_clear(parser);
5043 if (ret < 0)
5044 goto out;
5045 }
5046
5047 ret = read;
5048 out:
5049 return ret;
5050}
5051
5052ssize_t
5053ftrace_filter_write(struct file *file, const char __user *ubuf,
5054 size_t cnt, loff_t *ppos)
5055{
5056 return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
5057}
5058
5059ssize_t
5060ftrace_notrace_write(struct file *file, const char __user *ubuf,
5061 size_t cnt, loff_t *ppos)
5062{
5063 return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
5064}
5065
5066static int
5067__ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
5068{
5069 struct ftrace_func_entry *entry;
5070
5071 ip = ftrace_location(ip);
5072 if (!ip)
5073 return -EINVAL;
5074
5075 if (remove) {
5076 entry = ftrace_lookup_ip(hash, ip);
5077 if (!entry)
5078 return -ENOENT;
5079 free_hash_entry(hash, entry);
5080 return 0;
5081 }
5082
5083 return add_hash_entry(hash, ip);
5084}
5085
5086static int
5087ftrace_match_addr(struct ftrace_hash *hash, unsigned long *ips,
5088 unsigned int cnt, int remove)
5089{
5090 unsigned int i;
5091 int err;
5092
5093 for (i = 0; i < cnt; i++) {
5094 err = __ftrace_match_addr(hash, ips[i], remove);
5095 if (err) {
5096
5097
5098
5099
5100 return err;
5101 }
5102 }
5103 return 0;
5104}
5105
5106static int
5107ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
5108 unsigned long *ips, unsigned int cnt,
5109 int remove, int reset, int enable)
5110{
5111 struct ftrace_hash **orig_hash;
5112 struct ftrace_hash *hash;
5113 int ret;
5114
5115 if (unlikely(ftrace_disabled))
5116 return -ENODEV;
5117
5118 mutex_lock(&ops->func_hash->regex_lock);
5119
5120 if (enable)
5121 orig_hash = &ops->func_hash->filter_hash;
5122 else
5123 orig_hash = &ops->func_hash->notrace_hash;
5124
5125 if (reset)
5126 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
5127 else
5128 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
5129
5130 if (!hash) {
5131 ret = -ENOMEM;
5132 goto out_regex_unlock;
5133 }
5134
5135 if (buf && !ftrace_match_records(hash, buf, len)) {
5136 ret = -EINVAL;
5137 goto out_regex_unlock;
5138 }
5139 if (ips) {
5140 ret = ftrace_match_addr(hash, ips, cnt, remove);
5141 if (ret < 0)
5142 goto out_regex_unlock;
5143 }
5144
5145 mutex_lock(&ftrace_lock);
5146 ret = ftrace_hash_move_and_update_ops(ops, orig_hash, hash, enable);
5147 mutex_unlock(&ftrace_lock);
5148
5149 out_regex_unlock:
5150 mutex_unlock(&ops->func_hash->regex_lock);
5151
5152 free_ftrace_hash(hash);
5153 return ret;
5154}
5155
5156static int
5157ftrace_set_addr(struct ftrace_ops *ops, unsigned long *ips, unsigned int cnt,
5158 int remove, int reset, int enable)
5159{
5160 return ftrace_set_hash(ops, NULL, 0, ips, cnt, remove, reset, enable);
5161}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS

struct ftrace_direct_func {
	struct list_head	next;
	unsigned long		addr;
	int			count;
};

static LIST_HEAD(ftrace_direct_funcs);

/**
 * ftrace_find_direct_func - test an address if it is a registered direct caller
 * @addr: The address of a registered direct caller
 *
 * This searches to see if a ftrace direct caller has been registered
 * at a specific address, and if so, it returns a descriptor for it.
 *
 * This can be used by architecture code to see if an address is
 * a direct caller (trampoline) attached to a fentry/mcount location.
 * This is useful for the function_graph tracer, as it may need to
 * do adjustments if it traced a location that also has a direct
 * trampoline attached to it.
 */
struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr)
{
	struct ftrace_direct_func *entry;
	bool found = false;

	/* May be called by fgraph trampoline (protected by rcu tasks) */
	list_for_each_entry_rcu(entry, &ftrace_direct_funcs, next) {
		if (entry->addr == addr) {
			found = true;
			break;
		}
	}
	if (found)
		return entry;

	return NULL;
}

static struct ftrace_direct_func *ftrace_alloc_direct_func(unsigned long addr)
{
	struct ftrace_direct_func *direct;

	direct = kmalloc(sizeof(*direct), GFP_KERNEL);
	if (!direct)
		return NULL;
	direct->addr = addr;
	direct->count = 0;
	list_add_rcu(&direct->next, &ftrace_direct_funcs);
	ftrace_direct_func_count++;
	return direct;
}

/**
 * register_ftrace_direct - Call a custom trampoline directly
 * @ip: The address of the nop at the beginning of a function
 * @addr: The address of the trampoline to call at @ip
 *
 * This is used to connect a direct call from the nop location (@ip)
 * at the start of ftrace traced functions. The location that it calls
 * (@addr) must be able to handle a direct call, and save the parameters
 * of the function being traced, and restore them (or inject new ones
 * if needed), before returning.
 *
 * Returns:
 *  0 on success
 *  -EBUSY - Another direct function is already attached (there can be only one)
 *  -ENODEV - @ip does not point to a ftrace nop location (or not supported)
 *  -ENOMEM - There was an allocation failure.
 */
int register_ftrace_direct(unsigned long ip, unsigned long addr)
{
	struct ftrace_direct_func *direct;
	struct ftrace_func_entry *entry;
	struct ftrace_hash *free_hash = NULL;
	struct dyn_ftrace *rec;
	int ret = -ENODEV;

	mutex_lock(&direct_mutex);

	ip = ftrace_location(ip);
	if (!ip)
		goto out_unlock;

	/* See if there's a direct function at that location already */
	ret = -EBUSY;
	if (ftrace_find_rec_direct(ip))
		goto out_unlock;

	ret = -ENODEV;
	rec = lookup_rec(ip, ip);
	if (!rec)
		goto out_unlock;

	/*
	 * Check if the rec says it has a direct call but we didn't
	 * actually have one registered for it? This should not happen.
	 */
	if (WARN_ON(rec->flags & FTRACE_FL_DIRECT))
		goto out_unlock;

	/* Make sure the ip points to the exact record */
	if (ip != rec->ip) {
		ip = rec->ip;
		/* Need to check this ip for a direct too */
		if (ftrace_find_rec_direct(ip))
			goto out_unlock;
	}

	ret = -ENOMEM;
	direct = ftrace_find_direct_func(addr);
	if (!direct) {
		direct = ftrace_alloc_direct_func(addr);
		if (!direct)
			goto out_unlock;
	}

	entry = ftrace_add_rec_direct(ip, addr, &free_hash);
	if (!entry)
		goto out_unlock;

	ret = ftrace_set_filter_ip(&direct_ops, ip, 0, 0);

	if (!ret && !(direct_ops.flags & FTRACE_OPS_FL_ENABLED)) {
		ret = register_ftrace_function(&direct_ops);
		if (ret)
			ftrace_set_filter_ip(&direct_ops, ip, 1, 0);
	}

	if (ret) {
		remove_hash_entry(direct_functions, entry);
		kfree(entry);
		if (!direct->count) {
			list_del_rcu(&direct->next);
			synchronize_rcu_tasks();
			kfree(direct);
			if (free_hash)
				free_ftrace_hash(free_hash);
			free_hash = NULL;
			ftrace_direct_func_count--;
		}
	} else {
		direct->count++;
	}
 out_unlock:
	mutex_unlock(&direct_mutex);

	if (free_hash) {
		synchronize_rcu_tasks();
		free_ftrace_hash(free_hash);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(register_ftrace_direct);
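
/*
 * Usage sketch (illustrative; mirrors the in-tree samples under
 * samples/ftrace/, e.g. ftrace-direct.c -- my_tramp here is an
 * arch-specific assembly trampoline that saves and restores the
 * traced function's arguments):
 *
 *	extern void my_tramp(void);
 *
 *	ret = register_ftrace_direct((unsigned long)wake_up_process,
 *				     (unsigned long)my_tramp);
 *	...
 *	unregister_ftrace_direct((unsigned long)wake_up_process,
 *				 (unsigned long)my_tramp);
 */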
5320
5321static struct ftrace_func_entry *find_direct_entry(unsigned long *ip,
5322 struct dyn_ftrace **recp)
5323{
5324 struct ftrace_func_entry *entry;
5325 struct dyn_ftrace *rec;
5326
5327 rec = lookup_rec(*ip, *ip);
5328 if (!rec)
5329 return NULL;
5330
5331 entry = __ftrace_lookup_ip(direct_functions, rec->ip);
5332 if (!entry) {
5333 WARN_ON(rec->flags & FTRACE_FL_DIRECT);
5334 return NULL;
5335 }
5336
5337 WARN_ON(!(rec->flags & FTRACE_FL_DIRECT));
5338
5339
5340 *ip = rec->ip;
5341
5342 if (recp)
5343 *recp = rec;
5344
5345 return entry;
5346}
5347
5348int unregister_ftrace_direct(unsigned long ip, unsigned long addr)
5349{
5350 struct ftrace_direct_func *direct;
5351 struct ftrace_func_entry *entry;
5352 struct ftrace_hash *hash;
5353 int ret = -ENODEV;
5354
5355 mutex_lock(&direct_mutex);
5356
5357 ip = ftrace_location(ip);
5358 if (!ip)
5359 goto out_unlock;
5360
5361 entry = find_direct_entry(&ip, NULL);
5362 if (!entry)
5363 goto out_unlock;
5364
5365 hash = direct_ops.func_hash->filter_hash;
5366 if (hash->count == 1)
5367 unregister_ftrace_function(&direct_ops);
5368
5369 ret = ftrace_set_filter_ip(&direct_ops, ip, 1, 0);
5370
5371 WARN_ON(ret);
5372
5373 remove_hash_entry(direct_functions, entry);
5374
5375 direct = ftrace_find_direct_func(addr);
5376 if (!WARN_ON(!direct)) {
5377
5378 direct->count--;
5379 WARN_ON(direct->count < 0);
5380 if (!direct->count) {
5381 list_del_rcu(&direct->next);
5382 synchronize_rcu_tasks();
5383 kfree(direct);
5384 kfree(entry);
5385 ftrace_direct_func_count--;
5386 }
5387 }
5388 out_unlock:
5389 mutex_unlock(&direct_mutex);
5390
5391 return ret;
5392}
5393EXPORT_SYMBOL_GPL(unregister_ftrace_direct);
5394
5395static struct ftrace_ops stub_ops = {
5396 .func = ftrace_stub,
5397};
5398
5399
5400
5401
5402
5403
5404
5405
5406
5407
5408
5409
5410
5411
5412
5413
5414
5415
5416
5417int __weak ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
5418 struct dyn_ftrace *rec,
5419 unsigned long old_addr,
5420 unsigned long new_addr)
5421{
5422 unsigned long ip = rec->ip;
5423 int ret;
5424
5425
5426
5427
5428
5429
5430
5431
5432
5433
5434
5435 mutex_unlock(&ftrace_lock);
5436
5437
5438
5439
5440
5441
5442
5443 ret = ftrace_set_filter_ip(&stub_ops, ip, 0, 0);
5444 if (ret)
5445 goto out_lock;
5446
5447 ret = register_ftrace_function(&stub_ops);
5448 if (ret) {
5449 ftrace_set_filter_ip(&stub_ops, ip, 1, 0);
5450 goto out_lock;
5451 }
5452
5453 entry->direct = new_addr;
5454
5455
5456
5457
5458
5459 unregister_ftrace_function(&stub_ops);
5460 ftrace_set_filter_ip(&stub_ops, ip, 1, 0);
5461
5462 out_lock:
5463 mutex_lock(&ftrace_lock);
5464
5465 return ret;
5466}
5467
/**
 * modify_ftrace_direct - Modify an existing direct call to call something else
 * @ip: The instruction pointer to modify
 * @old_addr: The address that the current @ip calls directly
 * @new_addr: The address that the @ip should call
 *
 * This modifies an ftrace direct caller at an instruction pointer without
 * having to disable it first. The direct call will switch over to the
 * @new_addr without missing anything.
 *
 * Returns: zero on success. Non zero on error, which includes:
 *  -ENODEV : the @ip given has no direct caller attached
 *  -EINVAL : the @old_addr does not match the current direct caller
 */
5482int modify_ftrace_direct(unsigned long ip,
5483 unsigned long old_addr, unsigned long new_addr)
5484{
5485 struct ftrace_direct_func *direct, *new_direct = NULL;
5486 struct ftrace_func_entry *entry;
5487 struct dyn_ftrace *rec;
5488 int ret = -ENODEV;
5489
5490 mutex_lock(&direct_mutex);
5491
5492 mutex_lock(&ftrace_lock);
5493
5494 ip = ftrace_location(ip);
5495 if (!ip)
5496 goto out_unlock;
5497
5498 entry = find_direct_entry(&ip, &rec);
5499 if (!entry)
5500 goto out_unlock;
5501
5502 ret = -EINVAL;
5503 if (entry->direct != old_addr)
5504 goto out_unlock;
5505
5506 direct = ftrace_find_direct_func(old_addr);
5507 if (WARN_ON(!direct))
5508 goto out_unlock;
5509 if (direct->count > 1) {
5510 ret = -ENOMEM;
5511 new_direct = ftrace_alloc_direct_func(new_addr);
5512 if (!new_direct)
5513 goto out_unlock;
5514 direct->count--;
5515 new_direct->count++;
5516 } else {
5517 direct->addr = new_addr;
5518 }
5519
 /*
  * If there's no other ftrace callback on the rec->ip location,
  * then it can be changed directly by the architecture.
  * If there is another caller, then we just need to change the
  * direct caller helper to point to @new_addr.
  */
5526 if (ftrace_rec_count(rec) == 1) {
5527 ret = ftrace_modify_direct_caller(entry, rec, old_addr, new_addr);
5528 } else {
5529 entry->direct = new_addr;
5530 ret = 0;
5531 }
5532
5533 if (unlikely(ret && new_direct)) {
5534 direct->count++;
5535 list_del_rcu(&new_direct->next);
5536 synchronize_rcu_tasks();
5537 kfree(new_direct);
5538 ftrace_direct_func_count--;
5539 }
5540
5541 out_unlock:
5542 mutex_unlock(&ftrace_lock);
5543 mutex_unlock(&direct_mutex);
5544 return ret;
5545}
5546EXPORT_SYMBOL_GPL(modify_ftrace_direct);
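/*
 * Example (illustrative sketch): live-switching a direct trampoline
 * without losing events, as done in samples/ftrace/ftrace-direct-modify.c.
 * "my_tramp1"/"my_tramp2" are assumed assembly trampolines and "my_ip" a
 * previously registered call site:
 *
 *	register_ftrace_direct(my_ip, (unsigned long)my_tramp1);
 *	...
 *	modify_ftrace_direct(my_ip, (unsigned long)my_tramp1,
 *			     (unsigned long)my_tramp2);
 */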
5547
5548#define MULTI_FLAGS (FTRACE_OPS_FL_IPMODIFY | FTRACE_OPS_FL_DIRECT | \
5549 FTRACE_OPS_FL_SAVE_REGS)
5550
5551static int check_direct_multi(struct ftrace_ops *ops)
5552{
5553 if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED))
5554 return -EINVAL;
5555 if ((ops->flags & MULTI_FLAGS) != MULTI_FLAGS)
5556 return -EINVAL;
5557 return 0;
5558}
5559
5560static void remove_direct_functions_hash(struct ftrace_hash *hash, unsigned long addr)
5561{
5562 struct ftrace_func_entry *entry, *del;
5563 int size, i;
5564
5565 size = 1 << hash->size_bits;
5566 for (i = 0; i < size; i++) {
5567 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
5568 del = __ftrace_lookup_ip(direct_functions, entry->ip);
5569 if (del && del->direct == addr) {
5570 remove_hash_entry(direct_functions, del);
5571 kfree(del);
5572 }
5573 }
5574 }
5575}
5576
/**
 * register_ftrace_direct_multi - Call a custom trampoline directly
 * for multiple functions registered in @ops
 * @ops: The address of the struct ftrace_ops object
 * @addr: The address of the trampoline to call at @ops functions
 *
 * This is used to connect a direct call to @addr from the nop locations
 * of the functions registered in @ops (set up with the
 * ftrace_set_filter_ip() function).
 *
 * The location that it calls (@addr) must be able to handle a direct call,
 * must save the parameters of the function being traced, and must restore
 * them (or inject new ones if needed) before returning.
 *
 * Returns:
 *  0 on success
 *  -EINVAL  - The @ops object was already registered with this call or
 *             there are no functions in the @ops object.
 *  -EBUSY   - Another direct function is already attached (there can be only one)
 *  -ENODEV  - @ip does not point to an ftrace nop location (or not supported)
 *  -ENOMEM  - There was an allocation failure.
 */
5599int register_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
5600{
5601 struct ftrace_hash *hash, *free_hash = NULL;
5602 struct ftrace_func_entry *entry, *new;
5603 int err = -EBUSY, size, i;
5604
5605 if (ops->func || ops->trampoline)
5606 return -EINVAL;
5607 if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED))
5608 return -EINVAL;
5609 if (ops->flags & FTRACE_OPS_FL_ENABLED)
5610 return -EINVAL;
5611
5612 hash = ops->func_hash->filter_hash;
5613 if (ftrace_hash_empty(hash))
5614 return -EINVAL;
5615
5616 mutex_lock(&direct_mutex);
5617
 /* Make sure requested entries are not already registered.. */
5619 size = 1 << hash->size_bits;
5620 for (i = 0; i < size; i++) {
5621 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
5622 if (ftrace_find_rec_direct(entry->ip))
5623 goto out_unlock;
5624 }
5625 }
5626
 /* ... and insert them to direct_functions hash. */
5628 err = -ENOMEM;
5629 for (i = 0; i < size; i++) {
5630 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
5631 new = ftrace_add_rec_direct(entry->ip, addr, &free_hash);
5632 if (!new)
5633 goto out_remove;
5634 entry->direct = addr;
5635 }
5636 }
5637
5638 ops->func = call_direct_funcs;
5639 ops->flags = MULTI_FLAGS;
5640 ops->trampoline = FTRACE_REGS_ADDR;
5641
5642 err = register_ftrace_function(ops);
5643
5644 out_remove:
5645 if (err)
5646 remove_direct_functions_hash(hash, addr);
5647
5648 out_unlock:
5649 mutex_unlock(&direct_mutex);
5650
5651 if (free_hash) {
5652 synchronize_rcu_tasks();
5653 free_ftrace_hash(free_hash);
5654 }
5655 return err;
5656}
5657EXPORT_SYMBOL_GPL(register_ftrace_direct_multi);
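/*
 * Example (illustrative sketch, after samples/ftrace/ftrace-direct-multi.c):
 * one ftrace_ops can direct several functions to the same trampoline. The
 * ops must not have a ->func; ftrace_set_filter_ip() initializes it and
 * fills the filter hash that is walked above. "my_tramp" is an assumed
 * assembly trampoline:
 *
 *	static struct ftrace_ops direct;
 *
 *	ftrace_set_filter_ip(&direct, (unsigned long)wake_up_process, 0, 0);
 *	ret = register_ftrace_direct_multi(&direct, (unsigned long)my_tramp);
 */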
5658
/**
 * unregister_ftrace_direct_multi - Remove calls to a custom trampoline
 * previously registered by register_ftrace_direct_multi for the @ops object.
 * @ops: The address of the struct ftrace_ops object
 * @addr: The address of the direct function that is called by the @ops functions
 *
 * This is used to remove a direct call to @addr from the nop locations
 * of the functions registered in @ops (set up with the
 * ftrace_set_filter_ip() function).
 *
 * Returns:
 *  0 on success
 *  -EINVAL - The @ops object was not properly registered.
 */
5672int unregister_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
5673{
5674 struct ftrace_hash *hash = ops->func_hash->filter_hash;
5675 int err;
5676
5677 if (check_direct_multi(ops))
5678 return -EINVAL;
5679 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
5680 return -EINVAL;
5681
5682 mutex_lock(&direct_mutex);
5683 err = unregister_ftrace_function(ops);
5684 remove_direct_functions_hash(hash, addr);
5685 mutex_unlock(&direct_mutex);
5686
 /* cleanup for possible another register call */
5688 ops->func = NULL;
5689 ops->trampoline = 0;
5690 return err;
5691}
5692EXPORT_SYMBOL_GPL(unregister_ftrace_direct_multi);
5693
/**
 * modify_ftrace_direct_multi - Modify an existing direct 'multi' call
 * to call something else
 * @ops: The address of the struct ftrace_ops object
 * @addr: The address of the new trampoline to call at @ops functions
 *
 * This is used to unregister the currently registered direct caller and
 * register a new one (@addr) on the functions registered in the @ops object.
 *
 * Note there's a window between the ftrace_shutdown and ftrace_startup calls
 * where there will be no callbacks called.
 *
 * Returns: zero on success. Non zero on error, which includes:
 *  -EINVAL - The @ops object was not properly registered.
 */
5709int modify_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
5710{
5711 struct ftrace_hash *hash;
5712 struct ftrace_func_entry *entry, *iter;
5713 static struct ftrace_ops tmp_ops = {
5714 .func = ftrace_stub,
5715 .flags = FTRACE_OPS_FL_STUB,
5716 };
5717 int i, size;
5718 int err;
5719
5720 if (check_direct_multi(ops))
5721 return -EINVAL;
5722 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
5723 return -EINVAL;
5724
5725 mutex_lock(&direct_mutex);
5726
 /* Enable the tmp_ops to have the same functions as the direct ops */
5728 ftrace_ops_init(&tmp_ops);
5729 tmp_ops.func_hash = ops->func_hash;
5730
5731 err = register_ftrace_function(&tmp_ops);
5732 if (err)
5733 goto out_direct;
5734
 /*
  * Now the ftrace_ops_list_func() is called to do the direct callers.
  * We can safely change the direct functions attached to each entry.
  */
5739 mutex_lock(&ftrace_lock);
5740
5741 hash = ops->func_hash->filter_hash;
5742 size = 1 << hash->size_bits;
5743 for (i = 0; i < size; i++) {
5744 hlist_for_each_entry(iter, &hash->buckets[i], hlist) {
5745 entry = __ftrace_lookup_ip(direct_functions, iter->ip);
5746 if (!entry)
5747 continue;
5748 entry->direct = addr;
5749 }
5750 }
5751
5752 mutex_unlock(&ftrace_lock);
5753
 /* Removing the tmp_ops will add the updated direct callers to the functions */
5755 unregister_ftrace_function(&tmp_ops);
5756
5757 out_direct:
5758 mutex_unlock(&direct_mutex);
5759 return err;
5760}
5761EXPORT_SYMBOL_GPL(modify_ftrace_direct_multi);
5762#endif
5763
/**
 * ftrace_set_filter_ip - set a function to filter on in ftrace by address
 * @ops: the ops to set the filter with
 * @ip: the address to add to or remove from the filter.
 * @remove: non zero to remove the ip from the filter
 * @reset: non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @ip is NULL, it fails to update the filter.
 */
5774int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
5775 int remove, int reset)
5776{
5777 ftrace_ops_init(ops);
5778 return ftrace_set_addr(ops, &ip, 1, remove, reset, 1);
5779}
5780EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
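/*
 * Example (illustrative sketch): filter one function by address rather than
 * by name, e.g. an address obtained from kallsyms or a kprobe. "my_ops" is
 * an assumed caller-owned ftrace_ops, not kernel API:
 *
 *	ret = ftrace_set_filter_ip(&my_ops, (unsigned long)schedule, 0, 0);
 */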
5781
/**
 * ftrace_set_filter_ips - set functions to filter on in ftrace by addresses
 * @ops: the ops to set the filter with
 * @ips: the array of addresses to add to or remove from the filter.
 * @cnt: the number of addresses in @ips
 * @remove: non zero to remove ips from the filter
 * @reset: non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If the @ips array or any ip specified within is NULL, it fails to update
 * the filter.
 */
5793int ftrace_set_filter_ips(struct ftrace_ops *ops, unsigned long *ips,
5794 unsigned int cnt, int remove, int reset)
5795{
5796 ftrace_ops_init(ops);
5797 return ftrace_set_addr(ops, ips, cnt, remove, reset, 1);
5798}
5799EXPORT_SYMBOL_GPL(ftrace_set_filter_ips);
5800
/**
 * ftrace_ops_set_global_filter - setup ops to use global filters
 * @ops: the ops which will use the global filters
 *
 * ftrace users who need global function trace filtering should call this.
 * It can set the global filter only if ops were not initialized before.
 */
5808void ftrace_ops_set_global_filter(struct ftrace_ops *ops)
5809{
5810 if (ops->flags & FTRACE_OPS_FL_INITIALIZED)
5811 return;
5812
5813 ftrace_ops_init(ops);
5814 ops->func_hash = &global_ops.local_hash;
5815}
5816EXPORT_SYMBOL_GPL(ftrace_ops_set_global_filter);
5817
5818static int
5819ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
5820 int reset, int enable)
5821{
5822 return ftrace_set_hash(ops, buf, len, NULL, 0, 0, reset, enable);
5823}
5824
/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @ops: the ops to set the filter with
 * @buf: the string that holds the function filter text.
 * @len: the length of the string.
 * @reset: non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
5835int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
5836 int len, int reset)
5837{
5838 ftrace_ops_init(ops);
5839 return ftrace_set_regex(ops, buf, len, reset, 1);
5840}
5841EXPORT_SYMBOL_GPL(ftrace_set_filter);
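/*
 * Example (illustrative sketch): globs are accepted, so a caller-owned
 * "my_ops" (an assumed name) can be limited to, say, the kmem_cache
 * allocators before it is registered:
 *
 *	char buf[] = "kmem_cache_*";
 *
 *	ret = ftrace_set_filter(&my_ops, buf, strlen(buf), 1);
 */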
5842
/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @ops: the ops to set the notrace filter with
 * @buf: the string that holds the function notrace text.
 * @len: the length of the string.
 * @reset: non zero to reset all filters before applying this filter.
 *
 * Notrace filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
5854int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
5855 int len, int reset)
5856{
5857 ftrace_ops_init(ops);
5858 return ftrace_set_regex(ops, buf, len, reset, 0);
5859}
5860EXPORT_SYMBOL_GPL(ftrace_set_notrace);
5861
/**
 * ftrace_set_global_filter - set a function to filter on with global tracers
 * @buf: the string that holds the function filter text.
 * @len: the length of the string.
 * @reset: non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
5870void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
5871{
5872 ftrace_set_regex(&global_ops, buf, len, reset, 1);
5873}
5874EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
5875
/**
 * ftrace_set_global_notrace - set a function to not trace with global tracers
 * @buf: the string that holds the function notrace text.
 * @len: the length of the string.
 * @reset: non zero to reset all filters before applying this filter.
 *
 * Notrace filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
5886void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
5887{
5888 ftrace_set_regex(&global_ops, buf, len, reset, 0);
5889}
5890EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
5891
/*
 * command line interface to allow users to set filters on boot up.
 */
5895#define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE
5896static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
5897static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
5898
/* Used by function selftest to not test if filter is set */
5900bool ftrace_filter_param __initdata;
5901
5902static int __init set_ftrace_notrace(char *str)
5903{
5904 ftrace_filter_param = true;
5905 strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
5906 return 1;
5907}
5908__setup("ftrace_notrace=", set_ftrace_notrace);
5909
5910static int __init set_ftrace_filter(char *str)
5911{
5912 ftrace_filter_param = true;
5913 strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
5914 return 1;
5915}
5916__setup("ftrace_filter=", set_ftrace_filter);
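/*
 * Example (illustrative): the parameters parsed above are given on the
 * kernel command line, with comma-separated globs:
 *
 *	ftrace_filter=kmem_cache_*,kfree ftrace_notrace=rcu_*
 */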
5917
5918#ifdef CONFIG_FUNCTION_GRAPH_TRACER
5919static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
5920static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
5921static int ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer);
5922
5923static int __init set_graph_function(char *str)
5924{
5925 strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
5926 return 1;
5927}
5928__setup("ftrace_graph_filter=", set_graph_function);
5929
5930static int __init set_graph_notrace_function(char *str)
5931{
5932 strlcpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE);
5933 return 1;
5934}
5935__setup("ftrace_graph_notrace=", set_graph_notrace_function);
5936
5937static int __init set_graph_max_depth_function(char *str)
5938{
5939 if (!str)
5940 return 0;
5941 fgraph_max_depth = simple_strtoul(str, NULL, 0);
5942 return 1;
5943}
5944__setup("ftrace_graph_max_depth=", set_graph_max_depth_function);
5945
5946static void __init set_ftrace_early_graph(char *buf, int enable)
5947{
5948 int ret;
5949 char *func;
5950 struct ftrace_hash *hash;
5951
5952 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
5953 if (MEM_FAIL(!hash, "Failed to allocate hash\n"))
5954 return;
5955
5956 while (buf) {
5957 func = strsep(&buf, ",");
5958
5959 ret = ftrace_graph_set_hash(hash, func);
5960 if (ret)
 printk(KERN_DEBUG "ftrace: function %s not traceable\n", func);
5963 }
5964
5965 if (enable)
5966 ftrace_graph_hash = hash;
5967 else
5968 ftrace_graph_notrace_hash = hash;
5969}
5970#endif
5971
5972void __init
5973ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
5974{
5975 char *func;
5976
5977 ftrace_ops_init(ops);
5978
5979 while (buf) {
5980 func = strsep(&buf, ",");
5981 ftrace_set_regex(ops, func, strlen(func), 0, enable);
5982 }
5983}
5984
5985static void __init set_ftrace_early_filters(void)
5986{
5987 if (ftrace_filter_buf[0])
5988 ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
5989 if (ftrace_notrace_buf[0])
5990 ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
5991#ifdef CONFIG_FUNCTION_GRAPH_TRACER
5992 if (ftrace_graph_buf[0])
5993 set_ftrace_early_graph(ftrace_graph_buf, 1);
5994 if (ftrace_graph_notrace_buf[0])
5995 set_ftrace_early_graph(ftrace_graph_notrace_buf, 0);
5996#endif
5997}
5998
5999int ftrace_regex_release(struct inode *inode, struct file *file)
6000{
6001 struct seq_file *m = (struct seq_file *)file->private_data;
6002 struct ftrace_iterator *iter;
6003 struct ftrace_hash **orig_hash;
6004 struct trace_parser *parser;
6005 int filter_hash;
6006
6007 if (file->f_mode & FMODE_READ) {
6008 iter = m->private;
6009 seq_release(inode, file);
6010 } else
6011 iter = file->private_data;
6012
6013 parser = &iter->parser;
6014 if (trace_parser_loaded(parser)) {
6015 int enable = !(iter->flags & FTRACE_ITER_NOTRACE);
6016
6017 ftrace_process_regex(iter, parser->buffer,
6018 parser->idx, enable);
6019 }
6020
6021 trace_parser_put(parser);
6022
6023 mutex_lock(&iter->ops->func_hash->regex_lock);
6024
6025 if (file->f_mode & FMODE_WRITE) {
6026 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
6027
6028 if (filter_hash) {
6029 orig_hash = &iter->ops->func_hash->filter_hash;
6030 if (iter->tr && !list_empty(&iter->tr->mod_trace))
6031 iter->hash->flags |= FTRACE_HASH_FL_MOD;
6032 } else
6033 orig_hash = &iter->ops->func_hash->notrace_hash;
6034
6035 mutex_lock(&ftrace_lock);
6036 ftrace_hash_move_and_update_ops(iter->ops, orig_hash,
6037 iter->hash, filter_hash);
6038 mutex_unlock(&ftrace_lock);
6039 } else {
 /* For read only, the hash is the ops filter */
6041 iter->hash = NULL;
6042 }
6043
6044 mutex_unlock(&iter->ops->func_hash->regex_lock);
6045 free_ftrace_hash(iter->hash);
6046 if (iter->tr)
6047 trace_array_put(iter->tr);
6048 kfree(iter);
6049
6050 return 0;
6051}
6052
6053static const struct file_operations ftrace_avail_fops = {
6054 .open = ftrace_avail_open,
6055 .read = seq_read,
6056 .llseek = seq_lseek,
6057 .release = seq_release_private,
6058};
6059
6060static const struct file_operations ftrace_enabled_fops = {
6061 .open = ftrace_enabled_open,
6062 .read = seq_read,
6063 .llseek = seq_lseek,
6064 .release = seq_release_private,
6065};
6066
6067static const struct file_operations ftrace_filter_fops = {
6068 .open = ftrace_filter_open,
6069 .read = seq_read,
6070 .write = ftrace_filter_write,
6071 .llseek = tracing_lseek,
6072 .release = ftrace_regex_release,
6073};
6074
6075static const struct file_operations ftrace_notrace_fops = {
6076 .open = ftrace_notrace_open,
6077 .read = seq_read,
6078 .write = ftrace_notrace_write,
6079 .llseek = tracing_lseek,
6080 .release = ftrace_regex_release,
6081};
6082
6083#ifdef CONFIG_FUNCTION_GRAPH_TRACER
6084
6085static DEFINE_MUTEX(graph_lock);
6086
6087struct ftrace_hash __rcu *ftrace_graph_hash = EMPTY_HASH;
6088struct ftrace_hash __rcu *ftrace_graph_notrace_hash = EMPTY_HASH;
6089
6090enum graph_filter_type {
6091 GRAPH_FILTER_NOTRACE = 0,
6092 GRAPH_FILTER_FUNCTION,
6093};
6094
6095#define FTRACE_GRAPH_EMPTY ((void *)1)
6096
6097struct ftrace_graph_data {
6098 struct ftrace_hash *hash;
6099 struct ftrace_func_entry *entry;
6100 int idx;
6101 enum graph_filter_type type;
6102 struct ftrace_hash *new_hash;
6103 const struct seq_operations *seq_ops;
6104 struct trace_parser parser;
6105};
6106
6107static void *
6108__g_next(struct seq_file *m, loff_t *pos)
6109{
6110 struct ftrace_graph_data *fgd = m->private;
6111 struct ftrace_func_entry *entry = fgd->entry;
6112 struct hlist_head *head;
6113 int i, idx = fgd->idx;
6114
6115 if (*pos >= fgd->hash->count)
6116 return NULL;
6117
6118 if (entry) {
6119 hlist_for_each_entry_continue(entry, hlist) {
6120 fgd->entry = entry;
6121 return entry;
6122 }
6123
6124 idx++;
6125 }
6126
6127 for (i = idx; i < 1 << fgd->hash->size_bits; i++) {
6128 head = &fgd->hash->buckets[i];
6129 hlist_for_each_entry(entry, head, hlist) {
6130 fgd->entry = entry;
6131 fgd->idx = i;
6132 return entry;
6133 }
6134 }
6135 return NULL;
6136}
6137
6138static void *
6139g_next(struct seq_file *m, void *v, loff_t *pos)
6140{
6141 (*pos)++;
6142 return __g_next(m, pos);
6143}
6144
6145static void *g_start(struct seq_file *m, loff_t *pos)
6146{
6147 struct ftrace_graph_data *fgd = m->private;
6148
6149 mutex_lock(&graph_lock);
6150
6151 if (fgd->type == GRAPH_FILTER_FUNCTION)
6152 fgd->hash = rcu_dereference_protected(ftrace_graph_hash,
6153 lockdep_is_held(&graph_lock));
6154 else
6155 fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
6156 lockdep_is_held(&graph_lock));
6157
 /* Nothing, tell g_show to print all functions are enabled */
6159 if (ftrace_hash_empty(fgd->hash) && !*pos)
6160 return FTRACE_GRAPH_EMPTY;
6161
6162 fgd->idx = 0;
6163 fgd->entry = NULL;
6164 return __g_next(m, pos);
6165}
6166
6167static void g_stop(struct seq_file *m, void *p)
6168{
6169 mutex_unlock(&graph_lock);
6170}
6171
6172static int g_show(struct seq_file *m, void *v)
6173{
6174 struct ftrace_func_entry *entry = v;
6175
6176 if (!entry)
6177 return 0;
6178
6179 if (entry == FTRACE_GRAPH_EMPTY) {
6180 struct ftrace_graph_data *fgd = m->private;
6181
6182 if (fgd->type == GRAPH_FILTER_FUNCTION)
6183 seq_puts(m, "#### all functions enabled ####\n");
6184 else
6185 seq_puts(m, "#### no functions disabled ####\n");
6186 return 0;
6187 }
6188
6189 seq_printf(m, "%ps\n", (void *)entry->ip);
6190
6191 return 0;
6192}
6193
6194static const struct seq_operations ftrace_graph_seq_ops = {
6195 .start = g_start,
6196 .next = g_next,
6197 .stop = g_stop,
6198 .show = g_show,
6199};
6200
6201static int
6202__ftrace_graph_open(struct inode *inode, struct file *file,
6203 struct ftrace_graph_data *fgd)
6204{
6205 int ret;
6206 struct ftrace_hash *new_hash = NULL;
6207
6208 ret = security_locked_down(LOCKDOWN_TRACEFS);
6209 if (ret)
6210 return ret;
6211
6212 if (file->f_mode & FMODE_WRITE) {
6213 const int size_bits = FTRACE_HASH_DEFAULT_BITS;
6214
6215 if (trace_parser_get_init(&fgd->parser, FTRACE_BUFF_MAX))
6216 return -ENOMEM;
6217
6218 if (file->f_flags & O_TRUNC)
6219 new_hash = alloc_ftrace_hash(size_bits);
6220 else
6221 new_hash = alloc_and_copy_ftrace_hash(size_bits,
6222 fgd->hash);
6223 if (!new_hash) {
6224 ret = -ENOMEM;
6225 goto out;
6226 }
6227 }
6228
6229 if (file->f_mode & FMODE_READ) {
6230 ret = seq_open(file, &ftrace_graph_seq_ops);
6231 if (!ret) {
6232 struct seq_file *m = file->private_data;
6233 m->private = fgd;
6234 } else {
 /* Failed */
6236 free_ftrace_hash(new_hash);
6237 new_hash = NULL;
6238 }
6239 } else
6240 file->private_data = fgd;
6241
6242out:
6243 if (ret < 0 && file->f_mode & FMODE_WRITE)
6244 trace_parser_put(&fgd->parser);
6245
6246 fgd->new_hash = new_hash;
6247
 /*
  * All uses of fgd->hash must be taken with the graph_lock
  * held. The graph_lock is going to be released, so force
  * fgd->hash to be reinitialized when it is taken again.
  */
6253 fgd->hash = NULL;
6254
6255 return ret;
6256}
6257
6258static int
6259ftrace_graph_open(struct inode *inode, struct file *file)
6260{
6261 struct ftrace_graph_data *fgd;
6262 int ret;
6263
6264 if (unlikely(ftrace_disabled))
6265 return -ENODEV;
6266
6267 fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
6268 if (fgd == NULL)
6269 return -ENOMEM;
6270
6271 mutex_lock(&graph_lock);
6272
6273 fgd->hash = rcu_dereference_protected(ftrace_graph_hash,
6274 lockdep_is_held(&graph_lock));
6275 fgd->type = GRAPH_FILTER_FUNCTION;
6276 fgd->seq_ops = &ftrace_graph_seq_ops;
6277
6278 ret = __ftrace_graph_open(inode, file, fgd);
6279 if (ret < 0)
6280 kfree(fgd);
6281
6282 mutex_unlock(&graph_lock);
6283 return ret;
6284}
6285
6286static int
6287ftrace_graph_notrace_open(struct inode *inode, struct file *file)
6288{
6289 struct ftrace_graph_data *fgd;
6290 int ret;
6291
6292 if (unlikely(ftrace_disabled))
6293 return -ENODEV;
6294
6295 fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
6296 if (fgd == NULL)
6297 return -ENOMEM;
6298
6299 mutex_lock(&graph_lock);
6300
6301 fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
6302 lockdep_is_held(&graph_lock));
6303 fgd->type = GRAPH_FILTER_NOTRACE;
6304 fgd->seq_ops = &ftrace_graph_seq_ops;
6305
6306 ret = __ftrace_graph_open(inode, file, fgd);
6307 if (ret < 0)
6308 kfree(fgd);
6309
6310 mutex_unlock(&graph_lock);
6311 return ret;
6312}
6313
6314static int
6315ftrace_graph_release(struct inode *inode, struct file *file)
6316{
6317 struct ftrace_graph_data *fgd;
6318 struct ftrace_hash *old_hash, *new_hash;
6319 struct trace_parser *parser;
6320 int ret = 0;
6321
6322 if (file->f_mode & FMODE_READ) {
6323 struct seq_file *m = file->private_data;
6324
6325 fgd = m->private;
6326 seq_release(inode, file);
6327 } else {
6328 fgd = file->private_data;
6329 }
6330
6331
6332 if (file->f_mode & FMODE_WRITE) {
6333
6334 parser = &fgd->parser;
6335
 if (trace_parser_loaded(parser)) {
6337 ret = ftrace_graph_set_hash(fgd->new_hash,
6338 parser->buffer);
6339 }
6340
6341 trace_parser_put(parser);
6342
6343 new_hash = __ftrace_hash_move(fgd->new_hash);
6344 if (!new_hash) {
6345 ret = -ENOMEM;
6346 goto out;
6347 }
6348
6349 mutex_lock(&graph_lock);
6350
6351 if (fgd->type == GRAPH_FILTER_FUNCTION) {
6352 old_hash = rcu_dereference_protected(ftrace_graph_hash,
6353 lockdep_is_held(&graph_lock));
6354 rcu_assign_pointer(ftrace_graph_hash, new_hash);
6355 } else {
6356 old_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
6357 lockdep_is_held(&graph_lock));
6358 rcu_assign_pointer(ftrace_graph_notrace_hash, new_hash);
6359 }
6360
6361 mutex_unlock(&graph_lock);
6362
 /*
  * We need to do a hard force of sched synchronization.
  * This is because we use preempt_disable() to do RCU, but
  * the function tracers can be called where RCU is not watching
  * (like before user_exit()). We can not rely on the RCU
  * infrastructure to do the synchronization, thus we must do it
  * ourselves.
  */
6371 if (old_hash != EMPTY_HASH)
6372 synchronize_rcu_tasks_rude();
6373
6374 free_ftrace_hash(old_hash);
6375 }
6376
6377 out:
6378 free_ftrace_hash(fgd->new_hash);
6379 kfree(fgd);
6380
6381 return ret;
6382}
6383
6384static int
6385ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer)
6386{
6387 struct ftrace_glob func_g;
6388 struct dyn_ftrace *rec;
6389 struct ftrace_page *pg;
6390 struct ftrace_func_entry *entry;
6391 int fail = 1;
6392 int not;
6393
 /* decode regex */
6395 func_g.type = filter_parse_regex(buffer, strlen(buffer),
 &func_g.search, &not);
6397
6398 func_g.len = strlen(func_g.search);
6399
6400 mutex_lock(&ftrace_lock);
6401
6402 if (unlikely(ftrace_disabled)) {
6403 mutex_unlock(&ftrace_lock);
6404 return -ENODEV;
6405 }
6406
6407 do_for_each_ftrace_rec(pg, rec) {
6408
6409 if (rec->flags & FTRACE_FL_DISABLED)
6410 continue;
6411
6412 if (ftrace_match_record(rec, &func_g, NULL, 0)) {
6413 entry = ftrace_lookup_ip(hash, rec->ip);
6414
6415 if (!not) {
6416 fail = 0;
6417
6418 if (entry)
6419 continue;
6420 if (add_hash_entry(hash, rec->ip) < 0)
6421 goto out;
6422 } else {
6423 if (entry) {
6424 free_hash_entry(hash, entry);
6425 fail = 0;
6426 }
6427 }
6428 }
6429 } while_for_each_ftrace_rec();
6430out:
6431 mutex_unlock(&ftrace_lock);
6432
6433 if (fail)
6434 return -EINVAL;
6435
6436 return 0;
6437}
6438
6439static ssize_t
6440ftrace_graph_write(struct file *file, const char __user *ubuf,
6441 size_t cnt, loff_t *ppos)
6442{
6443 ssize_t read, ret = 0;
6444 struct ftrace_graph_data *fgd = file->private_data;
6445 struct trace_parser *parser;
6446
6447 if (!cnt)
6448 return 0;
6449
 /* Read mode uses seq functions */
6451 if (file->f_mode & FMODE_READ) {
6452 struct seq_file *m = file->private_data;
6453 fgd = m->private;
6454 }
6455
6456 parser = &fgd->parser;
6457
6458 read = trace_get_user(parser, ubuf, cnt, ppos);
6459
6460 if (read >= 0 && trace_parser_loaded(parser) &&
6461 !trace_parser_cont(parser)) {
6462
6463 ret = ftrace_graph_set_hash(fgd->new_hash,
6464 parser->buffer);
6465 trace_parser_clear(parser);
6466 }
6467
6468 if (!ret)
6469 ret = read;
6470
6471 return ret;
6472}
6473
6474static const struct file_operations ftrace_graph_fops = {
6475 .open = ftrace_graph_open,
6476 .read = seq_read,
6477 .write = ftrace_graph_write,
6478 .llseek = tracing_lseek,
6479 .release = ftrace_graph_release,
6480};
6481
6482static const struct file_operations ftrace_graph_notrace_fops = {
6483 .open = ftrace_graph_notrace_open,
6484 .read = seq_read,
6485 .write = ftrace_graph_write,
6486 .llseek = tracing_lseek,
6487 .release = ftrace_graph_release,
6488};
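/*
 * Example (illustrative): from user space these fops show up as the
 * set_graph_function and set_graph_notrace files (created below in
 * ftrace_init_dyn_tracefs()):
 *
 *	# echo do_sys_open > /sys/kernel/tracing/set_graph_function
 *	# echo function_graph > /sys/kernel/tracing/current_tracer
 */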
6489#endif
6490
6491void ftrace_create_filter_files(struct ftrace_ops *ops,
6492 struct dentry *parent)
6493{
6494
6495 trace_create_file("set_ftrace_filter", TRACE_MODE_WRITE, parent,
6496 ops, &ftrace_filter_fops);
6497
6498 trace_create_file("set_ftrace_notrace", TRACE_MODE_WRITE, parent,
6499 ops, &ftrace_notrace_fops);
6500}
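/*
 * Example (illustrative): for the global ops these files appear at the top
 * of tracefs and accept the same glob syntax as ftrace_set_filter():
 *
 *	# echo kmem_cache_alloc > /sys/kernel/tracing/set_ftrace_filter
 *	# echo '*rcu*' > /sys/kernel/tracing/set_ftrace_notrace
 */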
6501
/*
 * The name "destroy_filter_files" is really a misnomer. Although
 * it may actually delete the files in the future, currently it is
 * really intended to make sure the ops passed in are disabled
 * and that when this function returns, the caller is free to
 * free the ops.
 *
 * The "destroy" name is only to match the "create" name that this
 * should be paired with.
 */
6512void ftrace_destroy_filter_files(struct ftrace_ops *ops)
6513{
6514 mutex_lock(&ftrace_lock);
6515 if (ops->flags & FTRACE_OPS_FL_ENABLED)
6516 ftrace_shutdown(ops, 0);
6517 ops->flags |= FTRACE_OPS_FL_DELETED;
6518 ftrace_free_filter(ops);
6519 mutex_unlock(&ftrace_lock);
6520}
6521
6522static __init int ftrace_init_dyn_tracefs(struct dentry *d_tracer)
6523{
6524
6525 trace_create_file("available_filter_functions", TRACE_MODE_READ,
6526 d_tracer, NULL, &ftrace_avail_fops);
6527
6528 trace_create_file("enabled_functions", TRACE_MODE_READ,
6529 d_tracer, NULL, &ftrace_enabled_fops);
6530
6531 ftrace_create_filter_files(&global_ops, d_tracer);
6532
6533#ifdef CONFIG_FUNCTION_GRAPH_TRACER
6534 trace_create_file("set_graph_function", TRACE_MODE_WRITE, d_tracer,
6535 NULL,
6536 &ftrace_graph_fops);
6537 trace_create_file("set_graph_notrace", TRACE_MODE_WRITE, d_tracer,
6538 NULL,
6539 &ftrace_graph_notrace_fops);
6540#endif
6541
6542 return 0;
6543}
6544
6545static int ftrace_cmp_ips(const void *a, const void *b)
6546{
6547 const unsigned long *ipa = a;
6548 const unsigned long *ipb = b;
6549
6550 if (*ipa > *ipb)
6551 return 1;
6552 if (*ipa < *ipb)
6553 return -1;
6554 return 0;
6555}
6556
6557#ifdef CONFIG_FTRACE_SORT_STARTUP_TEST
6558static void test_is_sorted(unsigned long *start, unsigned long count)
6559{
6560 int i;
6561
6562 for (i = 1; i < count; i++) {
6563 if (WARN(start[i - 1] > start[i],
6564 "[%d] %pS at %lx is not sorted with %pS at %lx\n", i,
6565 (void *)start[i - 1], start[i - 1],
6566 (void *)start[i], start[i]))
6567 break;
6568 }
6569 if (i == count)
6570 pr_info("ftrace section at %px sorted properly\n", start);
6571}
6572#else
6573static void test_is_sorted(unsigned long *start, unsigned long count)
6574{
6575}
6576#endif
6577
6578static int ftrace_process_locs(struct module *mod,
6579 unsigned long *start,
6580 unsigned long *end)
6581{
6582 struct ftrace_page *start_pg;
6583 struct ftrace_page *pg;
6584 struct dyn_ftrace *rec;
6585 unsigned long count;
6586 unsigned long *p;
6587 unsigned long addr;
6588 unsigned long flags = 0;
6589 int ret = -ENOMEM;
6590
6591 count = end - start;
6592
6593 if (!count)
6594 return 0;
6595
 /*
  * Sorting mcount in vmlinux at build time depends on
  * CONFIG_BUILDTIME_MCOUNT_SORT, while mcount loc in
  * modules cannot be sorted at build time.
  */
6601 if (!IS_ENABLED(CONFIG_BUILDTIME_MCOUNT_SORT) || mod) {
6602 sort(start, count, sizeof(*start),
6603 ftrace_cmp_ips, NULL);
6604 } else {
6605 test_is_sorted(start, count);
6606 }
6607
6608 start_pg = ftrace_allocate_pages(count);
6609 if (!start_pg)
6610 return -ENOMEM;
6611
6612 mutex_lock(&ftrace_lock);
6613
 /*
  * Core and each module need their own pages, as
  * modules will free them when they are removed.
  * Force a new page to be allocated for modules.
  */
6619 if (!mod) {
6620 WARN_ON(ftrace_pages || ftrace_pages_start);
 /* First initialization */
6622 ftrace_pages = ftrace_pages_start = start_pg;
6623 } else {
6624 if (!ftrace_pages)
6625 goto out;
6626
6627 if (WARN_ON(ftrace_pages->next)) {
 /* Hmm, we have free pages? */
6629 while (ftrace_pages->next)
6630 ftrace_pages = ftrace_pages->next;
6631 }
6632
6633 ftrace_pages->next = start_pg;
6634 }
6635
6636 p = start;
6637 pg = start_pg;
6638 while (p < end) {
6639 unsigned long end_offset;
6640 addr = ftrace_call_adjust(*p++);
 /*
  * Some architecture linkers will pad between
  * the different mcount_loc sections of different
  * object files to satisfy alignments.
  * Skip any NULL pointers.
  */
6647 if (!addr)
6648 continue;
6649
6650 end_offset = (pg->index+1) * sizeof(pg->records[0]);
6651 if (end_offset > PAGE_SIZE << pg->order) {
 /* We should have allocated enough */
6653 if (WARN_ON(!pg->next))
6654 break;
6655 pg = pg->next;
6656 }
6657
6658 rec = &pg->records[pg->index++];
6659 rec->ip = addr;
6660 }
6661
 /* We should have used all pages */
6663 WARN_ON(pg->next);
6664
 /* Assign the last page to ftrace_pages */
6666 ftrace_pages = pg;
6667
 /*
  * We only need to disable interrupts on start up
  * because we are modifying code that an interrupt
  * may execute, and the modification is not atomic.
  * But for modules, nothing runs the code we modify
  * until we are finished with it, and there's no
  * reason to cause large interrupt latencies while we do it.
  */
6676 if (!mod)
6677 local_irq_save(flags);
6678 ftrace_update_code(mod, start_pg);
6679 if (!mod)
6680 local_irq_restore(flags);
6681 ret = 0;
6682 out:
6683 mutex_unlock(&ftrace_lock);
6684
6685 return ret;
6686}
6687
6688struct ftrace_mod_func {
6689 struct list_head list;
6690 char *name;
6691 unsigned long ip;
6692 unsigned int size;
6693};
6694
6695struct ftrace_mod_map {
6696 struct rcu_head rcu;
6697 struct list_head list;
6698 struct module *mod;
6699 unsigned long start_addr;
6700 unsigned long end_addr;
6701 struct list_head funcs;
6702 unsigned int num_funcs;
6703};
6704
6705static int ftrace_get_trampoline_kallsym(unsigned int symnum,
6706 unsigned long *value, char *type,
6707 char *name, char *module_name,
6708 int *exported)
6709{
6710 struct ftrace_ops *op;
6711
6712 list_for_each_entry_rcu(op, &ftrace_ops_trampoline_list, list) {
6713 if (!op->trampoline || symnum--)
6714 continue;
6715 *value = op->trampoline;
6716 *type = 't';
6717 strlcpy(name, FTRACE_TRAMPOLINE_SYM, KSYM_NAME_LEN);
6718 strlcpy(module_name, FTRACE_TRAMPOLINE_MOD, MODULE_NAME_LEN);
6719 *exported = 0;
6720 return 0;
6721 }
6722
6723 return -ERANGE;
6724}
6725
6726#ifdef CONFIG_MODULES
6727
6728#define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
6729
6730static LIST_HEAD(ftrace_mod_maps);
6731
6732static int referenced_filters(struct dyn_ftrace *rec)
6733{
6734 struct ftrace_ops *ops;
6735 int cnt = 0;
6736
6737 for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
6738 if (ops_references_rec(ops, rec)) {
6739 if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_DIRECT))
6740 continue;
6741 if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_IPMODIFY))
6742 continue;
6743 cnt++;
6744 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
6745 rec->flags |= FTRACE_FL_REGS;
6746 if (cnt == 1 && ops->trampoline)
6747 rec->flags |= FTRACE_FL_TRAMP;
6748 else
6749 rec->flags &= ~FTRACE_FL_TRAMP;
6750 }
6751 }
6752
6753 return cnt;
6754}
6755
6756static void
6757clear_mod_from_hash(struct ftrace_page *pg, struct ftrace_hash *hash)
6758{
6759 struct ftrace_func_entry *entry;
6760 struct dyn_ftrace *rec;
6761 int i;
6762
6763 if (ftrace_hash_empty(hash))
6764 return;
6765
6766 for (i = 0; i < pg->index; i++) {
6767 rec = &pg->records[i];
6768 entry = __ftrace_lookup_ip(hash, rec->ip);
 /*
  * Do not allow this rec to match again.
  * Yeah, it may waste some memory, but will be removed
  * if/when the hash is modified again.
  */
6774 if (entry)
6775 entry->ip = 0;
6776 }
6777}
6778
/* Clear any records from hashes */
6780static void clear_mod_from_hashes(struct ftrace_page *pg)
6781{
6782 struct trace_array *tr;
6783
6784 mutex_lock(&trace_types_lock);
6785 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6786 if (!tr->ops || !tr->ops->func_hash)
6787 continue;
6788 mutex_lock(&tr->ops->func_hash->regex_lock);
6789 clear_mod_from_hash(pg, tr->ops->func_hash->filter_hash);
6790 clear_mod_from_hash(pg, tr->ops->func_hash->notrace_hash);
6791 mutex_unlock(&tr->ops->func_hash->regex_lock);
6792 }
6793 mutex_unlock(&trace_types_lock);
6794}
6795
6796static void ftrace_free_mod_map(struct rcu_head *rcu)
6797{
6798 struct ftrace_mod_map *mod_map = container_of(rcu, struct ftrace_mod_map, rcu);
6799 struct ftrace_mod_func *mod_func;
6800 struct ftrace_mod_func *n;
6801
 /* All the contents of mod_map are now not visible to readers */
6803 list_for_each_entry_safe(mod_func, n, &mod_map->funcs, list) {
6804 kfree(mod_func->name);
6805 list_del(&mod_func->list);
6806 kfree(mod_func);
6807 }
6808
6809 kfree(mod_map);
6810}
6811
6812void ftrace_release_mod(struct module *mod)
6813{
6814 struct ftrace_mod_map *mod_map;
6815 struct ftrace_mod_map *n;
6816 struct dyn_ftrace *rec;
6817 struct ftrace_page **last_pg;
6818 struct ftrace_page *tmp_page = NULL;
6819 struct ftrace_page *pg;
6820
6821 mutex_lock(&ftrace_lock);
6822
6823 if (ftrace_disabled)
6824 goto out_unlock;
6825
6826 list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) {
6827 if (mod_map->mod == mod) {
6828 list_del_rcu(&mod_map->list);
6829 call_rcu(&mod_map->rcu, ftrace_free_mod_map);
6830 break;
6831 }
6832 }
6833
 /*
  * Each module has its own ftrace_pages, remove
  * them from the list.
  */
6838 last_pg = &ftrace_pages_start;
6839 for (pg = ftrace_pages_start; pg; pg = *last_pg) {
6840 rec = &pg->records[0];
6841 if (within_module_core(rec->ip, mod) ||
6842 within_module_init(rec->ip, mod)) {
 /*
  * As core pages are first, the first
  * page should never be a module page.
  */
6847 if (WARN_ON(pg == ftrace_pages_start))
6848 goto out_unlock;
6849
 /* Check if we are deleting the last page */
6851 if (pg == ftrace_pages)
6852 ftrace_pages = next_to_ftrace_page(last_pg);
6853
6854 ftrace_update_tot_cnt -= pg->index;
6855 *last_pg = pg->next;
6856
6857 pg->next = tmp_page;
6858 tmp_page = pg;
6859 } else
6860 last_pg = &pg->next;
6861 }
6862 out_unlock:
6863 mutex_unlock(&ftrace_lock);
6864
6865 for (pg = tmp_page; pg; pg = tmp_page) {
6866
 /* Needs to be called outside of ftrace_lock */
6868 clear_mod_from_hashes(pg);
6869
6870 if (pg->records) {
6871 free_pages((unsigned long)pg->records, pg->order);
6872 ftrace_number_of_pages -= 1 << pg->order;
6873 }
6874 tmp_page = pg->next;
6875 kfree(pg);
6876 ftrace_number_of_groups--;
6877 }
6878}
6879
6880void ftrace_module_enable(struct module *mod)
6881{
6882 struct dyn_ftrace *rec;
6883 struct ftrace_page *pg;
6884
6885 mutex_lock(&ftrace_lock);
6886
6887 if (ftrace_disabled)
6888 goto out_unlock;
6889
 /*
  * If the tracing is enabled, go ahead and enable the record.
  *
  * The reason not to enable the record immediately is the
  * inherent check of ftrace_make_nop/ftrace_make_call for
  * correct previous instructions.  Making ftrace modifiable
  * is the reason why this is here and not in init/main.c which
  * has the call to ftrace_init(). After module init, it is safe
  * to let the tracing enabled again.
  */
6903 if (ftrace_start_up)
6904 ftrace_arch_code_modify_prepare();
6905
6906 do_for_each_ftrace_rec(pg, rec) {
6907 int cnt;
 /*
  * do_for_each_ftrace_rec() is a double loop.
  * module text shares the pg. If a record is
  * not part of this module, then skip this pg,
  * which the "break" will do.
  */
6914 if (!within_module_core(rec->ip, mod) &&
6915 !within_module_init(rec->ip, mod))
6916 break;
6917
 /* Weak functions should still be ignored */
6919 if (!test_for_valid_rec(rec)) {
 /* Clear all other flags. Should not be enabled anyway */
6921 rec->flags = FTRACE_FL_DISABLED;
6922 continue;
6923 }
6924
6925 cnt = 0;
6926
 /*
  * When adding a module, we need to check if tracers are
  * currently enabled and if they are, and can trace this record,
  * we need to enable the module functions as well as update the
  * reference counts for those function records.
  */
6933 if (ftrace_start_up)
6934 cnt += referenced_filters(rec);
6935
6936 rec->flags &= ~FTRACE_FL_DISABLED;
6937 rec->flags += cnt;
6938
6939 if (ftrace_start_up && cnt) {
6940 int failed = __ftrace_replace_code(rec, 1);
6941 if (failed) {
6942 ftrace_bug(failed, rec);
6943 goto out_loop;
6944 }
6945 }
6946
6947 } while_for_each_ftrace_rec();
6948
6949 out_loop:
6950 if (ftrace_start_up)
6951 ftrace_arch_code_modify_post_process();
6952
6953 out_unlock:
6954 mutex_unlock(&ftrace_lock);
6955
6956 process_cached_mods(mod->name);
6957}
6958
6959void ftrace_module_init(struct module *mod)
6960{
6961 int ret;
6962
6963 if (ftrace_disabled || !mod->num_ftrace_callsites)
6964 return;
6965
6966 ret = ftrace_process_locs(mod, mod->ftrace_callsites,
6967 mod->ftrace_callsites + mod->num_ftrace_callsites);
6968 if (ret)
6969 pr_warn("ftrace: failed to allocate entries for module '%s' functions\n",
6970 mod->name);
6971}
6972
6973static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
6974 struct dyn_ftrace *rec)
6975{
6976 struct ftrace_mod_func *mod_func;
6977 unsigned long symsize;
6978 unsigned long offset;
6979 char str[KSYM_SYMBOL_LEN];
6980 char *modname;
6981 const char *ret;
6982
6983 ret = kallsyms_lookup(rec->ip, &symsize, &offset, &modname, str);
6984 if (!ret)
6985 return;
6986
6987 mod_func = kmalloc(sizeof(*mod_func), GFP_KERNEL);
6988 if (!mod_func)
6989 return;
6990
6991 mod_func->name = kstrdup(str, GFP_KERNEL);
6992 if (!mod_func->name) {
6993 kfree(mod_func);
6994 return;
6995 }
6996
6997 mod_func->ip = rec->ip - offset;
6998 mod_func->size = symsize;
6999
7000 mod_map->num_funcs++;
7001
7002 list_add_rcu(&mod_func->list, &mod_map->funcs);
7003}
7004
7005static struct ftrace_mod_map *
7006allocate_ftrace_mod_map(struct module *mod,
7007 unsigned long start, unsigned long end)
7008{
7009 struct ftrace_mod_map *mod_map;
7010
7011 mod_map = kmalloc(sizeof(*mod_map), GFP_KERNEL);
7012 if (!mod_map)
7013 return NULL;
7014
7015 mod_map->mod = mod;
7016 mod_map->start_addr = start;
7017 mod_map->end_addr = end;
7018 mod_map->num_funcs = 0;
7019
7020 INIT_LIST_HEAD_RCU(&mod_map->funcs);
7021
7022 list_add_rcu(&mod_map->list, &ftrace_mod_maps);
7023
7024 return mod_map;
7025}
7026
7027static const char *
7028ftrace_func_address_lookup(struct ftrace_mod_map *mod_map,
7029 unsigned long addr, unsigned long *size,
7030 unsigned long *off, char *sym)
7031{
7032 struct ftrace_mod_func *found_func = NULL;
7033 struct ftrace_mod_func *mod_func;
7034
7035 list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) {
7036 if (addr >= mod_func->ip &&
7037 addr < mod_func->ip + mod_func->size) {
7038 found_func = mod_func;
7039 break;
7040 }
7041 }
7042
7043 if (found_func) {
7044 if (size)
7045 *size = found_func->size;
7046 if (off)
7047 *off = addr - found_func->ip;
7048 if (sym)
7049 strlcpy(sym, found_func->name, KSYM_NAME_LEN);
7050
7051 return found_func->name;
7052 }
7053
7054 return NULL;
7055}
7056
7057const char *
7058ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
7059 unsigned long *off, char **modname, char *sym)
7060{
7061 struct ftrace_mod_map *mod_map;
7062 const char *ret = NULL;
7063
 /* mod_map is freed via call_rcu() */
7065 preempt_disable();
7066 list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
7067 ret = ftrace_func_address_lookup(mod_map, addr, size, off, sym);
7068 if (ret) {
7069 if (modname)
7070 *modname = mod_map->mod->name;
7071 break;
7072 }
7073 }
7074 preempt_enable();
7075
7076 return ret;
7077}
7078
7079int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
7080 char *type, char *name,
7081 char *module_name, int *exported)
7082{
7083 struct ftrace_mod_map *mod_map;
7084 struct ftrace_mod_func *mod_func;
7085 int ret;
7086
7087 preempt_disable();
7088 list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
7089
7090 if (symnum >= mod_map->num_funcs) {
7091 symnum -= mod_map->num_funcs;
7092 continue;
7093 }
7094
7095 list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) {
7096 if (symnum > 1) {
7097 symnum--;
7098 continue;
7099 }
7100
7101 *value = mod_func->ip;
7102 *type = 'T';
7103 strlcpy(name, mod_func->name, KSYM_NAME_LEN);
7104 strlcpy(module_name, mod_map->mod->name, MODULE_NAME_LEN);
7105 *exported = 1;
7106 preempt_enable();
7107 return 0;
7108 }
7109 WARN_ON(1);
7110 break;
7111 }
7112 ret = ftrace_get_trampoline_kallsym(symnum, value, type, name,
7113 module_name, exported);
7114 preempt_enable();
7115 return ret;
7116}
7117
7118#else
7119static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
7120 struct dyn_ftrace *rec) { }
7121static inline struct ftrace_mod_map *
7122allocate_ftrace_mod_map(struct module *mod,
7123 unsigned long start, unsigned long end)
7124{
7125 return NULL;
7126}
7127int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
7128 char *type, char *name, char *module_name,
7129 int *exported)
7130{
7131 int ret;
7132
7133 preempt_disable();
7134 ret = ftrace_get_trampoline_kallsym(symnum, value, type, name,
7135 module_name, exported);
7136 preempt_enable();
7137 return ret;
7138}
7139#endif
7140
7141struct ftrace_init_func {
7142 struct list_head list;
7143 unsigned long ip;
7144};
7145
/* Clear any init ips from hashes */
7147static void
7148clear_func_from_hash(struct ftrace_init_func *func, struct ftrace_hash *hash)
7149{
7150 struct ftrace_func_entry *entry;
7151
7152 entry = ftrace_lookup_ip(hash, func->ip);
 /*
  * Do not allow this rec to match again.
  * Yeah, it may waste some memory, but will be removed
  * if/when the hash is modified again.
  */
7158 if (entry)
7159 entry->ip = 0;
7160}
7161
7162static void
7163clear_func_from_hashes(struct ftrace_init_func *func)
7164{
7165 struct trace_array *tr;
7166
7167 mutex_lock(&trace_types_lock);
7168 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7169 if (!tr->ops || !tr->ops->func_hash)
7170 continue;
7171 mutex_lock(&tr->ops->func_hash->regex_lock);
7172 clear_func_from_hash(func, tr->ops->func_hash->filter_hash);
7173 clear_func_from_hash(func, tr->ops->func_hash->notrace_hash);
7174 mutex_unlock(&tr->ops->func_hash->regex_lock);
7175 }
7176 mutex_unlock(&trace_types_lock);
7177}
7178
7179static void add_to_clear_hash_list(struct list_head *clear_list,
7180 struct dyn_ftrace *rec)
7181{
7182 struct ftrace_init_func *func;
7183
7184 func = kmalloc(sizeof(*func), GFP_KERNEL);
7185 if (!func) {
7186 MEM_FAIL(1, "alloc failure, ftrace filter could be stale\n");
7187 return;
7188 }
7189
7190 func->ip = rec->ip;
7191 list_add(&func->list, clear_list);
7192}
7193
7194void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
7195{
7196 unsigned long start = (unsigned long)(start_ptr);
7197 unsigned long end = (unsigned long)(end_ptr);
7198 struct ftrace_page **last_pg = &ftrace_pages_start;
7199 struct ftrace_page *pg;
7200 struct dyn_ftrace *rec;
7201 struct dyn_ftrace key;
7202 struct ftrace_mod_map *mod_map = NULL;
7203 struct ftrace_init_func *func, *func_next;
7204 struct list_head clear_hash;
7205
7206 INIT_LIST_HEAD(&clear_hash);
7207
7208 key.ip = start;
7209 key.flags = end;
7210
7211 mutex_lock(&ftrace_lock);
7212
 /*
  * If we are freeing module init memory, then check if
  * any tracer is active. If so, we need to save a mapping of
  * the module functions being freed with the address.
  */
7218 if (mod && ftrace_ops_list != &ftrace_list_end)
7219 mod_map = allocate_ftrace_mod_map(mod, start, end);
7220
7221 for (pg = ftrace_pages_start; pg; last_pg = &pg->next, pg = *last_pg) {
7222 if (end < pg->records[0].ip ||
7223 start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
7224 continue;
7225 again:
7226 rec = bsearch(&key, pg->records, pg->index,
7227 sizeof(struct dyn_ftrace),
7228 ftrace_cmp_recs);
7229 if (!rec)
7230 continue;
7231
 /* rec will be cleared from hashes after ftrace_lock unlock */
7233 add_to_clear_hash_list(&clear_hash, rec);
7234
7235 if (mod_map)
7236 save_ftrace_mod_rec(mod_map, rec);
7237
7238 pg->index--;
7239 ftrace_update_tot_cnt--;
7240 if (!pg->index) {
7241 *last_pg = pg->next;
7242 if (pg->records) {
7243 free_pages((unsigned long)pg->records, pg->order);
7244 ftrace_number_of_pages -= 1 << pg->order;
7245 }
7246 ftrace_number_of_groups--;
7247 kfree(pg);
7248 pg = container_of(last_pg, struct ftrace_page, next);
7249 if (!(*last_pg))
7250 ftrace_pages = pg;
7251 continue;
7252 }
7253 memmove(rec, rec + 1,
7254 (pg->index - (rec - pg->records)) * sizeof(*rec));
7255
7256 goto again;
7257 }
7258 mutex_unlock(&ftrace_lock);
7259
7260 list_for_each_entry_safe(func, func_next, &clear_hash, list) {
7261 clear_func_from_hashes(func);
7262 kfree(func);
7263 }
7264}
7265
7266void __init ftrace_free_init_mem(void)
7267{
7268 void *start = (void *)(&__init_begin);
7269 void *end = (void *)(&__init_end);
7270
7271 ftrace_boot_snapshot();
7272
7273 ftrace_free_mem(NULL, start, end);
7274}
7275
7276int __init __weak ftrace_dyn_arch_init(void)
7277{
7278 return 0;
7279}
7280
7281void __init ftrace_init(void)
7282{
7283 extern unsigned long __start_mcount_loc[];
7284 extern unsigned long __stop_mcount_loc[];
7285 unsigned long count, flags;
7286 int ret;
7287
7288 local_irq_save(flags);
7289 ret = ftrace_dyn_arch_init();
7290 local_irq_restore(flags);
7291 if (ret)
7292 goto failed;
7293
7294 count = __stop_mcount_loc - __start_mcount_loc;
7295 if (!count) {
7296 pr_info("ftrace: No functions to be traced?\n");
7297 goto failed;
7298 }
7299
7300 pr_info("ftrace: allocating %ld entries in %ld pages\n",
7301 count, count / ENTRIES_PER_PAGE + 1);
7302
7303 ret = ftrace_process_locs(NULL,
7304 __start_mcount_loc,
7305 __stop_mcount_loc);
7306 if (ret) {
7307 pr_warn("ftrace: failed to allocate entries for functions\n");
7308 goto failed;
7309 }
7310
7311 pr_info("ftrace: allocated %ld pages with %ld groups\n",
7312 ftrace_number_of_pages, ftrace_number_of_groups);
7313
7314 last_ftrace_enabled = ftrace_enabled = 1;
7315
7316 set_ftrace_early_filters();
7317
7318 return;
7319 failed:
7320 ftrace_disabled = 1;
7321}
7322
/* Do nothing if arch does not support this */
7324void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops)
7325{
7326}
7327
7328static void ftrace_update_trampoline(struct ftrace_ops *ops)
7329{
7330 unsigned long trampoline = ops->trampoline;
7331
7332 arch_ftrace_update_trampoline(ops);
7333 if (ops->trampoline && ops->trampoline != trampoline &&
7334 (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) {
 /* Add to kallsyms before the perf events */
7336 ftrace_add_trampoline_to_kallsyms(ops);
7337 perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL,
7338 ops->trampoline, ops->trampoline_size, false,
7339 FTRACE_TRAMPOLINE_SYM);
 /*
  * Record the perf text poke event after the ksymbol register
  * event.
  */
7344 perf_event_text_poke((void *)ops->trampoline, NULL, 0,
7345 (void *)ops->trampoline,
7346 ops->trampoline_size);
7347 }
7348}
7349
7350void ftrace_init_trace_array(struct trace_array *tr)
7351{
7352 INIT_LIST_HEAD(&tr->func_probes);
7353 INIT_LIST_HEAD(&tr->mod_trace);
7354 INIT_LIST_HEAD(&tr->mod_notrace);
7355}
7356#else
7357
7358struct ftrace_ops global_ops = {
7359 .func = ftrace_stub,
7360 .flags = FTRACE_OPS_FL_INITIALIZED |
7361 FTRACE_OPS_FL_PID,
7362};
7363
7364static int __init ftrace_nodyn_init(void)
7365{
7366 ftrace_enabled = 1;
7367 return 0;
7368}
7369core_initcall(ftrace_nodyn_init);
7370
7371static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; }
7372static inline void ftrace_startup_all(int command) { }
7373
7374static void ftrace_update_trampoline(struct ftrace_ops *ops)
7375{
7376}
7377
7378#endif
7379
7380__init void ftrace_init_global_array_ops(struct trace_array *tr)
7381{
7382 tr->ops = &global_ops;
7383 tr->ops->private = tr;
7384 ftrace_init_trace_array(tr);
7385}
7386
7387void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
7388{
 /* If we filter on pids, update to use the pid function */
7390 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
7391 if (WARN_ON(tr->ops->func != ftrace_stub))
7392 printk("ftrace ops had %pS for function\n",
7393 tr->ops->func);
7394 }
7395 tr->ops->func = func;
7396 tr->ops->private = tr;
7397}
7398
7399void ftrace_reset_array_ops(struct trace_array *tr)
7400{
7401 tr->ops->func = ftrace_stub;
7402}
7403
7404static nokprobe_inline void
7405__ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
7406 struct ftrace_ops *ignored, struct ftrace_regs *fregs)
7407{
7408 struct pt_regs *regs = ftrace_get_regs(fregs);
7409 struct ftrace_ops *op;
7410 int bit;
7411
 /*
  * The ftrace_test_and_set_recursion() will disable preemption,
  * which is required since some of the ops may be dynamically
  * allocated, they must be freed after a synchronize_rcu().
  */
7417 bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START);
7418 if (bit < 0)
7419 return;
7420
7421 do_for_each_ftrace_op(op, ftrace_ops_list) {
 /* Stub functions don't need to be called nor tested */
7423 if (op->flags & FTRACE_OPS_FL_STUB)
7424 continue;
7425
 /*
  * Check the following for each ops before calling their func:
  *  if RCU flag is set, then rcu_is_watching() must be true
  *  Otherwise test if the ip matches the ops filter
  *
  * If any of the above fails then the op->func() is not executed.
  */
7434 if ((!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) &&
7435 ftrace_ops_test(op, ip, regs)) {
7436 if (FTRACE_WARN_ON(!op->func)) {
7437 pr_warn("op=%p %pS\n", op, op);
7438 goto out;
7439 }
7440 op->func(ip, parent_ip, op, fregs);
7441 }
7442 } while_for_each_ftrace_op(op);
7443out:
7444 trace_clear_recursion(bit);
7445}
7446
/*
 * Some archs only support passing ip and parent_ip. Even though
 * the list function ignores the op parameter, we do not want any
 * C side effects, where a function is called without the caller
 * actually knowing what it is doing.
 *
 * Archs are to support both the regs and ftrace_ops at the same time.
 * If they support ftrace_ops, it is assumed they support regs.
 * If call backs want to use regs, they must either check for regs
 * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS.
 * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
 * An architecture can pass partial regs with ftrace_ops and still
 * set the ARCH_SUPPORTS_FTRACE_OPS.
 *
 * In vmlinux.lds.h, ftrace_ops_list_func() is defined to be
 * arch_ftrace_ops_list_func().
 */
7463#if ARCH_SUPPORTS_FTRACE_OPS
7464void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
7465 struct ftrace_ops *op, struct ftrace_regs *fregs)
7466{
7467 __ftrace_ops_list_func(ip, parent_ip, NULL, fregs);
7468}
7469#else
7470void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
7471{
7472 __ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
7473}
7474#endif
7475NOKPROBE_SYMBOL(arch_ftrace_ops_list_func);
7476
/*
 * If there's only one function registered but it does not support
 * recursion, or it needs RCU protection, then this function will be
 * called by the mcount trampoline.
 */
7482static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
7483 struct ftrace_ops *op, struct ftrace_regs *fregs)
7484{
7485 int bit;
7486
7487 bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START);
7488 if (bit < 0)
7489 return;
7490
7491 if (!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching())
7492 op->func(ip, parent_ip, op, fregs);
7493
7494 trace_clear_recursion(bit);
7495}
7496NOKPROBE_SYMBOL(ftrace_ops_assist_func);
7497
/**
 * ftrace_ops_get_func - get the function a trampoline should call
 * @ops: the ops to get the function for
 *
 * Normally the mcount trampoline will call the ops->func, but there
 * are times that it should not. For example, if the ops does not
 * have its own recursion protection, then it should call the
 * ftrace_ops_assist_func() instead.
 *
 * Returns the function that the trampoline should call for @ops.
 */
7509ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
7510{
 /*
  * If the function does not handle recursion or needs to be RCU safe,
  * then we need to call the assist handler.
  */
7515 if (ops->flags & (FTRACE_OPS_FL_RECURSION |
7516 FTRACE_OPS_FL_RCU))
7517 return ftrace_ops_assist_func;
7518
7519 return ops->func;
7520}
7521
7522static void
7523ftrace_filter_pid_sched_switch_probe(void *data, bool preempt,
7524 struct task_struct *prev,
7525 struct task_struct *next,
7526 unsigned int prev_state)
7527{
7528 struct trace_array *tr = data;
7529 struct trace_pid_list *pid_list;
7530 struct trace_pid_list *no_pid_list;
7531
7532 pid_list = rcu_dereference_sched(tr->function_pids);
7533 no_pid_list = rcu_dereference_sched(tr->function_no_pids);
7534
7535 if (trace_ignore_this_task(pid_list, no_pid_list, next))
7536 this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
7537 FTRACE_PID_IGNORE);
7538 else
7539 this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
7540 next->pid);
7541}
7542
7543static void
7544ftrace_pid_follow_sched_process_fork(void *data,
7545 struct task_struct *self,
7546 struct task_struct *task)
7547{
7548 struct trace_pid_list *pid_list;
7549 struct trace_array *tr = data;
7550
7551 pid_list = rcu_dereference_sched(tr->function_pids);
7552 trace_filter_add_remove_task(pid_list, self, task);
7553
7554 pid_list = rcu_dereference_sched(tr->function_no_pids);
7555 trace_filter_add_remove_task(pid_list, self, task);
7556}
7557
7558static void
7559ftrace_pid_follow_sched_process_exit(void *data, struct task_struct *task)
7560{
7561 struct trace_pid_list *pid_list;
7562 struct trace_array *tr = data;
7563
7564 pid_list = rcu_dereference_sched(tr->function_pids);
7565 trace_filter_add_remove_task(pid_list, NULL, task);
7566
7567 pid_list = rcu_dereference_sched(tr->function_no_pids);
7568 trace_filter_add_remove_task(pid_list, NULL, task);
7569}
7570
7571void ftrace_pid_follow_fork(struct trace_array *tr, bool enable)
7572{
7573 if (enable) {
7574 register_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
7575 tr);
7576 register_trace_sched_process_free(ftrace_pid_follow_sched_process_exit,
7577 tr);
7578 } else {
7579 unregister_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
7580 tr);
7581 unregister_trace_sched_process_free(ftrace_pid_follow_sched_process_exit,
7582 tr);
7583 }
7584}
7585
7586static void clear_ftrace_pids(struct trace_array *tr, int type)
7587{
7588 struct trace_pid_list *pid_list;
7589 struct trace_pid_list *no_pid_list;
7590 int cpu;
7591
7592 pid_list = rcu_dereference_protected(tr->function_pids,
7593 lockdep_is_held(&ftrace_lock));
7594 no_pid_list = rcu_dereference_protected(tr->function_no_pids,
7595 lockdep_is_held(&ftrace_lock));
7596
 /* Make sure there's something to do */
7598 if (!pid_type_enabled(type, pid_list, no_pid_list))
7599 return;
7600
 /* See if the pids still need to be closed */
7602 if (!still_need_pid_events(type, pid_list, no_pid_list)) {
7603 unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
7604 for_each_possible_cpu(cpu)
7605 per_cpu_ptr(tr->array_buffer.data, cpu)->ftrace_ignore_pid = FTRACE_PID_TRACE;
7606 }
7607
7608 if (type & TRACE_PIDS)
7609 rcu_assign_pointer(tr->function_pids, NULL);
7610
7611 if (type & TRACE_NO_PIDS)
7612 rcu_assign_pointer(tr->function_no_pids, NULL);
7613
 /* Wait till all users are no longer using pid filtering */
7615 synchronize_rcu();
7616
7617 if ((type & TRACE_PIDS) && pid_list)
7618 trace_pid_list_free(pid_list);
7619
7620 if ((type & TRACE_NO_PIDS) && no_pid_list)
7621 trace_pid_list_free(no_pid_list);
7622}
7623
7624void ftrace_clear_pids(struct trace_array *tr)
7625{
7626 mutex_lock(&ftrace_lock);
7627
7628 clear_ftrace_pids(tr, TRACE_PIDS | TRACE_NO_PIDS);
7629
7630 mutex_unlock(&ftrace_lock);
7631}
7632
7633static void ftrace_pid_reset(struct trace_array *tr, int type)
7634{
7635 mutex_lock(&ftrace_lock);
7636 clear_ftrace_pids(tr, type);
7637
7638 ftrace_update_pid_func();
7639 ftrace_startup_all(0);
7640
7641 mutex_unlock(&ftrace_lock);
7642}
7643
/* Greater than any max PID */
7645#define FTRACE_NO_PIDS (void *)(PID_MAX_LIMIT + 1)
7646
7647static void *fpid_start(struct seq_file *m, loff_t *pos)
7648 __acquires(RCU)
7649{
7650 struct trace_pid_list *pid_list;
7651 struct trace_array *tr = m->private;
7652
7653 mutex_lock(&ftrace_lock);
7654 rcu_read_lock_sched();
7655
7656 pid_list = rcu_dereference_sched(tr->function_pids);
7657
7658 if (!pid_list)
7659 return !(*pos) ? FTRACE_NO_PIDS : NULL;
7660
7661 return trace_pid_start(pid_list, pos);
7662}
7663
7664static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
7665{
7666 struct trace_array *tr = m->private;
7667 struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_pids);
7668
7669 if (v == FTRACE_NO_PIDS) {
7670 (*pos)++;
7671 return NULL;
7672 }
7673 return trace_pid_next(pid_list, v, pos);
7674}
7675
7676static void fpid_stop(struct seq_file *m, void *p)
7677 __releases(RCU)
7678{
7679 rcu_read_unlock_sched();
7680 mutex_unlock(&ftrace_lock);
7681}
7682
7683static int fpid_show(struct seq_file *m, void *v)
7684{
7685 if (v == FTRACE_NO_PIDS) {
7686 seq_puts(m, "no pid\n");
7687 return 0;
7688 }
7689
7690 return trace_pid_show(m, v);
7691}
7692
7693static const struct seq_operations ftrace_pid_sops = {
7694 .start = fpid_start,
7695 .next = fpid_next,
7696 .stop = fpid_stop,
7697 .show = fpid_show,
7698};
7699
7700static void *fnpid_start(struct seq_file *m, loff_t *pos)
7701 __acquires(RCU)
7702{
7703 struct trace_pid_list *pid_list;
7704 struct trace_array *tr = m->private;
7705
7706 mutex_lock(&ftrace_lock);
7707 rcu_read_lock_sched();
7708
7709 pid_list = rcu_dereference_sched(tr->function_no_pids);
7710
7711 if (!pid_list)
7712 return !(*pos) ? FTRACE_NO_PIDS : NULL;
7713
7714 return trace_pid_start(pid_list, pos);
7715}
7716
7717static void *fnpid_next(struct seq_file *m, void *v, loff_t *pos)
7718{
7719 struct trace_array *tr = m->private;
7720 struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_no_pids);
7721
7722 if (v == FTRACE_NO_PIDS) {
7723 (*pos)++;
7724 return NULL;
7725 }
7726 return trace_pid_next(pid_list, v, pos);
7727}
7728
7729static const struct seq_operations ftrace_no_pid_sops = {
7730 .start = fnpid_start,
7731 .next = fnpid_next,
7732 .stop = fpid_stop,
7733 .show = fpid_show,
7734};
7735
7736static int pid_open(struct inode *inode, struct file *file, int type)
7737{
7738 const struct seq_operations *seq_ops;
7739 struct trace_array *tr = inode->i_private;
7740 struct seq_file *m;
7741 int ret = 0;
7742
7743 ret = tracing_check_open_get_tr(tr);
7744 if (ret)
7745 return ret;
7746
7747 if ((file->f_mode & FMODE_WRITE) &&
7748 (file->f_flags & O_TRUNC))
7749 ftrace_pid_reset(tr, type);
7750
7751 switch (type) {
7752 case TRACE_PIDS:
7753 seq_ops = &ftrace_pid_sops;
7754 break;
7755 case TRACE_NO_PIDS:
7756 seq_ops = &ftrace_no_pid_sops;
7757 break;
7758 default:
7759 trace_array_put(tr);
7760 WARN_ON_ONCE(1);
7761 return -EINVAL;
7762 }
7763
7764 ret = seq_open(file, seq_ops);
7765 if (ret < 0) {
7766 trace_array_put(tr);
7767 } else {
7768 m = file->private_data;
 /* copy tr over to seq ops */
7770 m->private = tr;
7771 }
7772
7773 return ret;
7774}
7775
7776static int
7777ftrace_pid_open(struct inode *inode, struct file *file)
7778{
7779 return pid_open(inode, file, TRACE_PIDS);
7780}
7781
7782static int
7783ftrace_no_pid_open(struct inode *inode, struct file *file)
7784{
7785 return pid_open(inode, file, TRACE_NO_PIDS);
7786}
7787
7788static void ignore_task_cpu(void *data)
7789{
7790 struct trace_array *tr = data;
7791 struct trace_pid_list *pid_list;
7792 struct trace_pid_list *no_pid_list;
7793
 /*
  * This function is called by on_each_cpu() while the
  * event_mutex is held.
  */
7798 pid_list = rcu_dereference_protected(tr->function_pids,
7799 mutex_is_locked(&ftrace_lock));
7800 no_pid_list = rcu_dereference_protected(tr->function_no_pids,
7801 mutex_is_locked(&ftrace_lock));
7802
7803 if (trace_ignore_this_task(pid_list, no_pid_list, current))
7804 this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
7805 FTRACE_PID_IGNORE);
7806 else
7807 this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
7808 current->pid);
7809}
7810
7811static ssize_t
7812pid_write(struct file *filp, const char __user *ubuf,
7813 size_t cnt, loff_t *ppos, int type)
7814{
7815 struct seq_file *m = filp->private_data;
7816 struct trace_array *tr = m->private;
7817 struct trace_pid_list *filtered_pids;
7818 struct trace_pid_list *other_pids;
7819 struct trace_pid_list *pid_list;
7820 ssize_t ret;
7821
7822 if (!cnt)
7823 return 0;
7824
7825 mutex_lock(&ftrace_lock);
7826
7827 switch (type) {
7828 case TRACE_PIDS:
7829 filtered_pids = rcu_dereference_protected(tr->function_pids,
7830 lockdep_is_held(&ftrace_lock));
7831 other_pids = rcu_dereference_protected(tr->function_no_pids,
7832 lockdep_is_held(&ftrace_lock));
7833 break;
7834 case TRACE_NO_PIDS:
7835 filtered_pids = rcu_dereference_protected(tr->function_no_pids,
7836 lockdep_is_held(&ftrace_lock));
7837 other_pids = rcu_dereference_protected(tr->function_pids,
7838 lockdep_is_held(&ftrace_lock));
7839 break;
7840 default:
7841 ret = -EINVAL;
7842 WARN_ON_ONCE(1);
7843 goto out;
7844 }
7845
7846 ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
7847 if (ret < 0)
7848 goto out;
7849
7850 switch (type) {
7851 case TRACE_PIDS:
7852 rcu_assign_pointer(tr->function_pids, pid_list);
7853 break;
7854 case TRACE_NO_PIDS:
7855 rcu_assign_pointer(tr->function_no_pids, pid_list);
7856 break;
7857 }
7858
7859
7860 if (filtered_pids) {
7861 synchronize_rcu();
7862 trace_pid_list_free(filtered_pids);
7863 } else if (pid_list && !other_pids) {
 /* Register a probe to set whether to ignore the tracing of a task */
7865 register_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
7866 }
7867
 /*
  * Ignoring of pids is done at task switch. But we have to
  * check for those tasks that are currently running.
  * Always do this in case a pid was appended or removed.
  */
7873 on_each_cpu(ignore_task_cpu, tr, 1);
7874
7875 ftrace_update_pid_func();
7876 ftrace_startup_all(0);
7877 out:
7878 mutex_unlock(&ftrace_lock);
7879
7880 if (ret > 0)
7881 *ppos += ret;
7882
7883 return ret;
7884}
7885
7886static ssize_t
7887ftrace_pid_write(struct file *filp, const char __user *ubuf,
7888 size_t cnt, loff_t *ppos)
7889{
7890 return pid_write(filp, ubuf, cnt, ppos, TRACE_PIDS);
7891}
7892
7893static ssize_t
7894ftrace_no_pid_write(struct file *filp, const char __user *ubuf,
7895 size_t cnt, loff_t *ppos)
7896{
7897 return pid_write(filp, ubuf, cnt, ppos, TRACE_NO_PIDS);
7898}
7899
7900static int
7901ftrace_pid_release(struct inode *inode, struct file *file)
7902{
7903 struct trace_array *tr = inode->i_private;
7904
7905 trace_array_put(tr);
7906
7907 return seq_release(inode, file);
7908}
7909
7910static const struct file_operations ftrace_pid_fops = {
7911 .open = ftrace_pid_open,
7912 .write = ftrace_pid_write,
7913 .read = seq_read,
7914 .llseek = tracing_lseek,
7915 .release = ftrace_pid_release,
7916};
7917
7918static const struct file_operations ftrace_no_pid_fops = {
7919 .open = ftrace_no_pid_open,
7920 .write = ftrace_no_pid_write,
7921 .read = seq_read,
7922 .llseek = tracing_lseek,
7923 .release = ftrace_pid_release,
7924};
7925
7926void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer)
7927{
7928 trace_create_file("set_ftrace_pid", TRACE_MODE_WRITE, d_tracer,
7929 tr, &ftrace_pid_fops);
7930 trace_create_file("set_ftrace_notrace_pid", TRACE_MODE_WRITE,
7931 d_tracer, tr, &ftrace_no_pid_fops);
7932}
7933
7934void __init ftrace_init_tracefs_toplevel(struct trace_array *tr,
7935 struct dentry *d_tracer)
7936{
7937
7938 WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
7939
7940 ftrace_init_dyn_tracefs(d_tracer);
7941 ftrace_profile_tracefs(d_tracer);
7942}
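
/*
 * Usage sketch for the two files created above (illustrative only;
 * paths assume tracefs is mounted at /sys/kernel/tracing):
 *
 *	# echo 1234 > /sys/kernel/tracing/set_ftrace_pid
 *		only the task with PID 1234 is traced
 *	# echo 1234 > /sys/kernel/tracing/set_ftrace_notrace_pid
 *		every task except PID 1234 is traced
 *	# echo > /sys/kernel/tracing/set_ftrace_pid
 *		clear the PID filter again
 */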

/**
 * ftrace_kill - totally shut down ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way: it simply flags ftrace as disabled
 * and points the trace function at the stub, without any of the
 * clean teardown that unregistering would do.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	ftrace_trace_function = ftrace_stub;
}

/**
 * ftrace_is_dead - Test if ftrace is dead or not.
 *
 * Returns 1 if ftrace is "dead", zero otherwise.
 */
int ftrace_is_dead(void)
{
	return ftrace_disabled;
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops:	ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	ftrace_ops_init(ops);

	mutex_lock(&ftrace_lock);

	ret = ftrace_startup(ops, 0);

	mutex_unlock(&ftrace_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(register_ftrace_function);

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops:	ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_lock);
	ret = ftrace_shutdown(ops, 0);
	mutex_unlock(&ftrace_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_function);
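
/*
 * Example pairing of the two calls above (a minimal sketch only;
 * my_trace_func and my_ops are hypothetical names, not part of this
 * file). The callback and everything it calls must be notrace:
 *
 *	static void notrace my_trace_func(unsigned long ip,
 *					  unsigned long parent_ip,
 *					  struct ftrace_ops *op,
 *					  struct ftrace_regs *fregs)
 *	{
 *		... (must not call any traceable function)
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func = my_trace_func,
 *	};
 *
 *	ret = register_ftrace_function(&my_ops);
 *	...
 *	ret = unregister_ftrace_function(&my_ops);
 */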

/* sort()/bsearch() comparator for an array of symbol name strings */
static int symbols_cmp(const void *a, const void *b)
{
	const char **str_a = (const char **) a;
	const char **str_b = (const char **) b;

	return strcmp(*str_a, *str_b);
}

struct kallsyms_data {
	unsigned long *addrs;
	const char **syms;
	size_t cnt;
	size_t found;
};

static int kallsyms_callback(void *data, const char *name,
			     struct module *mod, unsigned long addr)
{
	struct kallsyms_data *args = data;
	const char **sym;
	int idx;

	sym = bsearch(&name, args->syms, args->cnt, sizeof(*args->syms), symbols_cmp);
	if (!sym)
		return 0;

	idx = sym - args->syms;
	if (args->addrs[idx])
		return 0;

	addr = ftrace_location(addr);
	if (!addr)
		return 0;

	args->addrs[idx] = addr;
	args->found++;
	/* Returning non-zero stops the iteration once all symbols are found */
	return args->found == args->cnt ? 1 : 0;
}

/**
 * ftrace_lookup_symbols - Lookup addresses for array of symbols
 *
 * @sorted_syms: array of symbol name pointers to resolve,
 * must be alphabetically sorted
 * @cnt: number of symbols/addresses in @sorted_syms/@addrs arrays
 * @addrs: array for storing resulting addresses
 *
 * This function looks up addresses for the symbols provided in the
 * @sorted_syms array (which must be alphabetically sorted) and stores
 * them in the @addrs array, which needs to be big enough to store at
 * least @cnt addresses.
 *
 * This function returns 0 if all provided symbols are found,
 * -ESRCH otherwise.
 */
int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *addrs)
{
	struct kallsyms_data args;
	int err;

	memset(addrs, 0, sizeof(*addrs) * cnt);
	args.addrs = addrs;
	args.syms = sorted_syms;
	args.cnt = cnt;
	args.found = 0;
	err = kallsyms_on_each_symbol(kallsyms_callback, &args);
	if (err < 0)
		return err;
	return args.found == args.cnt ? 0 : -ESRCH;
}
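
/*
 * Example usage (a minimal sketch; the symbol names are arbitrary).
 * Note the input array must already be in strcmp() order:
 *
 *	static const char *syms[] = { "schedule", "vfs_read" };
 *	unsigned long addrs[ARRAY_SIZE(syms)];
 *	int err;
 *
 *	err = ftrace_lookup_symbols(syms, ARRAY_SIZE(syms), addrs);
 *	if (!err)
 *		... (addrs[i] now holds the ftrace location of syms[i])
 */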

#ifdef CONFIG_SYSCTL

#ifdef CONFIG_DYNAMIC_FTRACE
static void ftrace_startup_sysctl(void)
{
	int command;

	if (unlikely(ftrace_disabled))
		return;

	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up) {
		command = FTRACE_UPDATE_CALLS;
		if (ftrace_graph_active)
			command |= FTRACE_START_FUNC_RET;
		ftrace_startup_enable(command);
	}
}

static void ftrace_shutdown_sysctl(void)
{
	int command;

	if (unlikely(ftrace_disabled))
		return;

	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up) {
		command = FTRACE_DISABLE_CALLS;
		if (ftrace_graph_active)
			command |= FTRACE_STOP_FUNC_RET;
		ftrace_run_update_code(command);
	}
}
#else
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
#endif

static bool is_permanent_ops_registered(void)
{
	struct ftrace_ops *op;

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (op->flags & FTRACE_OPS_FL_PERMANENT)
			return true;
	} while_for_each_ftrace_op(op);

	return false;
}
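
/*
 * A permanent ops keeps ftrace_enabled from being cleared via the
 * sysctl below (the write fails with -EBUSY). Marking an ops permanent
 * is just a flag at registration time (an illustrative sketch;
 * my_permanent_ops and my_trace_func are hypothetical names):
 *
 *	static struct ftrace_ops my_permanent_ops = {
 *		.func	= my_trace_func,
 *		.flags	= FTRACE_OPS_FL_PERMANENT,
 *	};
 */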

static int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret = -ENODEV;

	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled))
		goto out;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
		goto out;

	if (ftrace_enabled) {

		/* we are starting ftrace again */
		if (rcu_dereference_protected(ftrace_ops_list,
			lockdep_is_held(&ftrace_lock)) != &ftrace_list_end)
			update_ftrace_function();

		ftrace_startup_sysctl();

	} else {
		if (is_permanent_ops_registered()) {
			ftrace_enabled = true;
			ret = -EBUSY;
			goto out;
		}

		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

	last_ftrace_enabled = !!ftrace_enabled;
 out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
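
/*
 * The handler above backs /proc/sys/kernel/ftrace_enabled. From user
 * space (illustrative):
 *
 *	# echo 0 > /proc/sys/kernel/ftrace_enabled
 *		(the write fails with EBUSY while a permanent ops
 *		 is registered)
 *	# echo 1 > /proc/sys/kernel/ftrace_enabled
 */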

static struct ctl_table ftrace_sysctls[] = {
	{
		.procname       = "ftrace_enabled",
		.data           = &ftrace_enabled,
		.maxlen         = sizeof(int),
		.mode           = 0644,
		.proc_handler   = ftrace_enable_sysctl,
	},
	{}
};

static int __init ftrace_sysctl_init(void)
{
	register_sysctl_init("kernel", ftrace_sysctls);
	return 0;
}
late_initcall(ftrace_sysctl_init);
#endif