// SPDX-License-Identifier: GPL-2.0
/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/sched/task.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/tracefs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/bsearch.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>
#include <linux/kprobes.h>

#include <trace/events/sched.h>

#include <asm/sections.h>
#include <asm/setup.h>

#include "trace_output.h"
#include "trace_stat.h"
#define FTRACE_WARN_ON(cond)			\
	({					\
		int ___r = cond;		\
		if (WARN_ON(___r))		\
			ftrace_kill();		\
		___r;				\
	})

#define FTRACE_WARN_ON_ONCE(cond)		\
	({					\
		int ___r = cond;		\
		if (WARN_ON_ONCE(___r))		\
			ftrace_kill();		\
		___r;				\
	})

#define FTRACE_HASH_BITS 7
#define FTRACE_FUNC_HASHSIZE	(1 << FTRACE_HASH_BITS)
#define FTRACE_HASH_DEFAULT_BITS 10
#define FTRACE_HASH_MAX_BITS 12

#ifdef CONFIG_DYNAMIC_FTRACE
#define INIT_OPS_HASH(opsname)	\
	.func_hash		= &opsname.local_hash,			\
	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
#define ASSIGN_OPS_HASH(opsname, val) \
	.func_hash		= val, \
	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
#else
#define INIT_OPS_HASH(opsname)
#define ASSIGN_OPS_HASH(opsname, val)
#endif

static struct ftrace_ops ftrace_list_end __read_mostly = {
	.func		= ftrace_stub,
	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
	INIT_OPS_HASH(ftrace_list_end)
};

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Current function tracing op */
struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
/* What to set function_trace_op to */
static struct ftrace_ops *set_function_trace_op;

static bool ftrace_pids_enabled(struct ftrace_ops *ops)
{
	struct trace_array *tr;

	if (!(ops->flags & FTRACE_OPS_FL_PID) || !ops->private)
		return false;

	tr = ops->private;

	return tr->function_pids != NULL;
}

static void ftrace_update_trampoline(struct ftrace_ops *ops);

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_MUTEX(ftrace_lock);

static struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
static struct ftrace_ops global_ops;

#if ARCH_SUPPORTS_FTRACE_OPS
static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
				 struct ftrace_ops *op, struct pt_regs *regs);
#else
static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
#endif
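
/*
 * Descriptive note (added): traverse ftrace_ops_list, invoking all entries.
 * The walk is lockless: it relies on RCU, on new ops always being added at
 * the head of the list, and on the list always being terminated by
 * ftrace_list_end (which is never freed), so a reader can never fall off
 * the end even while the list is concurrently modified.
 */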
#define do_for_each_ftrace_op(op, list)			\
	op = rcu_dereference_raw_notrace(list);			\
	do

#define while_for_each_ftrace_op(op)				\
	while (likely(op = rcu_dereference_raw_notrace((op)->next)) &&	\
	       unlikely((op) != &ftrace_list_end))

static inline void ftrace_ops_init(struct ftrace_ops *ops)
{
#ifdef CONFIG_DYNAMIC_FTRACE
	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
		mutex_init(&ops->local_hash.regex_lock);
		ops->func_hash = &ops->local_hash;
		ops->flags |= FTRACE_OPS_FL_INITIALIZED;
	}
#endif
}
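
/**
 * ftrace_nr_registered_ops - return the number of registered ftrace_ops
 *
 * Walks ftrace_ops_list under ftrace_lock and counts the registered
 * entries; the ftrace_list_end terminator is not counted.
 */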
int ftrace_nr_registered_ops(void)
{
	struct ftrace_ops *ops;
	int cnt = 0;

	mutex_lock(&ftrace_lock);

	for (ops = rcu_dereference_protected(ftrace_ops_list,
					     lockdep_is_held(&ftrace_lock));
	     ops != &ftrace_list_end;
	     ops = rcu_dereference_protected(ops->next,
					     lockdep_is_held(&ftrace_lock)))
		cnt++;

	mutex_unlock(&ftrace_lock);

	return cnt;
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
			    struct ftrace_ops *op, struct pt_regs *regs)
{
	struct trace_array *tr = op->private;

	if (tr && this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid))
		return;

	op->saved_func(ip, parent_ip, op, regs);
}

static void ftrace_sync_ipi(void *data)
{
	/* Probably not needed, but do it anyway */
	smp_rmb();
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static void update_function_graph_func(void);

/* Both enabled by default (can be cleared by function_graph tracer flags */
static bool fgraph_sleep_time = true;
static bool fgraph_graph_time = true;

#else
static inline void update_function_graph_func(void) { }
#endif

static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
{
	/*
	 * If this is a dynamic or RCU ops, or we force list func,
	 * then it needs to call the list anyway.
	 */
	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_RCU) ||
	    FTRACE_FORCE_LIST_FUNC)
		return ftrace_ops_list_func;

	return ftrace_ops_get_func(ops);
}
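
/*
 * Descriptive note (added): pick the function the mcount/fentry call site
 * should invoke, based on how many callbacks are registered: ftrace_stub
 * when there are none, the single ops' function (possibly via its list-func
 * wrapper) when there is exactly one, and the list function that iterates
 * every ops when there is more than one.
 */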
static void update_ftrace_function(void)
{
	ftrace_func_t func;

	/*
	 * Prepare the ftrace_ops that the arch callback will use.
	 * If there's only one ftrace_ops registered, the ftrace_ops_list
	 * will point to the ops we want.
	 */
	set_function_trace_op = rcu_dereference_protected(ftrace_ops_list,
						lockdep_is_held(&ftrace_lock));

	/* If there's no ftrace_ops registered, just call the stub function */
	if (set_function_trace_op == &ftrace_list_end) {
		func = ftrace_stub;

	/*
	 * If we are at the end of the list and this ops is
	 * recursion safe and not dynamic and the arch supports passing ops,
	 * then have the mcount trampoline call the function directly.
	 */
	} else if (rcu_dereference_protected(ftrace_ops_list->next,
			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
		func = ftrace_ops_get_list_func(ftrace_ops_list);

	} else {
		/* Just use the default ftrace_ops */
		set_function_trace_op = &ftrace_list_end;
		func = ftrace_ops_list_func;
	}

	update_function_graph_func();

	/* If there's no change, then do nothing more */
	if (ftrace_trace_function == func)
		return;

	/*
	 * If we are using the list function, it doesn't care
	 * about the function_trace_ops.
	 */
	if (func == ftrace_ops_list_func) {
		ftrace_trace_function = func;
		/*
		 * Don't even bother setting function_trace_ops,
		 * it would be racy to do so anyway.
		 */
		return;
	}

#ifndef CONFIG_DYNAMIC_FTRACE
	/*
	 * For static tracing, we need to be a bit more careful.
	 * The function change takes effect immediately. Thus,
	 * we need to coordinate the setting of the function_trace_ops
	 * with the setting of the ftrace_trace_function.
	 *
	 * Set the function to the list ops, which will call the
	 * function we want, albeit indirectly, but it handles the
	 * ftrace_ops and doesn't depend on function_trace_op.
	 */
	ftrace_trace_function = ftrace_ops_list_func;
	/*
	 * Make sure all CPUs see this. Yes this is slow, but static
	 * tracing is slow and nasty to have enabled.
	 */
	synchronize_rcu_tasks_rude();
	/* Now all cpus are using the list ops. */
	function_trace_op = set_function_trace_op;
	/* Make sure the function_trace_op is visible on all CPUs */
	smp_wmb();
	/* Nasty way to force a rmb on all cpus */
	smp_call_function(ftrace_sync_ipi, NULL, 1);
	/* OK, we are all set to update the ftrace_trace_function now! */
#endif /* !CONFIG_DYNAMIC_FTRACE */

	ftrace_trace_function = func;
}

int using_ftrace_ops_list_func(void)
{
	return ftrace_trace_function == ftrace_ops_list_func;
}

static void add_ftrace_ops(struct ftrace_ops __rcu **list,
			   struct ftrace_ops *ops)
{
	rcu_assign_pointer(ops->next, *list);

	/*
	 * We are entering ops into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the list.
	 */
	rcu_assign_pointer(*list, ops);
}

static int remove_ftrace_ops(struct ftrace_ops __rcu **list,
			     struct ftrace_ops *ops)
{
	struct ftrace_ops **p;

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (rcu_dereference_protected(*list,
			lockdep_is_held(&ftrace_lock)) == ops &&
	    rcu_dereference_protected(ops->next,
			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
		*list = &ftrace_list_end;
		return 0;
	}

	for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops)
		return -1;

	*p = (*p)->next;
	return 0;
}

static void ftrace_update_trampoline(struct ftrace_ops *ops);

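/*
 * Descriptive note (added): register @ops on ftrace_ops_list. The ops'
 * original callback is saved in ->saved_func so it can be wrapped by
 * ftrace_pid_func() while PID filtering is active, and restored when the
 * ops is unregistered.
 */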
static int __register_ftrace_function(struct ftrace_ops *ops)
{
	if (ops->flags & FTRACE_OPS_FL_DELETED)
		return -EINVAL;

	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
		return -EBUSY;

#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
	 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
	 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
	 */
	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
	    !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
		return -EINVAL;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
		ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
#endif
	if (!ftrace_enabled && (ops->flags & FTRACE_OPS_FL_PERMANENT))
		return -EBUSY;

	if (!core_kernel_data((unsigned long)ops))
		ops->flags |= FTRACE_OPS_FL_DYNAMIC;

	add_ftrace_ops(&ftrace_ops_list, ops);

	/* Always save the function, and reset it when unregistering */
	ops->saved_func = ops->func;

	if (ftrace_pids_enabled(ops))
		ops->func = ftrace_pid_func;

	ftrace_update_trampoline(ops);

	if (ftrace_enabled)
		update_ftrace_function();

	return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
		return -EBUSY;

	ret = remove_ftrace_ops(&ftrace_ops_list, ops);

	if (ret < 0)
		return ret;

	if (ftrace_enabled)
		update_ftrace_function();

	ops->func = ops->saved_func;

	return 0;
}

static void ftrace_update_pid_func(void)
{
	struct ftrace_ops *op;

	/* Only do something if we are tracing something */
	if (ftrace_trace_function == ftrace_stub)
		return;

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (op->flags & FTRACE_OPS_FL_PID) {
			op->func = ftrace_pids_enabled(op) ?
				ftrace_pid_func : op->saved_func;
			ftrace_update_trampoline(op);
		}
	} while_for_each_ftrace_op(op);

	update_ftrace_function();
}

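/*
 * Descriptive note (added): the function profiler counts, per CPU, how
 * often each traced function is hit and (with the graph tracer) how long
 * it runs. Records live in a hash over preallocated pages so they can be
 * allocated up front and reset cheaply.
 */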
#ifdef CONFIG_FUNCTION_PROFILER
struct ftrace_profile {
	struct hlist_node		node;
	unsigned long			ip;
	unsigned long			counter;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	unsigned long long		time;
	unsigned long long		time_squared;
#endif
};

struct ftrace_profile_page {
	struct ftrace_profile_page	*next;
	unsigned long			index;
	struct ftrace_profile		records[];
};

struct ftrace_profile_stat {
	atomic_t			disabled;
	struct hlist_head		*hash;
	struct ftrace_profile_page	*pages;
	struct ftrace_profile_page	*start;
	struct tracer_stat		stat;
};

#define PROFILE_RECORDS_SIZE						\
	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

#define PROFILES_PER_PAGE					\
	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))

static int ftrace_profile_enabled __read_mostly;

/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);

#define FTRACE_PROFILE_HASH_BITS 10
#define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)

static void *
function_stat_next(void *v, int idx)
{
	struct ftrace_profile *rec = v;
	struct ftrace_profile_page *pg;

	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

 again:
	if (idx != 0)
		rec++;

	if ((void *)rec >= (void *)&pg->records[pg->index]) {
		pg = pg->next;
		if (!pg)
			return NULL;
		rec = &pg->records[0];
		if (!rec->counter)
			goto again;
	}

	return rec;
}

static void *function_stat_start(struct tracer_stat *trace)
{
	struct ftrace_profile_stat *stat =
		container_of(trace, struct ftrace_profile_stat, stat);

	if (!stat || !stat->start)
		return NULL;

	return function_stat_next(&stat->start->records[0], 0);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* function graph compares on total time */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->time < b->time)
		return -1;
	if (a->time > b->time)
		return 1;
	else
		return 0;
}
#else
/* not function graph compares against hits */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->counter < b->counter)
		return -1;
	if (a->counter > b->counter)
		return 1;
	else
		return 0;
}
#endif

static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_puts(m, "  Function                               "
		    "Hit    Time            Avg             s^2\n"
		    "  --------                               "
		    "---    ----            ---             ---\n");
#else
	seq_puts(m, "  Function                               Hit\n"
		    "  --------                               ---\n");
#endif
	return 0;
}

static int function_stat_show(struct seq_file *m, void *v)
{
	struct ftrace_profile *rec = v;
	char str[KSYM_SYMBOL_LEN];
	int ret = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	static struct trace_seq s;
	unsigned long long avg;
	unsigned long long stddev;
#endif
	mutex_lock(&ftrace_profile_lock);

	/* we raced with function_profile_reset() */
	if (unlikely(rec->counter == 0)) {
		ret = -EBUSY;
		goto out;
	}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	avg = rec->time;
	do_div(avg, rec->counter);
	if (tracing_thresh && (avg < tracing_thresh))
		goto out;
#endif

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_puts(m, "    ");

	/* Sample standard deviation (s^2) */
	if (rec->counter <= 1)
		stddev = 0;
	else {
		/*
		 * Apply Welford's method:
		 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
		 */
		stddev = rec->counter * rec->time_squared -
			 rec->time * rec->time;

		/*
		 * Divide only by 1000 for ns^2 -> us^2 conversion.
		 * trace_print_graph_duration will divide by 1000 again.
		 */
		do_div(stddev, rec->counter * (rec->counter - 1) * 1000);
	}

	trace_seq_init(&s);
	trace_print_graph_duration(rec->time, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(avg, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(stddev, &s);
	trace_print_seq(m, &s);
#endif
	seq_putc(m, '\n');
out:
	mutex_unlock(&ftrace_profile_lock);

	return ret;
}

static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;

	pg = stat->pages = stat->start;

	while (pg) {
		memset(pg->records, 0, PROFILE_RECORDS_SIZE);
		pg->index = 0;
		pg = pg->next;
	}

	memset(stat->hash, 0,
	       FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
}

int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;
	int functions;
	int pages;
	int i;

	/* If we already allocated, do nothing */
	if (stat->pages)
		return 0;

	stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
	if (!stat->pages)
		return -ENOMEM;

#ifdef CONFIG_DYNAMIC_FTRACE
	functions = ftrace_update_tot_cnt;
#else
	/*
	 * We do not know the number of functions that are being
	 * traced, so a reasonable guess is used. If the guess is too
	 * small, the profiler simply stops recording new functions
	 * once the preallocated pages run out.
	 */
	functions = 20000;
#endif

	pg = stat->start = stat->pages;

	pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);

	for (i = 1; i < pages; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
		if (!pg->next)
			goto out_free;
		pg = pg->next;
	}

	return 0;

 out_free:
	pg = stat->start;
	while (pg) {
		unsigned long tmp = (unsigned long)pg;

		pg = pg->next;
		free_page(tmp);
	}

	stat->pages = NULL;
	stat->start = NULL;

	return -ENOMEM;
}

static int ftrace_profile_init_cpu(int cpu)
{
	struct ftrace_profile_stat *stat;
	int size;

	stat = &per_cpu(ftrace_profile_stats, cpu);

	if (stat->hash) {
		/* If the profile is already created, simply reset it */
		ftrace_profile_reset(stat);
		return 0;
	}

	/*
	 * We are profiling all functions, but usually only a few thousand
	 * functions are hit during boot up.
	 */
	size = FTRACE_PROFILE_HASH_SIZE;

	stat->hash = kcalloc(size, sizeof(struct hlist_head), GFP_KERNEL);

	if (!stat->hash)
		return -ENOMEM;

	/* Preallocate the function profiling pages */
	if (ftrace_profile_pages_init(stat) < 0) {
		kfree(stat->hash);
		stat->hash = NULL;
		return -ENOMEM;
	}

	return 0;
}

static int ftrace_profile_init(void)
{
	int cpu;
	int ret = 0;

	for_each_possible_cpu(cpu) {
		ret = ftrace_profile_init_cpu(cpu);
		if (ret)
			break;
	}

	return ret;
}

/* interrupts must be disabled */
static struct ftrace_profile *
ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec;
	struct hlist_head *hhd;
	unsigned long key;

	key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
	hhd = &stat->hash[key];

	if (hlist_empty(hhd))
		return NULL;

	hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
		if (rec->ip == ip)
			return rec;
	}

	return NULL;
}

static void ftrace_add_profile(struct ftrace_profile_stat *stat,
			       struct ftrace_profile *rec)
{
	unsigned long key;

	key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
	hlist_add_head_rcu(&rec->node, &stat->hash[key]);
}

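/*
 * Descriptive note (added): the memory is already allocated, this simply
 * finds a new record to use. The per-stat "disabled" counter acts as a
 * recursion guard: if the allocation path is re-entered (e.g. from NMI),
 * the nested call bails out instead of corrupting the page state.
 */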
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec = NULL;

	/* prevent recursion (from NMIs) */
	if (atomic_inc_return(&stat->disabled) != 1)
		goto out;

	/*
	 * Try to find the function again since an NMI
	 * could have added it
	 */
	rec = ftrace_find_profiled_func(stat, ip);
	if (rec)
		goto out;

	if (stat->pages->index == PROFILES_PER_PAGE) {
		if (!stat->pages->next)
			goto out;
		stat->pages = stat->pages->next;
	}

	rec = &stat->pages->records[stat->pages->index++];
	rec->ip = ip;
	ftrace_add_profile(stat, rec);

 out:
	atomic_dec(&stat->disabled);

	return rec;
}

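/*
 * Descriptive note (added): callback hooked into every traced function.
 * Looks up (or lazily creates) the profile record for @ip and bumps its
 * hit counter. Runs with IRQs disabled so the per-CPU stat structure
 * cannot be corrupted.
 */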
static void
function_profile_call(unsigned long ip, unsigned long parent_ip,
		      struct ftrace_ops *ops, struct pt_regs *regs)
{
	struct ftrace_profile_stat *stat;
	struct ftrace_profile *rec;
	unsigned long flags;

	if (!ftrace_profile_enabled)
		return;

	local_irq_save(flags);

	stat = this_cpu_ptr(&ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	rec = ftrace_find_profiled_func(stat, ip);
	if (!rec) {
		rec = ftrace_profile_alloc(stat, ip);
		if (!rec)
			goto out;
	}

	rec->counter++;
 out:
	local_irq_restore(flags);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
	int index = trace->depth;

	function_profile_call(trace->func, 0, NULL, NULL);

	/* If function graph is shutting down, ret_stack can be NULL */
	if (!current->ret_stack)
		return 0;

	if (index >= 0 && index < FTRACE_RETFUNC_DEPTH)
		current->ret_stack[index].subtime = 0;

	return 1;
}

static void profile_graph_return(struct ftrace_graph_ret *trace)
{
	struct ftrace_profile_stat *stat;
	unsigned long long calltime;
	struct ftrace_profile *rec;
	unsigned long flags;

	local_irq_save(flags);
	stat = this_cpu_ptr(&ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	/* If the calltime was zero'd ignore it */
	if (!trace->calltime)
		goto out;

	calltime = trace->rettime - trace->calltime;

	if (!fgraph_graph_time) {
		int index;

		index = trace->depth;

		/* Append this call time to the parent time to subtract */
		if (index)
			current->ret_stack[index - 1].subtime += calltime;

		if (current->ret_stack[index].subtime < calltime)
			calltime -= current->ret_stack[index].subtime;
		else
			calltime = 0;
	}

	rec = ftrace_find_profiled_func(stat, trace->func);
	if (rec) {
		rec->time += calltime;
		rec->time_squared += calltime * calltime;
	}

 out:
	local_irq_restore(flags);
}

static int register_ftrace_profiler(void)
{
	return register_ftrace_graph(&profile_graph_return,
				     &profile_graph_entry);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_graph();
}
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly = {
	.func		= function_profile_call,
	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
	INIT_OPS_HASH(ftrace_profile_ops)
};

static int register_ftrace_profiler(void)
{
	return register_ftrace_function(&ftrace_profile_ops);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_function(&ftrace_profile_ops);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	val = !!val;

	mutex_lock(&ftrace_profile_lock);
	if (ftrace_profile_enabled ^ val) {
		if (val) {
			ret = ftrace_profile_init();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}

			ret = register_ftrace_profiler();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}
			ftrace_profile_enabled = 1;
		} else {
			ftrace_profile_enabled = 0;
			/*
			 * unregister_ftrace_profiler calls stop_machine
			 * so this acts like a synchronize_rcu.
			 */
			unregister_ftrace_profiler();
		}
	}
 out:
	mutex_unlock(&ftrace_profile_lock);

	*ppos += cnt;

	return cnt;
}

static ssize_t
ftrace_profile_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	char buf[64];		/* big enough to hold a number */
	int r;

	r = sprintf(buf, "%u\n", ftrace_profile_enabled);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations ftrace_profile_fops = {
	.open		= tracing_open_generic,
	.read		= ftrace_profile_read,
	.write		= ftrace_profile_write,
	.llseek		= default_llseek,
};

/* used to initialize the real stat files */
static struct tracer_stat function_stats __initdata = {
	.name		= "functions",
	.stat_start	= function_stat_start,
	.stat_next	= function_stat_next,
	.stat_cmp	= function_stat_cmp,
	.stat_headers	= function_stat_headers,
	.stat_show	= function_stat_show
};

static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
	struct ftrace_profile_stat *stat;
	struct dentry *entry;
	char *name;
	int ret;
	int cpu;

	for_each_possible_cpu(cpu) {
		stat = &per_cpu(ftrace_profile_stats, cpu);

		name = kasprintf(GFP_KERNEL, "function%d", cpu);
		if (!name) {
			/*
			 * The files created are permanent, if something happens
			 * we still do not free memory.
			 */
			WARN(1,
			     "Could not allocate stat file for cpu %d\n",
			     cpu);
			return;
		}
		stat->stat = function_stats;
		stat->stat.name = name;
		ret = register_stat_tracer(&stat->stat);
		if (ret) {
			WARN(1,
			     "Could not register function stat for cpu %d\n",
			     cpu);
			kfree(name);
			return;
		}
	}

	entry = tracefs_create_file("function_profile_enabled", 0644,
				    d_tracer, NULL, &ftrace_profile_fops);
	if (!entry)
		pr_warn("Could not create tracefs 'function_profile_enabled' entry\n");
}

#else /* CONFIG_FUNCTION_PROFILER */
static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
}
#endif /* CONFIG_FUNCTION_PROFILER */

static struct pid * const ftrace_swapper_pid = &init_struct_pid;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int ftrace_graph_active;
#else
# define ftrace_graph_active 0
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

static struct ftrace_ops *removed_ops;

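/*
 * Set when doing a global update, like enabling all recs or disabling them.
 * It is not set when just updating a single ftrace_ops.
 */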
static bool update_all_ops;

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

struct ftrace_func_probe {
	struct ftrace_probe_ops	*probe_ops;
	struct ftrace_ops	ops;
	struct trace_array	*tr;
	struct list_head	list;
	void			*data;
	int			ref;
};

/*
 * We make these constant because no one should touch them,
 * but they are used as the default "empty hash", to avoid allocating
 * it all the time. These are in a read only section such that if
 * anyone does try to modify it, it will cause an exception.
 */
static const struct hlist_head empty_buckets[1];
static const struct ftrace_hash empty_hash = {
	.buckets = (struct hlist_head *)empty_buckets,
};
#define EMPTY_HASH	((struct ftrace_hash *)&empty_hash)

static struct ftrace_ops global_ops = {
	.func				= ftrace_stub,
	.local_hash.notrace_hash	= EMPTY_HASH,
	.local_hash.filter_hash		= EMPTY_HASH,
	INIT_OPS_HASH(global_ops)
	.flags				= FTRACE_OPS_FL_RECURSION_SAFE |
					  FTRACE_OPS_FL_INITIALIZED |
					  FTRACE_OPS_FL_PID,
};

/*
 * Used by the stack unwinder to know about dynamic ftrace trampolines.
 */
struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr)
{
	struct ftrace_ops *op = NULL;

	/*
	 * Some of the ops may be dynamically allocated,
	 * they are freed after a synchronize_rcu().
	 */
	preempt_disable_notrace();

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/*
		 * This is to check for dynamically allocated trampolines.
		 * Trampolines that are in kernel text will have
		 * core_kernel_text() return true.
		 */
		if (op->trampoline && op->trampoline_size)
			if (addr >= op->trampoline &&
			    addr < op->trampoline + op->trampoline_size) {
				preempt_enable_notrace();
				return op;
			}
	} while_for_each_ftrace_op(op);
	preempt_enable_notrace();

	return NULL;
}

/*
 * This is used by __kernel_text_address() to return true if the
 * address is on a dynamically allocated trampoline that would
 * not return true for either core_kernel_text() or
 * is_module_text_address().
 */
bool is_ftrace_trampoline(unsigned long addr)
{
	return ftrace_ops_trampoline(addr) != NULL;
}

struct ftrace_page {
	struct ftrace_page	*next;
	struct dyn_ftrace	*records;
	int			index;
	int			size;
};

#define ENTRY_SIZE sizeof(struct dyn_ftrace)
#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)

/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static __always_inline unsigned long
ftrace_hash_key(struct ftrace_hash *hash, unsigned long ip)
{
	if (hash->size_bits > 0)
		return hash_long(ip, hash->size_bits);

	return 0;
}

/* Only use this function if ftrace_hash_empty() has already been tested */
static __always_inline struct ftrace_func_entry *
__ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
	unsigned long key;
	struct ftrace_func_entry *entry;
	struct hlist_head *hhd;

	key = ftrace_hash_key(hash, ip);
	hhd = &hash->buckets[key];

	hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
		if (entry->ip == ip)
			return entry;
	}
	return NULL;
}

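/**
 * ftrace_lookup_ip - Test to see if an ip exists in an ftrace_hash
 * @hash: The hash to look at
 * @ip: The instruction pointer to test
 *
 * Search a given @hash to see if a given instruction pointer (@ip)
 * exists in it.
 *
 * Returns the entry that holds the @ip if found. NULL otherwise.
 */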
struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
	if (ftrace_hash_empty(hash))
		return NULL;

	return __ftrace_lookup_ip(hash, ip);
}

static void __add_hash_entry(struct ftrace_hash *hash,
			     struct ftrace_func_entry *entry)
{
	struct hlist_head *hhd;
	unsigned long key;

	key = ftrace_hash_key(hash, entry->ip);
	hhd = &hash->buckets[key];
	hlist_add_head(&entry->hlist, hhd);
	hash->count++;
}

static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
{
	struct ftrace_func_entry *entry;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->ip = ip;
	__add_hash_entry(hash, entry);

	return 0;
}

static void
free_hash_entry(struct ftrace_hash *hash,
		struct ftrace_func_entry *entry)
{
	hlist_del(&entry->hlist);
	kfree(entry);
	hash->count--;
}

static void
remove_hash_entry(struct ftrace_hash *hash,
		  struct ftrace_func_entry *entry)
{
	hlist_del_rcu(&entry->hlist);
	hash->count--;
}

static void ftrace_hash_clear(struct ftrace_hash *hash)
{
	struct hlist_head *hhd;
	struct hlist_node *tn;
	struct ftrace_func_entry *entry;
	int size = 1 << hash->size_bits;
	int i;

	if (!hash->count)
		return;

	for (i = 0; i < size; i++) {
		hhd = &hash->buckets[i];
		hlist_for_each_entry_safe(entry, tn, hhd, hlist)
			free_hash_entry(hash, entry);
	}
	FTRACE_WARN_ON(hash->count);
}

static void free_ftrace_mod(struct ftrace_mod_load *ftrace_mod)
{
	list_del(&ftrace_mod->list);
	kfree(ftrace_mod->module);
	kfree(ftrace_mod->func);
	kfree(ftrace_mod);
}

static void clear_ftrace_mod_list(struct list_head *head)
{
	struct ftrace_mod_load *p, *n;

	/* stack tracer isn't supported yet */
	if (!head)
		return;

	mutex_lock(&ftrace_lock);
	list_for_each_entry_safe(p, n, head, list)
		free_ftrace_mod(p);
	mutex_unlock(&ftrace_lock);
}

static void free_ftrace_hash(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	ftrace_hash_clear(hash);
	kfree(hash->buckets);
	kfree(hash);
}

static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
{
	struct ftrace_hash *hash;

	hash = container_of(rcu, struct ftrace_hash, rcu);
	free_ftrace_hash(hash);
}

static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	call_rcu(&hash->rcu, __free_ftrace_hash_rcu);
}

void ftrace_free_filter(struct ftrace_ops *ops)
{
	ftrace_ops_init(ops);
	free_ftrace_hash(ops->func_hash->filter_hash);
	free_ftrace_hash(ops->func_hash->notrace_hash);
}

static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
{
	struct ftrace_hash *hash;
	int size;

	hash = kzalloc(sizeof(*hash), GFP_KERNEL);
	if (!hash)
		return NULL;

	size = 1 << size_bits;
	hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);

	if (!hash->buckets) {
		kfree(hash);
		return NULL;
	}

	hash->size_bits = size_bits;

	return hash;
}

/* Used to save filters on functions for modules not loaded yet */
static int ftrace_add_mod(struct trace_array *tr,
			  const char *func, const char *module,
			  int enable)
{
	struct ftrace_mod_load *ftrace_mod;
	struct list_head *mod_head = enable ? &tr->mod_trace : &tr->mod_notrace;

	ftrace_mod = kzalloc(sizeof(*ftrace_mod), GFP_KERNEL);
	if (!ftrace_mod)
		return -ENOMEM;

	ftrace_mod->func = kstrdup(func, GFP_KERNEL);
	ftrace_mod->module = kstrdup(module, GFP_KERNEL);
	ftrace_mod->enable = enable;

	if (!ftrace_mod->func || !ftrace_mod->module)
		goto out_free;

	list_add(&ftrace_mod->list, mod_head);

	return 0;

 out_free:
	free_ftrace_mod(ftrace_mod);

	return -ENOMEM;
}

static struct ftrace_hash *
alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
{
	struct ftrace_func_entry *entry;
	struct ftrace_hash *new_hash;
	int size;
	int ret;
	int i;

	new_hash = alloc_ftrace_hash(size_bits);
	if (!new_hash)
		return NULL;

	if (hash)
		new_hash->flags = hash->flags;

	/* Empty hash? */
	if (ftrace_hash_empty(hash))
		return new_hash;

	size = 1 << hash->size_bits;
	for (i = 0; i < size; i++) {
		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
			ret = add_hash_entry(new_hash, entry->ip);
			if (ret < 0)
				goto free_hash;
		}
	}

	FTRACE_WARN_ON(new_hash->count != hash->count);

	return new_hash;

 free_hash:
	free_ftrace_hash(new_hash);
	return NULL;
}

static void
ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash);
static void
ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);

static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
				       struct ftrace_hash *new_hash);

static struct ftrace_hash *dup_hash(struct ftrace_hash *src, int size)
{
	struct ftrace_func_entry *entry;
	struct ftrace_hash *new_hash;
	struct hlist_head *hhd;
	struct hlist_node *tn;
	int bits = 0;
	int i;

	/*
	 * Make the hash size about 1/2 the # found
	 */
	for (size /= 2; size; size >>= 1)
		bits++;

	/* Don't allocate too much */
	if (bits > FTRACE_HASH_MAX_BITS)
		bits = FTRACE_HASH_MAX_BITS;

	new_hash = alloc_ftrace_hash(bits);
	if (!new_hash)
		return NULL;

	new_hash->flags = src->flags;

	size = 1 << src->size_bits;
	for (i = 0; i < size; i++) {
		hhd = &src->buckets[i];
		hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
			remove_hash_entry(src, entry);
			__add_hash_entry(new_hash, entry);
		}
	}
	return new_hash;
}

static struct ftrace_hash *
__ftrace_hash_move(struct ftrace_hash *src)
{
	int size = src->count;

	/*
	 * If the new source is empty, just return the empty_hash.
	 */
	if (ftrace_hash_empty(src))
		return EMPTY_HASH;

	return dup_hash(src, size);
}

static int
ftrace_hash_move(struct ftrace_ops *ops, int enable,
		 struct ftrace_hash **dst, struct ftrace_hash *src)
{
	struct ftrace_hash *new_hash;
	int ret;

	/* Reject setting notrace hash on IPMODIFY ftrace_ops */
	if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable)
		return -EINVAL;

	new_hash = __ftrace_hash_move(src);
	if (!new_hash)
		return -ENOMEM;

	/* Make sure this can be applied if it is IPMODIFY ftrace_ops */
	if (enable) {
		/* IPMODIFY should be updated only when filter_hash updating */
		ret = ftrace_hash_ipmodify_update(ops, new_hash);
		if (ret < 0) {
			free_ftrace_hash(new_hash);
			return ret;
		}
	}

	/*
	 * Remove the current set, update the hash and add
	 * them back.
	 */
	ftrace_hash_rec_disable_modify(ops, enable);

	rcu_assign_pointer(*dst, new_hash);

	ftrace_hash_rec_enable_modify(ops, enable);

	return 0;
}

static bool hash_contains_ip(unsigned long ip,
			     struct ftrace_ops_hash *hash)
{
	/*
	 * The function record is a match if it exists in the filter
	 * hash (or the filter hash is empty, which means all functions
	 * match), and it is not in the notrace hash.
	 */
	return (ftrace_hash_empty(hash->filter_hash) ||
		__ftrace_lookup_ip(hash->filter_hash, ip)) &&
		(ftrace_hash_empty(hash->notrace_hash) ||
		 !__ftrace_lookup_ip(hash->notrace_hash, ip));
}

/*
 * Test the hashes for this ops to see if we want to call
 * the ops->func or not.
 *
 * It's a match if the ip is in the ops->filter_hash or
 * the filter_hash does not exist or is empty,
 *  AND
 * the ip is not in the ops->notrace_hash.
 *
 * This needs to be called with preemption disabled as
 * the hashes are freed with call_rcu().
 */
static int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
{
	struct ftrace_ops_hash hash;
	int ret;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * There's a small race when adding ops that the ftrace handler
	 * that wants regs may be called without them. We can not
	 * allow that handler to be called if regs is NULL.
	 */
	if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
		return 0;
#endif

	rcu_assign_pointer(hash.filter_hash, ops->func_hash->filter_hash);
	rcu_assign_pointer(hash.notrace_hash, ops->func_hash->notrace_hash);

	if (hash_contains_ip(ip, &hash))
		ret = 1;
	else
		ret = 0;

	return ret;
}

/*
 * This is a double for. Do not use 'break' to break out of the loop,
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)					\
	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
		int _____i;						\
		for (_____i = 0; _____i < pg->index; _____i++) {	\
			rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()		\
		}				\
	}

static int ftrace_cmp_recs(const void *a, const void *b)
{
	const struct dyn_ftrace *key = a;
	const struct dyn_ftrace *rec = b;

	/* The search key overloads ->flags to carry the end of the range */
	if (key->flags < rec->ip)
		return -1;
	if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
		return 1;
	return 0;
}

static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec = NULL;
	struct dyn_ftrace key;

	key.ip = start;
	key.flags = end;	/* overload flags, as it is unsigned long */

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		if (end < pg->records[0].ip ||
		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
			continue;
		rec = bsearch(&key, pg->records, pg->index,
			      sizeof(struct dyn_ftrace),
			      ftrace_cmp_recs);
		if (rec)
			break;
	}
	return rec;
}

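/**
 * ftrace_location_range - return the first address of a traced location
 *	if it touches the given ip range
 * @start: start of range to search.
 * @end: end of range to search (inclusive). @end points to the last byte
 *	to check.
 *
 * Returns rec->ip if the related ftrace location is at least partly within
 * the given address range. That is, the first address of the instruction
 * that is either a NOP or call to the function tracer. It checks the ftrace
 * internal tables to determine if the address belongs or not.
 */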
unsigned long ftrace_location_range(unsigned long start, unsigned long end)
{
	struct dyn_ftrace *rec;

	rec = lookup_rec(start, end);
	if (rec)
		return rec->ip;

	return 0;
}

/**
 * ftrace_location - return true if the ip giving is a traced location
 * @ip: the instruction pointer to check
 *
 * Returns rec->ip if @ip given is a pointer to a ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
unsigned long ftrace_location(unsigned long ip)
{
	return ftrace_location_range(ip, ip);
}

/**
 * ftrace_text_reserved - return true if range contains an ftrace location
 * @start: start of range to search
 * @end: end of range to search (inclusive). @end points to the last byte to check.
 *
 * Returns 1 if @start and @end contains a ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
int ftrace_text_reserved(const void *start, const void *end)
{
	unsigned long ret;

	ret = ftrace_location_range((unsigned long)start,
				    (unsigned long)end);

	return (int)!!ret;
}

/* Test if ops registered to this rec needs regs */
static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
{
	struct ftrace_ops *ops;
	bool keep_regs = false;

	for (ops = ftrace_ops_list;
	     ops != &ftrace_list_end; ops = ops->next) {
		/* pass rec in as regs to have non-NULL val */
		if (ftrace_ops_test(ops, rec->ip, rec)) {
			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
				keep_regs = true;
				break;
			}
		}
	}

	return keep_regs;
}

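/*
 * Descriptive note (added): walk every dyn_ftrace record and adjust its
 * reference count according to @ops' filter/notrace hashes. @inc selects
 * increment or decrement; @filter_hash selects which of the two hashes
 * changed. Returns true if any record now needs its call site updated.
 */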
static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
				     int filter_hash,
				     bool inc)
{
	struct ftrace_hash *hash;
	struct ftrace_hash *other_hash;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	bool update = false;
	int count = 0;
	int all = false;

	/* Only update if the ops has been registered */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return false;

	/*
	 * In the filter_hash case:
	 *   If the count is zero, we update all records.
	 *   Otherwise we just update the items in the hash.
	 *
	 * In the notrace_hash case:
	 *   We enable the update in the hash.
	 *   As disabling notrace means enabling the tracing,
	 *   and enabling notrace means disabling, the inc variable
	 *   gets inversed.
	 */
	if (filter_hash) {
		hash = ops->func_hash->filter_hash;
		other_hash = ops->func_hash->notrace_hash;
		if (ftrace_hash_empty(hash))
			all = true;
	} else {
		inc = !inc;
		hash = ops->func_hash->notrace_hash;
		other_hash = ops->func_hash->filter_hash;
		/*
		 * If the notrace hash has no items,
		 * then there's nothing to do.
		 */
		if (ftrace_hash_empty(hash))
			return false;
	}

	do_for_each_ftrace_rec(pg, rec) {
		int in_other_hash = 0;
		int in_hash = 0;
		int match = 0;

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

		if (all) {
			/*
			 * Only the filter_hash affects all records.
			 * Update if the record is not in the notrace hash.
			 */
			if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
				match = 1;
		} else {
			in_hash = !!ftrace_lookup_ip(hash, rec->ip);
			in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);

			/*
			 * If filter_hash is set, we want to match all functions
			 * that are in the hash but not in the other hash.
			 *
			 * If filter_hash is not set, then we are decrementing.
			 * That means we match anything that is in the hash
			 * and also in the other_hash. That is, we need to turn
			 * off functions in the other hash because they are disabled
			 * by this hash.
			 */
			if (filter_hash && in_hash && !in_other_hash)
				match = 1;
			else if (!filter_hash && in_hash &&
				 (in_other_hash || ftrace_hash_empty(other_hash)))
				match = 1;
		}
		if (!match)
			continue;

		if (inc) {
			rec->flags++;
			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
				return false;

			if (ops->flags & FTRACE_OPS_FL_DIRECT)
				rec->flags |= FTRACE_FL_DIRECT;

			/*
			 * If there's only a single callback registered to a
			 * function, and the ops has a trampoline registered
			 * for it, then we can call it directly.
			 */
			if (ftrace_rec_count(rec) == 1 && ops->trampoline)
				rec->flags |= FTRACE_FL_TRAMP;
			else
				/*
				 * If we are adding another function callback
				 * to this function, and the previous had a
				 * custom trampoline in use, then we need to go
				 * back to the default trampoline.
				 */
				rec->flags &= ~FTRACE_FL_TRAMP;

			/*
			 * If any ops wants regs saved for this function
			 * then all ops will get saved regs.
			 */
			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
				rec->flags |= FTRACE_FL_REGS;
		} else {
			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
				return false;
			rec->flags--;

			/*
			 * Only the internal direct_ops should have the
			 * DIRECT flag set. Thus, if it is removing a
			 * function, then that function should no longer
			 * be direct.
			 */
			if (ops->flags & FTRACE_OPS_FL_DIRECT)
				rec->flags &= ~FTRACE_FL_DIRECT;

			/*
			 * If the rec had REGS enabled and the ops that is
			 * being removed had REGS set, then see if there is
			 * still any ops for this record that wants regs.
			 * If not, we can stop recording them.
			 */
			if (ftrace_rec_count(rec) > 0 &&
			    rec->flags & FTRACE_FL_REGS &&
			    ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
				if (!test_rec_ops_needs_regs(rec))
					rec->flags &= ~FTRACE_FL_REGS;
			}

			/*
			 * A trampoline can only be attached when there is a
			 * single ops on the record, so always clear TRAMP
			 * when removing a callback.
			 */
			rec->flags &= ~FTRACE_FL_TRAMP;

			/*
			 * flags will be cleared in ftrace_check_record()
			 * if rec count is zero.
			 */
		}
		count++;

		/* Must match FTRACE_UPDATE_CALLS in ftrace_modify_all_code() */
		update |= ftrace_test_record(rec, 1) != FTRACE_UPDATE_IGNORE;

		/* Shortcut, if we handled all records, we are done. */
		if (!all && count == hash->count)
			return update;
	} while_for_each_ftrace_rec();

	return update;
}

static bool ftrace_hash_rec_disable(struct ftrace_ops *ops,
				    int filter_hash)
{
	return __ftrace_hash_rec_update(ops, filter_hash, 0);
}

static bool ftrace_hash_rec_enable(struct ftrace_ops *ops,
				   int filter_hash)
{
	return __ftrace_hash_rec_update(ops, filter_hash, 1);
}

static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops,
					  int filter_hash, int inc)
{
	struct ftrace_ops *op;

	__ftrace_hash_rec_update(ops, filter_hash, inc);

	if (ops->func_hash != &global_ops.local_hash)
		return;

	/*
	 * If the ops shares the global_ops hash, then we need to update
	 * all ops that are enabled and use this hash.
	 */
	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/* Already done */
		if (op == ops)
			continue;
		if (op->func_hash == &global_ops.local_hash)
			__ftrace_hash_rec_update(op, filter_hash, inc);
	} while_for_each_ftrace_op(op);
}

static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops,
					   int filter_hash)
{
	ftrace_hash_rec_update_modify(ops, filter_hash, 0);
}

static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
					  int filter_hash)
{
	ftrace_hash_rec_update_modify(ops, filter_hash, 1);
}

/*
 * Try to update IPMODIFY flag on each ftrace_rec. Return 0 if it is OK
 * or no update is needed, -EBUSY if it detects a conflict of the flag
 * on a ftrace_rec, and -EINVAL if the new_hash tries to trace all recs.
 * Note that old_hash and new_hash have the below meanings:
 *  - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected)
 *  - If the hash is EMPTY_HASH, it hits nothing
 *  - Anything else hits the recs which match the hash entries.
 */
static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
					 struct ftrace_hash *old_hash,
					 struct ftrace_hash *new_hash)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec, *end = NULL;
	int in_old, in_new;

	/* Only update if the ops has been registered */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return 0;

	if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
		return 0;

	/*
	 * Since the IPMODIFY is a very address sensitive action, we do not
	 * allow ftrace_ops to set all functions to new hash.
	 */
	if (!new_hash || !old_hash)
		return -EINVAL;

	/* Update rec->flags */
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

		/* We need to update only differences of filter_hash */
		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
		if (in_old == in_new)
			continue;

		if (in_new) {
			/* New entries must ensure no others are using it */
			if (rec->flags & FTRACE_FL_IPMODIFY)
				goto rollback;
			rec->flags |= FTRACE_FL_IPMODIFY;
		} else /* Removed entry */
			rec->flags &= ~FTRACE_FL_IPMODIFY;
	} while_for_each_ftrace_rec();

	return 0;

rollback:
	end = rec;

	/* Roll back what we did above */
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

		if (rec == end)
			goto err_out;

		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
		if (in_old == in_new)
			continue;

		if (in_new)
			rec->flags &= ~FTRACE_FL_IPMODIFY;
		else
			rec->flags |= FTRACE_FL_IPMODIFY;
	} while_for_each_ftrace_rec();

err_out:
	return -EBUSY;
}

static int ftrace_hash_ipmodify_enable(struct ftrace_ops *ops)
{
	struct ftrace_hash *hash = ops->func_hash->filter_hash;

	if (ftrace_hash_empty(hash))
		hash = NULL;

	return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash);
}

/* Disabling always succeeds */
static void ftrace_hash_ipmodify_disable(struct ftrace_ops *ops)
{
	struct ftrace_hash *hash = ops->func_hash->filter_hash;

	if (ftrace_hash_empty(hash))
		hash = NULL;

	__ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH);
}

static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
				       struct ftrace_hash *new_hash)
{
	struct ftrace_hash *old_hash = ops->func_hash->filter_hash;

	if (ftrace_hash_empty(old_hash))
		old_hash = NULL;

	if (ftrace_hash_empty(new_hash))
		new_hash = NULL;

	return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash);
}

static void print_ip_ins(const char *fmt, const unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

static struct ftrace_ops *
ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
static struct ftrace_ops *
ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);

enum ftrace_bug_type ftrace_bug_type;
const void *ftrace_expected;

static void print_bug_type(void)
{
	switch (ftrace_bug_type) {
	case FTRACE_BUG_UNKNOWN:
		break;
	case FTRACE_BUG_INIT:
		pr_info("Initializing ftrace call sites\n");
		break;
	case FTRACE_BUG_NOP:
		pr_info("Setting ftrace call site to NOP\n");
		break;
	case FTRACE_BUG_CALL:
		pr_info("Setting ftrace call site to call ftrace function\n");
		break;
	case FTRACE_BUG_UPDATE:
		pr_info("Updating ftrace call site to call a different ftrace function\n");
		break;
	}
}

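/**
 * ftrace_bug - report and shutdown function tracer
 * @failed: The failed type (EFAULT, EINVAL, EPERM)
 * @rec: The record that failed
 *
 * The arch code that enables or disables the function tracing
 * can call ftrace_bug() when it has detected a problem in
 * modifying the code. @failed should be one of either:
 * EFAULT - if the problem happens on reading the @ip address
 * EINVAL - if what is read at @ip is not what was expected
 * EPERM - if the problem happens on writing to the @ip address
 */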
void ftrace_bug(int failed, struct dyn_ftrace *rec)
{
	unsigned long ip = rec ? rec->ip : 0;

	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		print_ip_ins(" actual:   ", (unsigned char *)ip);
		pr_cont("\n");
		if (ftrace_expected) {
			print_ip_ins(" expected: ", ftrace_expected);
			pr_cont("\n");
		}
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
	print_bug_type();
	if (rec) {
		struct ftrace_ops *ops = NULL;

		pr_info("ftrace record flags: %lx\n", rec->flags);
		pr_cont(" (%ld)%s", ftrace_rec_count(rec),
			rec->flags & FTRACE_FL_REGS ? " R" : "  ");
		if (rec->flags & FTRACE_FL_TRAMP_EN) {
			ops = ftrace_find_tramp_ops_any(rec);
			if (ops) {
				do {
					pr_cont("\ttramp: %pS (%pS)",
						(void *)ops->trampoline,
						(void *)ops->func);
					ops = ftrace_find_tramp_ops_next(rec, ops);
				} while (ops);
			} else
				pr_cont("\ttramp: ERROR!");

		}
		ip = ftrace_get_addr_curr(rec);
		pr_cont("\n expected tramp: %lx\n", ip);
	}
}

static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
{
	unsigned long flag = 0UL;

	ftrace_bug_type = FTRACE_BUG_UNKNOWN;

	if (rec->flags & FTRACE_FL_DISABLED)
		return FTRACE_UPDATE_IGNORE;

	/*
	 * If we are updating calls:
	 *
	 *   If the record has a ref count, then we need to enable it
	 *   because someone is using it.
	 *
	 *   Otherwise we make sure its disabled.
	 *
	 * If we are disabling calls, then disable all records that
	 * are enabled.
	 */
	if (enable && ftrace_rec_count(rec))
		flag = FTRACE_FL_ENABLED;

	/*
	 * If enabling and the REGS flag does not match the REGS_EN, or
	 * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore
	 * this record. Set flags to fail the compare against ENABLED.
	 * Same for direct calls.
	 */
	if (flag) {
		if (!(rec->flags & FTRACE_FL_REGS) !=
		    !(rec->flags & FTRACE_FL_REGS_EN))
			flag |= FTRACE_FL_REGS;

		if (!(rec->flags & FTRACE_FL_TRAMP) !=
		    !(rec->flags & FTRACE_FL_TRAMP_EN))
			flag |= FTRACE_FL_TRAMP;

		/*
		 * Direct calls are special, as count matters.
		 * A direct call can only be enabled when it is the only
		 * callback on the record; once another callback attaches,
		 * the call site must fall back to the shared trampoline,
		 * so DIRECT_EN has to be re-evaluated here.
		 */
		if (ftrace_rec_count(rec) == 1) {
			if (!(rec->flags & FTRACE_FL_DIRECT) !=
			    !(rec->flags & FTRACE_FL_DIRECT_EN))
				flag |= FTRACE_FL_DIRECT;
		} else if (rec->flags & FTRACE_FL_DIRECT_EN) {
			flag |= FTRACE_FL_DIRECT;
		}
	}

	/* If the state of this record hasn't changed, then do nothing */
	if ((rec->flags & FTRACE_FL_ENABLED) == flag)
		return FTRACE_UPDATE_IGNORE;

	if (flag) {
		/* Save off if rec is being enabled (for return value) */
		flag ^= rec->flags & FTRACE_FL_ENABLED;

		if (update) {
			rec->flags |= FTRACE_FL_ENABLED;
			if (flag & FTRACE_FL_REGS) {
				if (rec->flags & FTRACE_FL_REGS)
					rec->flags |= FTRACE_FL_REGS_EN;
				else
					rec->flags &= ~FTRACE_FL_REGS_EN;
			}
			if (flag & FTRACE_FL_TRAMP) {
				if (rec->flags & FTRACE_FL_TRAMP)
					rec->flags |= FTRACE_FL_TRAMP_EN;
				else
					rec->flags &= ~FTRACE_FL_TRAMP_EN;
			}
			if (flag & FTRACE_FL_DIRECT) {
				/*
				 * If there's only one user (direct_ops helper)
				 * then we can call the direct function
				 * directly (no ftrace trampoline).
				 */
				if (ftrace_rec_count(rec) == 1) {
					if (rec->flags & FTRACE_FL_DIRECT)
						rec->flags |= FTRACE_FL_DIRECT_EN;
					else
						rec->flags &= ~FTRACE_FL_DIRECT_EN;
				} else {
					/*
					 * Can only call directly if there's
					 * only one callback to the
					 * function.
					 */
					rec->flags &= ~FTRACE_FL_DIRECT_EN;
				}
			}
		}

		/*
		 * If this record is being updated from a nop, then
		 *   return UPDATE_MAKE_CALL.
		 * Otherwise,
		 *   return UPDATE_MODIFY_CALL to tell the caller to convert
		 *   from the save regs, to a non-save regs function or
		 *   vice versa, or from a trampoline call.
		 */
		if (flag & FTRACE_FL_ENABLED) {
			ftrace_bug_type = FTRACE_BUG_CALL;
			return FTRACE_UPDATE_MAKE_CALL;
		}

		ftrace_bug_type = FTRACE_BUG_UPDATE;
		return FTRACE_UPDATE_MODIFY_CALL;
	}

	if (update) {
		/* If there's no more users, clear all flags */
		if (!ftrace_rec_count(rec))
			rec->flags = 0;
		else
			/*
			 * Just disable the record, but keep the ops TRAMP
			 * and REGS states. The _EN flags must be disabled though.
			 */
			rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN |
					FTRACE_FL_REGS_EN | FTRACE_FL_DIRECT_EN);
	}

	ftrace_bug_type = FTRACE_BUG_NOP;
	return FTRACE_UPDATE_MAKE_NOP;
}

/**
 * ftrace_update_record, set a record that now is tracing or not
 * @rec: the record to update
 * @enable: set to 1 if the record is tracing, zero to force disable
 *
 * The records that represent all functions that can be traced need
 * to be updated when tracing has been enabled.
 */
int ftrace_update_record(struct dyn_ftrace *rec, int enable)
{
	return ftrace_check_record(rec, enable, 1);
}

/**
 * ftrace_test_record, check if the record has been enabled or not
 * @rec: the record to test
 * @enable: set to 1 to check if enabled, 0 if it is disabled
 *
 * The arch code may need to test if a record is already set to
 * tracing to determine how to modify the function code that it
 * represents.
 */
int ftrace_test_record(struct dyn_ftrace *rec, int enable)
{
	return ftrace_check_record(rec, enable, 0);
}

static struct ftrace_ops *
ftrace_find_tramp_ops_any(struct dyn_ftrace *rec)
{
	struct ftrace_ops *op;
	unsigned long ip = rec->ip;

	do_for_each_ftrace_op(op, ftrace_ops_list) {

		if (!op->trampoline)
			continue;

		if (hash_contains_ip(ip, op->func_hash))
			return op;
	} while_for_each_ftrace_op(op);

	return NULL;
}

static struct ftrace_ops *
ftrace_find_tramp_ops_next(struct dyn_ftrace *rec,
			   struct ftrace_ops *op)
{
	unsigned long ip = rec->ip;

	while_for_each_ftrace_op(op) {

		if (!op->trampoline)
			continue;

		if (hash_contains_ip(ip, op->func_hash))
			return op;
	}

	return NULL;
}

static struct ftrace_ops *
ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
{
	struct ftrace_ops *op;
	unsigned long ip = rec->ip;

	/*
	 * Need to check removed ops first.
	 * If they are being removed, and this rec has a tramp,
	 * and this rec is in the ops list, then it would be the
	 * one with the tramp.
	 */
	if (removed_ops) {
		if (hash_contains_ip(ip, &removed_ops->old_hash))
			return removed_ops;
	}

	/*
	 * Need to find the current trampoline for a rec.
	 * Now, a trampoline is only attached to a rec if there
	 * was a single 'ops' attached to it. But this can be called
	 * when we are adding another op to the rec or removing the
	 * current one. Thus, if the op is being added, we can
	 * ignore it because it hasn't attached itself to the rec
	 * yet.
	 *
	 * If an ops is being modified (hooking to different functions)
	 * then we don't care about the new functions that are being
	 * added, just the old ones (that are probably being removed).
	 *
	 * If we are adding an ops to a function that already is using
	 * a trampoline, it needs to be removed (trampolines are only
	 * for single ops connected), then an ops that is not being
	 * modified also needs to be checked.
	 */
	do_for_each_ftrace_op(op, ftrace_ops_list) {

		if (!op->trampoline)
			continue;

		/*
		 * If the ops is being added, it hasn't gotten to
		 * the point to be removed from this tree yet.
		 */
		if (op->flags & FTRACE_OPS_FL_ADDING)
			continue;

		/*
		 * If the ops is being modified and is in the old
		 * hash, then it is probably being removed from this
		 * function.
		 */
		if ((op->flags & FTRACE_OPS_FL_MODIFYING) &&
		    hash_contains_ip(ip, &op->old_hash))
			return op;
		/*
		 * If the ops is not being added or modified, and it's
		 * in its normal filter hash, then this must be the one
		 * we want!
		 */
		if (!(op->flags & FTRACE_OPS_FL_MODIFYING) &&
		    hash_contains_ip(ip, op->func_hash))
			return op;

	} while_for_each_ftrace_op(op);

	return NULL;
}

static struct ftrace_ops *
ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
{
	struct ftrace_ops *op;
	unsigned long ip = rec->ip;

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/* pass rec in as regs to have non-NULL val */
		if (hash_contains_ip(ip, op->func_hash))
			return op;
	} while_for_each_ftrace_op(op);

	return NULL;
}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
/* Protected by rcu_tasks for reading, and direct_mutex for writing */
static struct ftrace_hash *direct_functions = EMPTY_HASH;
static DEFINE_MUTEX(direct_mutex);
int ftrace_direct_func_count;

/*
 * Search the direct_functions hash to see if the given
 * instruction pointer has a direct caller attached to it.
 */
unsigned long ftrace_find_rec_direct(unsigned long ip)
{
	struct ftrace_func_entry *entry;

	entry = __ftrace_lookup_ip(direct_functions, ip);
	if (!entry)
		return 0;

	return entry->direct;
}

static void call_direct_funcs(unsigned long ip, unsigned long pip,
			      struct ftrace_ops *ops, struct pt_regs *regs)
{
	unsigned long addr;

	addr = ftrace_find_rec_direct(ip);
	if (!addr)
		return;

	arch_ftrace_set_direct_caller(regs, addr);
}

struct ftrace_ops direct_ops = {
	.func		= call_direct_funcs,
	.flags		= FTRACE_OPS_FL_IPMODIFY | FTRACE_OPS_FL_RECURSION_SAFE
			  | FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_REGS
			  | FTRACE_OPS_FL_PERMANENT,
};
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */

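/**
 * ftrace_get_addr_new - Get the call address to set to
 * @rec:  The ftrace record descriptor
 *
 * If the record has the FTRACE_FL_REGS set, that means that it
 * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
 * is not set, then it wants to convert to the normal callback.
 *
 * Returns the address of the trampoline to set to.
 */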
unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
{
	struct ftrace_ops *ops;
	unsigned long addr;

	if ((rec->flags & FTRACE_FL_DIRECT) &&
	    (ftrace_rec_count(rec) == 1)) {
		addr = ftrace_find_rec_direct(rec->ip);
		if (addr)
			return addr;
		WARN_ON_ONCE(1);
	}

	/* Trampolines take precedence over regs */
	if (rec->flags & FTRACE_FL_TRAMP) {
		ops = ftrace_find_tramp_ops_new(rec);
		if (FTRACE_WARN_ON(!ops || !ops->trampoline)) {
			pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n",
				(void *)rec->ip, (void *)rec->ip, rec->flags);
			/* Ftrace is shutting down, return anything */
			return (unsigned long)FTRACE_ADDR;
		}
		return ops->trampoline;
	}

	if (rec->flags & FTRACE_FL_REGS)
		return (unsigned long)FTRACE_REGS_ADDR;
	else
		return (unsigned long)FTRACE_ADDR;
}

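/**
 * ftrace_get_addr_curr - Get the call address that is already there
 * @rec:  The ftrace record descriptor
 *
 * The FTRACE_FL_REGS_EN is set when the record already points to
 * a function that saves all the regs. Basically the '_EN' version
 * represents the current state of the function.
 *
 * Returns the address of the trampoline that is currently being called.
 */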
unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
{
	struct ftrace_ops *ops;
	unsigned long addr;

	/* Direct calls take precedence over trampolines */
	if (rec->flags & FTRACE_FL_DIRECT_EN) {
		addr = ftrace_find_rec_direct(rec->ip);
		if (addr)
			return addr;
		WARN_ON_ONCE(1);
	}

	/* Trampolines take precedence over regs */
	if (rec->flags & FTRACE_FL_TRAMP_EN) {
		ops = ftrace_find_tramp_ops_curr(rec);
		if (FTRACE_WARN_ON(!ops)) {
			pr_warn("Bad trampoline accounting at: %p (%pS)\n",
				(void *)rec->ip, (void *)rec->ip);
			/* Ftrace is shutting down, return anything */
			return (unsigned long)FTRACE_ADDR;
		}
		return ops->trampoline;
	}

	if (rec->flags & FTRACE_FL_REGS_EN)
		return (unsigned long)FTRACE_REGS_ADDR;
	else
		return (unsigned long)FTRACE_ADDR;
}

static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_old_addr;
	unsigned long ftrace_addr;
	int ret;

	ftrace_addr = ftrace_get_addr_new(rec);

	/* This needs to be done before we call ftrace_update_record */
	ftrace_old_addr = ftrace_get_addr_curr(rec);

	ret = ftrace_update_record(rec, enable);

	ftrace_bug_type = FTRACE_BUG_UNKNOWN;

	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
		return 0;

	case FTRACE_UPDATE_MAKE_CALL:
		ftrace_bug_type = FTRACE_BUG_CALL;
		return ftrace_make_call(rec, ftrace_addr);

	case FTRACE_UPDATE_MAKE_NOP:
		ftrace_bug_type = FTRACE_BUG_NOP;
		return ftrace_make_nop(NULL, rec, ftrace_old_addr);

	case FTRACE_UPDATE_MODIFY_CALL:
		ftrace_bug_type = FTRACE_BUG_UPDATE;
		return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
	}

	return -1; /* unknown ftrace bug */
}

void __weak ftrace_replace_code(int enable)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int failed;

	if (unlikely(ftrace_disabled))
		return;

	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

		failed = __ftrace_replace_code(rec, enable);
		if (failed) {
			ftrace_bug(failed, rec);
			/* Stop processing */
			return;
		}
	} while_for_each_ftrace_rec();
}

struct ftrace_rec_iter {
	struct ftrace_page	*pg;
	int			index;
};

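/**
 * ftrace_rec_iter_start, start up iterating over traced functions
 *
 * Returns an iterator handle that is used to iterate over all
 * the records that represent address locations where functions
 * are traced.
 *
 * May return NULL if no records are available.
 */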
struct ftrace_rec_iter *ftrace_rec_iter_start(void)
{
	/*
	 * We only use a single iterator.
	 * Protected by the ftrace_lock mutex.
	 */
	static struct ftrace_rec_iter ftrace_rec_iter;
	struct ftrace_rec_iter *iter = &ftrace_rec_iter;

	iter->pg = ftrace_pages_start;
	iter->index = 0;

	/* Could have empty pages */
	while (iter->pg && !iter->pg->index)
		iter->pg = iter->pg->next;

	if (!iter->pg)
		return NULL;

	return iter;
}

/**
 * ftrace_rec_iter_next, get the next record to process.
 * @iter: The handle to the iterator.
 *
 * Returns the next iterator after the given iterator @iter.
 */
struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
{
	iter->index++;

	if (iter->index >= iter->pg->index) {
		iter->pg = iter->pg->next;
		iter->index = 0;

		/* Could have empty pages */
		while (iter->pg && !iter->pg->index)
			iter->pg = iter->pg->next;
	}

	if (!iter->pg)
		return NULL;

	return iter;
}

/**
 * ftrace_rec_iter_record, get the record at the iterator location
 * @iter: The current iterator location
 *
 * Returns the record that the current @iter is at.
 */
struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
{
	return &iter->pg->records[iter->index];
}

static int
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return 0;

	ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
	if (ret) {
		ftrace_bug_type = FTRACE_BUG_INIT;
		ftrace_bug(ret, rec);
		return 0;
	}
	return 1;
}

/*
 * archs can override this function if they must do something
 * before the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_prepare(void)
{
	return 0;
}

/*
 * archs can override this function if they must do something
 * after the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_post_process(void)
{
	return 0;
}

void ftrace_modify_all_code(int command)
{
	int update = command & FTRACE_UPDATE_TRACE_FUNC;
	int err = 0;

	/*
	 * If the ftrace_caller calls a ftrace_ops func directly,
	 * we need to make sure that it only traces functions it
	 * expects to trace. When doing the switch of functions,
	 * we need to update to the ftrace_ops_list_func first
	 * before the transition between old and new calls are set,
	 * as the ftrace_ops_list_func will check the ops hashes
	 * to make sure the ops are having the right functions
	 * traced.
	 */
	if (update) {
		err = ftrace_update_ftrace_func(ftrace_ops_list_func);
		if (FTRACE_WARN_ON(err))
			return;
	}

	if (command & FTRACE_UPDATE_CALLS)
		ftrace_replace_code(1);
	else if (command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (update && ftrace_trace_function != ftrace_ops_list_func) {
		function_trace_op = set_function_trace_op;
		smp_wmb();
		/* If irqs are disabled, we are in stop machine */
		if (!irqs_disabled())
			smp_call_function(ftrace_sync_ipi, NULL, 1);
		err = ftrace_update_ftrace_func(ftrace_trace_function);
		if (FTRACE_WARN_ON(err))
			return;
	}

	if (command & FTRACE_START_FUNC_RET)
		err = ftrace_enable_ftrace_graph_caller();
	else if (command & FTRACE_STOP_FUNC_RET)
		err = ftrace_disable_ftrace_graph_caller();
	FTRACE_WARN_ON(err);
}

static int __ftrace_modify_code(void *data)
{
	int *command = data;

	ftrace_modify_all_code(*command);

	return 0;
}

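/**
 * ftrace_run_stop_machine, go back to the stop machine method
 * @command: The command to tell ftrace what to do
 *
 * If an arch needs to call stop_machine() to modify code,
 * it can call this to make sure it is done correctly.
 */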
2749void ftrace_run_stop_machine(int command)
2750{
2751 stop_machine(__ftrace_modify_code, &command, NULL);
2752}
2753
2754
2755
2756
2757
2758
2759
2760
2761void __weak arch_ftrace_update_code(int command)
2762{
2763 ftrace_run_stop_machine(command);
2764}
2765
2766static void ftrace_run_update_code(int command)
2767{
2768 int ret;
2769
2770 ret = ftrace_arch_code_modify_prepare();
2771 FTRACE_WARN_ON(ret);
2772 if (ret)
2773 return;
2774
2775
2776
2777
2778
2779
2780
2781 arch_ftrace_update_code(command);
2782
2783 ret = ftrace_arch_code_modify_post_process();
2784 FTRACE_WARN_ON(ret);
2785}
2786
2787static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
2788 struct ftrace_ops_hash *old_hash)
2789{
2790 ops->flags |= FTRACE_OPS_FL_MODIFYING;
2791 ops->old_hash.filter_hash = old_hash->filter_hash;
2792 ops->old_hash.notrace_hash = old_hash->notrace_hash;
2793 ftrace_run_update_code(command);
2794 ops->old_hash.filter_hash = NULL;
2795 ops->old_hash.notrace_hash = NULL;
2796 ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
2797}
2798
2799static ftrace_func_t saved_ftrace_func;
2800static int ftrace_start_up;
2801
2802void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops)
2803{
2804}
2805
2806static void ftrace_startup_enable(int command)
2807{
2808 if (saved_ftrace_func != ftrace_trace_function) {
2809 saved_ftrace_func = ftrace_trace_function;
2810 command |= FTRACE_UPDATE_TRACE_FUNC;
2811 }
2812
2813 if (!command || !ftrace_enabled)
2814 return;
2815
2816 ftrace_run_update_code(command);
2817}
2818
2819static void ftrace_startup_all(int command)
2820{
2821 update_all_ops = true;
2822 ftrace_startup_enable(command);
2823 update_all_ops = false;
2824}
2825
2826static int ftrace_startup(struct ftrace_ops *ops, int command)
2827{
2828 int ret;
2829
2830 if (unlikely(ftrace_disabled))
2831 return -ENODEV;
2832
2833 ret = __register_ftrace_function(ops);
2834 if (ret)
2835 return ret;
2836
2837 ftrace_start_up++;
2838
	/*
	 * Note that ftrace probes use this to start up
	 * and modify functions they will probe. But we still
	 * set the ADDING flag for modification, as probes
	 * do not have trampolines. If they add them in the
	 * future, then the probes will need to distinguish
	 * between adding and updating probes.
	 */
2847 ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING;
2848
2849 ret = ftrace_hash_ipmodify_enable(ops);
2850 if (ret < 0) {
		/* Rollback the registration process */
2852 __unregister_ftrace_function(ops);
2853 ftrace_start_up--;
2854 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
2855 return ret;
2856 }
2857
2858 if (ftrace_hash_rec_enable(ops, 1))
2859 command |= FTRACE_UPDATE_CALLS;
2860
2861 ftrace_startup_enable(command);
2862
2863 ops->flags &= ~FTRACE_OPS_FL_ADDING;
2864
2865 return 0;
2866}
2867
2868static int ftrace_shutdown(struct ftrace_ops *ops, int command)
2869{
2870 int ret;
2871
2872 if (unlikely(ftrace_disabled))
2873 return -ENODEV;
2874
2875 ret = __unregister_ftrace_function(ops);
2876 if (ret)
2877 return ret;
2878
2879 ftrace_start_up--;

	/*
	 * Just warn in case of unbalance; no need to kill ftrace, it's not
	 * critical, but the ftrace_call callers may never be nopped again
	 * after further ftrace uses.
	 */
2885 WARN_ON_ONCE(ftrace_start_up < 0);
2886
	/* Disabling ipmodify never fails */
2888 ftrace_hash_ipmodify_disable(ops);
2889
2890 if (ftrace_hash_rec_disable(ops, 1))
2891 command |= FTRACE_UPDATE_CALLS;
2892
2893 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
2894
2895 if (saved_ftrace_func != ftrace_trace_function) {
2896 saved_ftrace_func = ftrace_trace_function;
2897 command |= FTRACE_UPDATE_TRACE_FUNC;
2898 }
2899
2900 if (!command || !ftrace_enabled) {
		/*
		 * If these are dynamic ops, they still need their data
		 * freed. Since function tracing is not currently active,
		 * we can just free them without synchronizing all CPUs.
		 */
2907 if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
2908 goto free_ops;
2909
2910 return 0;
2911 }
2912
	/*
	 * If the ops uses a trampoline, then it needs to be
	 * tested first on update.
	 */
2917 ops->flags |= FTRACE_OPS_FL_REMOVING;
2918 removed_ops = ops;
2919
	/* The trampoline logic checks the old hashes */
2921 ops->old_hash.filter_hash = ops->func_hash->filter_hash;
2922 ops->old_hash.notrace_hash = ops->func_hash->notrace_hash;
2923
2924 ftrace_run_update_code(command);
2925
	/*
	 * If there's no more ops registered with ftrace, run a
	 * sanity check to make sure all rec flags are cleared.
	 */
2930 if (rcu_dereference_protected(ftrace_ops_list,
2931 lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
2932 struct ftrace_page *pg;
2933 struct dyn_ftrace *rec;
2934
2935 do_for_each_ftrace_rec(pg, rec) {
2936 if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_FL_DISABLED))
2937 pr_warn(" %pS flags:%lx\n",
2938 (void *)rec->ip, rec->flags);
2939 } while_for_each_ftrace_rec();
2940 }
2941
2942 ops->old_hash.filter_hash = NULL;
2943 ops->old_hash.notrace_hash = NULL;
2944
2945 removed_ops = NULL;
2946 ops->flags &= ~FTRACE_OPS_FL_REMOVING;
2947
	/*
	 * Dynamic ops may be freed, we must make sure that all
	 * callers are done before leaving this function.
	 */
2954 if (ops->flags & FTRACE_OPS_FL_DYNAMIC) {
		/*
		 * We need to do a hard force of sched synchronization.
		 * This is because we use preempt_disable() to do RCU, but
		 * the function tracers can be called where RCU is not watching
		 * (like before user_exit()). We can not rely on the RCU
		 * infrastructure to do the synchronization, thus we must do it
		 * ourselves.
		 */
2963 synchronize_rcu_tasks_rude();
2964
		/*
		 * When the kernel is preemptive, tasks can be preempted
		 * while on a ftrace trampoline. Just scheduling a task on
		 * a CPU is not good enough to flush them. Calling
		 * synchronize_rcu_tasks() will wait for those tasks to
		 * execute and either schedule voluntarily or enter user space.
		 */
2972 if (IS_ENABLED(CONFIG_PREEMPTION))
2973 synchronize_rcu_tasks();
2974
2975 free_ops:
2976 arch_ftrace_trampoline_free(ops);
2977 }
2978
2979 return 0;
2980}
2981
2982static void ftrace_startup_sysctl(void)
2983{
2984 int command;
2985
2986 if (unlikely(ftrace_disabled))
2987 return;
2988
	/* Force update next time */
2990 saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
2992 if (ftrace_start_up) {
2993 command = FTRACE_UPDATE_CALLS;
2994 if (ftrace_graph_active)
2995 command |= FTRACE_START_FUNC_RET;
2996 ftrace_startup_enable(command);
2997 }
2998}
2999
3000static void ftrace_shutdown_sysctl(void)
3001{
3002 int command;
3003
3004 if (unlikely(ftrace_disabled))
3005 return;
3006
	/* ftrace_start_up is true if ftrace is running */
3008 if (ftrace_start_up) {
3009 command = FTRACE_DISABLE_CALLS;
3010 if (ftrace_graph_active)
3011 command |= FTRACE_STOP_FUNC_RET;
3012 ftrace_run_update_code(command);
3013 }
3014}
3015
3016static u64 ftrace_update_time;
3017unsigned long ftrace_update_tot_cnt;
3018unsigned long ftrace_number_of_pages;
3019unsigned long ftrace_number_of_groups;
3020
3021static inline int ops_traces_mod(struct ftrace_ops *ops)
3022{
	/*
	 * Filter_hash being empty will default to trace module.
	 * But notrace hash requires a test of individual module functions.
	 */
3027 return ftrace_hash_empty(ops->func_hash->filter_hash) &&
3028 ftrace_hash_empty(ops->func_hash->notrace_hash);
3029}
3030
/*
 * Check if the current ops references the given record.
 *
 * If the ops traces all functions, then it was already accounted for.
 * If the ops does not trace the current record function, skip it.
 * If the ops ignores the function via notrace filter, skip it.
 */
3038static inline bool
3039ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
3040{
	/* If ops isn't enabled, ignore it */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return false;

	/* If ops traces all functions, then it includes this function */
	if (ops_traces_mod(ops))
		return true;

	/* The function must be in the filter */
	if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
	    !__ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))
		return false;

	/* If in notrace hash, we ignore it too */
	if (ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
		return false;

	return true;
3059}
3060
3061static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
3062{
3063 struct ftrace_page *pg;
3064 struct dyn_ftrace *p;
3065 u64 start, stop;
3066 unsigned long update_cnt = 0;
3067 unsigned long rec_flags = 0;
3068 int i;
3069
3070 start = ftrace_now(raw_smp_processor_id());
3071
	/*
	 * When a module is loaded, this function is called to convert
	 * the calls to mcount in its text to nops, and also to create
	 * an entry in the ftrace data. Now, if ftrace is activated
	 * after this call, but before the module sets its text to
	 * read-only, the modification of enabling ftrace can fail if
	 * the read-only is done while ftrace is converting the calls.
	 * To prevent this, the module's records are set as disabled
	 * and will be enabled after the call to set the module's text
	 * to read-only.
	 */
	if (mod)
		rec_flags |= FTRACE_FL_DISABLED;
3085
3086 for (pg = new_pgs; pg; pg = pg->next) {
3087
3088 for (i = 0; i < pg->index; i++) {
			/* If something went wrong, bail without enabling anything */
3091 if (unlikely(ftrace_disabled))
3092 return -1;
3093
3094 p = &pg->records[i];
3095 p->flags = rec_flags;

			/*
			 * Do the initial record conversion from the mcount
			 * call to the NOP instruction.
			 */
3101 if (!ftrace_code_disable(mod, p))
3102 break;
3103
3104 update_cnt++;
3105 }
3106 }
3107
3108 stop = ftrace_now(raw_smp_processor_id());
3109 ftrace_update_time = stop - start;
3110 ftrace_update_tot_cnt += update_cnt;
3111
3112 return 0;
3113}
3114
3115static int ftrace_allocate_records(struct ftrace_page *pg, int count)
3116{
3117 int order;
3118 int cnt;
3119
3120 if (WARN_ON(!count))
3121 return -EINVAL;
3122
3123 order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
3124
	/*
	 * We want to fill as much as possible. No more than a page
	 * may be empty.
	 */
3129 while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE)
3130 order--;
3131
3132 again:
3133 pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
3134
3135 if (!pg->records) {
		/* if we can't allocate this size, try something smaller */
3137 if (!order)
3138 return -ENOMEM;
3139 order >>= 1;
3140 goto again;
3141 }
3142
3143 ftrace_number_of_pages += 1 << order;
3144 ftrace_number_of_groups++;
3145
3146 cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
3147 pg->size = cnt;
3148
3149 if (cnt > count)
3150 cnt = count;
3151
3152 return cnt;
3153}
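
/*
 * Worked example of the sizing above (a sketch; ENTRY_SIZE is
 * sizeof(struct dyn_ftrace), assumed 16 bytes here, with 4K pages so
 * ENTRIES_PER_PAGE == 256): for count == 1000, DIV_ROUND_UP gives 4
 * pages and get_count_order(4) == 2. An order-2 block holds 1024
 * records, and since 1024 < 1000 + 256 the order is not reduced, so
 * one allocation covers the request with less than a page to spare.
 */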
3154
3155static struct ftrace_page *
3156ftrace_allocate_pages(unsigned long num_to_init)
3157{
3158 struct ftrace_page *start_pg;
3159 struct ftrace_page *pg;
3160 int order;
3161 int cnt;
3162
3163 if (!num_to_init)
		return NULL;
3165
3166 start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
3167 if (!pg)
3168 return NULL;
3169
	/*
	 * Try to allocate as much as possible in one contiguous
	 * location that fills in all of the space. We want to
	 * waste as little space as possible.
	 */
3175 for (;;) {
3176 cnt = ftrace_allocate_records(pg, num_to_init);
3177 if (cnt < 0)
3178 goto free_pages;
3179
3180 num_to_init -= cnt;
3181 if (!num_to_init)
3182 break;
3183
3184 pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
3185 if (!pg->next)
3186 goto free_pages;
3187
3188 pg = pg->next;
3189 }
3190
3191 return start_pg;
3192
3193 free_pages:
3194 pg = start_pg;
3195 while (pg) {
3196 order = get_count_order(pg->size / ENTRIES_PER_PAGE);
3197 free_pages((unsigned long)pg->records, order);
3198 start_pg = pg->next;
3199 kfree(pg);
3200 pg = start_pg;
3201 ftrace_number_of_pages -= 1 << order;
3202 ftrace_number_of_groups--;
3203 }
3204 pr_info("ftrace: FAILED to allocate memory for functions\n");
3205 return NULL;
3206}
3207
3208#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4)
3209
3210struct ftrace_iterator {
3211 loff_t pos;
3212 loff_t func_pos;
3213 loff_t mod_pos;
3214 struct ftrace_page *pg;
3215 struct dyn_ftrace *func;
3216 struct ftrace_func_probe *probe;
3217 struct ftrace_func_entry *probe_entry;
3218 struct trace_parser parser;
3219 struct ftrace_hash *hash;
3220 struct ftrace_ops *ops;
3221 struct trace_array *tr;
3222 struct list_head *mod_list;
3223 int pidx;
3224 int idx;
3225 unsigned flags;
3226};
3227
3228static void *
3229t_probe_next(struct seq_file *m, loff_t *pos)
3230{
3231 struct ftrace_iterator *iter = m->private;
3232 struct trace_array *tr = iter->ops->private;
3233 struct list_head *func_probes;
3234 struct ftrace_hash *hash;
3235 struct list_head *next;
3236 struct hlist_node *hnd = NULL;
3237 struct hlist_head *hhd;
3238 int size;
3239
3240 (*pos)++;
3241 iter->pos = *pos;
3242
3243 if (!tr)
3244 return NULL;
3245
3246 func_probes = &tr->func_probes;
3247 if (list_empty(func_probes))
3248 return NULL;
3249
3250 if (!iter->probe) {
3251 next = func_probes->next;
3252 iter->probe = list_entry(next, struct ftrace_func_probe, list);
3253 }
3254
3255 if (iter->probe_entry)
3256 hnd = &iter->probe_entry->hlist;
3257
3258 hash = iter->probe->ops.func_hash->filter_hash;
3259
	/*
	 * A probe being registered may temporarily have an empty hash
	 * and it's at the end of the func_probes list.
	 */
3264 if (!hash || hash == EMPTY_HASH)
3265 return NULL;
3266
3267 size = 1 << hash->size_bits;
3268
3269 retry:
3270 if (iter->pidx >= size) {
3271 if (iter->probe->list.next == func_probes)
3272 return NULL;
3273 next = iter->probe->list.next;
3274 iter->probe = list_entry(next, struct ftrace_func_probe, list);
3275 hash = iter->probe->ops.func_hash->filter_hash;
3276 size = 1 << hash->size_bits;
3277 iter->pidx = 0;
3278 }
3279
3280 hhd = &hash->buckets[iter->pidx];
3281
3282 if (hlist_empty(hhd)) {
3283 iter->pidx++;
3284 hnd = NULL;
3285 goto retry;
3286 }
3287
3288 if (!hnd)
3289 hnd = hhd->first;
3290 else {
3291 hnd = hnd->next;
3292 if (!hnd) {
3293 iter->pidx++;
3294 goto retry;
3295 }
3296 }
3297
3298 if (WARN_ON_ONCE(!hnd))
3299 return NULL;
3300
3301 iter->probe_entry = hlist_entry(hnd, struct ftrace_func_entry, hlist);
3302
3303 return iter;
3304}
3305
3306static void *t_probe_start(struct seq_file *m, loff_t *pos)
3307{
3308 struct ftrace_iterator *iter = m->private;
3309 void *p = NULL;
3310 loff_t l;
3311
3312 if (!(iter->flags & FTRACE_ITER_DO_PROBES))
3313 return NULL;
3314
3315 if (iter->mod_pos > *pos)
3316 return NULL;
3317
3318 iter->probe = NULL;
3319 iter->probe_entry = NULL;
3320 iter->pidx = 0;
3321 for (l = 0; l <= (*pos - iter->mod_pos); ) {
3322 p = t_probe_next(m, &l);
3323 if (!p)
3324 break;
3325 }
3326 if (!p)
3327 return NULL;
3328
	/* Only set this if we have an item */
3330 iter->flags |= FTRACE_ITER_PROBE;
3331
3332 return iter;
3333}
3334
3335static int
3336t_probe_show(struct seq_file *m, struct ftrace_iterator *iter)
3337{
3338 struct ftrace_func_entry *probe_entry;
3339 struct ftrace_probe_ops *probe_ops;
3340 struct ftrace_func_probe *probe;
3341
3342 probe = iter->probe;
3343 probe_entry = iter->probe_entry;
3344
3345 if (WARN_ON_ONCE(!probe || !probe_entry))
3346 return -EIO;
3347
3348 probe_ops = probe->probe_ops;
3349
3350 if (probe_ops->print)
3351 return probe_ops->print(m, probe_entry->ip, probe_ops, probe->data);
3352
3353 seq_printf(m, "%ps:%ps\n", (void *)probe_entry->ip,
3354 (void *)probe_ops->func);
3355
3356 return 0;
3357}
3358
3359static void *
3360t_mod_next(struct seq_file *m, loff_t *pos)
3361{
3362 struct ftrace_iterator *iter = m->private;
3363 struct trace_array *tr = iter->tr;
3364
3365 (*pos)++;
3366 iter->pos = *pos;
3367
3368 iter->mod_list = iter->mod_list->next;
3369
3370 if (iter->mod_list == &tr->mod_trace ||
3371 iter->mod_list == &tr->mod_notrace) {
3372 iter->flags &= ~FTRACE_ITER_MOD;
3373 return NULL;
3374 }
3375
3376 iter->mod_pos = *pos;
3377
3378 return iter;
3379}
3380
3381static void *t_mod_start(struct seq_file *m, loff_t *pos)
3382{
3383 struct ftrace_iterator *iter = m->private;
3384 void *p = NULL;
3385 loff_t l;
3386
3387 if (iter->func_pos > *pos)
3388 return NULL;
3389
3390 iter->mod_pos = iter->func_pos;
3391
	/* probes are only available if tr is set */
3393 if (!iter->tr)
3394 return NULL;
3395
3396 for (l = 0; l <= (*pos - iter->func_pos); ) {
3397 p = t_mod_next(m, &l);
3398 if (!p)
3399 break;
3400 }
3401 if (!p) {
3402 iter->flags &= ~FTRACE_ITER_MOD;
3403 return t_probe_start(m, pos);
3404 }
3405
	/* Only set this if we have an item */
3407 iter->flags |= FTRACE_ITER_MOD;
3408
3409 return iter;
3410}
3411
3412static int
3413t_mod_show(struct seq_file *m, struct ftrace_iterator *iter)
3414{
3415 struct ftrace_mod_load *ftrace_mod;
3416 struct trace_array *tr = iter->tr;
3417
3418 if (WARN_ON_ONCE(!iter->mod_list) ||
3419 iter->mod_list == &tr->mod_trace ||
3420 iter->mod_list == &tr->mod_notrace)
3421 return -EIO;
3422
3423 ftrace_mod = list_entry(iter->mod_list, struct ftrace_mod_load, list);
3424
3425 if (ftrace_mod->func)
3426 seq_printf(m, "%s", ftrace_mod->func);
3427 else
3428 seq_putc(m, '*');
3429
3430 seq_printf(m, ":mod:%s\n", ftrace_mod->module);
3431
3432 return 0;
3433}
3434
3435static void *
3436t_func_next(struct seq_file *m, loff_t *pos)
3437{
3438 struct ftrace_iterator *iter = m->private;
3439 struct dyn_ftrace *rec = NULL;
3440
3441 (*pos)++;
3442
3443 retry:
3444 if (iter->idx >= iter->pg->index) {
3445 if (iter->pg->next) {
3446 iter->pg = iter->pg->next;
3447 iter->idx = 0;
3448 goto retry;
3449 }
3450 } else {
3451 rec = &iter->pg->records[iter->idx++];
3452 if (((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
3453 !ftrace_lookup_ip(iter->hash, rec->ip)) ||
3454
3455 ((iter->flags & FTRACE_ITER_ENABLED) &&
3456 !(rec->flags & FTRACE_FL_ENABLED))) {
3457
3458 rec = NULL;
3459 goto retry;
3460 }
3461 }
3462
3463 if (!rec)
3464 return NULL;
3465
3466 iter->pos = iter->func_pos = *pos;
3467 iter->func = rec;
3468
3469 return iter;
3470}
3471
3472static void *
3473t_next(struct seq_file *m, void *v, loff_t *pos)
3474{
3475 struct ftrace_iterator *iter = m->private;
3476 loff_t l = *pos;
3477 void *ret;
3478
3479 if (unlikely(ftrace_disabled))
3480 return NULL;
3481
3482 if (iter->flags & FTRACE_ITER_PROBE)
3483 return t_probe_next(m, pos);
3484
3485 if (iter->flags & FTRACE_ITER_MOD)
3486 return t_mod_next(m, pos);
3487
3488 if (iter->flags & FTRACE_ITER_PRINTALL) {
		/* next must increment pos, and t_probe_start does not */
3490 (*pos)++;
3491 return t_mod_start(m, &l);
3492 }
3493
3494 ret = t_func_next(m, pos);
3495
3496 if (!ret)
3497 return t_mod_start(m, &l);
3498
3499 return ret;
3500}
3501
3502static void reset_iter_read(struct ftrace_iterator *iter)
3503{
3504 iter->pos = 0;
3505 iter->func_pos = 0;
3506 iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_PROBE | FTRACE_ITER_MOD);
3507}
3508
3509static void *t_start(struct seq_file *m, loff_t *pos)
3510{
3511 struct ftrace_iterator *iter = m->private;
3512 void *p = NULL;
3513 loff_t l;
3514
3515 mutex_lock(&ftrace_lock);
3516
3517 if (unlikely(ftrace_disabled))
3518 return NULL;
3519
	/*
	 * If an lseek was done, then reset and start from beginning.
	 */
3523 if (*pos < iter->pos)
3524 reset_iter_read(iter);
3525
	/*
	 * For set_ftrace_filter reading, if we have the filter
	 * off, we can short cut and just print out that all
	 * functions are enabled.
	 */
3531 if ((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
3532 ftrace_hash_empty(iter->hash)) {
3533 iter->func_pos = 1;
3534 if (*pos > 0)
3535 return t_mod_start(m, pos);
3536 iter->flags |= FTRACE_ITER_PRINTALL;
		/* reset in case of seek/pread */
3538 iter->flags &= ~FTRACE_ITER_PROBE;
3539 return iter;
3540 }
3541
3542 if (iter->flags & FTRACE_ITER_MOD)
3543 return t_mod_start(m, pos);
3544
	/*
	 * Unfortunately, we need to restart at ftrace_pages_start
	 * every time we let go of the ftrace_lock. This is because
	 * those pointers can change without the lock.
	 */
3550 iter->pg = ftrace_pages_start;
3551 iter->idx = 0;
3552 for (l = 0; l <= *pos; ) {
3553 p = t_func_next(m, &l);
3554 if (!p)
3555 break;
3556 }
3557
3558 if (!p)
3559 return t_mod_start(m, pos);
3560
3561 return iter;
3562}
3563
3564static void t_stop(struct seq_file *m, void *p)
3565{
3566 mutex_unlock(&ftrace_lock);
3567}
3568
3569void * __weak
3570arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
3571{
3572 return NULL;
3573}
3574
3575static void add_trampoline_func(struct seq_file *m, struct ftrace_ops *ops,
3576 struct dyn_ftrace *rec)
3577{
3578 void *ptr;
3579
3580 ptr = arch_ftrace_trampoline_func(ops, rec);
3581 if (ptr)
3582 seq_printf(m, " ->%pS", ptr);
3583}
3584
3585static int t_show(struct seq_file *m, void *v)
3586{
3587 struct ftrace_iterator *iter = m->private;
3588 struct dyn_ftrace *rec;
3589
3590 if (iter->flags & FTRACE_ITER_PROBE)
3591 return t_probe_show(m, iter);
3592
3593 if (iter->flags & FTRACE_ITER_MOD)
3594 return t_mod_show(m, iter);
3595
3596 if (iter->flags & FTRACE_ITER_PRINTALL) {
3597 if (iter->flags & FTRACE_ITER_NOTRACE)
3598 seq_puts(m, "#### no functions disabled ####\n");
3599 else
3600 seq_puts(m, "#### all functions enabled ####\n");
3601 return 0;
3602 }
3603
3604 rec = iter->func;
3605
3606 if (!rec)
3607 return 0;
3608
3609 seq_printf(m, "%ps", (void *)rec->ip);
3610 if (iter->flags & FTRACE_ITER_ENABLED) {
3611 struct ftrace_ops *ops;
3612
3613 seq_printf(m, " (%ld)%s%s%s",
3614 ftrace_rec_count(rec),
3615 rec->flags & FTRACE_FL_REGS ? " R" : " ",
3616 rec->flags & FTRACE_FL_IPMODIFY ? " I" : " ",
3617 rec->flags & FTRACE_FL_DIRECT ? " D" : " ");
3618 if (rec->flags & FTRACE_FL_TRAMP_EN) {
3619 ops = ftrace_find_tramp_ops_any(rec);
3620 if (ops) {
3621 do {
3622 seq_printf(m, "\ttramp: %pS (%pS)",
3623 (void *)ops->trampoline,
3624 (void *)ops->func);
3625 add_trampoline_func(m, ops, rec);
3626 ops = ftrace_find_tramp_ops_next(rec, ops);
3627 } while (ops);
3628 } else
3629 seq_puts(m, "\ttramp: ERROR!");
3630 } else {
3631 add_trampoline_func(m, NULL, rec);
3632 }
3633 if (rec->flags & FTRACE_FL_DIRECT) {
3634 unsigned long direct;
3635
3636 direct = ftrace_find_rec_direct(rec->ip);
3637 if (direct)
3638 seq_printf(m, "\n\tdirect-->%pS", (void *)direct);
3639 }
3640 }
3641
3642 seq_putc(m, '\n');
3643
3644 return 0;
3645}
3646
3647static const struct seq_operations show_ftrace_seq_ops = {
3648 .start = t_start,
3649 .next = t_next,
3650 .stop = t_stop,
3651 .show = t_show,
3652};
3653
3654static int
3655ftrace_avail_open(struct inode *inode, struct file *file)
3656{
3657 struct ftrace_iterator *iter;
3658
3659 if (unlikely(ftrace_disabled))
3660 return -ENODEV;
3661
3662 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
3663 if (!iter)
3664 return -ENOMEM;
3665
3666 iter->pg = ftrace_pages_start;
3667 iter->ops = &global_ops;
3668
3669 return 0;
3670}
3671
3672static int
3673ftrace_enabled_open(struct inode *inode, struct file *file)
3674{
3675 struct ftrace_iterator *iter;
3676
3677 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
3678 if (!iter)
3679 return -ENOMEM;
3680
3681 iter->pg = ftrace_pages_start;
3682 iter->flags = FTRACE_ITER_ENABLED;
3683 iter->ops = &global_ops;
3684
3685 return 0;
3686}
3687
/**
 * ftrace_regex_open - initialize function tracer filter files
 * @ops: The ftrace_ops that hold the hash filters
 * @flag: The type of filter to process
 * @inode: The inode, usually passed in to your open routine
 * @file: The file, usually passed in to your open routine
 *
 * ftrace_regex_open() initializes the filter files for the
 * @ops. Depending on @flag it may process the filter hash or
 * the notrace hash of @ops. With this called from the open
 * routine, you can use ftrace_filter_write() for the write
 * routine if @flag has FTRACE_ITER_FILTER set, or
 * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
 * tracing_lseek() should be used as the lseek routine, and
 * release must call ftrace_regex_release().
 */
3704int
3705ftrace_regex_open(struct ftrace_ops *ops, int flag,
3706 struct inode *inode, struct file *file)
3707{
3708 struct ftrace_iterator *iter;
3709 struct ftrace_hash *hash;
3710 struct list_head *mod_head;
3711 struct trace_array *tr = ops->private;
3712 int ret = 0;
3713
3714 ftrace_ops_init(ops);
3715
3716 if (unlikely(ftrace_disabled))
3717 return -ENODEV;
3718
3719 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
3720 if (!iter)
3721 return -ENOMEM;
3722
3723 if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
3724 kfree(iter);
3725 return -ENOMEM;
3726 }
3727
3728 iter->ops = ops;
3729 iter->flags = flag;
3730 iter->tr = tr;
3731
3732 mutex_lock(&ops->func_hash->regex_lock);
3733
3734 if (flag & FTRACE_ITER_NOTRACE) {
3735 hash = ops->func_hash->notrace_hash;
3736 mod_head = tr ? &tr->mod_notrace : NULL;
3737 } else {
3738 hash = ops->func_hash->filter_hash;
3739 mod_head = tr ? &tr->mod_trace : NULL;
3740 }
3741
3742 iter->mod_list = mod_head;
3743
3744 if (file->f_mode & FMODE_WRITE) {
3745 const int size_bits = FTRACE_HASH_DEFAULT_BITS;
3746
3747 if (file->f_flags & O_TRUNC) {
3748 iter->hash = alloc_ftrace_hash(size_bits);
3749 clear_ftrace_mod_list(mod_head);
3750 } else {
3751 iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash);
3752 }
3753
3754 if (!iter->hash) {
3755 trace_parser_put(&iter->parser);
3756 kfree(iter);
3757 ret = -ENOMEM;
3758 goto out_unlock;
3759 }
3760 } else
3761 iter->hash = hash;
3762
3763 if (file->f_mode & FMODE_READ) {
3764 iter->pg = ftrace_pages_start;
3765
3766 ret = seq_open(file, &show_ftrace_seq_ops);
3767 if (!ret) {
3768 struct seq_file *m = file->private_data;
3769 m->private = iter;
3770 } else {
			/* Failed */
3772 free_ftrace_hash(iter->hash);
3773 trace_parser_put(&iter->parser);
3774 kfree(iter);
3775 }
3776 } else
3777 file->private_data = iter;
3778
3779 out_unlock:
3780 mutex_unlock(&ops->func_hash->regex_lock);
3781
3782 return ret;
3783}
3784
3785static int
3786ftrace_filter_open(struct inode *inode, struct file *file)
3787{
3788 struct ftrace_ops *ops = inode->i_private;
3789
3790 return ftrace_regex_open(ops,
3791 FTRACE_ITER_FILTER | FTRACE_ITER_DO_PROBES,
3792 inode, file);
3793}
3794
3795static int
3796ftrace_notrace_open(struct inode *inode, struct file *file)
3797{
3798 struct ftrace_ops *ops = inode->i_private;
3799
3800 return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE,
3801 inode, file);
3802}
3803
3804
3805struct ftrace_glob {
3806 char *search;
3807 unsigned len;
3808 int type;
3809};
3810
/*
 * If symbols in an architecture don't correspond exactly to the user-visible
 * name of what they represent, it is possible to define this function to
 * perform the necessary adjustments.
 */
3816char * __weak arch_ftrace_match_adjust(char *str, const char *search)
3817{
3818 return str;
3819}
3820
3821static int ftrace_match(char *str, struct ftrace_glob *g)
3822{
3823 int matched = 0;
3824 int slen;
3825
3826 str = arch_ftrace_match_adjust(str, g->search);
3827
3828 switch (g->type) {
3829 case MATCH_FULL:
3830 if (strcmp(str, g->search) == 0)
3831 matched = 1;
3832 break;
3833 case MATCH_FRONT_ONLY:
3834 if (strncmp(str, g->search, g->len) == 0)
3835 matched = 1;
3836 break;
3837 case MATCH_MIDDLE_ONLY:
3838 if (strstr(str, g->search))
3839 matched = 1;
3840 break;
3841 case MATCH_END_ONLY:
3842 slen = strlen(str);
3843 if (slen >= g->len &&
3844 memcmp(str + slen - g->len, g->search, g->len) == 0)
3845 matched = 1;
3846 break;
3847 case MATCH_GLOB:
3848 if (glob_match(g->search, str))
3849 matched = 1;
3850 break;
3851 }
3852
3853 return matched;
3854}
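
/*
 * Examples of how the glob types above behave (illustrative only; the
 * type is chosen by filter_parse_regex() from the user's pattern):
 *
 *	"schedule"	MATCH_FULL	  matches only "schedule"
 *	"sched_*"	MATCH_FRONT_ONLY  matches "sched_switch", ...
 *	"*_lock"	MATCH_END_ONLY	  matches "spin_lock", ...
 *	"*page*"	MATCH_MIDDLE_ONLY matches "alloc_pages", ...
 *	"mutex_*_slowpath" MATCH_GLOB	  full glob_match() semantics
 */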
3855
3856static int
3857enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int clear_filter)
3858{
3859 struct ftrace_func_entry *entry;
3860 int ret = 0;
3861
3862 entry = ftrace_lookup_ip(hash, rec->ip);
3863 if (clear_filter) {
		/* Do nothing if it doesn't exist */
3865 if (!entry)
3866 return 0;
3867
3868 free_hash_entry(hash, entry);
3869 } else {
		/* Do nothing if it exists */
3871 if (entry)
3872 return 0;
3873
3874 ret = add_hash_entry(hash, rec->ip);
3875 }
3876 return ret;
3877}
3878
3879static int
3880ftrace_match_record(struct dyn_ftrace *rec, struct ftrace_glob *func_g,
3881 struct ftrace_glob *mod_g, int exclude_mod)
3882{
3883 char str[KSYM_SYMBOL_LEN];
3884 char *modname;
3885
3886 kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
3887
3888 if (mod_g) {
3889 int mod_matches = (modname) ? ftrace_match(modname, mod_g) : 0;
3890
		/* blank module name match (only if module is present) */
3892 if (!mod_g->len) {
			/* blank module globbing: modname xor exclude_mod */
3894 if (!exclude_mod != !modname)
3895 goto func_match;
3896 return 0;
3897 }
3898
		/*
		 * exclude_mod is set to trace everything but the given
		 * module. If it is set and the module matches, then
		 * return 0. If it is not set, and the module doesn't match
		 * also return 0. Otherwise, check the function to see if
		 * it matches.
		 */
3906 if (!mod_matches == !exclude_mod)
3907 return 0;
3908func_match:
		/* blank search means to match all funcs in the mod */
3910 if (!func_g->len)
3911 return 1;
3912 }
3913
3914 return ftrace_match(str, func_g);
3915}
3916
3917static int
3918match_records(struct ftrace_hash *hash, char *func, int len, char *mod)
3919{
3920 struct ftrace_page *pg;
3921 struct dyn_ftrace *rec;
3922 struct ftrace_glob func_g = { .type = MATCH_FULL };
3923 struct ftrace_glob mod_g = { .type = MATCH_FULL };
3924 struct ftrace_glob *mod_match = (mod) ? &mod_g : NULL;
3925 int exclude_mod = 0;
3926 int found = 0;
3927 int ret;
3928 int clear_filter = 0;
3929
3930 if (func) {
3931 func_g.type = filter_parse_regex(func, len, &func_g.search,
3932 &clear_filter);
3933 func_g.len = strlen(func_g.search);
3934 }
3935
3936 if (mod) {
3937 mod_g.type = filter_parse_regex(mod, strlen(mod),
3938 &mod_g.search, &exclude_mod);
3939 mod_g.len = strlen(mod_g.search);
3940 }
3941
3942 mutex_lock(&ftrace_lock);
3943
3944 if (unlikely(ftrace_disabled))
3945 goto out_unlock;
3946
3947 do_for_each_ftrace_rec(pg, rec) {
3948
3949 if (rec->flags & FTRACE_FL_DISABLED)
3950 continue;
3951
3952 if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) {
3953 ret = enter_record(hash, rec, clear_filter);
3954 if (ret < 0) {
3955 found = ret;
3956 goto out_unlock;
3957 }
3958 found = 1;
3959 }
3960 } while_for_each_ftrace_rec();
3961 out_unlock:
3962 mutex_unlock(&ftrace_lock);
3963
3964 return found;
3965}
3966
3967static int
3968ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
3969{
3970 return match_records(hash, buff, len, NULL);
3971}
3972
3973static void ftrace_ops_update_code(struct ftrace_ops *ops,
3974 struct ftrace_ops_hash *old_hash)
3975{
3976 struct ftrace_ops *op;
3977
3978 if (!ftrace_enabled)
3979 return;
3980
3981 if (ops->flags & FTRACE_OPS_FL_ENABLED) {
3982 ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
3983 return;
3984 }
3985
	/*
	 * If this is the shared global_ops filter, then we need to
	 * check if there is another enabled ops that shares it. If so,
	 * we still need to run the modify code.
	 */
3991 if (ops->func_hash != &global_ops.local_hash)
3992 return;
3993
3994 do_for_each_ftrace_op(op, ftrace_ops_list) {
3995 if (op->func_hash == &global_ops.local_hash &&
3996 op->flags & FTRACE_OPS_FL_ENABLED) {
3997 ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash);
			/* Only need to do this once */
3999 return;
4000 }
4001 } while_for_each_ftrace_op(op);
4002}
4003
4004static int ftrace_hash_move_and_update_ops(struct ftrace_ops *ops,
4005 struct ftrace_hash **orig_hash,
4006 struct ftrace_hash *hash,
4007 int enable)
4008{
4009 struct ftrace_ops_hash old_hash_ops;
4010 struct ftrace_hash *old_hash;
4011 int ret;
4012
4013 old_hash = *orig_hash;
4014 old_hash_ops.filter_hash = ops->func_hash->filter_hash;
4015 old_hash_ops.notrace_hash = ops->func_hash->notrace_hash;
4016 ret = ftrace_hash_move(ops, enable, orig_hash, hash);
4017 if (!ret) {
4018 ftrace_ops_update_code(ops, &old_hash_ops);
4019 free_ftrace_hash_rcu(old_hash);
4020 }
4021 return ret;
4022}
4023
4024static bool module_exists(const char *module)
4025{
	/* All modules have the symbol __this_module */
4027 const char this_mod[] = "__this_module";
4028 char modname[MAX_PARAM_PREFIX_LEN + sizeof(this_mod) + 2];
4029 unsigned long val;
4030 int n;
4031
4032 n = snprintf(modname, sizeof(modname), "%s:%s", module, this_mod);
4033
4034 if (n > sizeof(modname) - 1)
4035 return false;
4036
4037 val = module_kallsyms_lookup_name(modname);
4038 return val != 0;
4039}
4040
4041static int cache_mod(struct trace_array *tr,
4042 const char *func, char *module, int enable)
4043{
4044 struct ftrace_mod_load *ftrace_mod, *n;
4045 struct list_head *head = enable ? &tr->mod_trace : &tr->mod_notrace;
4046 int ret;
4047
4048 mutex_lock(&ftrace_lock);
4049
	/* We do not cache inverse filters */
4051 if (func[0] == '!') {
4052 func++;
4053 ret = -EINVAL;
4054
		/* Look to remove this hash */
4056 list_for_each_entry_safe(ftrace_mod, n, head, list) {
4057 if (strcmp(ftrace_mod->module, module) != 0)
4058 continue;
4059
			/* A func of "*" matches all functions */
4061 if (strcmp(func, "*") == 0 ||
4062 (ftrace_mod->func &&
4063 strcmp(ftrace_mod->func, func) == 0)) {
4064 ret = 0;
4065 free_ftrace_mod(ftrace_mod);
4066 continue;
4067 }
4068 }
4069 goto out;
4070 }
4071
4072 ret = -EINVAL;
	/* We only care about modules that have not been loaded yet */
4074 if (module_exists(module))
4075 goto out;
4076
	/* Save this string off, and execute it when the module is loaded */
4078 ret = ftrace_add_mod(tr, func, module, enable);
4079 out:
4080 mutex_unlock(&ftrace_lock);
4081
4082 return ret;
4083}
4084
4085static int
4086ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
4087 int reset, int enable);
4088
4089#ifdef CONFIG_MODULES
4090static void process_mod_list(struct list_head *head, struct ftrace_ops *ops,
4091 char *mod, bool enable)
4092{
4093 struct ftrace_mod_load *ftrace_mod, *n;
4094 struct ftrace_hash **orig_hash, *new_hash;
4095 LIST_HEAD(process_mods);
4096 char *func;
4097 int ret;
4098
4099 mutex_lock(&ops->func_hash->regex_lock);
4100
4101 if (enable)
4102 orig_hash = &ops->func_hash->filter_hash;
4103 else
4104 orig_hash = &ops->func_hash->notrace_hash;
4105
4106 new_hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS,
4107 *orig_hash);
4108 if (!new_hash)
4109 goto out;
4110
4111 mutex_lock(&ftrace_lock);
4112
4113 list_for_each_entry_safe(ftrace_mod, n, head, list) {
4114
4115 if (strcmp(ftrace_mod->module, mod) != 0)
4116 continue;
4117
4118 if (ftrace_mod->func)
4119 func = kstrdup(ftrace_mod->func, GFP_KERNEL);
4120 else
4121 func = kstrdup("*", GFP_KERNEL);
4122
4123 if (!func)
4124 continue;
4125
4126 list_del(&ftrace_mod->list);
4127 list_add(&ftrace_mod->list, &process_mods);
4128
		/* Use the newly allocated func, as it may be "*" */
4130 kfree(ftrace_mod->func);
4131 ftrace_mod->func = func;
4132 }
4133
4134 mutex_unlock(&ftrace_lock);
4135
4136 list_for_each_entry_safe(ftrace_mod, n, &process_mods, list) {
4137
4138 func = ftrace_mod->func;
4139
		/* Grabs ftrace_lock, which is why we have this extra step */
4141 match_records(new_hash, func, strlen(func), mod);
4142 free_ftrace_mod(ftrace_mod);
4143 }
4144
4145 if (enable && list_empty(head))
4146 new_hash->flags &= ~FTRACE_HASH_FL_MOD;
4147
4148 mutex_lock(&ftrace_lock);
4149
4150 ret = ftrace_hash_move_and_update_ops(ops, orig_hash,
4151 new_hash, enable);
4152 mutex_unlock(&ftrace_lock);
4153
4154 out:
4155 mutex_unlock(&ops->func_hash->regex_lock);
4156
4157 free_ftrace_hash(new_hash);
4158}
4159
4160static void process_cached_mods(const char *mod_name)
4161{
4162 struct trace_array *tr;
4163 char *mod;
4164
4165 mod = kstrdup(mod_name, GFP_KERNEL);
4166 if (!mod)
4167 return;
4168
4169 mutex_lock(&trace_types_lock);
4170 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
4171 if (!list_empty(&tr->mod_trace))
4172 process_mod_list(&tr->mod_trace, tr->ops, mod, true);
4173 if (!list_empty(&tr->mod_notrace))
4174 process_mod_list(&tr->mod_notrace, tr->ops, mod, false);
4175 }
4176 mutex_unlock(&trace_types_lock);
4177
4178 kfree(mod);
4179}
4180#endif
4181
/*
 * We register the module command as a template to show others how
 * to register a command as well.
 */
4187static int
4188ftrace_mod_callback(struct trace_array *tr, struct ftrace_hash *hash,
4189 char *func_orig, char *cmd, char *module, int enable)
4190{
4191 char *func;
4192 int ret;
4193
	/* match_records() modifies func, and we need the original */
4195 func = kstrdup(func_orig, GFP_KERNEL);
4196 if (!func)
4197 return -ENOMEM;
4198
	/*
	 * cmd == 'mod' because we only registered this func
	 * for the 'mod' ftrace_func_command.
	 * But if you register one func with multiple commands,
	 * you can tell which command was used by the cmd
	 * parameter.
	 */
4206 ret = match_records(hash, func, strlen(func), module);
4207 kfree(func);
4208
4209 if (!ret)
4210 return cache_mod(tr, func_orig, module, enable);
4211 if (ret < 0)
4212 return ret;
4213 return 0;
4214}
4215
4216static struct ftrace_func_command ftrace_mod_cmd = {
4217 .name = "mod",
4218 .func = ftrace_mod_callback,
4219};
4220
4221static int __init ftrace_mod_cmd_init(void)
4222{
4223 return register_ftrace_command(&ftrace_mod_cmd);
4224}
4225core_initcall(ftrace_mod_cmd_init);
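
/*
 * From user space the command registered above is reached through the
 * filter files, e.g. (paths assume the usual tracefs mount point):
 *
 *	echo ':mod:ext4'  > /sys/kernel/tracing/set_ftrace_filter
 *	echo '!*:mod:ext4' > /sys/kernel/tracing/set_ftrace_filter
 *
 * The first traces every function in the ext4 module (caching the
 * request if ext4 is not loaded yet); the second removes that filter.
 */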
4226
4227static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
4228 struct ftrace_ops *op, struct pt_regs *pt_regs)
4229{
4230 struct ftrace_probe_ops *probe_ops;
4231 struct ftrace_func_probe *probe;
4232
4233 probe = container_of(op, struct ftrace_func_probe, ops);
4234 probe_ops = probe->probe_ops;
4235
	/*
	 * Disable preemption for these calls to prevent a RCU grace
	 * period. This syncs the hash iteration and freeing of items
	 * on the hash. rcu_read_lock is too dangerous here.
	 */
4241 preempt_disable_notrace();
4242 probe_ops->func(ip, parent_ip, probe->tr, probe_ops, probe->data);
4243 preempt_enable_notrace();
4244}
4245
4246struct ftrace_func_map {
4247 struct ftrace_func_entry entry;
4248 void *data;
4249};
4250
4251struct ftrace_func_mapper {
4252 struct ftrace_hash hash;
4253};
4254
/**
 * allocate_ftrace_func_mapper - allocate a new ftrace_func_mapper
 *
 * Returns a ftrace_func_mapper descriptor that can be used to map
 * ips to data.
 */
4260struct ftrace_func_mapper *allocate_ftrace_func_mapper(void)
4261{
4262 struct ftrace_hash *hash;
4263
	/*
	 * The mapper is simply a ftrace_hash, but since the entries
	 * in the hash are not ftrace_func_entry type, we define it
	 * as a separate structure.
	 */
4269 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
4270 return (struct ftrace_func_mapper *)hash;
4271}
4272
/**
 * ftrace_func_mapper_find_ip - Find some data mapped to an ip
 * @mapper: The mapper that has the ip maps
 * @ip: the instruction pointer to find the data for
 *
 * Returns the data mapped to @ip if found otherwise NULL. The return
 * is actually the address of the mapper data pointer. The address is
 * returned for use cases where the data is no bigger than a long, and
 * the user can use the data pointer as its data instead of having to
 * allocate more memory for the reference.
 */
4284void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
4285 unsigned long ip)
4286{
4287 struct ftrace_func_entry *entry;
4288 struct ftrace_func_map *map;
4289
4290 entry = ftrace_lookup_ip(&mapper->hash, ip);
4291 if (!entry)
4292 return NULL;
4293
4294 map = (struct ftrace_func_map *)entry;
4295 return &map->data;
4296}
4297
/**
 * ftrace_func_mapper_add_ip - Map some data to an ip
 * @mapper: The mapper that has the ip maps
 * @ip: The instruction pointer address to map @data to
 * @data: The data to map to @ip
 *
 * Returns 0 on success otherwise an error.
 */
4306int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
4307 unsigned long ip, void *data)
4308{
4309 struct ftrace_func_entry *entry;
4310 struct ftrace_func_map *map;
4311
4312 entry = ftrace_lookup_ip(&mapper->hash, ip);
4313 if (entry)
4314 return -EBUSY;
4315
4316 map = kmalloc(sizeof(*map), GFP_KERNEL);
4317 if (!map)
4318 return -ENOMEM;
4319
4320 map->entry.ip = ip;
4321 map->data = data;
4322
4323 __add_hash_entry(&mapper->hash, &map->entry);
4324
4325 return 0;
4326}
4327
/**
 * ftrace_func_mapper_remove_ip - Remove an ip from the mapping
 * @mapper: The mapper that has the ip maps
 * @ip: The instruction pointer address to remove the data from
 *
 * Returns the data if it is found, otherwise NULL.
 * Note, if the data pointer is used as the data itself (see
 * ftrace_func_mapper_find_ip()), then the return value may be
 * meaningless if the data pointer was set to zero.
 */
4338void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper,
4339 unsigned long ip)
4340{
4341 struct ftrace_func_entry *entry;
4342 struct ftrace_func_map *map;
4343 void *data;
4344
4345 entry = ftrace_lookup_ip(&mapper->hash, ip);
4346 if (!entry)
4347 return NULL;
4348
4349 map = (struct ftrace_func_map *)entry;
4350 data = map->data;
4351
4352 remove_hash_entry(&mapper->hash, entry);
4353 kfree(entry);
4354
4355 return data;
4356}
4357
/**
 * free_ftrace_func_mapper - free a mapping of ips and data
 * @mapper: The mapper that has the ip maps
 * @free_func: A function to be called on each data item.
 *
 * This is used to free the function mapper. The @free_func is optional
 * and can be used if the data needs to be freed as well.
 */
4366void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
4367 ftrace_mapper_func free_func)
4368{
4369 struct ftrace_func_entry *entry;
4370 struct ftrace_func_map *map;
4371 struct hlist_head *hhd;
4372 int size = 1 << mapper->hash.size_bits;
4373 int i;
4374
4375 if (free_func && mapper->hash.count) {
4376 for (i = 0; i < size; i++) {
4377 hhd = &mapper->hash.buckets[i];
4378 hlist_for_each_entry(entry, hhd, hlist) {
4379 map = (struct ftrace_func_map *)entry;
4380 free_func(map);
4381 }
4382 }
4383 }
4384 free_ftrace_hash(&mapper->hash);
4385}
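
/*
 * A minimal usage sketch of the mapper API above (hypothetical caller;
 * probe implementations such as the snapshot trigger use this pattern
 * to remember a count per traced ip):
 *
 *	struct ftrace_func_mapper *mapper;
 *	void **data;
 *
 *	mapper = allocate_ftrace_func_mapper();
 *	if (!mapper)
 *		return -ENOMEM;
 *	ftrace_func_mapper_add_ip(mapper, ip, (void *)1UL);
 *	data = ftrace_func_mapper_find_ip(mapper, ip);
 *	if (data)
 *		(*(unsigned long *)data)++;	// bump the in-place count
 *	free_ftrace_func_mapper(mapper, NULL);
 */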
4386
4387static void release_probe(struct ftrace_func_probe *probe)
4388{
4389 struct ftrace_probe_ops *probe_ops;
4390
4391 mutex_lock(&ftrace_lock);
4392
4393 WARN_ON(probe->ref <= 0);
4394
	/* Subtract the ref that was used to protect this instance */
4396 probe->ref--;
4397
4398 if (!probe->ref) {
4399 probe_ops = probe->probe_ops;
		/*
		 * Sending zero as ip tells probe_ops to free
		 * the probe->data itself
		 */
4404 if (probe_ops->free)
4405 probe_ops->free(probe_ops, probe->tr, 0, probe->data);
4406 list_del(&probe->list);
4407 kfree(probe);
4408 }
4409 mutex_unlock(&ftrace_lock);
4410}
4411
4412static void acquire_probe_locked(struct ftrace_func_probe *probe)
4413{
	/*
	 * Add one ref to keep it from being freed when releasing the
	 * ftrace_lock mutex.
	 */
4418 probe->ref++;
4419}
4420
4421int
4422register_ftrace_function_probe(char *glob, struct trace_array *tr,
4423 struct ftrace_probe_ops *probe_ops,
4424 void *data)
4425{
4426 struct ftrace_func_entry *entry;
4427 struct ftrace_func_probe *probe;
4428 struct ftrace_hash **orig_hash;
4429 struct ftrace_hash *old_hash;
4430 struct ftrace_hash *hash;
4431 int count = 0;
4432 int size;
4433 int ret;
4434 int i;
4435
4436 if (WARN_ON(!tr))
4437 return -EINVAL;
4438
	/* We do not support '!' for function probes */
4440 if (WARN_ON(glob[0] == '!'))
4441 return -EINVAL;
4442
4443
4444 mutex_lock(&ftrace_lock);
	/* Check if the probe_ops is already registered */
4446 list_for_each_entry(probe, &tr->func_probes, list) {
4447 if (probe->probe_ops == probe_ops)
4448 break;
4449 }
4450 if (&probe->list == &tr->func_probes) {
4451 probe = kzalloc(sizeof(*probe), GFP_KERNEL);
4452 if (!probe) {
4453 mutex_unlock(&ftrace_lock);
4454 return -ENOMEM;
4455 }
4456 probe->probe_ops = probe_ops;
4457 probe->ops.func = function_trace_probe_call;
4458 probe->tr = tr;
4459 ftrace_ops_init(&probe->ops);
4460 list_add(&probe->list, &tr->func_probes);
4461 }
4462
4463 acquire_probe_locked(probe);
4464
4465 mutex_unlock(&ftrace_lock);
4466
	/*
	 * Note, there's a small window here that the func_hash->filter_hash
	 * may be NULL or empty. Need to be careful when reading the loop.
	 */
4471 mutex_lock(&probe->ops.func_hash->regex_lock);
4472
4473 orig_hash = &probe->ops.func_hash->filter_hash;
4474 old_hash = *orig_hash;
4475 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
4476
4477 if (!hash) {
4478 ret = -ENOMEM;
4479 goto out;
4480 }
4481
4482 ret = ftrace_match_records(hash, glob, strlen(glob));
4483
	/* Nothing found? */
4485 if (!ret)
4486 ret = -EINVAL;
4487
4488 if (ret < 0)
4489 goto out;
4490
4491 size = 1 << hash->size_bits;
4492 for (i = 0; i < size; i++) {
4493 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
4494 if (ftrace_lookup_ip(old_hash, entry->ip))
4495 continue;
			/*
			 * The caller might want to do something special
			 * for each function we find. We call the callback
			 * to give the caller an opportunity to do so.
			 */
4501 if (probe_ops->init) {
4502 ret = probe_ops->init(probe_ops, tr,
4503 entry->ip, data,
4504 &probe->data);
4505 if (ret < 0) {
4506 if (probe_ops->free && count)
4507 probe_ops->free(probe_ops, tr,
4508 0, probe->data);
4509 probe->data = NULL;
4510 goto out;
4511 }
4512 }
4513 count++;
4514 }
4515 }
4516
4517 mutex_lock(&ftrace_lock);
4518
4519 if (!count) {
		/* Nothing was added? */
4521 ret = -EINVAL;
4522 goto out_unlock;
4523 }
4524
4525 ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
4526 hash, 1);
4527 if (ret < 0)
4528 goto err_unlock;
4529
	/* One ref for each new function traced */
4531 probe->ref += count;
4532
4533 if (!(probe->ops.flags & FTRACE_OPS_FL_ENABLED))
4534 ret = ftrace_startup(&probe->ops, 0);
4535
4536 out_unlock:
4537 mutex_unlock(&ftrace_lock);
4538
4539 if (!ret)
4540 ret = count;
4541 out:
4542 mutex_unlock(&probe->ops.func_hash->regex_lock);
4543 free_ftrace_hash(hash);
4544
4545 release_probe(probe);
4546
4547 return ret;
4548
4549 err_unlock:
4550 if (!probe_ops->free || !count)
4551 goto out_unlock;
4552
	/* Failed to do the move, need to call the free functions */
4554 for (i = 0; i < size; i++) {
4555 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
4556 if (ftrace_lookup_ip(old_hash, entry->ip))
4557 continue;
4558 probe_ops->free(probe_ops, tr, entry->ip, probe->data);
4559 }
4560 }
4561 goto out_unlock;
4562}
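
/*
 * Function probes registered through this interface back the
 * "func:command" syntax of the filter files, e.g.:
 *
 *	echo 'schedule:stacktrace' > /sys/kernel/tracing/set_ftrace_filter
 *
 * which attaches the stacktrace probe_ops to every function matching
 * "schedule". Unregistering below accepts the same glob, or NULL to
 * remove all locations of a probe.
 */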
4563
4564int
4565unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
4566 struct ftrace_probe_ops *probe_ops)
4567{
4568 struct ftrace_ops_hash old_hash_ops;
4569 struct ftrace_func_entry *entry;
4570 struct ftrace_func_probe *probe;
4571 struct ftrace_glob func_g;
4572 struct ftrace_hash **orig_hash;
4573 struct ftrace_hash *old_hash;
4574 struct ftrace_hash *hash = NULL;
4575 struct hlist_node *tmp;
4576 struct hlist_head hhd;
4577 char str[KSYM_SYMBOL_LEN];
4578 int count = 0;
4579 int i, ret = -ENODEV;
4580 int size;
4581
4582 if (!glob || !strlen(glob) || !strcmp(glob, "*"))
4583 func_g.search = NULL;
4584 else {
4585 int not;
4586
4587 func_g.type = filter_parse_regex(glob, strlen(glob),
						 &func_g.search, &not);
4589 func_g.len = strlen(func_g.search);
4590
		/* we do not support '!' for function probes */
4592 if (WARN_ON(not))
4593 return -EINVAL;
4594 }
4595
4596 mutex_lock(&ftrace_lock);
4597
4598 list_for_each_entry(probe, &tr->func_probes, list) {
4599 if (probe->probe_ops == probe_ops)
4600 break;
4601 }
4602 if (&probe->list == &tr->func_probes)
4603 goto err_unlock_ftrace;
4604
4605 ret = -EINVAL;
4606 if (!(probe->ops.flags & FTRACE_OPS_FL_INITIALIZED))
4607 goto err_unlock_ftrace;
4608
4609 acquire_probe_locked(probe);
4610
4611 mutex_unlock(&ftrace_lock);
4612
4613 mutex_lock(&probe->ops.func_hash->regex_lock);
4614
4615 orig_hash = &probe->ops.func_hash->filter_hash;
4616 old_hash = *orig_hash;
4617
4618 if (ftrace_hash_empty(old_hash))
4619 goto out_unlock;
4620
4621 old_hash_ops.filter_hash = old_hash;
	/* Probes only have filter hashes */
4623 old_hash_ops.notrace_hash = NULL;
4624
4625 ret = -ENOMEM;
4626 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
4627 if (!hash)
4628 goto out_unlock;
4629
4630 INIT_HLIST_HEAD(&hhd);
4631
4632 size = 1 << hash->size_bits;
4633 for (i = 0; i < size; i++) {
4634 hlist_for_each_entry_safe(entry, tmp, &hash->buckets[i], hlist) {
4635
4636 if (func_g.search) {
4637 kallsyms_lookup(entry->ip, NULL, NULL,
4638 NULL, str);
4639 if (!ftrace_match(str, &func_g))
4640 continue;
4641 }
4642 count++;
4643 remove_hash_entry(hash, entry);
4644 hlist_add_head(&entry->hlist, &hhd);
4645 }
4646 }
4647
	/* Nothing found? */
4649 if (!count) {
4650 ret = -EINVAL;
4651 goto out_unlock;
4652 }
4653
4654 mutex_lock(&ftrace_lock);
4655
4656 WARN_ON(probe->ref < count);
4657
4658 probe->ref -= count;
4659
4660 if (ftrace_hash_empty(hash))
4661 ftrace_shutdown(&probe->ops, 0);
4662
4663 ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
4664 hash, 1);
4665
	/* still need to update the function call sites */
4667 if (ftrace_enabled && !ftrace_hash_empty(hash))
4668 ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS,
4669 &old_hash_ops);
4670 synchronize_rcu();
4671
4672 hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) {
4673 hlist_del(&entry->hlist);
4674 if (probe_ops->free)
4675 probe_ops->free(probe_ops, tr, entry->ip, probe->data);
4676 kfree(entry);
4677 }
4678 mutex_unlock(&ftrace_lock);
4679
4680 out_unlock:
4681 mutex_unlock(&probe->ops.func_hash->regex_lock);
4682 free_ftrace_hash(hash);
4683
4684 release_probe(probe);
4685
4686 return ret;
4687
4688 err_unlock_ftrace:
4689 mutex_unlock(&ftrace_lock);
4690 return ret;
4691}
4692
4693void clear_ftrace_function_probes(struct trace_array *tr)
4694{
4695 struct ftrace_func_probe *probe, *n;
4696
4697 list_for_each_entry_safe(probe, n, &tr->func_probes, list)
4698 unregister_ftrace_function_probe_func(NULL, tr, probe->probe_ops);
4699}
4700
4701static LIST_HEAD(ftrace_commands);
4702static DEFINE_MUTEX(ftrace_cmd_mutex);
4703
/*
 * Currently we only register ftrace commands from __init, so mark this
 * __init too.
 */
4708__init int register_ftrace_command(struct ftrace_func_command *cmd)
4709{
4710 struct ftrace_func_command *p;
4711 int ret = 0;
4712
4713 mutex_lock(&ftrace_cmd_mutex);
4714 list_for_each_entry(p, &ftrace_commands, list) {
4715 if (strcmp(cmd->name, p->name) == 0) {
4716 ret = -EBUSY;
4717 goto out_unlock;
4718 }
4719 }
4720 list_add(&cmd->list, &ftrace_commands);
4721 out_unlock:
4722 mutex_unlock(&ftrace_cmd_mutex);
4723
4724 return ret;
4725}
4726
/*
 * Currently we only unregister ftrace commands from __init, so mark
 * this __init too.
 */
4731__init int unregister_ftrace_command(struct ftrace_func_command *cmd)
4732{
4733 struct ftrace_func_command *p, *n;
4734 int ret = -ENODEV;
4735
4736 mutex_lock(&ftrace_cmd_mutex);
4737 list_for_each_entry_safe(p, n, &ftrace_commands, list) {
4738 if (strcmp(cmd->name, p->name) == 0) {
4739 ret = 0;
4740 list_del_init(&p->list);
4741 goto out_unlock;
4742 }
4743 }
4744 out_unlock:
4745 mutex_unlock(&ftrace_cmd_mutex);
4746
4747 return ret;
4748}
4749
4750static int ftrace_process_regex(struct ftrace_iterator *iter,
4751 char *buff, int len, int enable)
4752{
4753 struct ftrace_hash *hash = iter->hash;
4754 struct trace_array *tr = iter->ops->private;
4755 char *func, *command, *next = buff;
4756 struct ftrace_func_command *p;
4757 int ret = -EINVAL;
4758
4759 func = strsep(&next, ":");
4760
4761 if (!next) {
4762 ret = ftrace_match_records(hash, func, len);
4763 if (!ret)
4764 ret = -EINVAL;
4765 if (ret < 0)
4766 return ret;
4767 return 0;
4768 }
4769
4770
	/* command found */
4772 command = strsep(&next, ":");
4773
4774 mutex_lock(&ftrace_cmd_mutex);
4775 list_for_each_entry(p, &ftrace_commands, list) {
4776 if (strcmp(p->name, command) == 0) {
4777 ret = p->func(tr, hash, func, command, next, enable);
4778 goto out_unlock;
4779 }
4780 }
4781 out_unlock:
4782 mutex_unlock(&ftrace_cmd_mutex);
4783
4784 return ret;
4785}
4786
4787static ssize_t
4788ftrace_regex_write(struct file *file, const char __user *ubuf,
4789 size_t cnt, loff_t *ppos, int enable)
4790{
4791 struct ftrace_iterator *iter;
4792 struct trace_parser *parser;
4793 ssize_t ret, read;
4794
4795 if (!cnt)
4796 return 0;
4797
4798 if (file->f_mode & FMODE_READ) {
4799 struct seq_file *m = file->private_data;
4800 iter = m->private;
4801 } else
4802 iter = file->private_data;
4803
4804 if (unlikely(ftrace_disabled))
4805 return -ENODEV;
4806
4807
	/* iter->hash is a local copy, so we don't need regex_lock */
4809 parser = &iter->parser;
4810 read = trace_get_user(parser, ubuf, cnt, ppos);
4811
4812 if (read >= 0 && trace_parser_loaded(parser) &&
4813 !trace_parser_cont(parser)) {
4814 ret = ftrace_process_regex(iter, parser->buffer,
4815 parser->idx, enable);
4816 trace_parser_clear(parser);
4817 if (ret < 0)
4818 goto out;
4819 }
4820
4821 ret = read;
4822 out:
4823 return ret;
4824}
4825
4826ssize_t
4827ftrace_filter_write(struct file *file, const char __user *ubuf,
4828 size_t cnt, loff_t *ppos)
4829{
4830 return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
4831}
4832
4833ssize_t
4834ftrace_notrace_write(struct file *file, const char __user *ubuf,
4835 size_t cnt, loff_t *ppos)
4836{
4837 return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
4838}
4839
4840static int
4841ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
4842{
4843 struct ftrace_func_entry *entry;
4844
4845 if (!ftrace_location(ip))
4846 return -EINVAL;
4847
4848 if (remove) {
4849 entry = ftrace_lookup_ip(hash, ip);
4850 if (!entry)
4851 return -ENOENT;
4852 free_hash_entry(hash, entry);
4853 return 0;
4854 }
4855
4856 return add_hash_entry(hash, ip);
4857}
4858
4859static int
4860ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
4861 unsigned long ip, int remove, int reset, int enable)
4862{
4863 struct ftrace_hash **orig_hash;
4864 struct ftrace_hash *hash;
4865 int ret;
4866
4867 if (unlikely(ftrace_disabled))
4868 return -ENODEV;
4869
4870 mutex_lock(&ops->func_hash->regex_lock);
4871
4872 if (enable)
4873 orig_hash = &ops->func_hash->filter_hash;
4874 else
4875 orig_hash = &ops->func_hash->notrace_hash;
4876
4877 if (reset)
4878 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
4879 else
4880 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
4881
4882 if (!hash) {
4883 ret = -ENOMEM;
4884 goto out_regex_unlock;
4885 }
4886
4887 if (buf && !ftrace_match_records(hash, buf, len)) {
4888 ret = -EINVAL;
4889 goto out_regex_unlock;
4890 }
4891 if (ip) {
4892 ret = ftrace_match_addr(hash, ip, remove);
4893 if (ret < 0)
4894 goto out_regex_unlock;
4895 }
4896
4897 mutex_lock(&ftrace_lock);
4898 ret = ftrace_hash_move_and_update_ops(ops, orig_hash, hash, enable);
4899 mutex_unlock(&ftrace_lock);
4900
4901 out_regex_unlock:
4902 mutex_unlock(&ops->func_hash->regex_lock);
4903
4904 free_ftrace_hash(hash);
4905 return ret;
4906}
4907
4908static int
4909ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
4910 int reset, int enable)
4911{
	return ftrace_set_hash(ops, NULL, 0, ip, remove, reset, enable);
4913}
4914
4915#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
4916
4917struct ftrace_direct_func {
4918 struct list_head next;
4919 unsigned long addr;
4920 int count;
4921};
4922
4923static LIST_HEAD(ftrace_direct_funcs);
4924
/**
 * ftrace_find_direct_func - test an address if it is a registered direct caller
 * @addr: The address of a registered direct caller
 *
 * This searches to see if a ftrace direct caller has been registered
 * at a specific address, and if so, it returns a descriptor for it.
 *
 * This can be used by architecture code to see if an address is
 * a direct caller (trampoline) attached to a fentry/mcount location.
 * This is useful for the function_graph tracer, as it may need to
 * do adjustments if it traced a location that also has a direct
 * trampoline attached to it.
 */
4938struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr)
4939{
4940 struct ftrace_direct_func *entry;
4941 bool found = false;
4942
	/* May be called by fgraph trampoline (protected by rcu tasks) */
4944 list_for_each_entry_rcu(entry, &ftrace_direct_funcs, next) {
4945 if (entry->addr == addr) {
4946 found = true;
4947 break;
4948 }
4949 }
4950 if (found)
4951 return entry;
4952
4953 return NULL;
4954}
4955
4956static struct ftrace_direct_func *ftrace_alloc_direct_func(unsigned long addr)
4957{
4958 struct ftrace_direct_func *direct;
4959
4960 direct = kmalloc(sizeof(*direct), GFP_KERNEL);
4961 if (!direct)
4962 return NULL;
4963 direct->addr = addr;
4964 direct->count = 0;
4965 list_add_rcu(&direct->next, &ftrace_direct_funcs);
4966 ftrace_direct_func_count++;
4967 return direct;
4968}
4969
/**
 * register_ftrace_direct - Call a custom trampoline directly
 * @ip: The address of the nop at the beginning of a function
 * @addr: The address of the trampoline to call at @ip
 *
 * This is used to connect a direct call from the nop in a function
 * to a trampoline to call @addr without conflicting with other
 * ftrace callbacks attached to the same location.
 *
 * Returns:
 *  0 on success
 *  -EBUSY - Another direct function is already attached (there can be only one)
 *  -ENODEV - @ip does not point to a ftrace nop location (or not supported)
 *  -ENOMEM - There was an allocation failure.
 */
4987int register_ftrace_direct(unsigned long ip, unsigned long addr)
4988{
4989 struct ftrace_direct_func *direct;
4990 struct ftrace_func_entry *entry;
4991 struct ftrace_hash *free_hash = NULL;
4992 struct dyn_ftrace *rec;
4993 int ret = -EBUSY;
4994
4995 mutex_lock(&direct_mutex);
4996
	/* See if there's a direct function at that location already */
4998 if (ftrace_find_rec_direct(ip))
4999 goto out_unlock;
5000
5001 ret = -ENODEV;
5002 rec = lookup_rec(ip, ip);
5003 if (!rec)
5004 goto out_unlock;
5005
	/*
	 * Check if the rec says it has a direct call but we didn't
	 * actually have one registered.
	 */
5010 if (WARN_ON(rec->flags & FTRACE_FL_DIRECT))
5011 goto out_unlock;
5012
	/* Make sure the ip points to the exact record */
5014 if (ip != rec->ip) {
5015 ip = rec->ip;
		/* Need to check this ip for a direct too */
5017 if (ftrace_find_rec_direct(ip))
5018 goto out_unlock;
5019 }
5020
5021 ret = -ENOMEM;
5022 if (ftrace_hash_empty(direct_functions) ||
5023 direct_functions->count > 2 * (1 << direct_functions->size_bits)) {
5024 struct ftrace_hash *new_hash;
5025 int size = ftrace_hash_empty(direct_functions) ? 0 :
5026 direct_functions->count + 1;
5027
5028 if (size < 32)
5029 size = 32;
5030
5031 new_hash = dup_hash(direct_functions, size);
5032 if (!new_hash)
5033 goto out_unlock;
5034
5035 free_hash = direct_functions;
5036 direct_functions = new_hash;
5037 }
5038
5039 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
5040 if (!entry)
5041 goto out_unlock;
5042
5043 direct = ftrace_find_direct_func(addr);
5044 if (!direct) {
5045 direct = ftrace_alloc_direct_func(addr);
5046 if (!direct) {
5047 kfree(entry);
5048 goto out_unlock;
5049 }
5050 }
5051
5052 entry->ip = ip;
5053 entry->direct = addr;
5054 __add_hash_entry(direct_functions, entry);
5055
5056 ret = ftrace_set_filter_ip(&direct_ops, ip, 0, 0);
5057 if (ret)
5058 remove_hash_entry(direct_functions, entry);
5059
5060 if (!ret && !(direct_ops.flags & FTRACE_OPS_FL_ENABLED)) {
5061 ret = register_ftrace_function(&direct_ops);
5062 if (ret)
5063 ftrace_set_filter_ip(&direct_ops, ip, 1, 0);
5064 }
5065
5066 if (ret) {
5067 kfree(entry);
5068 if (!direct->count) {
5069 list_del_rcu(&direct->next);
5070 synchronize_rcu_tasks();
5071 kfree(direct);
5072 if (free_hash)
5073 free_ftrace_hash(free_hash);
5074 free_hash = NULL;
5075 ftrace_direct_func_count--;
5076 }
5077 } else {
5078 direct->count++;
5079 }
5080 out_unlock:
5081 mutex_unlock(&direct_mutex);
5082
5083 if (free_hash) {
5084 synchronize_rcu_tasks();
5085 free_ftrace_hash(free_hash);
5086 }
5087
5088 return ret;
5089}
5090EXPORT_SYMBOL_GPL(register_ftrace_direct);
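
/*
 * A minimal usage sketch (my_tramp is a hypothetical arch-level
 * trampoline that saves and restores the registers ftrace expects;
 * see samples/ftrace/ for complete, buildable examples):
 *
 *	unsigned long ip = (unsigned long)wake_up_process;
 *
 *	ret = register_ftrace_direct(ip, (unsigned long)my_tramp);
 *	...
 *	ret = unregister_ftrace_direct(ip, (unsigned long)my_tramp);
 */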
5091
5092static struct ftrace_func_entry *find_direct_entry(unsigned long *ip,
5093 struct dyn_ftrace **recp)
5094{
5095 struct ftrace_func_entry *entry;
5096 struct dyn_ftrace *rec;
5097
5098 rec = lookup_rec(*ip, *ip);
5099 if (!rec)
5100 return NULL;
5101
5102 entry = __ftrace_lookup_ip(direct_functions, rec->ip);
5103 if (!entry) {
5104 WARN_ON(rec->flags & FTRACE_FL_DIRECT);
5105 return NULL;
5106 }
5107
5108 WARN_ON(!(rec->flags & FTRACE_FL_DIRECT));
5109
	/* Passed in ip just needs to be on the call site */
5111 *ip = rec->ip;
5112
5113 if (recp)
5114 *recp = rec;
5115
5116 return entry;
5117}
5118
5119int unregister_ftrace_direct(unsigned long ip, unsigned long addr)
5120{
5121 struct ftrace_direct_func *direct;
5122 struct ftrace_func_entry *entry;
5123 int ret = -ENODEV;
5124
5125 mutex_lock(&direct_mutex);
5126
5127 entry = find_direct_entry(&ip, NULL);
5128 if (!entry)
5129 goto out_unlock;
5130
5131 if (direct_functions->count == 1)
5132 unregister_ftrace_function(&direct_ops);
5133
5134 ret = ftrace_set_filter_ip(&direct_ops, ip, 1, 0);
5135
5136 WARN_ON(ret);
5137
5138 remove_hash_entry(direct_functions, entry);
5139
5140 direct = ftrace_find_direct_func(addr);
5141 if (!WARN_ON(!direct)) {
		/* This is the good path (above the WARN_ON()) */
5143 direct->count--;
5144 WARN_ON(direct->count < 0);
5145 if (!direct->count) {
5146 list_del_rcu(&direct->next);
5147 synchronize_rcu_tasks();
5148 kfree(direct);
5149 kfree(entry);
5150 ftrace_direct_func_count--;
5151 }
5152 }
5153 out_unlock:
5154 mutex_unlock(&direct_mutex);
5155
5156 return ret;
5157}
5158EXPORT_SYMBOL_GPL(unregister_ftrace_direct);
5159
5160static struct ftrace_ops stub_ops = {
5161 .func = ftrace_stub,
5162};
5163
/**
 * ftrace_modify_direct_caller - modify ftrace nop directly
 * @entry: The ftrace hash entry of the direct helper for @rec
 * @rec: The record representing the function site to be patched
 * @old_addr: The location that the site at @rec->ip currently calls
 * @new_addr: The location that the site at @rec->ip should call
 *
 * An architecture may overwrite this function to optimize the
 * changing of the direct callback on an ftrace nop location.
 * This is the default, which uses a temporary stub ftrace_ops to
 * force the call site through the ftrace iterator while the direct
 * address is being switched.
 */
5182int __weak ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
5183 struct dyn_ftrace *rec,
5184 unsigned long old_addr,
5185 unsigned long new_addr)
5186{
5187 unsigned long ip = rec->ip;
5188 int ret;
5189
	/*
	 * The ftrace_lock was used to determine if the record
	 * had more than one registered user to it. If it did,
	 * we needed to prevent that from changing to do the quick
	 * switch. But if it did not (only a direct caller was attached)
	 * then this function is called. But this function can deal
	 * with the dynamic change of the direct caller, and no lock
	 * is needed to do the switch.
	 */
5200 mutex_unlock(&ftrace_lock);
5201
	/*
	 * By setting a stub function at the same address, we force
	 * the code to call the iterator and the direct_ops helper.
	 * This means that @ip does not call the direct call, and
	 * we can simply modify it.
	 */
5208 ret = ftrace_set_filter_ip(&stub_ops, ip, 0, 0);
5209 if (ret)
5210 goto out_lock;
5211
5212 ret = register_ftrace_function(&stub_ops);
5213 if (ret) {
5214 ftrace_set_filter_ip(&stub_ops, ip, 1, 0);
5215 goto out_lock;
5216 }
5217
5218 entry->direct = new_addr;
5219
	/*
	 * By removing the stub, we put back the direct call, calling
	 * the @new_addr.
	 */
5224 unregister_ftrace_function(&stub_ops);
5225 ftrace_set_filter_ip(&stub_ops, ip, 1, 0);
5226
5227 out_lock:
5228 mutex_lock(&ftrace_lock);
5229
5230 return ret;
5231}
5232
/**
 * modify_ftrace_direct - Modify an existing direct call to call something else
 * @ip: The instruction pointer to modify
 * @old_addr: The address that the current @ip calls directly
 * @new_addr: The address that the @ip should call
 *
 * This modifies a ftrace direct caller at an instruction pointer without
 * having to disable it first. The direct call will switch over to the
 * @new_addr without missing anything.
 *
 * Returns: zero on success. Non zero on error, which includes:
 *  -ENODEV : the @ip given has no direct caller attached
 *  -EINVAL : the @old_addr does not match the current direct caller
 */
5247int modify_ftrace_direct(unsigned long ip,
5248 unsigned long old_addr, unsigned long new_addr)
5249{
5250 struct ftrace_direct_func *direct, *new_direct = NULL;
5251 struct ftrace_func_entry *entry;
5252 struct dyn_ftrace *rec;
5253 int ret = -ENODEV;
5254
5255 mutex_lock(&direct_mutex);
5256
5257 mutex_lock(&ftrace_lock);
5258 entry = find_direct_entry(&ip, &rec);
5259 if (!entry)
5260 goto out_unlock;
5261
5262 ret = -EINVAL;
5263 if (entry->direct != old_addr)
5264 goto out_unlock;
5265
5266 direct = ftrace_find_direct_func(old_addr);
5267 if (WARN_ON(!direct))
5268 goto out_unlock;
5269 if (direct->count > 1) {
5270 ret = -ENOMEM;
5271 new_direct = ftrace_alloc_direct_func(new_addr);
5272 if (!new_direct)
5273 goto out_unlock;
5274 direct->count--;
5275 new_direct->count++;
5276 } else {
5277 direct->addr = new_addr;
5278 }
5279
	/*
	 * If there's no other ftrace callback on the rec->ip location,
	 * then it can be changed directly by the architecture.
	 * If there is another caller, then we just need to change the
	 * direct caller helper to point to @new_addr.
	 */
5286 if (ftrace_rec_count(rec) == 1) {
5287 ret = ftrace_modify_direct_caller(entry, rec, old_addr, new_addr);
5288 } else {
5289 entry->direct = new_addr;
5290 ret = 0;
5291 }
5292
5293 if (unlikely(ret && new_direct)) {
5294 direct->count++;
5295 list_del_rcu(&new_direct->next);
5296 synchronize_rcu_tasks();
5297 kfree(new_direct);
5298 ftrace_direct_func_count--;
5299 }
5300
5301 out_unlock:
5302 mutex_unlock(&ftrace_lock);
5303 mutex_unlock(&direct_mutex);
5304 return ret;
5305}
5306EXPORT_SYMBOL_GPL(modify_ftrace_direct);
5307#endif
5308
/**
 * ftrace_set_filter_ip - set a function to filter on in ftrace by address
 * @ops: the ops to set the filter with
 * @ip: the address to add to or remove from the filter.
 * @remove: non zero to remove the ip from the filter
 * @reset: non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @ip is NULL, it fails to update the filter.
 */
5319int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
5320 int remove, int reset)
5321{
5322 ftrace_ops_init(ops);
5323 return ftrace_set_addr(ops, ip, remove, reset, 1);
5324}
5325EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
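
/*
 * A sketch of how a caller (a kprobe-like client; my_ops is hypothetical)
 * narrows an ops to one call site before registering it:
 *
 *	unsigned long ip = kallsyms_lookup_name("do_sys_open");
 *
 *	ret = ftrace_set_filter_ip(&my_ops, ip, 0, 0);
 *	if (!ret)
 *		ret = register_ftrace_function(&my_ops);
 */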
5326
/**
 * ftrace_ops_set_global_filter - setup ops to use global filters
 * @ops: the ops which will use the global filters
 *
 * ftrace users who need global function trace filtering should call this.
 * It can set the global filter only if ops were not initialized before.
 */
5334void ftrace_ops_set_global_filter(struct ftrace_ops *ops)
5335{
5336 if (ops->flags & FTRACE_OPS_FL_INITIALIZED)
5337 return;
5338
5339 ftrace_ops_init(ops);
5340 ops->func_hash = &global_ops.local_hash;
5341}
5342EXPORT_SYMBOL_GPL(ftrace_ops_set_global_filter);
5343
5344static int
5345ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
5346 int reset, int enable)
5347{
5348 return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable);
5349}
5350
/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @ops: the ops to set the filter with
 * @buf: the string that holds the function filter text.
 * @len: the length of the string.
 * @reset: non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
5361int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
5362 int len, int reset)
5363{
5364 ftrace_ops_init(ops);
5365 return ftrace_set_regex(ops, buf, len, reset, 1);
5366}
5367EXPORT_SYMBOL_GPL(ftrace_set_filter);
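
/*
 * E.g. a module that wants its callback only on scheduler functions
 * could do (my_ops being a hypothetical ftrace_ops):
 *
 *	ret = ftrace_set_filter(&my_ops, "sched_*", strlen("sched_*"), 1);
 *	if (!ret)
 *		ret = register_ftrace_function(&my_ops);
 */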
5368
/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @ops: the ops to set the notrace filter with
 * @buf: the string that holds the function notrace text.
 * @len: the length of the string.
 * @reset: non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
5380int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
5381 int len, int reset)
5382{
5383 ftrace_ops_init(ops);
5384 return ftrace_set_regex(ops, buf, len, reset, 0);
5385}
5386EXPORT_SYMBOL_GPL(ftrace_set_notrace);
5387
/**
 * ftrace_set_global_filter - set a function to filter on with global tracers
 * @buf: the string that holds the function filter text.
 * @len: the length of the string.
 * @reset: non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
5396void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
5397{
5398 ftrace_set_regex(&global_ops, buf, len, reset, 1);
5399}
5400EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
5401
/**
 * ftrace_set_global_notrace - set a function to not trace with global tracers
 * @buf: the string that holds the function notrace text.
 * @len: the length of the string.
 * @reset: non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
5412void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
5413{
5414 ftrace_set_regex(&global_ops, buf, len, reset, 0);
5415}
5416EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
5417
/*
 * command line interface to allow users to set filters on boot up.
 */
5421#define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE
5422static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
5423static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
5424
/* Used by function selftest to not test if filter is set */
5426bool ftrace_filter_param __initdata;
5427
5428static int __init set_ftrace_notrace(char *str)
5429{
5430 ftrace_filter_param = true;
5431 strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
5432 return 1;
5433}
5434__setup("ftrace_notrace=", set_ftrace_notrace);
5435
5436static int __init set_ftrace_filter(char *str)
5437{
5438 ftrace_filter_param = true;
5439 strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
5440 return 1;
5441}
5442__setup("ftrace_filter=", set_ftrace_filter);
5443
5444#ifdef CONFIG_FUNCTION_GRAPH_TRACER
5445static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
5446static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
5447static int ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer);
5448
5449static int __init set_graph_function(char *str)
5450{
5451 strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
5452 return 1;
5453}
5454__setup("ftrace_graph_filter=", set_graph_function);
5455
5456static int __init set_graph_notrace_function(char *str)
5457{
5458 strlcpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE);
5459 return 1;
5460}
5461__setup("ftrace_graph_notrace=", set_graph_notrace_function);
5462
5463static int __init set_graph_max_depth_function(char *str)
5464{
5465 if (!str)
5466 return 0;
5467 fgraph_max_depth = simple_strtoul(str, NULL, 0);
5468 return 1;
5469}
5470__setup("ftrace_graph_max_depth=", set_graph_max_depth_function);
5471
5472static void __init set_ftrace_early_graph(char *buf, int enable)
5473{
5474 int ret;
5475 char *func;
5476 struct ftrace_hash *hash;
5477
5478 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
5479 if (WARN_ON(!hash))
5480 return;
5481
5482 while (buf) {
5483 func = strsep(&buf, ",");
5484
5485 ret = ftrace_graph_set_hash(hash, func);
5486 if (ret)
5487 printk(KERN_DEBUG "ftrace: function %s not "
5488 "traceable\n", func);
5489 }
5490
5491 if (enable)
5492 ftrace_graph_hash = hash;
5493 else
5494 ftrace_graph_notrace_hash = hash;
5495}
5496#endif
5497
5498void __init
5499ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
5500{
5501 char *func;
5502
5503 ftrace_ops_init(ops);
5504
5505 while (buf) {
5506 func = strsep(&buf, ",");
5507 ftrace_set_regex(ops, func, strlen(func), 0, enable);
5508 }
5509}
5510
5511static void __init set_ftrace_early_filters(void)
5512{
5513 if (ftrace_filter_buf[0])
5514 ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
5515 if (ftrace_notrace_buf[0])
5516 ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
5517#ifdef CONFIG_FUNCTION_GRAPH_TRACER
5518 if (ftrace_graph_buf[0])
5519 set_ftrace_early_graph(ftrace_graph_buf, 1);
5520 if (ftrace_graph_notrace_buf[0])
5521 set_ftrace_early_graph(ftrace_graph_notrace_buf, 0);
5522#endif
5523}
5524
5525int ftrace_regex_release(struct inode *inode, struct file *file)
5526{
5527 struct seq_file *m = (struct seq_file *)file->private_data;
5528 struct ftrace_iterator *iter;
5529 struct ftrace_hash **orig_hash;
5530 struct trace_parser *parser;
5531 int filter_hash;
5532 int ret;
5533
5534 if (file->f_mode & FMODE_READ) {
5535 iter = m->private;
5536 seq_release(inode, file);
5537 } else
5538 iter = file->private_data;
5539
5540 parser = &iter->parser;
5541 if (trace_parser_loaded(parser)) {
5542 ftrace_match_records(iter->hash, parser->buffer, parser->idx);
5543 }
5544
5545 trace_parser_put(parser);
5546
5547 mutex_lock(&iter->ops->func_hash->regex_lock);
5548
5549 if (file->f_mode & FMODE_WRITE) {
5550 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
5551
5552 if (filter_hash) {
5553 orig_hash = &iter->ops->func_hash->filter_hash;
5554 if (iter->tr && !list_empty(&iter->tr->mod_trace))
5555 iter->hash->flags |= FTRACE_HASH_FL_MOD;
5556 } else
5557 orig_hash = &iter->ops->func_hash->notrace_hash;
5558
5559 mutex_lock(&ftrace_lock);
5560 ret = ftrace_hash_move_and_update_ops(iter->ops, orig_hash,
5561 iter->hash, filter_hash);
5562 mutex_unlock(&ftrace_lock);
5563 } else {
		/* For read only, the hash is the ops filter */
5565 iter->hash = NULL;
5566 }
5567
5568 mutex_unlock(&iter->ops->func_hash->regex_lock);
5569 free_ftrace_hash(iter->hash);
5570 kfree(iter);
5571
5572 return 0;
5573}
5574
5575static const struct file_operations ftrace_avail_fops = {
5576 .open = ftrace_avail_open,
5577 .read = seq_read,
5578 .llseek = seq_lseek,
5579 .release = seq_release_private,
5580};
5581
5582static const struct file_operations ftrace_enabled_fops = {
5583 .open = ftrace_enabled_open,
5584 .read = seq_read,
5585 .llseek = seq_lseek,
5586 .release = seq_release_private,
5587};
5588
5589static const struct file_operations ftrace_filter_fops = {
5590 .open = ftrace_filter_open,
5591 .read = seq_read,
5592 .write = ftrace_filter_write,
5593 .llseek = tracing_lseek,
5594 .release = ftrace_regex_release,
5595};
5596
5597static const struct file_operations ftrace_notrace_fops = {
5598 .open = ftrace_notrace_open,
5599 .read = seq_read,
5600 .write = ftrace_notrace_write,
5601 .llseek = tracing_lseek,
5602 .release = ftrace_regex_release,
5603};
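
/*
 * Example (tracefs, typically mounted at /sys/kernel/tracing; the
 * function names are illustrative):
 *
 *	# echo do_sys_open > set_ftrace_filter
 *	# echo '*spin_lock*' > set_ftrace_notrace
 *	# echo > set_ftrace_filter		# truncate: trace all again
 */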
5604
5605#ifdef CONFIG_FUNCTION_GRAPH_TRACER
5606
5607static DEFINE_MUTEX(graph_lock);
5608
5609struct ftrace_hash __rcu *ftrace_graph_hash = EMPTY_HASH;
5610struct ftrace_hash __rcu *ftrace_graph_notrace_hash = EMPTY_HASH;
5611
5612enum graph_filter_type {
5613 GRAPH_FILTER_NOTRACE = 0,
5614 GRAPH_FILTER_FUNCTION,
5615};
5616
5617#define FTRACE_GRAPH_EMPTY ((void *)1)
5618
5619struct ftrace_graph_data {
5620 struct ftrace_hash *hash;
5621 struct ftrace_func_entry *entry;
5622 int idx;
5623 enum graph_filter_type type;
5624 struct ftrace_hash *new_hash;
5625 const struct seq_operations *seq_ops;
5626 struct trace_parser parser;
5627};
5628
5629static void *
5630__g_next(struct seq_file *m, loff_t *pos)
5631{
5632 struct ftrace_graph_data *fgd = m->private;
5633 struct ftrace_func_entry *entry = fgd->entry;
5634 struct hlist_head *head;
5635 int i, idx = fgd->idx;
5636
5637 if (*pos >= fgd->hash->count)
5638 return NULL;
5639
5640 if (entry) {
5641 hlist_for_each_entry_continue(entry, hlist) {
5642 fgd->entry = entry;
5643 return entry;
5644 }
5645
5646 idx++;
5647 }
5648
5649 for (i = idx; i < 1 << fgd->hash->size_bits; i++) {
5650 head = &fgd->hash->buckets[i];
5651 hlist_for_each_entry(entry, head, hlist) {
5652 fgd->entry = entry;
5653 fgd->idx = i;
5654 return entry;
5655 }
5656 }
5657 return NULL;
5658}
5659
5660static void *
5661g_next(struct seq_file *m, void *v, loff_t *pos)
5662{
5663 (*pos)++;
5664 return __g_next(m, pos);
5665}
5666
5667static void *g_start(struct seq_file *m, loff_t *pos)
5668{
5669 struct ftrace_graph_data *fgd = m->private;
5670
5671 mutex_lock(&graph_lock);
5672
5673 if (fgd->type == GRAPH_FILTER_FUNCTION)
5674 fgd->hash = rcu_dereference_protected(ftrace_graph_hash,
5675 lockdep_is_held(&graph_lock));
5676 else
5677 fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
5678 lockdep_is_held(&graph_lock));
5679
	/* Nothing, tell g_show to print all functions are enabled */
5681 if (ftrace_hash_empty(fgd->hash) && !*pos)
5682 return FTRACE_GRAPH_EMPTY;
5683
5684 fgd->idx = 0;
5685 fgd->entry = NULL;
5686 return __g_next(m, pos);
5687}
5688
5689static void g_stop(struct seq_file *m, void *p)
5690{
5691 mutex_unlock(&graph_lock);
5692}
5693
5694static int g_show(struct seq_file *m, void *v)
5695{
5696 struct ftrace_func_entry *entry = v;
5697
5698 if (!entry)
5699 return 0;
5700
5701 if (entry == FTRACE_GRAPH_EMPTY) {
5702 struct ftrace_graph_data *fgd = m->private;
5703
5704 if (fgd->type == GRAPH_FILTER_FUNCTION)
5705 seq_puts(m, "#### all functions enabled ####\n");
5706 else
5707 seq_puts(m, "#### no functions disabled ####\n");
5708 return 0;
5709 }
5710
5711 seq_printf(m, "%ps\n", (void *)entry->ip);
5712
5713 return 0;
5714}
5715
5716static const struct seq_operations ftrace_graph_seq_ops = {
5717 .start = g_start,
5718 .next = g_next,
5719 .stop = g_stop,
5720 .show = g_show,
5721};
5722
5723static int
5724__ftrace_graph_open(struct inode *inode, struct file *file,
5725 struct ftrace_graph_data *fgd)
5726{
5727 int ret = 0;
5728 struct ftrace_hash *new_hash = NULL;
5729
5730 if (file->f_mode & FMODE_WRITE) {
5731 const int size_bits = FTRACE_HASH_DEFAULT_BITS;
5732
5733 if (trace_parser_get_init(&fgd->parser, FTRACE_BUFF_MAX))
5734 return -ENOMEM;
5735
5736 if (file->f_flags & O_TRUNC)
5737 new_hash = alloc_ftrace_hash(size_bits);
5738 else
5739 new_hash = alloc_and_copy_ftrace_hash(size_bits,
5740 fgd->hash);
5741 if (!new_hash) {
5742 ret = -ENOMEM;
5743 goto out;
5744 }
5745 }
5746
5747 if (file->f_mode & FMODE_READ) {
5748 ret = seq_open(file, &ftrace_graph_seq_ops);
5749 if (!ret) {
5750 struct seq_file *m = file->private_data;
5751 m->private = fgd;
5752 } else {
5753
5754 free_ftrace_hash(new_hash);
5755 new_hash = NULL;
5756 }
5757 } else
5758 file->private_data = fgd;
5759
5760out:
5761 if (ret < 0 && file->f_mode & FMODE_WRITE)
5762 trace_parser_put(&fgd->parser);
5763
5764 fgd->new_hash = new_hash;
5765
	/*
	 * All uses of fgd->hash must be taken with the graph_lock
	 * held. The graph_lock is going to be released, so force
	 * fgd->hash to be reinitialized when it is taken again.
	 */
5771 fgd->hash = NULL;
5772
5773 return ret;
5774}
5775
5776static int
5777ftrace_graph_open(struct inode *inode, struct file *file)
5778{
5779 struct ftrace_graph_data *fgd;
5780 int ret;
5781
5782 if (unlikely(ftrace_disabled))
5783 return -ENODEV;
5784
5785 fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
5786 if (fgd == NULL)
5787 return -ENOMEM;
5788
5789 mutex_lock(&graph_lock);
5790
5791 fgd->hash = rcu_dereference_protected(ftrace_graph_hash,
5792 lockdep_is_held(&graph_lock));
5793 fgd->type = GRAPH_FILTER_FUNCTION;
5794 fgd->seq_ops = &ftrace_graph_seq_ops;
5795
5796 ret = __ftrace_graph_open(inode, file, fgd);
5797 if (ret < 0)
5798 kfree(fgd);
5799
5800 mutex_unlock(&graph_lock);
5801 return ret;
5802}
5803
5804static int
5805ftrace_graph_notrace_open(struct inode *inode, struct file *file)
5806{
5807 struct ftrace_graph_data *fgd;
5808 int ret;
5809
5810 if (unlikely(ftrace_disabled))
5811 return -ENODEV;
5812
5813 fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
5814 if (fgd == NULL)
5815 return -ENOMEM;
5816
5817 mutex_lock(&graph_lock);
5818
5819 fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
5820 lockdep_is_held(&graph_lock));
5821 fgd->type = GRAPH_FILTER_NOTRACE;
5822 fgd->seq_ops = &ftrace_graph_seq_ops;
5823
5824 ret = __ftrace_graph_open(inode, file, fgd);
5825 if (ret < 0)
5826 kfree(fgd);
5827
5828 mutex_unlock(&graph_lock);
5829 return ret;
5830}
5831
5832static int
5833ftrace_graph_release(struct inode *inode, struct file *file)
5834{
5835 struct ftrace_graph_data *fgd;
5836 struct ftrace_hash *old_hash, *new_hash;
5837 struct trace_parser *parser;
5838 int ret = 0;
5839
5840 if (file->f_mode & FMODE_READ) {
5841 struct seq_file *m = file->private_data;
5842
5843 fgd = m->private;
5844 seq_release(inode, file);
5845 } else {
5846 fgd = file->private_data;
5847 }
5848
5849
5850 if (file->f_mode & FMODE_WRITE) {
5851
5852 parser = &fgd->parser;
5853
		if (trace_parser_loaded(parser)) {
5855 ret = ftrace_graph_set_hash(fgd->new_hash,
5856 parser->buffer);
5857 }
5858
5859 trace_parser_put(parser);
5860
5861 new_hash = __ftrace_hash_move(fgd->new_hash);
5862 if (!new_hash) {
5863 ret = -ENOMEM;
5864 goto out;
5865 }
5866
5867 mutex_lock(&graph_lock);
5868
5869 if (fgd->type == GRAPH_FILTER_FUNCTION) {
5870 old_hash = rcu_dereference_protected(ftrace_graph_hash,
5871 lockdep_is_held(&graph_lock));
5872 rcu_assign_pointer(ftrace_graph_hash, new_hash);
5873 } else {
5874 old_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
5875 lockdep_is_held(&graph_lock));
5876 rcu_assign_pointer(ftrace_graph_notrace_hash, new_hash);
5877 }
5878
5879 mutex_unlock(&graph_lock);
5880
		/*
		 * We need to do a hard force of sched synchronization.
		 * This is because we use preempt_disable() to do RCU, but
		 * the function tracers can be called where RCU is not watching
		 * (like before user_exit()). We can not rely on the RCU
		 * infrastructure to do the synchronization, thus we must do it
		 * ourselves.
		 */
5889 synchronize_rcu_tasks_rude();
5890
5891 free_ftrace_hash(old_hash);
5892 }
5893
5894 out:
5895 free_ftrace_hash(fgd->new_hash);
5896 kfree(fgd);
5897
5898 return ret;
5899}
5900
5901static int
5902ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer)
5903{
5904 struct ftrace_glob func_g;
5905 struct dyn_ftrace *rec;
5906 struct ftrace_page *pg;
5907 struct ftrace_func_entry *entry;
5908 int fail = 1;
5909 int not;
5910
	/* decode the regex used to match function names */
	func_g.type = filter_parse_regex(buffer, strlen(buffer),
					 &func_g.search, &not);
5914
5915 func_g.len = strlen(func_g.search);
5916
5917 mutex_lock(&ftrace_lock);
5918
5919 if (unlikely(ftrace_disabled)) {
5920 mutex_unlock(&ftrace_lock);
5921 return -ENODEV;
5922 }
5923
5924 do_for_each_ftrace_rec(pg, rec) {
5925
5926 if (rec->flags & FTRACE_FL_DISABLED)
5927 continue;
5928
5929 if (ftrace_match_record(rec, &func_g, NULL, 0)) {
5930 entry = ftrace_lookup_ip(hash, rec->ip);
5931
5932 if (!not) {
5933 fail = 0;
5934
5935 if (entry)
5936 continue;
5937 if (add_hash_entry(hash, rec->ip) < 0)
5938 goto out;
5939 } else {
5940 if (entry) {
5941 free_hash_entry(hash, entry);
5942 fail = 0;
5943 }
5944 }
5945 }
5946 } while_for_each_ftrace_rec();
5947out:
5948 mutex_unlock(&ftrace_lock);
5949
5950 if (fail)
5951 return -EINVAL;
5952
5953 return 0;
5954}
5955
5956static ssize_t
5957ftrace_graph_write(struct file *file, const char __user *ubuf,
5958 size_t cnt, loff_t *ppos)
5959{
5960 ssize_t read, ret = 0;
5961 struct ftrace_graph_data *fgd = file->private_data;
5962 struct trace_parser *parser;
5963
5964 if (!cnt)
5965 return 0;
5966
5967
5968 if (file->f_mode & FMODE_READ) {
5969 struct seq_file *m = file->private_data;
5970 fgd = m->private;
5971 }
5972
5973 parser = &fgd->parser;
5974
5975 read = trace_get_user(parser, ubuf, cnt, ppos);
5976
5977 if (read >= 0 && trace_parser_loaded(parser) &&
5978 !trace_parser_cont(parser)) {
5979
5980 ret = ftrace_graph_set_hash(fgd->new_hash,
5981 parser->buffer);
5982 trace_parser_clear(parser);
5983 }
5984
5985 if (!ret)
5986 ret = read;
5987
5988 return ret;
5989}
5990
5991static const struct file_operations ftrace_graph_fops = {
5992 .open = ftrace_graph_open,
5993 .read = seq_read,
5994 .write = ftrace_graph_write,
5995 .llseek = tracing_lseek,
5996 .release = ftrace_graph_release,
5997};
5998
5999static const struct file_operations ftrace_graph_notrace_fops = {
6000 .open = ftrace_graph_notrace_open,
6001 .read = seq_read,
6002 .write = ftrace_graph_write,
6003 .llseek = tracing_lseek,
6004 .release = ftrace_graph_release,
6005};
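
/*
 * Example (tracefs; the function names are illustrative): constrain the
 * function graph tracer to one call tree and prune another.
 *
 *	# echo do_sys_open > set_graph_function
 *	# echo kfree > set_graph_notrace
 *	# echo function_graph > current_tracer
 */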
6006#endif
6007
6008void ftrace_create_filter_files(struct ftrace_ops *ops,
6009 struct dentry *parent)
6010{
6011
6012 trace_create_file("set_ftrace_filter", 0644, parent,
6013 ops, &ftrace_filter_fops);
6014
6015 trace_create_file("set_ftrace_notrace", 0644, parent,
6016 ops, &ftrace_notrace_fops);
6017}
6018

/*
 * The name "destroy_filter_files" is really a misnomer. Although
 * in the future, it may actually delete the files, but this is
 * really intended to make sure the ops passed in are disabled
 * and that when this function returns, the caller is free to
 * free the ops.
 *
 * The "destroy" name is only to match the "create" name that this
 * should be paired with.
 */
6029void ftrace_destroy_filter_files(struct ftrace_ops *ops)
6030{
6031 mutex_lock(&ftrace_lock);
6032 if (ops->flags & FTRACE_OPS_FL_ENABLED)
6033 ftrace_shutdown(ops, 0);
6034 ops->flags |= FTRACE_OPS_FL_DELETED;
6035 mutex_unlock(&ftrace_lock);
6036}
6037
6038static __init int ftrace_init_dyn_tracefs(struct dentry *d_tracer)
6039{
6040
6041 trace_create_file("available_filter_functions", 0444,
6042 d_tracer, NULL, &ftrace_avail_fops);
6043
6044 trace_create_file("enabled_functions", 0444,
6045 d_tracer, NULL, &ftrace_enabled_fops);
6046
6047 ftrace_create_filter_files(&global_ops, d_tracer);
6048
6049#ifdef CONFIG_FUNCTION_GRAPH_TRACER
6050 trace_create_file("set_graph_function", 0644, d_tracer,
6051 NULL,
6052 &ftrace_graph_fops);
6053 trace_create_file("set_graph_notrace", 0644, d_tracer,
6054 NULL,
6055 &ftrace_graph_notrace_fops);
6056#endif
6057
6058 return 0;
6059}
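
/*
 * Example (tracefs): the files created above show which functions are
 * traceable and which currently have callbacks attached.
 *
 *	# wc -l available_filter_functions
 *	# grep ' vfs_' enabled_functions
 */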
6060
6061static int ftrace_cmp_ips(const void *a, const void *b)
6062{
6063 const unsigned long *ipa = a;
6064 const unsigned long *ipb = b;
6065
6066 if (*ipa > *ipb)
6067 return 1;
6068 if (*ipa < *ipb)
6069 return -1;
6070 return 0;
6071}
6072
6073static int ftrace_process_locs(struct module *mod,
6074 unsigned long *start,
6075 unsigned long *end)
6076{
6077 struct ftrace_page *start_pg;
6078 struct ftrace_page *pg;
6079 struct dyn_ftrace *rec;
6080 unsigned long count;
6081 unsigned long *p;
6082 unsigned long addr;
6083 unsigned long flags = 0;
6084 int ret = -ENOMEM;
6085
6086 count = end - start;
6087
6088 if (!count)
6089 return 0;
6090
6091 sort(start, count, sizeof(*start),
6092 ftrace_cmp_ips, NULL);
6093
6094 start_pg = ftrace_allocate_pages(count);
6095 if (!start_pg)
6096 return -ENOMEM;
6097
6098 mutex_lock(&ftrace_lock);
6099
	/*
	 * Core and each module needs their own pages, as
	 * modules will free them when they are removed.
	 * Force a new page to be allocated for modules.
	 */
6105 if (!mod) {
6106 WARN_ON(ftrace_pages || ftrace_pages_start);
6107
6108 ftrace_pages = ftrace_pages_start = start_pg;
6109 } else {
6110 if (!ftrace_pages)
6111 goto out;
6112
6113 if (WARN_ON(ftrace_pages->next)) {
6114
6115 while (ftrace_pages->next)
6116 ftrace_pages = ftrace_pages->next;
6117 }
6118
6119 ftrace_pages->next = start_pg;
6120 }
6121
6122 p = start;
6123 pg = start_pg;
6124 while (p < end) {
6125 addr = ftrace_call_adjust(*p++);

		/*
		 * Some architecture linkers will pad between
		 * the different mcount_loc sections of different
		 * object files to satisfy alignments.
		 * Skip any NULL pointers.
		 */
6132 if (!addr)
6133 continue;
6134
6135 if (pg->index == pg->size) {
6136
6137 if (WARN_ON(!pg->next))
6138 break;
6139 pg = pg->next;
6140 }
6141
6142 rec = &pg->records[pg->index++];
6143 rec->ip = addr;
6144 }
6145
	/* We should have used all pages */
	WARN_ON(pg->next);

	/* Assign the last page to ftrace_pages */
	ftrace_pages = pg;

	/*
	 * We only need to disable interrupts on start up
	 * because we are modifying code that an interrupt
	 * may execute, and the modification is not atomic.
	 * But for modules, nothing runs the code we modify
	 * until we are finished with it, and there's no
	 * reason to cause large interrupt latencies while we do it.
	 */
6160 if (!mod)
6161 local_irq_save(flags);
6162 ftrace_update_code(mod, start_pg);
6163 if (!mod)
6164 local_irq_restore(flags);
6165 ret = 0;
6166 out:
6167 mutex_unlock(&ftrace_lock);
6168
6169 return ret;
6170}
6171
6172struct ftrace_mod_func {
6173 struct list_head list;
6174 char *name;
6175 unsigned long ip;
6176 unsigned int size;
6177};
6178
6179struct ftrace_mod_map {
6180 struct rcu_head rcu;
6181 struct list_head list;
6182 struct module *mod;
6183 unsigned long start_addr;
6184 unsigned long end_addr;
6185 struct list_head funcs;
6186 unsigned int num_funcs;
6187};
6188
6189#ifdef CONFIG_MODULES
6190
6191#define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
6192
6193static LIST_HEAD(ftrace_mod_maps);
6194
6195static int referenced_filters(struct dyn_ftrace *rec)
6196{
6197 struct ftrace_ops *ops;
6198 int cnt = 0;
6199
6200 for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
6201 if (ops_references_rec(ops, rec))
6202 cnt++;
6203 }
6204
6205 return cnt;
6206}
6207
6208static void
6209clear_mod_from_hash(struct ftrace_page *pg, struct ftrace_hash *hash)
6210{
6211 struct ftrace_func_entry *entry;
6212 struct dyn_ftrace *rec;
6213 int i;
6214
6215 if (ftrace_hash_empty(hash))
6216 return;
6217
6218 for (i = 0; i < pg->index; i++) {
6219 rec = &pg->records[i];
6220 entry = __ftrace_lookup_ip(hash, rec->ip);
		/*
		 * Do not allow this rec to match again.
		 * Yeah, it may waste some memory, but will be removed
		 * if/when the hash is modified again.
		 */
6226 if (entry)
6227 entry->ip = 0;
6228 }
6229}
6230
/* Clear any records from hashes */
6232static void clear_mod_from_hashes(struct ftrace_page *pg)
6233{
6234 struct trace_array *tr;
6235
6236 mutex_lock(&trace_types_lock);
6237 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6238 if (!tr->ops || !tr->ops->func_hash)
6239 continue;
6240 mutex_lock(&tr->ops->func_hash->regex_lock);
6241 clear_mod_from_hash(pg, tr->ops->func_hash->filter_hash);
6242 clear_mod_from_hash(pg, tr->ops->func_hash->notrace_hash);
6243 mutex_unlock(&tr->ops->func_hash->regex_lock);
6244 }
6245 mutex_unlock(&trace_types_lock);
6246}
6247
6248static void ftrace_free_mod_map(struct rcu_head *rcu)
6249{
6250 struct ftrace_mod_map *mod_map = container_of(rcu, struct ftrace_mod_map, rcu);
6251 struct ftrace_mod_func *mod_func;
6252 struct ftrace_mod_func *n;
6253
6254
6255 list_for_each_entry_safe(mod_func, n, &mod_map->funcs, list) {
6256 kfree(mod_func->name);
6257 list_del(&mod_func->list);
6258 kfree(mod_func);
6259 }
6260
6261 kfree(mod_map);
6262}
6263
6264void ftrace_release_mod(struct module *mod)
6265{
6266 struct ftrace_mod_map *mod_map;
6267 struct ftrace_mod_map *n;
6268 struct dyn_ftrace *rec;
6269 struct ftrace_page **last_pg;
6270 struct ftrace_page *tmp_page = NULL;
6271 struct ftrace_page *pg;
6272 int order;
6273
6274 mutex_lock(&ftrace_lock);
6275
6276 if (ftrace_disabled)
6277 goto out_unlock;
6278
6279 list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) {
6280 if (mod_map->mod == mod) {
6281 list_del_rcu(&mod_map->list);
6282 call_rcu(&mod_map->rcu, ftrace_free_mod_map);
6283 break;
6284 }
6285 }
6286
	/*
	 * Each module has its own ftrace_pages, remove
	 * them from the list.
	 */
6291 last_pg = &ftrace_pages_start;
6292 for (pg = ftrace_pages_start; pg; pg = *last_pg) {
6293 rec = &pg->records[0];
6294 if (within_module_core(rec->ip, mod) ||
6295 within_module_init(rec->ip, mod)) {
			/*
			 * As core pages are first, the first
			 * page should never be a module page.
			 */
6300 if (WARN_ON(pg == ftrace_pages_start))
6301 goto out_unlock;
6302
			/* Check if we are deleting the last page */
6304 if (pg == ftrace_pages)
6305 ftrace_pages = next_to_ftrace_page(last_pg);
6306
6307 ftrace_update_tot_cnt -= pg->index;
6308 *last_pg = pg->next;
6309
6310 pg->next = tmp_page;
6311 tmp_page = pg;
6312 } else
6313 last_pg = &pg->next;
6314 }
6315 out_unlock:
6316 mutex_unlock(&ftrace_lock);
6317
6318 for (pg = tmp_page; pg; pg = tmp_page) {
6319
		/* Needs to be called outside of ftrace_lock */
6321 clear_mod_from_hashes(pg);
6322
6323 order = get_count_order(pg->size / ENTRIES_PER_PAGE);
6324 free_pages((unsigned long)pg->records, order);
6325 tmp_page = pg->next;
6326 kfree(pg);
6327 ftrace_number_of_pages -= 1 << order;
6328 ftrace_number_of_groups--;
6329 }
6330}
6331
6332void ftrace_module_enable(struct module *mod)
6333{
6334 struct dyn_ftrace *rec;
6335 struct ftrace_page *pg;
6336
6337 mutex_lock(&ftrace_lock);
6338
6339 if (ftrace_disabled)
6340 goto out_unlock;
6341
	/*
	 * If ftrace has already been started, the module's mcount call
	 * sites may need to be converted from nops to calls below, so
	 * give the arch a chance to prepare for text modification.
	 */
6355 if (ftrace_start_up)
6356 ftrace_arch_code_modify_prepare();
6357
6358 do_for_each_ftrace_rec(pg, rec) {
6359 int cnt;
6360
		/*
		 * do_for_each_ftrace_rec() is a double loop.
		 * module text shares the pg. If a record is
		 * not part of this module, then skip this pg,
		 * which the "break" will do.
		 */
6366 if (!within_module_core(rec->ip, mod) &&
6367 !within_module_init(rec->ip, mod))
6368 break;
6369
6370 cnt = 0;

		/*
		 * When adding a module, we need to check if tracers are
		 * currently enabled and if they are, and can trace this record,
		 * we need to enable the module functions as well as update the
		 * reference counts for those function records.
		 */
6378 if (ftrace_start_up)
6379 cnt += referenced_filters(rec);
6380
		/* This clears FTRACE_FL_DISABLED */
6382 rec->flags = cnt;
6383
6384 if (ftrace_start_up && cnt) {
6385 int failed = __ftrace_replace_code(rec, 1);
6386 if (failed) {
6387 ftrace_bug(failed, rec);
6388 goto out_loop;
6389 }
6390 }
6391
6392 } while_for_each_ftrace_rec();
6393
6394 out_loop:
6395 if (ftrace_start_up)
6396 ftrace_arch_code_modify_post_process();
6397
6398 out_unlock:
6399 mutex_unlock(&ftrace_lock);
6400
6401 process_cached_mods(mod->name);
6402}
6403
6404void ftrace_module_init(struct module *mod)
6405{
6406 if (ftrace_disabled || !mod->num_ftrace_callsites)
6407 return;
6408
6409 ftrace_process_locs(mod, mod->ftrace_callsites,
6410 mod->ftrace_callsites + mod->num_ftrace_callsites);
6411}
6412
6413static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
6414 struct dyn_ftrace *rec)
6415{
6416 struct ftrace_mod_func *mod_func;
6417 unsigned long symsize;
6418 unsigned long offset;
6419 char str[KSYM_SYMBOL_LEN];
6420 char *modname;
6421 const char *ret;
6422
6423 ret = kallsyms_lookup(rec->ip, &symsize, &offset, &modname, str);
6424 if (!ret)
6425 return;
6426
6427 mod_func = kmalloc(sizeof(*mod_func), GFP_KERNEL);
6428 if (!mod_func)
6429 return;
6430
6431 mod_func->name = kstrdup(str, GFP_KERNEL);
6432 if (!mod_func->name) {
6433 kfree(mod_func);
6434 return;
6435 }
6436
6437 mod_func->ip = rec->ip - offset;
6438 mod_func->size = symsize;
6439
6440 mod_map->num_funcs++;
6441
6442 list_add_rcu(&mod_func->list, &mod_map->funcs);
6443}
6444
6445static struct ftrace_mod_map *
6446allocate_ftrace_mod_map(struct module *mod,
6447 unsigned long start, unsigned long end)
6448{
6449 struct ftrace_mod_map *mod_map;
6450
6451 mod_map = kmalloc(sizeof(*mod_map), GFP_KERNEL);
6452 if (!mod_map)
6453 return NULL;
6454
6455 mod_map->mod = mod;
6456 mod_map->start_addr = start;
6457 mod_map->end_addr = end;
6458 mod_map->num_funcs = 0;
6459
6460 INIT_LIST_HEAD_RCU(&mod_map->funcs);
6461
6462 list_add_rcu(&mod_map->list, &ftrace_mod_maps);
6463
6464 return mod_map;
6465}
6466
6467static const char *
6468ftrace_func_address_lookup(struct ftrace_mod_map *mod_map,
6469 unsigned long addr, unsigned long *size,
6470 unsigned long *off, char *sym)
6471{
6472 struct ftrace_mod_func *found_func = NULL;
6473 struct ftrace_mod_func *mod_func;
6474
6475 list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) {
6476 if (addr >= mod_func->ip &&
6477 addr < mod_func->ip + mod_func->size) {
6478 found_func = mod_func;
6479 break;
6480 }
6481 }
6482
6483 if (found_func) {
6484 if (size)
6485 *size = found_func->size;
6486 if (off)
6487 *off = addr - found_func->ip;
6488 if (sym)
6489 strlcpy(sym, found_func->name, KSYM_NAME_LEN);
6490
6491 return found_func->name;
6492 }
6493
6494 return NULL;
6495}
6496
6497const char *
6498ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
6499 unsigned long *off, char **modname, char *sym)
6500{
6501 struct ftrace_mod_map *mod_map;
6502 const char *ret = NULL;
6503
	/* mod_map is freed via call_rcu() */
6505 preempt_disable();
6506 list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
6507 ret = ftrace_func_address_lookup(mod_map, addr, size, off, sym);
6508 if (ret) {
6509 if (modname)
6510 *modname = mod_map->mod->name;
6511 break;
6512 }
6513 }
6514 preempt_enable();
6515
6516 return ret;
6517}
6518
6519int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
6520 char *type, char *name,
6521 char *module_name, int *exported)
6522{
6523 struct ftrace_mod_map *mod_map;
6524 struct ftrace_mod_func *mod_func;
6525
6526 preempt_disable();
6527 list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
6528
6529 if (symnum >= mod_map->num_funcs) {
6530 symnum -= mod_map->num_funcs;
6531 continue;
6532 }
6533
6534 list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) {
			if (symnum > 0) {
6536 symnum--;
6537 continue;
6538 }
6539
6540 *value = mod_func->ip;
6541 *type = 'T';
6542 strlcpy(name, mod_func->name, KSYM_NAME_LEN);
6543 strlcpy(module_name, mod_map->mod->name, MODULE_NAME_LEN);
6544 *exported = 1;
6545 preempt_enable();
6546 return 0;
6547 }
6548 WARN_ON(1);
6549 break;
6550 }
6551 preempt_enable();
6552 return -ERANGE;
6553}
6554
6555#else
6556static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
6557 struct dyn_ftrace *rec) { }
6558static inline struct ftrace_mod_map *
6559allocate_ftrace_mod_map(struct module *mod,
6560 unsigned long start, unsigned long end)
6561{
6562 return NULL;
6563}
6564#endif
6565
6566struct ftrace_init_func {
6567 struct list_head list;
6568 unsigned long ip;
6569};
6570
/* Clear any init ips from hashes */
6572static void
6573clear_func_from_hash(struct ftrace_init_func *func, struct ftrace_hash *hash)
6574{
6575 struct ftrace_func_entry *entry;
6576
6577 if (ftrace_hash_empty(hash))
6578 return;
6579
6580 entry = __ftrace_lookup_ip(hash, func->ip);
6581
	/*
	 * Do not allow this rec to match again.
	 * Yeah, it may waste some memory, but will be removed
	 * if/when the hash is modified again.
	 */
6587 if (entry)
6588 entry->ip = 0;
6589}
6590
6591static void
6592clear_func_from_hashes(struct ftrace_init_func *func)
6593{
6594 struct trace_array *tr;
6595
6596 mutex_lock(&trace_types_lock);
6597 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6598 if (!tr->ops || !tr->ops->func_hash)
6599 continue;
6600 mutex_lock(&tr->ops->func_hash->regex_lock);
6601 clear_func_from_hash(func, tr->ops->func_hash->filter_hash);
6602 clear_func_from_hash(func, tr->ops->func_hash->notrace_hash);
6603 mutex_unlock(&tr->ops->func_hash->regex_lock);
6604 }
6605 mutex_unlock(&trace_types_lock);
6606}
6607
6608static void add_to_clear_hash_list(struct list_head *clear_list,
6609 struct dyn_ftrace *rec)
6610{
6611 struct ftrace_init_func *func;
6612
6613 func = kmalloc(sizeof(*func), GFP_KERNEL);
6614 if (!func) {
6615 WARN_ONCE(1, "alloc failure, ftrace filter could be stale\n");
6616 return;
6617 }
6618
6619 func->ip = rec->ip;
6620 list_add(&func->list, clear_list);
6621}
6622
6623void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
6624{
6625 unsigned long start = (unsigned long)(start_ptr);
6626 unsigned long end = (unsigned long)(end_ptr);
6627 struct ftrace_page **last_pg = &ftrace_pages_start;
6628 struct ftrace_page *pg;
6629 struct dyn_ftrace *rec;
6630 struct dyn_ftrace key;
6631 struct ftrace_mod_map *mod_map = NULL;
6632 struct ftrace_init_func *func, *func_next;
6633 struct list_head clear_hash;
6634 int order;
6635
6636 INIT_LIST_HEAD(&clear_hash);
6637
6638 key.ip = start;
6639 key.flags = end;
6640
6641 mutex_lock(&ftrace_lock);
6642
	/*
	 * If we are freeing module init memory, then check if
	 * any tracer is active. If so, we need to save a mapping of
	 * the module functions being freed with the address.
	 */
6648 if (mod && ftrace_ops_list != &ftrace_list_end)
6649 mod_map = allocate_ftrace_mod_map(mod, start, end);
6650
6651 for (pg = ftrace_pages_start; pg; last_pg = &pg->next, pg = *last_pg) {
6652 if (end < pg->records[0].ip ||
6653 start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
6654 continue;
6655 again:
6656 rec = bsearch(&key, pg->records, pg->index,
6657 sizeof(struct dyn_ftrace),
6658 ftrace_cmp_recs);
6659 if (!rec)
6660 continue;
6661
		/* rec will be cleared from hashes after ftrace_lock unlock */
6663 add_to_clear_hash_list(&clear_hash, rec);
6664
6665 if (mod_map)
6666 save_ftrace_mod_rec(mod_map, rec);
6667
6668 pg->index--;
6669 ftrace_update_tot_cnt--;
6670 if (!pg->index) {
6671 *last_pg = pg->next;
6672 order = get_count_order(pg->size / ENTRIES_PER_PAGE);
6673 free_pages((unsigned long)pg->records, order);
6674 ftrace_number_of_pages -= 1 << order;
6675 ftrace_number_of_groups--;
6676 kfree(pg);
6677 pg = container_of(last_pg, struct ftrace_page, next);
6678 if (!(*last_pg))
6679 ftrace_pages = pg;
6680 continue;
6681 }
6682 memmove(rec, rec + 1,
6683 (pg->index - (rec - pg->records)) * sizeof(*rec));
6684
6685 goto again;
6686 }
6687 mutex_unlock(&ftrace_lock);
6688
6689 list_for_each_entry_safe(func, func_next, &clear_hash, list) {
6690 clear_func_from_hashes(func);
6691 kfree(func);
6692 }
6693}
6694
6695void __init ftrace_free_init_mem(void)
6696{
6697 void *start = (void *)(&__init_begin);
6698 void *end = (void *)(&__init_end);
6699
6700 ftrace_free_mem(NULL, start, end);
6701}
6702
6703void __init ftrace_init(void)
6704{
6705 extern unsigned long __start_mcount_loc[];
6706 extern unsigned long __stop_mcount_loc[];
6707 unsigned long count, flags;
6708 int ret;
6709
6710 local_irq_save(flags);
6711 ret = ftrace_dyn_arch_init();
6712 local_irq_restore(flags);
6713 if (ret)
6714 goto failed;
6715
6716 count = __stop_mcount_loc - __start_mcount_loc;
6717 if (!count) {
6718 pr_info("ftrace: No functions to be traced?\n");
6719 goto failed;
6720 }
6721
6722 pr_info("ftrace: allocating %ld entries in %ld pages\n",
6723 count, count / ENTRIES_PER_PAGE + 1);
6724
6725 last_ftrace_enabled = ftrace_enabled = 1;
6726
6727 ret = ftrace_process_locs(NULL,
6728 __start_mcount_loc,
6729 __stop_mcount_loc);
6730
6731 pr_info("ftrace: allocated %ld pages with %ld groups\n",
6732 ftrace_number_of_pages, ftrace_number_of_groups);
6733
6734 set_ftrace_early_filters();
6735
6736 return;
6737 failed:
6738 ftrace_disabled = 1;
6739}
6740
/* Do nothing if arch does not support this */
6742void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops)
6743{
6744}
6745
6746static void ftrace_update_trampoline(struct ftrace_ops *ops)
6747{
6748 arch_ftrace_update_trampoline(ops);
6749}
6750
6751void ftrace_init_trace_array(struct trace_array *tr)
6752{
6753 INIT_LIST_HEAD(&tr->func_probes);
6754 INIT_LIST_HEAD(&tr->mod_trace);
6755 INIT_LIST_HEAD(&tr->mod_notrace);
6756}
6757#else
6758
6759static struct ftrace_ops global_ops = {
6760 .func = ftrace_stub,
6761 .flags = FTRACE_OPS_FL_RECURSION_SAFE |
6762 FTRACE_OPS_FL_INITIALIZED |
6763 FTRACE_OPS_FL_PID,
6764};
6765
6766static int __init ftrace_nodyn_init(void)
6767{
6768 ftrace_enabled = 1;
6769 return 0;
6770}
6771core_initcall(ftrace_nodyn_init);
6772
6773static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; }
6774static inline void ftrace_startup_enable(int command) { }
6775static inline void ftrace_startup_all(int command) { }
6776
6777# define ftrace_startup(ops, command) \
6778 ({ \
6779 int ___ret = __register_ftrace_function(ops); \
6780 if (!___ret) \
6781 (ops)->flags |= FTRACE_OPS_FL_ENABLED; \
6782 ___ret; \
6783 })
6784# define ftrace_shutdown(ops, command) \
6785 ({ \
6786 int ___ret = __unregister_ftrace_function(ops); \
6787 if (!___ret) \
6788 (ops)->flags &= ~FTRACE_OPS_FL_ENABLED; \
6789 ___ret; \
6790 })
6791
6792# define ftrace_startup_sysctl() do { } while (0)
6793# define ftrace_shutdown_sysctl() do { } while (0)
6794
6795static inline int
6796ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
6797{
6798 return 1;
6799}
6800
6801static void ftrace_update_trampoline(struct ftrace_ops *ops)
6802{
6803}
6804
6805#endif
6806
6807__init void ftrace_init_global_array_ops(struct trace_array *tr)
6808{
6809 tr->ops = &global_ops;
6810 tr->ops->private = tr;
6811 ftrace_init_trace_array(tr);
6812}
6813
6814void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
6815{
	/* If we filter on pids, update to use the pid function */
6817 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
6818 if (WARN_ON(tr->ops->func != ftrace_stub))
6819 printk("ftrace ops had %pS for function\n",
6820 tr->ops->func);
6821 }
6822 tr->ops->func = func;
6823 tr->ops->private = tr;
6824}
6825
6826void ftrace_reset_array_ops(struct trace_array *tr)
6827{
6828 tr->ops->func = ftrace_stub;
6829}
6830
6831static nokprobe_inline void
6832__ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
6833 struct ftrace_ops *ignored, struct pt_regs *regs)
6834{
6835 struct ftrace_ops *op;
6836 int bit;
6837
6838 bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
6839 if (bit < 0)
6840 return;
6841
	/*
	 * Some of the ops may be dynamically allocated,
	 * they must be freed after a synchronize_rcu().
	 */
6846 preempt_disable_notrace();
6847
6848 do_for_each_ftrace_op(op, ftrace_ops_list) {
		/*
		 * Check the following for each ops before calling their func:
		 *  if RCU flag is set, then rcu_is_watching() must be true
		 *  Otherwise test if the ip matches the ops filter
		 *
		 * If any of the above fails then the op->func() is not executed.
		 */
6858 if ((!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) &&
6859 ftrace_ops_test(op, ip, regs)) {
6860 if (FTRACE_WARN_ON(!op->func)) {
6861 pr_warn("op=%p %pS\n", op, op);
6862 goto out;
6863 }
6864 op->func(ip, parent_ip, op, regs);
6865 }
6866 } while_for_each_ftrace_op(op);
6867out:
6868 preempt_enable_notrace();
6869 trace_clear_recursion(bit);
6870}
6871
/*
 * Some archs only support passing ip and parent_ip. Even though
 * the list function ignores the op parameter, we do not want any
 * C side effects, where a function is called without the caller
 * sending the third parameter.
 *
 * Archs are to support both the regs and ftrace_ops at the same time.
 * If they support ftrace_ops, it is assumed they support regs.
 * If call backs want to use regs, they must either check for regs
 * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS.
 * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
 * An architecture can pass partial regs with ftrace_ops and still
 * set the ARCH_SUPPORTS_FTRACE_OPS.
 */
6885#if ARCH_SUPPORTS_FTRACE_OPS
6886static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
6887 struct ftrace_ops *op, struct pt_regs *regs)
6888{
6889 __ftrace_ops_list_func(ip, parent_ip, NULL, regs);
6890}
6891NOKPROBE_SYMBOL(ftrace_ops_list_func);
6892#else
6893static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
6894{
6895 __ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
6896}
6897NOKPROBE_SYMBOL(ftrace_ops_no_ops);
6898#endif
6899
/*
 * If there's only one function registered but it does not support
 * recursion, or it needs RCU protection, then the mcount trampoline
 * will call this function instead of the registered callback directly.
 */
6905static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
6906 struct ftrace_ops *op, struct pt_regs *regs)
6907{
6908 int bit;
6909
6910 if ((op->flags & FTRACE_OPS_FL_RCU) && !rcu_is_watching())
6911 return;
6912
6913 bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
6914 if (bit < 0)
6915 return;
6916
6917 preempt_disable_notrace();
6918
6919 op->func(ip, parent_ip, op, regs);
6920
6921 preempt_enable_notrace();
6922 trace_clear_recursion(bit);
6923}
6924NOKPROBE_SYMBOL(ftrace_ops_assist_func);
6925

/**
 * ftrace_ops_get_func - get the function a trampoline should call
 * @ops: the ops to get the function for
 *
 * Normally the mcount trampoline will call the ops->func, but there
 * are times that it should not. For example, if the ops does not
 * have its own recursion protection, then it should call the
 * ftrace_ops_assist_func() instead.
 *
 * Returns the function that the trampoline should call for @ops.
 */
6937ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
6938{
	/*
	 * If the function does not handle recursion, or needs to be
	 * RCU safe, then we need to call the assist handler.
	 */
6943 if (!(ops->flags & FTRACE_OPS_FL_RECURSION_SAFE) ||
6944 ops->flags & FTRACE_OPS_FL_RCU)
6945 return ftrace_ops_assist_func;
6946
6947 return ops->func;
6948}
6949
6950static void
6951ftrace_filter_pid_sched_switch_probe(void *data, bool preempt,
6952 struct task_struct *prev, struct task_struct *next)
6953{
6954 struct trace_array *tr = data;
6955 struct trace_pid_list *pid_list;
6956
6957 pid_list = rcu_dereference_sched(tr->function_pids);
6958
6959 this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
6960 trace_ignore_this_task(pid_list, next));
6961}
6962
6963static void
6964ftrace_pid_follow_sched_process_fork(void *data,
6965 struct task_struct *self,
6966 struct task_struct *task)
6967{
6968 struct trace_pid_list *pid_list;
6969 struct trace_array *tr = data;
6970
6971 pid_list = rcu_dereference_sched(tr->function_pids);
6972 trace_filter_add_remove_task(pid_list, self, task);
6973}
6974
6975static void
6976ftrace_pid_follow_sched_process_exit(void *data, struct task_struct *task)
6977{
6978 struct trace_pid_list *pid_list;
6979 struct trace_array *tr = data;
6980
6981 pid_list = rcu_dereference_sched(tr->function_pids);
6982 trace_filter_add_remove_task(pid_list, NULL, task);
6983}
6984
6985void ftrace_pid_follow_fork(struct trace_array *tr, bool enable)
6986{
6987 if (enable) {
6988 register_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
6989 tr);
6990 register_trace_sched_process_exit(ftrace_pid_follow_sched_process_exit,
6991 tr);
6992 } else {
6993 unregister_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
6994 tr);
6995 unregister_trace_sched_process_exit(ftrace_pid_follow_sched_process_exit,
6996 tr);
6997 }
6998}
6999
7000static void clear_ftrace_pids(struct trace_array *tr)
7001{
7002 struct trace_pid_list *pid_list;
7003 int cpu;
7004
7005 pid_list = rcu_dereference_protected(tr->function_pids,
7006 lockdep_is_held(&ftrace_lock));
7007 if (!pid_list)
7008 return;
7009
7010 unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
7011
7012 for_each_possible_cpu(cpu)
7013 per_cpu_ptr(tr->array_buffer.data, cpu)->ftrace_ignore_pid = false;
7014
7015 rcu_assign_pointer(tr->function_pids, NULL);
7016
	/* Wait till all users are no longer using pid filtering */
7018 synchronize_rcu();
7019
7020 trace_free_pid_list(pid_list);
7021}
7022
7023void ftrace_clear_pids(struct trace_array *tr)
7024{
7025 mutex_lock(&ftrace_lock);
7026
7027 clear_ftrace_pids(tr);
7028
7029 mutex_unlock(&ftrace_lock);
7030}
7031
7032static void ftrace_pid_reset(struct trace_array *tr)
7033{
7034 mutex_lock(&ftrace_lock);
7035 clear_ftrace_pids(tr);
7036
7037 ftrace_update_pid_func();
7038 ftrace_startup_all(0);
7039
7040 mutex_unlock(&ftrace_lock);
7041}
7042
/* Greater than any max PID */
7044#define FTRACE_NO_PIDS (void *)(PID_MAX_LIMIT + 1)
7045
7046static void *fpid_start(struct seq_file *m, loff_t *pos)
7047 __acquires(RCU)
7048{
7049 struct trace_pid_list *pid_list;
7050 struct trace_array *tr = m->private;
7051
7052 mutex_lock(&ftrace_lock);
7053 rcu_read_lock_sched();
7054
7055 pid_list = rcu_dereference_sched(tr->function_pids);
7056
7057 if (!pid_list)
7058 return !(*pos) ? FTRACE_NO_PIDS : NULL;
7059
7060 return trace_pid_start(pid_list, pos);
7061}
7062
7063static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
7064{
7065 struct trace_array *tr = m->private;
7066 struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_pids);
7067
7068 if (v == FTRACE_NO_PIDS)
7069 return NULL;
7070
7071 return trace_pid_next(pid_list, v, pos);
7072}
7073
7074static void fpid_stop(struct seq_file *m, void *p)
7075 __releases(RCU)
7076{
7077 rcu_read_unlock_sched();
7078 mutex_unlock(&ftrace_lock);
7079}
7080
7081static int fpid_show(struct seq_file *m, void *v)
7082{
7083 if (v == FTRACE_NO_PIDS) {
7084 seq_puts(m, "no pid\n");
7085 return 0;
7086 }
7087
7088 return trace_pid_show(m, v);
7089}
7090
7091static const struct seq_operations ftrace_pid_sops = {
7092 .start = fpid_start,
7093 .next = fpid_next,
7094 .stop = fpid_stop,
7095 .show = fpid_show,
7096};
7097
7098static int
7099ftrace_pid_open(struct inode *inode, struct file *file)
7100{
7101 struct trace_array *tr = inode->i_private;
7102 struct seq_file *m;
7103 int ret = 0;
7104
7105 if (trace_array_get(tr) < 0)
7106 return -ENODEV;
7107
7108 if ((file->f_mode & FMODE_WRITE) &&
7109 (file->f_flags & O_TRUNC))
7110 ftrace_pid_reset(tr);
7111
7112 ret = seq_open(file, &ftrace_pid_sops);
7113 if (ret < 0) {
7114 trace_array_put(tr);
7115 } else {
7116 m = file->private_data;
7117
7118 m->private = tr;
7119 }
7120
7121 return ret;
7122}
7123
7124static void ignore_task_cpu(void *data)
7125{
7126 struct trace_array *tr = data;
7127 struct trace_pid_list *pid_list;
7128
	/*
	 * This function is called by on_each_cpu() while the
	 * ftrace_lock is held.
	 */
7133 pid_list = rcu_dereference_protected(tr->function_pids,
7134 mutex_is_locked(&ftrace_lock));
7135
7136 this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
7137 trace_ignore_this_task(pid_list, current));
7138}
7139
7140static ssize_t
7141ftrace_pid_write(struct file *filp, const char __user *ubuf,
7142 size_t cnt, loff_t *ppos)
7143{
7144 struct seq_file *m = filp->private_data;
7145 struct trace_array *tr = m->private;
7146 struct trace_pid_list *filtered_pids = NULL;
7147 struct trace_pid_list *pid_list;
7148 ssize_t ret;
7149
7150 if (!cnt)
7151 return 0;
7152
7153 mutex_lock(&ftrace_lock);
7154
7155 filtered_pids = rcu_dereference_protected(tr->function_pids,
7156 lockdep_is_held(&ftrace_lock));
7157
7158 ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
7159 if (ret < 0)
7160 goto out;
7161
7162 rcu_assign_pointer(tr->function_pids, pid_list);
7163
7164 if (filtered_pids) {
7165 synchronize_rcu();
7166 trace_free_pid_list(filtered_pids);
7167 } else if (pid_list) {
		/* Register a probe to set whether to ignore the tracing of a task */
7169 register_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
7170 }
7171
	/*
	 * Ignoring of pids is done at task switch. But we have to
	 * check for those tasks that are currently running.
	 * Always updating the state.
	 */
7177 on_each_cpu(ignore_task_cpu, tr, 1);
7178
7179 ftrace_update_pid_func();
7180 ftrace_startup_all(0);
7181 out:
7182 mutex_unlock(&ftrace_lock);
7183
7184 if (ret > 0)
7185 *ppos += ret;
7186
7187 return ret;
7188}
7189
7190static int
7191ftrace_pid_release(struct inode *inode, struct file *file)
7192{
7193 struct trace_array *tr = inode->i_private;
7194
7195 trace_array_put(tr);
7196
7197 return seq_release(inode, file);
7198}
7199
7200static const struct file_operations ftrace_pid_fops = {
7201 .open = ftrace_pid_open,
7202 .write = ftrace_pid_write,
7203 .read = seq_read,
7204 .llseek = tracing_lseek,
7205 .release = ftrace_pid_release,
7206};
7207
7208void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer)
7209{
7210 trace_create_file("set_ftrace_pid", 0644, d_tracer,
7211 tr, &ftrace_pid_fops);
7212}
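
/*
 * Example (tracefs; "$$" is the writing shell's PID): limit function
 * tracing to a single task.
 *
 *	# echo $$ > set_ftrace_pid
 *	# echo function > current_tracer
 *
 * Forked children are followed only when the function-fork option is
 * also set.
 */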
7213
7214void __init ftrace_init_tracefs_toplevel(struct trace_array *tr,
7215 struct dentry *d_tracer)
7216{
7217
7218 WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
7219
7220 ftrace_init_dyn_tracefs(d_tracer);
7221 ftrace_profile_tracefs(d_tracer);
7222}
7223
/**
 * ftrace_kill - totally shutdown ftrace
 *
 * This is meant for use by panic code: it disables ftrace without
 * taking any locks, so it may be called from interrupt or NMI context
 * while the system is going down.
 */
7231void ftrace_kill(void)
7232{
7233 ftrace_disabled = 1;
7234 ftrace_enabled = 0;
7235 ftrace_trace_function = ftrace_stub;
7236}
7237
/**
 * ftrace_is_dead - Test if ftrace is dead or not.
 *
 * Returns 1 if ftrace is "dead", zero otherwise.
 */
7241int ftrace_is_dead(void)
7242{
7243 return ftrace_disabled;
7244}
7245
/**
 * register_ftrace_function - register a function for profiling
 * @ops: ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
7257int register_ftrace_function(struct ftrace_ops *ops)
7258{
7259 int ret = -1;
7260
7261 ftrace_ops_init(ops);
7262
7263 mutex_lock(&ftrace_lock);
7264
7265 ret = ftrace_startup(ops, 0);
7266
7267 mutex_unlock(&ftrace_lock);
7268
7269 return ret;
7270}
7271EXPORT_SYMBOL_GPL(register_ftrace_function);
7272
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops: ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
7279int unregister_ftrace_function(struct ftrace_ops *ops)
7280{
7281 int ret;
7282
7283 mutex_lock(&ftrace_lock);
7284 ret = ftrace_shutdown(ops, 0);
7285 mutex_unlock(&ftrace_lock);
7286
7287 return ret;
7288}
7289EXPORT_SYMBOL_GPL(unregister_ftrace_function);
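
/*
 * A minimal end-to-end sketch (hypothetical module code; all names here
 * are made up): the callback must be notrace, since anything it calls
 * that is itself traced would otherwise recurse.
 *
 *	static void notrace my_trace_func(unsigned long ip,
 *					  unsigned long parent_ip,
 *					  struct ftrace_ops *op,
 *					  struct pt_regs *regs)
 *	{
 *		trace_printk("hit %ps from %ps\n",
 *			     (void *)ip, (void *)parent_ip);
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_trace_func,
 *		.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
 *	};
 *
 *	ret = register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */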
7290
7291static bool is_permanent_ops_registered(void)
7292{
7293 struct ftrace_ops *op;
7294
7295 do_for_each_ftrace_op(op, ftrace_ops_list) {
7296 if (op->flags & FTRACE_OPS_FL_PERMANENT)
7297 return true;
7298 } while_for_each_ftrace_op(op);
7299
7300 return false;
7301}
7302
7303int
7304ftrace_enable_sysctl(struct ctl_table *table, int write,
7305 void __user *buffer, size_t *lenp,
7306 loff_t *ppos)
7307{
7308 int ret = -ENODEV;
7309
7310 mutex_lock(&ftrace_lock);
7311
7312 if (unlikely(ftrace_disabled))
7313 goto out;
7314
7315 ret = proc_dointvec(table, write, buffer, lenp, ppos);
7316
7317 if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
7318 goto out;
7319
7320 if (ftrace_enabled) {

		/* we are starting ftrace again */
7323 if (rcu_dereference_protected(ftrace_ops_list,
7324 lockdep_is_held(&ftrace_lock)) != &ftrace_list_end)
7325 update_ftrace_function();
7326
7327 ftrace_startup_sysctl();
7328
7329 } else {
7330 if (is_permanent_ops_registered()) {
7331 ftrace_enabled = true;
7332 ret = -EBUSY;
7333 goto out;
7334 }
7335
		/* stopping ftrace calls (just send to ftrace_stub) */
7337 ftrace_trace_function = ftrace_stub;
7338
7339 ftrace_shutdown_sysctl();
7340 }
7341
7342 last_ftrace_enabled = !!ftrace_enabled;
7343 out:
7344 mutex_unlock(&ftrace_lock);
7345 return ret;
7346}
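
/*
 * Example (run time): this handler backs the kernel.ftrace_enabled
 * sysctl.
 *
 *	# sysctl kernel.ftrace_enabled=0
 *
 * Note that turning tracing off fails with -EBUSY while an ops with
 * FTRACE_OPS_FL_PERMANENT is registered, per the check above.
 */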
7347
7348#ifdef CONFIG_FUNCTION_GRAPH_TRACER
7349
7350static struct ftrace_ops graph_ops = {
7351 .func = ftrace_stub,
7352 .flags = FTRACE_OPS_FL_RECURSION_SAFE |
7353 FTRACE_OPS_FL_INITIALIZED |
7354 FTRACE_OPS_FL_PID |
7355 FTRACE_OPS_FL_STUB,
7356#ifdef FTRACE_GRAPH_TRAMP_ADDR
7357 .trampoline = FTRACE_GRAPH_TRAMP_ADDR,
	/* trampoline_size is only needed for dynamically allocated tramps */
7359#endif
7360 ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
7361};
7362
7363void ftrace_graph_sleep_time_control(bool enable)
7364{
7365 fgraph_sleep_time = enable;
7366}
7367
7368void ftrace_graph_graph_time_control(bool enable)
7369{
7370 fgraph_graph_time = enable;
7371}
7372
7373int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
7374{
7375 return 0;
7376}
7377
/* The callbacks that hook a function */
7379trace_func_graph_ret_t ftrace_graph_return =
7380 (trace_func_graph_ret_t)ftrace_stub;
7381trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
7382static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
7383
/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
7385static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
7386{
7387 int i;
7388 int ret = 0;
7389 int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
7390 struct task_struct *g, *t;
7391
7392 for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
7393 ret_stack_list[i] =
7394 kmalloc_array(FTRACE_RETFUNC_DEPTH,
7395 sizeof(struct ftrace_ret_stack),
7396 GFP_KERNEL);
7397 if (!ret_stack_list[i]) {
7398 start = 0;
7399 end = i;
7400 ret = -ENOMEM;
7401 goto free;
7402 }
7403 }
7404
7405 read_lock(&tasklist_lock);
7406 do_each_thread(g, t) {
7407 if (start == end) {
7408 ret = -EAGAIN;
7409 goto unlock;
7410 }
7411
7412 if (t->ret_stack == NULL) {
7413 atomic_set(&t->tracing_graph_pause, 0);
7414 atomic_set(&t->trace_overrun, 0);
7415 t->curr_ret_stack = -1;
			/* Make sure the tasks see the -1 first: */
7417 smp_wmb();
7418 t->ret_stack = ret_stack_list[start++];
7419 }
7420 } while_each_thread(g, t);
7421
7422unlock:
7423 read_unlock(&tasklist_lock);
7424free:
7425 for (i = start; i < end; i++)
7426 kfree(ret_stack_list[i]);
7427 return ret;
7428}
7429
7430static void
7431ftrace_graph_probe_sched_switch(void *ignore, bool preempt,
7432 struct task_struct *prev, struct task_struct *next)
7433{
7434 unsigned long long timestamp;
7435 int index;
7436
	/*
	 * Does the user want to count the time a function was asleep.
	 * If so, do not update the time stamps.
	 */
7441 if (fgraph_sleep_time)
7442 return;
7443
7444 timestamp = trace_clock_local();
7445
7446 prev->ftrace_timestamp = timestamp;

	/* only process tasks that we timestamped */
7449 if (!next->ftrace_timestamp)
7450 return;
7451
	/*
	 * Update all the counters in next to make up for the
	 * time next was sleeping.
	 */
7456 timestamp -= next->ftrace_timestamp;
7457
7458 for (index = next->curr_ret_stack; index >= 0; index--)
7459 next->ret_stack[index].calltime += timestamp;
7460}
7461
/* Allocate a return stack for each task */
7463static int start_graph_tracing(void)
7464{
7465 struct ftrace_ret_stack **ret_stack_list;
7466 int ret, cpu;
7467
7468 ret_stack_list = kmalloc_array(FTRACE_RETSTACK_ALLOC_SIZE,
7469 sizeof(struct ftrace_ret_stack *),
7470 GFP_KERNEL);
7471
7472 if (!ret_stack_list)
7473 return -ENOMEM;
7474
	/* The cpu_boot init_task->ret_stack will never be freed */
7476 for_each_online_cpu(cpu) {
7477 if (!idle_task(cpu)->ret_stack)
7478 ftrace_graph_init_idle_task(idle_task(cpu), cpu);
7479 }
7480
7481 do {
7482 ret = alloc_retstack_tasklist(ret_stack_list);
7483 } while (ret == -EAGAIN);
7484
7485 if (!ret) {
7486 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
7487 if (ret)
7488 pr_info("ftrace_graph: Couldn't activate tracepoint"
7489 " probe to kernel_sched_switch\n");
7490 }
7491
7492 kfree(ret_stack_list);
7493 return ret;
7494}
7495
7496
/*
 * Hibernation protection.
 * The state of the current task is too unstable during
 * suspend/restore to disk. We want to protect against that.
 */
7501static int
7502ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
7503 void *unused)
7504{
7505 switch (state) {
7506 case PM_HIBERNATION_PREPARE:
7507 pause_graph_tracing();
7508 break;
7509
7510 case PM_POST_HIBERNATION:
7511 unpause_graph_tracing();
7512 break;
7513 }
7514 return NOTIFY_DONE;
7515}
7516
7517static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
7518{
7519 if (!ftrace_ops_test(&global_ops, trace->func, NULL))
7520 return 0;
7521 return __ftrace_graph_entry(trace);
7522}
7523
7524
/*
 * The function graph tracer should only trace the functions defined
 * by set_ftrace_filter and set_ftrace_notrace. If another function
 * tracer ops is registered, the graph tracer requires testing the
 * function against the global ops, and not just trace any function
 * that any ftrace_ops registered.
 */
7531static void update_function_graph_func(void)
7532{
7533 struct ftrace_ops *op;
7534 bool do_test = false;
7535
	/*
	 * The graph and global ops share the same set of functions
	 * to test. If any other ops is on the list, then
	 * the graph tracing needs to test if its the function
	 * it should call.
	 */
7542 do_for_each_ftrace_op(op, ftrace_ops_list) {
7543 if (op != &global_ops && op != &graph_ops &&
7544 op != &ftrace_list_end) {
7545 do_test = true;
			/* in double loop, break out with goto */
7547 goto out;
7548 }
7549 } while_for_each_ftrace_op(op);
7550 out:
7551 if (do_test)
7552 ftrace_graph_entry = ftrace_graph_entry_test;
7553 else
7554 ftrace_graph_entry = __ftrace_graph_entry;
7555}
7556
7557static struct notifier_block ftrace_suspend_notifier = {
7558 .notifier_call = ftrace_suspend_notifier_call,
7559};
7560
7561int register_ftrace_graph(trace_func_graph_ret_t retfunc,
7562 trace_func_graph_ent_t entryfunc)
7563{
7564 int ret = 0;
7565
7566 mutex_lock(&ftrace_lock);
7567
	/* we currently allow only one tracer registered at a time */
7569 if (ftrace_graph_active) {
7570 ret = -EBUSY;
7571 goto out;
7572 }
7573
7574 register_pm_notifier(&ftrace_suspend_notifier);
7575
7576 ftrace_graph_active++;
7577 ret = start_graph_tracing();
7578 if (ret) {
7579 ftrace_graph_active--;
7580 goto out;
7581 }
7582
7583 ftrace_graph_return = retfunc;
7584
	/*
	 * Update the indirect function to the entryfunc, and the
	 * function that gets called to the entry_test first. Then
	 * call the update fgraph entry function to determine if
	 * the entryfunc should be called directly or not.
	 */
7591 __ftrace_graph_entry = entryfunc;
7592 ftrace_graph_entry = ftrace_graph_entry_test;
7593 update_function_graph_func();
7594
7595 ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);
7596out:
7597 mutex_unlock(&ftrace_lock);
7598 return ret;
7599}
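
/*
 * A minimal usage sketch (hypothetical callbacks; the names are made
 * up): the entry handler returns non-zero to trace the function, and
 * the return handler runs when the traced function exits.
 *
 *	static int my_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;
 *	}
 *
 *	static void my_return(struct ftrace_graph_ret *trace)
 *	{
 *	}
 *
 *	ret = register_ftrace_graph(my_return, my_entry);
 *	...
 *	unregister_ftrace_graph();
 */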
7600
7601void unregister_ftrace_graph(void)
7602{
7603 mutex_lock(&ftrace_lock);
7604
7605 if (unlikely(!ftrace_graph_active))
7606 goto out;
7607
7608 ftrace_graph_active--;
7609 ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
7610 ftrace_graph_entry = ftrace_graph_entry_stub;
7611 __ftrace_graph_entry = ftrace_graph_entry_stub;
7612 ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
7613 unregister_pm_notifier(&ftrace_suspend_notifier);
7614 unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
7615
7616 out:
7617 mutex_unlock(&ftrace_lock);
7618}
7619
7620static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
7621
7622static void
7623graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
7624{
7625 atomic_set(&t->tracing_graph_pause, 0);
7626 atomic_set(&t->trace_overrun, 0);
7627 t->ftrace_timestamp = 0;
	/* make curr_ret_stack visible before we add the ret_stack */
7629 smp_wmb();
7630 t->ret_stack = ret_stack;
7631}
7632
/*
 * Allocate a return stack for the idle task. May be the first
 * time through, or it may be done by CPU hotplug online.
 */
7637void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
7638{
7639 t->curr_ret_stack = -1;
7640
	/*
	 * The idle task has no parent, it either has its own
	 * stack or no stack at all.
	 */
7644 if (t->ret_stack)
7645 WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
7646
7647 if (ftrace_graph_active) {
7648 struct ftrace_ret_stack *ret_stack;
7649
7650 ret_stack = per_cpu(idle_ret_stack, cpu);
7651 if (!ret_stack) {
7652 ret_stack =
7653 kmalloc_array(FTRACE_RETFUNC_DEPTH,
7654 sizeof(struct ftrace_ret_stack),
7655 GFP_KERNEL);
7656 if (!ret_stack)
7657 return;
7658 per_cpu(idle_ret_stack, cpu) = ret_stack;
7659 }
7660 graph_init_task(t, ret_stack);
7661 }
7662}
7663
/* Allocate a return stack for newly created task */
7665void ftrace_graph_init_task(struct task_struct *t)
7666{
	/* Make sure we do not use the parent ret_stack */
7668 t->ret_stack = NULL;
7669 t->curr_ret_stack = -1;
7670
7671 if (ftrace_graph_active) {
7672 struct ftrace_ret_stack *ret_stack;
7673
7674 ret_stack = kmalloc_array(FTRACE_RETFUNC_DEPTH,
7675 sizeof(struct ftrace_ret_stack),
7676 GFP_KERNEL);
7677 if (!ret_stack)
7678 return;
7679 graph_init_task(t, ret_stack);
7680 }
7681}
7682
7683void ftrace_graph_exit_task(struct task_struct *t)
7684{
7685 struct ftrace_ret_stack *ret_stack = t->ret_stack;
7686
7687 t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
7689 barrier();
7690
7691 kfree(ret_stack);
7692}
7693#endif
7694