/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/bsearch.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>

#include <trace/events/sched.h>

#include <asm/setup.h>

#include "trace_output.h"
#include "trace_stat.h"

#define FTRACE_WARN_ON(cond)			\
	({					\
		int ___r = cond;		\
		if (WARN_ON(___r))		\
			ftrace_kill();		\
		___r;				\
	})

#define FTRACE_WARN_ON_ONCE(cond)		\
	({					\
		int ___r = cond;		\
		if (WARN_ON_ONCE(___r))		\
			ftrace_kill();		\
		___r;				\
	})

/* hash bits for specific function selection */
#define FTRACE_HASH_BITS 7
#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
#define FTRACE_HASH_DEFAULT_BITS 10
#define FTRACE_HASH_MAX_BITS 12

#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)

#ifdef CONFIG_DYNAMIC_FTRACE
#define INIT_REGEX_LOCK(opsname)	\
	.regex_lock = __MUTEX_INITIALIZER(opsname.regex_lock),
#else
#define INIT_REGEX_LOCK(opsname)
#endif

static struct ftrace_ops ftrace_list_end __read_mostly = {
	.func		= ftrace_stub,
	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
};

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Current function tracing op */
struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
/* What to set function_trace_op to */
static struct ftrace_ops *set_function_trace_op;

/* List for set_ftrace_pid's pids. */
LIST_HEAD(ftrace_pids);
struct ftrace_pid {
	struct list_head list;
	struct pid *pid;
};

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_MUTEX(ftrace_lock);

static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
static struct ftrace_ops global_ops;
static struct ftrace_ops control_ops;

#if ARCH_SUPPORTS_FTRACE_OPS
static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
				 struct ftrace_ops *op, struct pt_regs *regs);
#else
/* See comment below, where ftrace_ops_list_func is defined */
static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
#endif

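/*
 * Traverse the ftrace_global_list, invoking all entries.  The reason that we
 * can use rcu_dereference_raw_notrace() is that elements removed from this
 * list are simply leaked, so there is no need to interact with a grace-period
 * mechanism.  The rcu_dereference_raw_notrace() calls are needed to handle
 * concurrent insertions into the ftrace_global_list.
 *
 * Silly Alpha and silly pointer-speculation compiler optimizations!
 */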
#define do_for_each_ftrace_op(op, list)			\
	op = rcu_dereference_raw_notrace(list);		\
	do

/*
 * Optimized for just a single item in the list (as that is the normal case).
 */
#define while_for_each_ftrace_op(op)				\
	while (likely(op = rcu_dereference_raw_notrace((op)->next)) &&	\
	       unlikely((op) != &ftrace_list_end))

static inline void ftrace_ops_init(struct ftrace_ops *ops)
{
#ifdef CONFIG_DYNAMIC_FTRACE
	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
		mutex_init(&ops->regex_lock);
		ops->flags |= FTRACE_OPS_FL_INITIALIZED;
	}
#endif
}

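/**
 * ftrace_nr_registered_ops - return number of ops registered
 *
 * Returns the number of ftrace_ops registered and tracing functions
 */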
int ftrace_nr_registered_ops(void)
{
	struct ftrace_ops *ops;
	int cnt = 0;

	mutex_lock(&ftrace_lock);

	for (ops = ftrace_ops_list;
	     ops != &ftrace_list_end; ops = ops->next)
		cnt++;

	mutex_unlock(&ftrace_lock);

	return cnt;
}

static void
ftrace_global_list_func(unsigned long ip, unsigned long parent_ip,
			struct ftrace_ops *op, struct pt_regs *regs)
{
	int bit;

	bit = trace_test_and_set_recursion(TRACE_GLOBAL_START, TRACE_GLOBAL_MAX);
	if (bit < 0)
		return;

	do_for_each_ftrace_op(op, ftrace_global_list) {
		op->func(ip, parent_ip, op, regs);
	} while_for_each_ftrace_op(op);

	trace_clear_recursion(bit);
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
			    struct ftrace_ops *op, struct pt_regs *regs)
{
	if (!test_tsk_trace_trace(current))
		return;

	ftrace_pid_function(ip, parent_ip, op, regs);
}

static void set_ftrace_pid_function(ftrace_func_t func)
{
	/* do not set ftrace_pid_function to itself! */
	if (func != ftrace_pid_func)
		ftrace_pid_function = func;
}

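/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This resets the function tracing callbacks back to ftrace_stub,
 * which in essence stops tracing.
 */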
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	ftrace_pid_function = ftrace_stub;
}

static void control_ops_disable_all(struct ftrace_ops *ops)
{
	int cpu;

	for_each_possible_cpu(cpu)
		*per_cpu_ptr(ops->disabled, cpu) = 1;
}

static int control_ops_alloc(struct ftrace_ops *ops)
{
	int __percpu *disabled;

	disabled = alloc_percpu(int);
	if (!disabled)
		return -ENOMEM;

	ops->disabled = disabled;
	control_ops_disable_all(ops);
	return 0;
}

static void control_ops_free(struct ftrace_ops *ops)
{
	free_percpu(ops->disabled);
}

static void update_global_ops(void)
{
	ftrace_func_t func;

	/*
	 * If there's only one function registered, then call that
	 * function directly. Otherwise, we need to iterate over the
	 * registered callers.
	 */
	if (ftrace_global_list == &ftrace_list_end ||
	    ftrace_global_list->next == &ftrace_list_end) {
		func = ftrace_global_list->func;
		/*
		 * As we are calling the function directly.
		 * If it does not have recursion protection,
		 * the function_trace_op needs to be updated
		 * accordingly.
		 */
		if (ftrace_global_list->flags & FTRACE_OPS_FL_RECURSION_SAFE)
			global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
		else
			global_ops.flags &= ~FTRACE_OPS_FL_RECURSION_SAFE;
	} else {
		func = ftrace_global_list_func;
		/* The list has its own recursion protection. */
		global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
	}

	/* If we filter on pids, update to use the pid function */
	if (!list_empty(&ftrace_pids)) {
		set_ftrace_pid_function(func);
		func = ftrace_pid_func;
	}

	global_ops.func = func;
}

static void ftrace_sync(struct work_struct *work)
{
	/*
	 * This function is just a stub to implement a hard force
	 * of synchronize_sched(). This requires synchronizing
	 * tasks even in userspace and idle.
	 *
	 * Yes, function tracing is rude.
	 */
}

static void ftrace_sync_ipi(void *data)
{
	/* Probably not needed, but do it anyway */
	smp_rmb();
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static void update_function_graph_func(void);
#else
static inline void update_function_graph_func(void) { }
#endif

static void update_ftrace_function(void)
{
	ftrace_func_t func;

	update_global_ops();

	/*
	 * If we are at the end of the list and this ops is
	 * recursion safe and not dynamic and the arch supports passing ops,
	 * then have the mcount trampoline call the function directly.
	 */
	if (ftrace_ops_list == &ftrace_list_end ||
	    (ftrace_ops_list->next == &ftrace_list_end &&
	     !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC) &&
	     (ftrace_ops_list->flags & FTRACE_OPS_FL_RECURSION_SAFE) &&
	     !FTRACE_FORCE_LIST_FUNC)) {
		/* Set the ftrace_ops that the arch callback uses */
		if (ftrace_ops_list == &global_ops)
			set_function_trace_op = ftrace_global_list;
		else
			set_function_trace_op = ftrace_ops_list;
		func = ftrace_ops_list->func;
	} else {
		/* Just use the default ftrace_ops */
		set_function_trace_op = &ftrace_list_end;
		func = ftrace_ops_list_func;
	}

	/* If there's no change, then do nothing more here */
	if (ftrace_trace_function == func)
		return;

	update_function_graph_func();

	/*
	 * If we are using the list function, it doesn't care
	 * about the function_trace_ops.
	 */
	if (func == ftrace_ops_list_func) {
		ftrace_trace_function = func;
		/*
		 * Don't even bother setting function_trace_ops,
		 * it would be racy to do so anyway.
		 */
		return;
	}

#ifndef CONFIG_DYNAMIC_FTRACE
	/*
	 * For static tracing, we need to be a bit more careful.
	 * The function change takes affect immediately. Thus,
	 * we need to coordinate the setting of the function_trace_ops
	 * with the setting of the ftrace_trace_function.
	 *
	 * Set the function to the list ops, which will call the
	 * function we want, albeit indirectly, but it handles the
	 * ftrace_ops and doesn't depend on function_trace_op.
	 */
	ftrace_trace_function = ftrace_ops_list_func;
	/*
	 * Make sure all CPUs see this. Yes this is slow, but static
	 * tracing is slow and nasty to have enabled.
	 */
	schedule_on_each_cpu(ftrace_sync);
	/* Now all cpus are using the list ops. */
	function_trace_op = set_function_trace_op;
	/* Make sure the function_trace_op is visible on all CPUs */
	smp_wmb();
	/* Nasty way to force a rmb on all cpus */
	smp_call_function(ftrace_sync_ipi, NULL, 1);
	/* OK, we are all set to update the ftrace_trace_function now! */
#endif /* !CONFIG_DYNAMIC_FTRACE */

	ftrace_trace_function = func;
}

static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
	ops->next = *list;
	/*
	 * We are entering ops into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the list.
	 */
	rcu_assign_pointer(*list, ops);
}

static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
	struct ftrace_ops **p;

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (*list == ops && ops->next == &ftrace_list_end) {
		*list = &ftrace_list_end;
		return 0;
	}

	for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops)
		return -1;

	*p = (*p)->next;
	return 0;
}

static void add_ftrace_list_ops(struct ftrace_ops **list,
				struct ftrace_ops *main_ops,
				struct ftrace_ops *ops)
{
	int first = *list == &ftrace_list_end;
	add_ftrace_ops(list, ops);
	if (first)
		add_ftrace_ops(&ftrace_ops_list, main_ops);
}

static int remove_ftrace_list_ops(struct ftrace_ops **list,
				  struct ftrace_ops *main_ops,
				  struct ftrace_ops *ops)
{
	int ret = remove_ftrace_ops(list, ops);
	if (!ret && *list == &ftrace_list_end)
		ret = remove_ftrace_ops(&ftrace_ops_list, main_ops);
	return ret;
}

static int __register_ftrace_function(struct ftrace_ops *ops)
{
	if (FTRACE_WARN_ON(ops == &global_ops))
		return -EINVAL;

	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
		return -EBUSY;

	/* We don't support both control and global flags set. */
	if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK)
		return -EINVAL;

#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
	 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
	 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
	 */
	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
	    !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
		return -EINVAL;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
		ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
#endif

	if (!core_kernel_data((unsigned long)ops))
		ops->flags |= FTRACE_OPS_FL_DYNAMIC;

	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
		add_ftrace_list_ops(&ftrace_global_list, &global_ops, ops);
		ops->flags |= FTRACE_OPS_FL_ENABLED;
	} else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
		if (control_ops_alloc(ops))
			return -ENOMEM;
		add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
	} else
		add_ftrace_ops(&ftrace_ops_list, ops);

	if (ftrace_enabled)
		update_ftrace_function();

	return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
		return -EBUSY;

	if (FTRACE_WARN_ON(ops == &global_ops))
		return -EINVAL;

	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
		ret = remove_ftrace_list_ops(&ftrace_global_list,
					     &global_ops, ops);
		if (!ret)
			ops->flags &= ~FTRACE_OPS_FL_ENABLED;
	} else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
		ret = remove_ftrace_list_ops(&ftrace_control_list,
					     &control_ops, ops);
	} else
		ret = remove_ftrace_ops(&ftrace_ops_list, ops);

	if (ret < 0)
		return ret;

	if (ftrace_enabled)
		update_ftrace_function();

	return 0;
}

static void ftrace_update_pid_func(void)
{
	/* Only do something if we are tracing something */
	if (ftrace_trace_function == ftrace_stub)
		return;

	update_ftrace_function();
}

#ifdef CONFIG_FUNCTION_PROFILER
struct ftrace_profile {
	struct hlist_node		node;
	unsigned long			ip;
	unsigned long			counter;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	unsigned long long		time;
	unsigned long long		time_squared;
#endif
};

struct ftrace_profile_page {
	struct ftrace_profile_page	*next;
	unsigned long			index;
	struct ftrace_profile		records[];
};

struct ftrace_profile_stat {
	atomic_t			disabled;
	struct hlist_head		*hash;
	struct ftrace_profile_page	*pages;
	struct ftrace_profile_page	*start;
	struct tracer_stat		stat;
};

#define PROFILE_RECORDS_SIZE						\
	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

#define PROFILES_PER_PAGE					\
	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))

static int ftrace_profile_enabled __read_mostly;

/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);

#define FTRACE_PROFILE_HASH_BITS 10
#define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)

static void *
function_stat_next(void *v, int idx)
{
	struct ftrace_profile *rec = v;
	struct ftrace_profile_page *pg;

	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

 again:
	if (idx != 0)
		rec++;

	if ((void *)rec >= (void *)&pg->records[pg->index]) {
		pg = pg->next;
		if (!pg)
			return NULL;
		rec = &pg->records[0];
		if (!rec->counter)
			goto again;
	}

	return rec;
}

static void *function_stat_start(struct tracer_stat *trace)
{
	struct ftrace_profile_stat *stat =
		container_of(trace, struct ftrace_profile_stat, stat);

	if (!stat || !stat->start)
		return NULL;

	return function_stat_next(&stat->start->records[0], 0);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* function graph compares on total time */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->time < b->time)
		return -1;
	if (a->time > b->time)
		return 1;
	else
		return 0;
}
#else
/* not function graph compares against hits */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->counter < b->counter)
		return -1;
	if (a->counter > b->counter)
		return 1;
	else
		return 0;
}
#endif

static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_printf(m, "  Function                               "
		   "Hit    Time            Avg             s^2\n"
		      "  --------                               "
		   "---    ----            ---             ---\n");
#else
	seq_printf(m, "  Function                               Hit\n"
		   "  --------                               ---\n");
#endif
	return 0;
}

static int function_stat_show(struct seq_file *m, void *v)
{
	struct ftrace_profile *rec = v;
	char str[KSYM_SYMBOL_LEN];
	int ret = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	static struct trace_seq s;
	unsigned long long avg;
	unsigned long long stddev;
#endif
	mutex_lock(&ftrace_profile_lock);

	/* we raced with function_profile_reset() */
	if (unlikely(rec->counter == 0)) {
		ret = -EBUSY;
		goto out;
	}

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_printf(m, "    ");
	avg = rec->time;
	do_div(avg, rec->counter);

	/* Sample standard deviation (s^2) */
	if (rec->counter <= 1)
		stddev = 0;
	else {
		stddev = rec->time_squared - rec->counter * avg * avg;
		/*
		 * Divide only 1000 for ns^2 -> us^2 conversion.
		 * trace_print_graph_duration will divide 1000 again.
		 */
		do_div(stddev, (rec->counter - 1) * 1000);
	}

	trace_seq_init(&s);
	trace_print_graph_duration(rec->time, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(avg, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(stddev, &s);
	trace_print_seq(m, &s);
#endif
	seq_putc(m, '\n');
out:
	mutex_unlock(&ftrace_profile_lock);

	return ret;
}

static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;

	pg = stat->pages = stat->start;

	while (pg) {
		memset(pg->records, 0, PROFILE_RECORDS_SIZE);
		pg->index = 0;
		pg = pg->next;
	}

	memset(stat->hash, 0,
	       FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
}

int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;
	int functions;
	int pages;
	int i;

	/* If we already allocated, do nothing */
	if (stat->pages)
		return 0;

	stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
	if (!stat->pages)
		return -ENOMEM;

#ifdef CONFIG_DYNAMIC_FTRACE
	functions = ftrace_update_tot_cnt;
#else
	/*
	 * We do not know the number of functions that exist because
	 * dynamic tracing is what counts them. With past experience
	 * we have around 20K functions. That should be more than enough.
	 * It is highly unlikely we will execute every function in
	 * the kernel.
	 */
	functions = 20000;
#endif

	pg = stat->start = stat->pages;

	pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);

	for (i = 1; i < pages; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
		if (!pg->next)
			goto out_free;
		pg = pg->next;
	}

	return 0;

 out_free:
	pg = stat->start;
	while (pg) {
		unsigned long tmp = (unsigned long)pg;

		pg = pg->next;
		free_page(tmp);
	}

	stat->pages = NULL;
	stat->start = NULL;

	return -ENOMEM;
}

static int ftrace_profile_init_cpu(int cpu)
{
	struct ftrace_profile_stat *stat;
	int size;

	stat = &per_cpu(ftrace_profile_stats, cpu);

	if (stat->hash) {
		/* If the profile is already created, simply reset it */
		ftrace_profile_reset(stat);
		return 0;
	}

	/*
	 * We are profiling all functions, but usually only a few thousand
	 * functions are hit. We'll make a hash of 1024 items.
	 */
	size = FTRACE_PROFILE_HASH_SIZE;

	stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);

	if (!stat->hash)
		return -ENOMEM;

	/* Preallocate the function profiling pages */
	if (ftrace_profile_pages_init(stat) < 0) {
		kfree(stat->hash);
		stat->hash = NULL;
		return -ENOMEM;
	}

	return 0;
}

static int ftrace_profile_init(void)
{
	int cpu;
	int ret = 0;

	for_each_possible_cpu(cpu) {
		ret = ftrace_profile_init_cpu(cpu);
		if (ret)
			break;
	}

	return ret;
}

/* interrupts must be disabled */
static struct ftrace_profile *
ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec;
	struct hlist_head *hhd;
	unsigned long key;

	key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
	hhd = &stat->hash[key];

	if (hlist_empty(hhd))
		return NULL;

	hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
		if (rec->ip == ip)
			return rec;
	}

	return NULL;
}

static void ftrace_add_profile(struct ftrace_profile_stat *stat,
			       struct ftrace_profile *rec)
{
	unsigned long key;

	key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
	hlist_add_head_rcu(&rec->node, &stat->hash[key]);
}

/*
 * The records are preallocated; this never allocates memory,
 * so it is safe to call from the function callback itself.
 */
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec = NULL;

	/* prevent recursion (from NMIs) */
	if (atomic_inc_return(&stat->disabled) != 1)
		goto out;

	/*
	 * Try to find the function again since an NMI
	 * could have added it
	 */
	rec = ftrace_find_profiled_func(stat, ip);
	if (rec)
		goto out;

	if (stat->pages->index == PROFILES_PER_PAGE) {
		if (!stat->pages->next)
			goto out;
		stat->pages = stat->pages->next;
	}

	rec = &stat->pages->records[stat->pages->index++];
	rec->ip = ip;
	ftrace_add_profile(stat, rec);

 out:
	atomic_dec(&stat->disabled);

	return rec;
}

static void
function_profile_call(unsigned long ip, unsigned long parent_ip,
		      struct ftrace_ops *ops, struct pt_regs *regs)
{
	struct ftrace_profile_stat *stat;
	struct ftrace_profile *rec;
	unsigned long flags;

	if (!ftrace_profile_enabled)
		return;

	local_irq_save(flags);

	stat = &__get_cpu_var(ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	rec = ftrace_find_profiled_func(stat, ip);
	if (!rec) {
		rec = ftrace_profile_alloc(stat, ip);
		if (!rec)
			goto out;
	}

	rec->counter++;
 out:
	local_irq_restore(flags);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
	function_profile_call(trace->func, 0, NULL, NULL);
	return 1;
}

static void profile_graph_return(struct ftrace_graph_ret *trace)
{
	struct ftrace_profile_stat *stat;
	unsigned long long calltime;
	struct ftrace_profile *rec;
	unsigned long flags;

	local_irq_save(flags);
	stat = &__get_cpu_var(ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	/* If the calltime was zero'd ignore it */
	if (!trace->calltime)
		goto out;

	calltime = trace->rettime - trace->calltime;

	if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
		int index;

		index = trace->depth;

		/* Append this call time to the parent time to subtract */
		if (index)
			current->ret_stack[index - 1].subtime += calltime;

		if (current->ret_stack[index].subtime < calltime)
			calltime -= current->ret_stack[index].subtime;
		else
			calltime = 0;
	}

	rec = ftrace_find_profiled_func(stat, trace->func);
	if (rec) {
		rec->time += calltime;
		rec->time_squared += calltime * calltime;
	}

 out:
	local_irq_restore(flags);
}

static int register_ftrace_profiler(void)
{
	return register_ftrace_graph(&profile_graph_return,
				     &profile_graph_entry);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_graph();
}
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly = {
	.func		= function_profile_call,
	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
	INIT_REGEX_LOCK(ftrace_profile_ops)
};

static int register_ftrace_profiler(void)
{
	return register_ftrace_function(&ftrace_profile_ops);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_function(&ftrace_profile_ops);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	val = !!val;

	mutex_lock(&ftrace_profile_lock);
	if (ftrace_profile_enabled ^ val) {
		if (val) {
			ret = ftrace_profile_init();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}

			ret = register_ftrace_profiler();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}
			ftrace_profile_enabled = 1;
		} else {
			ftrace_profile_enabled = 0;
			/*
			 * unregister_ftrace_profiler calls stop_machine
			 * so this acts like an synchronize_sched.
			 */
			unregister_ftrace_profiler();
		}
	}
 out:
	mutex_unlock(&ftrace_profile_lock);

	*ppos += cnt;

	return cnt;
}

static ssize_t
ftrace_profile_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	char buf[64];		/* big enough to hold a number */
	int r;

	r = sprintf(buf, "%u\n", ftrace_profile_enabled);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations ftrace_profile_fops = {
	.open		= tracing_open_generic,
	.read		= ftrace_profile_read,
	.write		= ftrace_profile_write,
	.llseek		= default_llseek,
};

/* used to initialize the real stat files */
static struct tracer_stat function_stats __initdata = {
	.name		= "functions",
	.stat_start	= function_stat_start,
	.stat_next	= function_stat_next,
	.stat_cmp	= function_stat_cmp,
	.stat_headers	= function_stat_headers,
	.stat_show	= function_stat_show
};

static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
{
	struct ftrace_profile_stat *stat;
	struct dentry *entry;
	char *name;
	int ret;
	int cpu;

	for_each_possible_cpu(cpu) {
		stat = &per_cpu(ftrace_profile_stats, cpu);

		/* allocate enough for function name + cpu number */
		name = kmalloc(32, GFP_KERNEL);
		if (!name) {
			/*
			 * The files created are permanent, if something happens
			 * we still do not free memory.
			 */
			WARN(1,
			     "Could not allocate stat file for cpu %d\n",
			     cpu);
			return;
		}
		stat->stat = function_stats;
		snprintf(name, 32, "function%d", cpu);
		stat->stat.name = name;
		ret = register_stat_tracer(&stat->stat);
		if (ret) {
			WARN(1,
			     "Could not register function stat for cpu %d\n",
			     cpu);
			kfree(name);
			return;
		}
	}

	entry = debugfs_create_file("function_profile_enabled", 0644,
				    d_tracer, NULL, &ftrace_profile_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'function_profile_enabled' entry\n");
}

#else /* CONFIG_FUNCTION_PROFILER */
static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
{
}
#endif /* CONFIG_FUNCTION_PROFILER */

static struct pid * const ftrace_swapper_pid = &init_struct_pid;

loff_t
ftrace_filter_lseek(struct file *file, loff_t offset, int whence)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, whence);
	else
		file->f_pos = ret = 1;

	return ret;
}

#ifdef CONFIG_DYNAMIC_FTRACE

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;

struct ftrace_func_probe {
	struct hlist_node	node;
	struct ftrace_probe_ops	*ops;
	unsigned long		flags;
	unsigned long		ip;
	void			*data;
	struct list_head	free_list;
};

struct ftrace_func_entry {
	struct hlist_node hlist;
	unsigned long ip;
};

struct ftrace_hash {
	unsigned long		size_bits;
	struct hlist_head	*buckets;
	unsigned long		count;
	struct rcu_head		rcu;
};

/*
 * We make these constant because no one should touch them,
 * but they are used as the default "empty hash", to avoid allocating
 * it all the time. These are in a read only section such that if
 * anyone does try to modify it, it will cause an exception.
 */
static const struct hlist_head empty_buckets[1];
static const struct ftrace_hash empty_hash = {
	.buckets = (struct hlist_head *)empty_buckets,
};
#define EMPTY_HASH	((struct ftrace_hash *)&empty_hash)

static struct ftrace_ops global_ops = {
	.func			= ftrace_stub,
	.notrace_hash		= EMPTY_HASH,
	.filter_hash		= EMPTY_HASH,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
	INIT_REGEX_LOCK(global_ops)
};

struct ftrace_page {
	struct ftrace_page	*next;
	struct dyn_ftrace	*records;
	int			index;
	int			size;
};

#define ENTRY_SIZE sizeof(struct dyn_ftrace)
#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)

/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static bool ftrace_hash_empty(struct ftrace_hash *hash)
{
	return !hash || !hash->count;
}

static struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
	unsigned long key;
	struct ftrace_func_entry *entry;
	struct hlist_head *hhd;

	if (ftrace_hash_empty(hash))
		return NULL;

	if (hash->size_bits > 0)
		key = hash_long(ip, hash->size_bits);
	else
		key = 0;

	hhd = &hash->buckets[key];

	hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
		if (entry->ip == ip)
			return entry;
	}
	return NULL;
}

static void __add_hash_entry(struct ftrace_hash *hash,
			     struct ftrace_func_entry *entry)
{
	struct hlist_head *hhd;
	unsigned long key;

	if (hash->size_bits)
		key = hash_long(entry->ip, hash->size_bits);
	else
		key = 0;

	hhd = &hash->buckets[key];
	hlist_add_head(&entry->hlist, hhd);
	hash->count++;
}

static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
{
	struct ftrace_func_entry *entry;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->ip = ip;
	__add_hash_entry(hash, entry);

	return 0;
}

static void
free_hash_entry(struct ftrace_hash *hash,
		struct ftrace_func_entry *entry)
{
	hlist_del(&entry->hlist);
	kfree(entry);
	hash->count--;
}

static void
remove_hash_entry(struct ftrace_hash *hash,
		  struct ftrace_func_entry *entry)
{
	hlist_del(&entry->hlist);
	hash->count--;
}

static void ftrace_hash_clear(struct ftrace_hash *hash)
{
	struct hlist_head *hhd;
	struct hlist_node *tn;
	struct ftrace_func_entry *entry;
	int size = 1 << hash->size_bits;
	int i;

	if (!hash->count)
		return;

	for (i = 0; i < size; i++) {
		hhd = &hash->buckets[i];
		hlist_for_each_entry_safe(entry, tn, hhd, hlist)
			free_hash_entry(hash, entry);
	}
	FTRACE_WARN_ON(hash->count);
}

static void free_ftrace_hash(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	ftrace_hash_clear(hash);
	kfree(hash->buckets);
	kfree(hash);
}

static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
{
	struct ftrace_hash *hash;

	hash = container_of(rcu, struct ftrace_hash, rcu);
	free_ftrace_hash(hash);
}

static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
}

void ftrace_free_filter(struct ftrace_ops *ops)
{
	ftrace_ops_init(ops);
	free_ftrace_hash(ops->filter_hash);
	free_ftrace_hash(ops->notrace_hash);
}

static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
{
	struct ftrace_hash *hash;
	int size;

	hash = kzalloc(sizeof(*hash), GFP_KERNEL);
	if (!hash)
		return NULL;

	size = 1 << size_bits;
	hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);

	if (!hash->buckets) {
		kfree(hash);
		return NULL;
	}

	hash->size_bits = size_bits;

	return hash;
}

static struct ftrace_hash *
alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
{
	struct ftrace_func_entry *entry;
	struct ftrace_hash *new_hash;
	int size;
	int ret;
	int i;

	new_hash = alloc_ftrace_hash(size_bits);
	if (!new_hash)
		return NULL;

	/* Empty hash? */
	if (ftrace_hash_empty(hash))
		return new_hash;

	size = 1 << hash->size_bits;
	for (i = 0; i < size; i++) {
		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
			ret = add_hash_entry(new_hash, entry->ip);
			if (ret < 0)
				goto free_hash;
		}
	}

	FTRACE_WARN_ON(new_hash->count != hash->count);

	return new_hash;

 free_hash:
	free_ftrace_hash(new_hash);
	return NULL;
}

static bool
ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash);
static bool
ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash);

static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
				       struct ftrace_hash *new_hash);

static int
ftrace_hash_move(struct ftrace_ops *ops, int enable,
		 struct ftrace_hash **dst, struct ftrace_hash *src)
{
	struct ftrace_func_entry *entry;
	struct hlist_node *tn;
	struct hlist_head *hhd;
	struct ftrace_hash *old_hash;
	struct ftrace_hash *new_hash;
	int size = src->count;
	int bits = 0;
	int ret;
	int i;

	/* Reject setting notrace hash on IPMODIFY ftrace_ops */
	if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable)
		return -EINVAL;

	/*
	 * If the new source is empty, just free dst and assign it
	 * the empty_hash.
	 */
	if (!src->count) {
		new_hash = EMPTY_HASH;
		goto update;
	}

	/*
	 * Make the hash size about 1/2 the # found
	 */
	for (size /= 2; size; size >>= 1)
		bits++;

	/* Don't allocate too much */
	if (bits > FTRACE_HASH_MAX_BITS)
		bits = FTRACE_HASH_MAX_BITS;

	new_hash = alloc_ftrace_hash(bits);
	if (!new_hash)
		return -ENOMEM;

	size = 1 << src->size_bits;
	for (i = 0; i < size; i++) {
		hhd = &src->buckets[i];
		hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
			remove_hash_entry(src, entry);
			__add_hash_entry(new_hash, entry);
		}
	}

update:
	/* Make sure this can be applied if it is IPMODIFY ftrace_ops */
	if (enable) {
		/* IPMODIFY should be updated only when filter_hash updating */
		ret = ftrace_hash_ipmodify_update(ops, new_hash);
		if (ret < 0) {
			free_ftrace_hash(new_hash);
			return ret;
		}
	}

	/*
	 * Remove the current set, update the hash and add
	 * them back.
	 */
	ftrace_hash_rec_disable(ops, enable);

	old_hash = *dst;
	rcu_assign_pointer(*dst, new_hash);
	free_ftrace_hash_rcu(old_hash);

	ftrace_hash_rec_enable(ops, enable);

	return 0;
}

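/*
 * Test the hashes for this ops to see if we want to call
 * the ops->func or not.
 *
 * It's a match if the ip is in the ops->filter_hash or
 * the filter_hash does not exist or is empty,
 *  AND
 * the ip is not in the ops->notrace_hash.
 *
 * This needs to be called with preemption disabled as
 * the hashes are freed with call_rcu_sched().
 */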
static int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
{
	struct ftrace_hash *filter_hash;
	struct ftrace_hash *notrace_hash;
	int ret;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * There's a small race when adding ops that the ftrace handler
	 * that wants regs, may be called without them. We can not
	 * allow that handler to be called if regs is NULL.
	 */
	if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
		return 0;
#endif

	filter_hash = rcu_dereference_raw_notrace(ops->filter_hash);
	notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash);

	if ((ftrace_hash_empty(filter_hash) ||
	     ftrace_lookup_ip(filter_hash, ip)) &&
	    (ftrace_hash_empty(notrace_hash) ||
	     !ftrace_lookup_ip(notrace_hash, ip)))
		ret = 1;
	else
		ret = 0;

	return ret;
}

/*
 * This is a double for. Do not use 'break' to break out of the loop,
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)					\
	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
		int _____i;						\
		for (_____i = 0; _____i < pg->index; _____i++) {	\
			rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()		\
		}				\
	}

static int ftrace_cmp_recs(const void *a, const void *b)
{
	const struct dyn_ftrace *key = a;
	const struct dyn_ftrace *rec = b;

	if (key->flags < rec->ip)
		return -1;
	if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
		return 1;
	return 0;
}

static unsigned long ftrace_location_range(unsigned long start, unsigned long end)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	struct dyn_ftrace key;

	key.ip = start;
	key.flags = end;	/* overload flags, as it is unsigned long */

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		if (end < pg->records[0].ip ||
		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
			continue;
		rec = bsearch(&key, pg->records, pg->index,
			      sizeof(struct dyn_ftrace),
			      ftrace_cmp_recs);
		if (rec)
			return rec->ip;
	}

	return 0;
}

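/**
 * ftrace_location - return true if the ip giving is a traced location
 * @ip: the instruction pointer to check
 *
 * Returns rec->ip if @ip given is a pointer to a ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */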
unsigned long ftrace_location(unsigned long ip)
{
	return ftrace_location_range(ip, ip);
}

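/**
 * ftrace_text_reserved - return true if range contains an ftrace location
 * @start: start of range to search
 * @end: end of range to search (inclusive). @end points to the last byte to check.
 *
 * Returns 1 if @start and @end contains a ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */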
int ftrace_text_reserved(void *start, void *end)
{
	unsigned long ret;

	ret = ftrace_location_range((unsigned long)start,
				    (unsigned long)end);

	return (int)!!ret;
}

static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
				     int filter_hash,
				     bool inc)
{
	struct ftrace_hash *hash;
	struct ftrace_hash *other_hash;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	bool update = false;
	int count = 0;
	int all = 0;

	/* Only update if the ops has been registered */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return false;

	/*
	 * In the filter_hash case:
	 *   If the count is zero, we update all records.
	 *   Otherwise we just update the items in the hash.
	 *
	 * In the notrace_hash case:
	 *   We enable the update in the hash.
	 *   As disabling notrace means enabling the tracing,
	 *   and enabling notrace means disabling, the inc variable
	 *   gets inversed.
	 */
	if (filter_hash) {
		hash = ops->filter_hash;
		other_hash = ops->notrace_hash;
		if (ftrace_hash_empty(hash))
			all = 1;
	} else {
		inc = !inc;
		hash = ops->notrace_hash;
		other_hash = ops->filter_hash;
		/*
		 * If the notrace hash has no items,
		 * then there's nothing to do.
		 */
		if (ftrace_hash_empty(hash))
			return false;
	}

	do_for_each_ftrace_rec(pg, rec) {
		int in_other_hash = 0;
		int in_hash = 0;
		int match = 0;

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

		if (all) {
			/*
			 * Only the filter_hash affects all records.
			 * Update if the record is not in the notrace hash.
			 */
			if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
				match = 1;
		} else {
			in_hash = !!ftrace_lookup_ip(hash, rec->ip);
			in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);

			/*
			 * If filter_hash is set, we want to match all functions
			 * that are in the hash but not in the other hash.
			 *
			 * If filter_hash is not set, then we are decrementing.
			 * That means we match anything that is in the hash
			 * and also in the other_hash. That is, we need to turn
			 * off functions in the other hash because they are disabled
			 * by this hash.
			 */
			if (filter_hash && in_hash && !in_other_hash)
				match = 1;
			else if (!filter_hash && in_hash &&
				 (in_other_hash || ftrace_hash_empty(other_hash)))
				match = 1;
		}
		if (!match)
			continue;

		if (inc) {
			rec->flags++;
			if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX))
				return false;

			/*
			 * If any ops wants regs saved for this function
			 * then all ops will get saved regs.
			 */
			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
				rec->flags |= FTRACE_FL_REGS;
		} else {
			if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0))
				return false;
			rec->flags--;
		}
		count++;

		/* Must match FTRACE_UPDATE_CALLS in ftrace_modify_all_code */
		update |= ftrace_test_record(rec, 1) != FTRACE_UPDATE_IGNORE;

		/* Shortcut, if we handled all records, we are done. */
		if (!all && count == hash->count)
			return update;
	} while_for_each_ftrace_rec();

	return update;
}

static bool ftrace_hash_rec_disable(struct ftrace_ops *ops,
				    int filter_hash)
{
	return __ftrace_hash_rec_update(ops, filter_hash, 0);
}

static bool ftrace_hash_rec_enable(struct ftrace_ops *ops,
				   int filter_hash)
{
	return __ftrace_hash_rec_update(ops, filter_hash, 1);
}

/*
 * Try to update IPMODIFY flag on each ftrace_rec. Return 0 if it is OK
 * or no-needed to update, -EBUSY if it detects a conflict of the flag
 * on a ftrace_rec, and -EINVAL if the new_hash tries to trace all recs.
 * Note that old_hash and new_hash has below meanings
 *  - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected)
 *  - If the hash is EMPTY_HASH, it hits nothing
 *  - Anything else hits the recs which match the hash entries.
 */
static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
					 struct ftrace_hash *old_hash,
					 struct ftrace_hash *new_hash)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec, *end = NULL;
	int in_old, in_new;

	/* Only update if the ops has been registered */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return 0;

	if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
		return 0;

	/*
	 * Since the IPMODIFY is a very address sensitive action, we do not
	 * allow ftrace_ops to set all functions to new hash.
	 */
	if (!new_hash || !old_hash)
		return -EINVAL;

	/* Update rec->flags */
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

		/* We need to update only differences of filter_hash */
		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
		if (in_old == in_new)
			continue;

		if (in_new) {
			/* New entries must ensure no others are using it */
			if (rec->flags & FTRACE_FL_IPMODIFY)
				goto rollback;
			rec->flags |= FTRACE_FL_IPMODIFY;
		} else /* Removed entry */
			rec->flags &= ~FTRACE_FL_IPMODIFY;
	} while_for_each_ftrace_rec();

	return 0;

rollback:
	end = rec;

	/* Roll back what we did above */
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

		if (rec == end)
			goto err_out;

		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
		if (in_old == in_new)
			continue;

		if (in_new)
			rec->flags &= ~FTRACE_FL_IPMODIFY;
		else
			rec->flags |= FTRACE_FL_IPMODIFY;
	} while_for_each_ftrace_rec();

err_out:
	return -EBUSY;
}

static int ftrace_hash_ipmodify_enable(struct ftrace_ops *ops)
{
	struct ftrace_hash *hash = ops->filter_hash;

	if (ftrace_hash_empty(hash))
		hash = NULL;

	return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash);
}

/* Disabling always succeeds */
static void ftrace_hash_ipmodify_disable(struct ftrace_ops *ops)
{
	struct ftrace_hash *hash = ops->filter_hash;

	if (ftrace_hash_empty(hash))
		hash = NULL;

	__ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH);
}

static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
				       struct ftrace_hash *new_hash)
{
	struct ftrace_hash *old_hash = ops->filter_hash;

	if (ftrace_hash_empty(old_hash))
		old_hash = NULL;

	if (ftrace_hash_empty(new_hash))
		new_hash = NULL;

	return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash);
}

static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

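/**
 * ftrace_bug - report and shutdown function tracer
 * @failed: The failed type (EFAULT, EINVAL, EPERM)
 * @ip: The address that failed
 *
 * The arch code that enables or disables the function tracing
 * can call ftrace_bug() when it has detected a problem in
 * modifying the code. @failed should be one of either:
 * EFAULT - if the problem happens on reading the @ip address
 * EINVAL - if what is read at @ip is not what was expected
 * EPERM - if the problem happens on writing to the @ip address
 */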
void ftrace_bug(int failed, unsigned long ip)
{
	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		print_ip_ins(" actual: ", (unsigned char *)ip);
		printk(KERN_CONT "\n");
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
}

static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
{
	unsigned long flag = 0UL;

	if (rec->flags & FTRACE_FL_DISABLED)
		return FTRACE_UPDATE_IGNORE;

	/*
	 * If we are updating calls:
	 *
	 *   If the record has a ref count, then we need to enable it
	 *   because someone is using it.
	 *
	 *   Otherwise we make sure its disabled.
	 *
	 * If we are disabling calls, then disable all records that
	 * are enabled.
	 */
	if (enable && (rec->flags & ~FTRACE_FL_MASK))
		flag = FTRACE_FL_ENABLED;

	/*
	 * If enabling and the REGS flag does not match the REGS_EN, then
	 * do not ignore this record. Set flags to fail the compare against
	 * ENABLED.
	 */
	if (flag &&
	    (!(rec->flags & FTRACE_FL_REGS) != !(rec->flags & FTRACE_FL_REGS_EN)))
		flag |= FTRACE_FL_REGS;

	/* If the state of this record hasn't changed, then do nothing */
	if ((rec->flags & FTRACE_FL_ENABLED) == flag)
		return FTRACE_UPDATE_IGNORE;

	if (flag) {
		/* Save off if rec is being enabled (for return value) */
		flag ^= rec->flags & FTRACE_FL_ENABLED;

		if (update) {
			rec->flags |= FTRACE_FL_ENABLED;
			if (flag & FTRACE_FL_REGS) {
				if (rec->flags & FTRACE_FL_REGS)
					rec->flags |= FTRACE_FL_REGS_EN;
				else
					rec->flags &= ~FTRACE_FL_REGS_EN;
			}
		}

		/*
		 * If this record is being updated from a nop, then
		 *   return UPDATE_MAKE_CALL.
		 * Otherwise, if the EN flag is set, then return
		 *   UPDATE_MODIFY_CALL_REGS to tell the caller to convert
		 *   from the save regs, to a non-save regs function.
		 * Otherwise,
		 *   return UPDATE_MODIFY_CALL to tell the caller to convert
		 *   from the non-save regs, to a save regs function.
		 */
		if (flag & FTRACE_FL_ENABLED)
			return FTRACE_UPDATE_MAKE_CALL;
		else if (rec->flags & FTRACE_FL_REGS_EN)
			return FTRACE_UPDATE_MODIFY_CALL_REGS;
		else
			return FTRACE_UPDATE_MODIFY_CALL;
	}

	if (update) {
		/* If there's no more users, clear all flags */
		if (!(rec->flags & ~FTRACE_FL_MASK))
			rec->flags = 0;
		else
			/* Just disable the record (keep REGS state) */
			rec->flags &= ~FTRACE_FL_ENABLED;
	}

	return FTRACE_UPDATE_MAKE_NOP;
}

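/**
 * ftrace_update_record, set a record that now is tracing or not
 * @rec: the record to update
 * @enable: set to 1 if the record is tracing, zero to force disable
 *
 * The records that represent all functions that can be traced need
 * to be updated when tracing has been enabled.
 */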
int ftrace_update_record(struct dyn_ftrace *rec, int enable)
{
	return ftrace_check_record(rec, enable, 1);
}

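/**
 * ftrace_test_record, check if the record has been enabled or not
 * @rec: the record to test
 * @enable: set to 1 to check if enabled, 0 if it is disabled
 *
 * The arch code may need to test if a record is already set to
 * tracing to determine how to modify the function code that it
 * represents.
 */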
int ftrace_test_record(struct dyn_ftrace *rec, int enable)
{
	return ftrace_check_record(rec, enable, 0);
}

static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_old_addr;
	unsigned long ftrace_addr;
	int ret;

	ret = ftrace_update_record(rec, enable);

	if (rec->flags & FTRACE_FL_REGS)
		ftrace_addr = (unsigned long)FTRACE_REGS_ADDR;
	else
		ftrace_addr = (unsigned long)FTRACE_ADDR;

	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
		return 0;

	case FTRACE_UPDATE_MAKE_CALL:
		return ftrace_make_call(rec, ftrace_addr);

	case FTRACE_UPDATE_MAKE_NOP:
		return ftrace_make_nop(NULL, rec, ftrace_addr);

	case FTRACE_UPDATE_MODIFY_CALL_REGS:
	case FTRACE_UPDATE_MODIFY_CALL:
		if (rec->flags & FTRACE_FL_REGS)
			ftrace_old_addr = (unsigned long)FTRACE_ADDR;
		else
			ftrace_old_addr = (unsigned long)FTRACE_REGS_ADDR;

		return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
	}

	return -1; /* unknown ftrace bug */
}

void __weak ftrace_replace_code(int enable)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int failed;

	if (unlikely(ftrace_disabled))
		return;

	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

		failed = __ftrace_replace_code(rec, enable);
		if (failed) {
			ftrace_bug(failed, rec->ip);
			/* Stop processing */
			return;
		}
	} while_for_each_ftrace_rec();
}

struct ftrace_rec_iter {
	struct ftrace_page	*pg;
	int			index;
};

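/**
 * ftrace_rec_iter_start, start up iterating over traced functions
 *
 * Returns an iterator handle that is used to iterate over all
 * the records that represent address locations where functions
 * are traced.
 *
 * May return NULL if no records are available.
 */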
struct ftrace_rec_iter *ftrace_rec_iter_start(void)
{
	/*
	 * We only use a single iterator.
	 * Protected by the ftrace_lock mutex.
	 */
	static struct ftrace_rec_iter ftrace_rec_iter;
	struct ftrace_rec_iter *iter = &ftrace_rec_iter;

	iter->pg = ftrace_pages_start;
	iter->index = 0;

	/* Could have empty pages */
	while (iter->pg && !iter->pg->index)
		iter->pg = iter->pg->next;

	if (!iter->pg)
		return NULL;

	return iter;
}

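/**
 * ftrace_rec_iter_next, get the next record to process.
 * @iter: The handle to the iterator.
 *
 * Returns the next iterator after the given iterator @iter.
 */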
struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
{
	iter->index++;

	if (iter->index >= iter->pg->index) {
		iter->pg = iter->pg->next;
		iter->index = 0;

		/* Could have empty pages */
		while (iter->pg && !iter->pg->index)
			iter->pg = iter->pg->next;
	}

	if (!iter->pg)
		return NULL;

	return iter;
}

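/**
 * ftrace_rec_iter_record, get the record at the iterator location
 * @iter: The current iterator location
 *
 * Returns the record that the current @iter is at.
 */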
struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
{
	return &iter->pg->records[iter->index];
}

static int
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
{
	unsigned long ip;
	int ret;

	ip = rec->ip;

	if (unlikely(ftrace_disabled))
		return 0;

	ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
	if (ret) {
		ftrace_bug(ret, ip);
		return 0;
	}
	return 1;
}

/*
 * archs can override this function if they must do something
 * before the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_prepare(void)
{
	return 0;
}

/*
 * archs can override this function if they must do something
 * after the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_post_process(void)
{
	return 0;
}

void ftrace_modify_all_code(int command)
{
	if (command & FTRACE_UPDATE_CALLS)
		ftrace_replace_code(1);
	else if (command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (command & FTRACE_UPDATE_TRACE_FUNC) {
		function_trace_op = set_function_trace_op;
		smp_wmb();
		/* If irqs are disabled, we are in stop machine */
		if (!irqs_disabled())
			smp_call_function(ftrace_sync_ipi, NULL, 1);
		ftrace_update_ftrace_func(ftrace_trace_function);
	}

	if (command & FTRACE_START_FUNC_RET)
		ftrace_enable_ftrace_graph_caller();
	else if (command & FTRACE_STOP_FUNC_RET)
		ftrace_disable_ftrace_graph_caller();
}

static int __ftrace_modify_code(void *data)
{
	int *command = data;

	ftrace_modify_all_code(*command);

	return 0;
}

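/**
 * ftrace_run_stop_machine, go back to the stop machine method
 * @command: The command to tell ftrace what to do
 *
 * If an arch needs to fall back to the stop machine method, the
 * it can call this function.
 */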
void ftrace_run_stop_machine(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}

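/**
 * arch_ftrace_update_code, modify the code to trace or not trace
 * @command: The command that needs to be done
 *
 * Archs can override this function if it does not need to
 * run stop_machine() to modify code.
 */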
void __weak arch_ftrace_update_code(int command)
{
	ftrace_run_stop_machine(command);
}

static void ftrace_run_update_code(int command)
{
	int ret;

	ret = ftrace_arch_code_modify_prepare();
	FTRACE_WARN_ON(ret);
	if (ret)
		return;

	/*
	 * By default we use stop_machine() to modify the code.
	 * But archs can do what ever they want as long as it
	 * is safe. The stop_machine() is the safest, but also
	 * produces the most overhead.
	 */
	arch_ftrace_update_code(command);

	ret = ftrace_arch_code_modify_post_process();
	FTRACE_WARN_ON(ret);
}

static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;
static int global_start_up;

static void ftrace_startup_enable(int command)
{
	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}

static int ftrace_startup(struct ftrace_ops *ops, int command)
{
	bool hash_enable = true;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	ret = __register_ftrace_function(ops);
	if (ret)
		return ret;

	ftrace_start_up++;

	/* ops marked global share the filter hashes */
	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
		ops = &global_ops;
		/* Don't update hash if global is already set */
		if (global_start_up)
			hash_enable = false;
		global_start_up++;
	}

	ops->flags |= FTRACE_OPS_FL_ENABLED;

	ret = ftrace_hash_ipmodify_enable(ops);
	if (ret < 0) {
		/* Rollback registration process */
		__unregister_ftrace_function(ops);
		ftrace_start_up--;
		ops->flags &= ~FTRACE_OPS_FL_ENABLED;
		return ret;
	}

	if (hash_enable && ftrace_hash_rec_enable(ops, 1))
		command |= FTRACE_UPDATE_CALLS;

	ftrace_startup_enable(command);

	return 0;
}

static int ftrace_shutdown(struct ftrace_ops *ops, int command)
{
	bool hash_disable = true;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	ret = __unregister_ftrace_function(ops);
	if (ret)
		return ret;

	ftrace_start_up--;
	/*
	 * Just warn in case of unbalance, no need to kill ftrace, it's not
	 * critical but the ftrace_call callers may be never nopped again after
	 * further ftrace uses.
	 */
	WARN_ON_ONCE(ftrace_start_up < 0);

	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
		ops = &global_ops;
		global_start_up--;
		WARN_ON_ONCE(global_start_up < 0);
		/* Don't update hash if global still has users */
		if (global_start_up) {
			WARN_ON_ONCE(!ftrace_start_up);
			hash_disable = false;
		}
	}

	/* Disabling ipmodify never fails */
	ftrace_hash_ipmodify_disable(ops);

	if (hash_disable && ftrace_hash_rec_disable(ops, 1))
		command |= FTRACE_UPDATE_CALLS;

	if (ops != &global_ops || !global_start_up)
		ops->flags &= ~FTRACE_OPS_FL_ENABLED;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled) {
		/*
		 * If these are control ops, they still need their
		 * per_cpu field freed. Since, function tracing is
		 * not currently active, we can just free them
		 * without synchronizing all CPUs.
		 */
		if (ops->flags & FTRACE_OPS_FL_CONTROL)
			control_ops_free(ops);
		return 0;
	}

	ftrace_run_update_code(command);

	/*
	 * Dynamic ops may be freed, we must make sure that all
	 * callers are done before leaving this function.
	 * The same goes for freeing the per_cpu data of the control
	 * ops.
	 *
	 * Again, normal synchronize_sched() is not good enough.
	 * We need to do a hard force of sched synchronization.
	 * This is because we use preempt_disable() to do RCU, but
	 * the function tracers can be called where RCU is not watching
	 * (like before user_exit()). We can not rely on the RCU
	 * infrastructure to do the synchronization, thus we must do it
	 * ourselves.
	 */
	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL)) {
		schedule_on_each_cpu(ftrace_sync);

		if (ops->flags & FTRACE_OPS_FL_CONTROL)
			control_ops_free(ops);
	}

	return 0;
}

static void ftrace_startup_sysctl(void)
{
	if (unlikely(ftrace_disabled))
		return;

	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up)
		ftrace_run_update_code(FTRACE_UPDATE_CALLS);
}

static void ftrace_shutdown_sysctl(void)
{
	if (unlikely(ftrace_disabled))
		return;

	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up)
		ftrace_run_update_code(FTRACE_DISABLE_CALLS);
}

static u64		ftrace_update_time;
unsigned long		ftrace_update_tot_cnt;

static inline int ops_traces_mod(struct ftrace_ops *ops)
{
	/*
	 * Filter_hash being empty will default to trace module.
	 * But notrace hash requires a test of individual module functions.
	 */
	return ftrace_hash_empty(ops->filter_hash) &&
		ftrace_hash_empty(ops->notrace_hash);
}

/*
 * Check if the current ops references the record.
 *
 * If the ops traces all functions, then it was already accounted for.
 * If the ops does not trace the current record function, skip it.
 * If the ops ignores the function via notrace filter, skip it.
 */
static inline bool
ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	/* If ops isn't enabled, ignore it */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return 0;

	/* If ops traces all mods, we already accounted for it */
	if (ops_traces_mod(ops))
		return 1;

	/* The function must be in the filter */
	if (!ftrace_hash_empty(ops->filter_hash) &&
	    !ftrace_lookup_ip(ops->filter_hash, rec->ip))
		return 0;

	/* If in notrace hash, we ignore it too */
	if (ftrace_lookup_ip(ops->notrace_hash, rec->ip))
		return 0;

	return 1;
}

static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *p;
	u64 start, stop;
	unsigned long update_cnt = 0;
	unsigned long rec_flags = 0;
	int i;

	start = ftrace_now(raw_smp_processor_id());

	/*
	 * When a module is loaded, this function is called to convert
	 * the calls to mcount in its text to nops, and also to create
	 * an entry in the ftrace data. Now, if ftrace is activated
	 * after this call, but before the module sets its text to
	 * read-only, the modification of enabling ftrace can fail if
	 * the read-only is done while ftrace is converting the calls.
	 * To prevent this, the module's records are set as disabled
	 * and will be enabled after the call to set the module's text
	 * to read-only.
	 */
	if (mod)
		rec_flags |= FTRACE_FL_DISABLED;

	for (pg = new_pgs; pg; pg = pg->next) {

		for (i = 0; i < pg->index; i++) {

			/* If something went wrong, bail without enabling anything */
			if (unlikely(ftrace_disabled))
				return -1;

			p = &pg->records[i];
			p->flags = rec_flags;

			/*
			 * Do the initial record conversion from mcount jump
			 * to the NOP instructions.
			 */
			if (!ftrace_code_disable(mod, p))
				break;

			update_cnt++;
		}
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += update_cnt;

	return 0;
}

static int ftrace_allocate_records(struct ftrace_page *pg, int count)
{
	int order;
	int cnt;

	if (WARN_ON(!count))
		return -EINVAL;

	order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE));

	/*
	 * We want to fill as much as possible. No more than a page
	 * may be empty.
	 */
	while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE)
		order--;

 again:
	pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);

	if (!pg->records) {
		/* if we can't allocate this size, try something smaller */
		if (!order)
			return -ENOMEM;
		order >>= 1;
		goto again;
	}

	cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
	pg->size = cnt;

	if (cnt > count)
		cnt = count;

	return cnt;
}

static struct ftrace_page *
ftrace_allocate_pages(unsigned long num_to_init)
{
	struct ftrace_page *start_pg;
	struct ftrace_page *pg;
	int order;
	int cnt;

	if (!num_to_init)
		return 0;

	start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
	if (!pg)
		return NULL;

	/*
	 * Try to allocate as much as possible in one continues
	 * location that fills in all of the space. We want to
	 * waste as little space as possible.
	 */
	for (;;) {
		cnt = ftrace_allocate_records(pg, num_to_init);
		if (cnt < 0)
			goto free_pages;

		num_to_init -= cnt;
		if (!num_to_init)
			break;

		pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
		if (!pg->next)
			goto free_pages;

		pg = pg->next;
	}

	return start_pg;

 free_pages:
	pg = start_pg;
	while (pg) {
		order = get_count_order(pg->size / ENTRIES_PER_PAGE);
		free_pages((unsigned long)pg->records, order);
		start_pg = pg->next;
		kfree(pg);
		pg = start_pg;
	}
	pr_info("ftrace: FAILED to allocate memory for functions\n");
	return NULL;
}

static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	int cnt;

	if (!num_to_init) {
		pr_info("ftrace: No functions to be traced?\n");
		return -1;
	}

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld entries in %d pages\n",
		num_to_init, cnt + 1);

	return 0;
}

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4)

struct ftrace_iterator {
	loff_t				pos;
	loff_t				func_pos;
	struct ftrace_page		*pg;
	struct dyn_ftrace		*func;
	struct ftrace_func_probe	*probe;
	struct trace_parser		parser;
	struct ftrace_hash		*hash;
	struct ftrace_ops		*ops;
	int				hidx;
	int				idx;
	unsigned			flags;
};

static void *
t_hash_next(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct hlist_node *hnd = NULL;
	struct hlist_head *hhd;

	(*pos)++;
	iter->pos = *pos;

	if (iter->probe)
		hnd = &iter->probe->node;
 retry:
	if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
		return NULL;

	hhd = &ftrace_func_hash[iter->hidx];

	if (hlist_empty(hhd)) {
		iter->hidx++;
		hnd = NULL;
		goto retry;
	}

	if (!hnd)
		hnd = hhd->first;
	else {
		hnd = hnd->next;
		if (!hnd) {
			iter->hidx++;
			goto retry;
		}
	}

	if (WARN_ON_ONCE(!hnd))
		return NULL;

	iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);

	return iter;
}

static void *t_hash_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l;

	if (!(iter->flags & FTRACE_ITER_DO_HASH))
		return NULL;

	if (iter->func_pos > *pos)
		return NULL;

	iter->hidx = 0;
	for (l = 0; l <= (*pos - iter->func_pos); ) {
		p = t_hash_next(m, &l);
		if (!p)
			break;
	}
	if (!p)
		return NULL;

	/* Only set this if we have an item */
	iter->flags |= FTRACE_ITER_HASH;

	return iter;
}

static int
t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
{
	struct ftrace_func_probe *rec;

	rec = iter->probe;
	if (WARN_ON_ONCE(!rec))
		return -EIO;

	if (rec->ops->print)
		return rec->ops->print(m, rec->ip, rec->ops, rec->data);

	seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);

	if (rec->data)
		seq_printf(m, ":%p", rec->data);
	seq_putc(m, '\n');

	return 0;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct ftrace_ops *ops = iter->ops;
	struct dyn_ftrace *rec = NULL;

	if (unlikely(ftrace_disabled))
		return NULL;

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_next(m, pos);

	(*pos)++;
	iter->pos = iter->func_pos = *pos;

	if (iter->flags & FTRACE_ITER_PRINTALL)
		return t_hash_start(m, pos);

 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if (((iter->flags & FTRACE_ITER_FILTER) &&
		     !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||

		    ((iter->flags & FTRACE_ITER_ENABLED) &&
		     !(rec->flags & FTRACE_FL_ENABLED))) {

			rec = NULL;
			goto retry;
		}
	}

	if (!rec)
		return t_hash_start(m, pos);

	iter->func = rec;

	return iter;
}

static void reset_iter_read(struct ftrace_iterator *iter)
{
	iter->pos = 0;
	iter->func_pos = 0;
	iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct ftrace_ops *ops = iter->ops;
	void *p = NULL;
	loff_t l;

	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled))
		return NULL;

	/*
	 * If an lseek was done, then reset and start from beginning.
	 */
	if (*pos < iter->pos)
		reset_iter_read(iter);

	/*
	 * For set_ftrace_filter reading, if we have the filter
	 * off, we can short cut and just print out that all
	 * functions are enabled.
	 */
	if (iter->flags & FTRACE_ITER_FILTER &&
	    ftrace_hash_empty(ops->filter_hash)) {
		if (*pos > 0)
			return t_hash_start(m, pos);
		iter->flags |= FTRACE_ITER_PRINTALL;
		/* reset in case of seek/pread */
		iter->flags &= ~FTRACE_ITER_HASH;
		return iter;
	}

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_start(m, pos);

	/*
	 * Unfortunately, we need to restart at ftrace_pages_start
	 * every time we let go of the ftrace_lock. This is because
	 * those pointers can change without the lock.
	 */
	iter->pg = ftrace_pages_start;
	iter->idx = 0;
	for (l = 0; l <= *pos; ) {
		p = t_next(m, p, &l);
		if (!p)
			break;
	}

	if (!p)
		return t_hash_start(m, pos);

	return iter;
}
2798
2799static int t_show(struct seq_file *m, void *v)
2800{
2801 struct ftrace_iterator *iter = m->private;
2802 struct dyn_ftrace *rec;
2803
2804 if (iter->flags & FTRACE_ITER_HASH)
2805 return t_hash_show(m, iter);
2806
2807 if (iter->flags & FTRACE_ITER_PRINTALL) {
2808 seq_printf(m, "#### all functions enabled ####\n");
2809 return 0;
2810 }
2811
2812 rec = iter->func;
2813
2814 if (!rec)
2815 return 0;
2816
2817 seq_printf(m, "%ps", (void *)rec->ip);
2818 if (iter->flags & FTRACE_ITER_ENABLED)
2819 seq_printf(m, " (%ld)%s%s",
2820 rec->flags & ~FTRACE_FL_MASK,
2821 rec->flags & FTRACE_FL_REGS ? " R" : " ",
2822 rec->flags & FTRACE_FL_IPMODIFY ? " I" : " ");
2823 seq_printf(m, "\n");
2824
2825 return 0;
2826}
2827
2828static const struct seq_operations show_ftrace_seq_ops = {
2829 .start = t_start,
2830 .next = t_next,
2831 .stop = t_stop,
2832 .show = t_show,
2833};
2834
2835static int
2836ftrace_avail_open(struct inode *inode, struct file *file)
2837{
2838 struct ftrace_iterator *iter;
2839
2840 if (unlikely(ftrace_disabled))
2841 return -ENODEV;
2842
2843 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
2844 if (iter) {
2845 iter->pg = ftrace_pages_start;
2846 iter->ops = &global_ops;
2847 }
2848
2849 return iter ? 0 : -ENOMEM;
2850}
2851
2852static int
2853ftrace_enabled_open(struct inode *inode, struct file *file)
2854{
2855 struct ftrace_iterator *iter;
2856
2857 if (unlikely(ftrace_disabled))
2858 return -ENODEV;
2859
2860 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
2861 if (iter) {
2862 iter->pg = ftrace_pages_start;
2863 iter->flags = FTRACE_ITER_ENABLED;
2864 iter->ops = &global_ops;
2865 }
2866
2867 return iter ? 0 : -ENOMEM;
2868}
2869
2870static void ftrace_filter_reset(struct ftrace_hash *hash)
2871{
2872 mutex_lock(&ftrace_lock);
2873 ftrace_hash_clear(hash);
2874 mutex_unlock(&ftrace_lock);
2875}
2876
2877
2878
2879
2880
2881
2882
2883
2884
2885
2886
2887
2888
2889
2890
2891
2892
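/**
 * ftrace_regex_open - initialize function tracer filter files
 * @ops: The ftrace_ops that hold the hash filters
 * @flag: The type of filter to process
 * @inode: The inode, usually passed in to your open routine
 * @file: The file to be opened, usually passed in to your open routine
 *
 * ftrace_regex_open() initializes the filter files for the
 * @ops. Depending on @flag it may process the filter hash or
 * the notrace hash of @ops. With this called from the open
 * routine, you can use ftrace_filter_write() and
 * ftrace_notrace_write() to process the filter buffer.
 * Also, they can call ftrace_regex_release() when the file is closed.
 */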
2893int
2894ftrace_regex_open(struct ftrace_ops *ops, int flag,
2895 struct inode *inode, struct file *file)
2896{
2897 struct ftrace_iterator *iter;
2898 struct ftrace_hash *hash;
2899 int ret = 0;
2900
2901 ftrace_ops_init(ops);
2902
2903 if (unlikely(ftrace_disabled))
2904 return -ENODEV;
2905
2906 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2907 if (!iter)
2908 return -ENOMEM;
2909
2910 if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
2911 kfree(iter);
2912 return -ENOMEM;
2913 }
2914
2915 iter->ops = ops;
2916 iter->flags = flag;
2917
2918 mutex_lock(&ops->regex_lock);
2919
2920 if (flag & FTRACE_ITER_NOTRACE)
2921 hash = ops->notrace_hash;
2922 else
2923 hash = ops->filter_hash;
2924
2925 if (file->f_mode & FMODE_WRITE) {
2926 iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
2927 if (!iter->hash) {
2928 trace_parser_put(&iter->parser);
2929 kfree(iter);
2930 ret = -ENOMEM;
2931 goto out_unlock;
2932 }
2933 }
2934
2935 if ((file->f_mode & FMODE_WRITE) &&
2936 (file->f_flags & O_TRUNC))
2937 ftrace_filter_reset(iter->hash);
2938
2939 if (file->f_mode & FMODE_READ) {
2940 iter->pg = ftrace_pages_start;
2941
2942 ret = seq_open(file, &show_ftrace_seq_ops);
2943 if (!ret) {
2944 struct seq_file *m = file->private_data;
2945 m->private = iter;
2946 } else {
2947
2948 free_ftrace_hash(iter->hash);
2949 trace_parser_put(&iter->parser);
2950 kfree(iter);
2951 }
2952 } else
2953 file->private_data = iter;
2954
2955 out_unlock:
2956 mutex_unlock(&ops->regex_lock);
2957
2958 return ret;
2959}
2960
2961static int
2962ftrace_filter_open(struct inode *inode, struct file *file)
2963{
2964 return ftrace_regex_open(&global_ops,
2965 FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH,
2966 inode, file);
2967}
2968
2969static int
2970ftrace_notrace_open(struct inode *inode, struct file *file)
2971{
2972 return ftrace_regex_open(&global_ops, FTRACE_ITER_NOTRACE,
2973 inode, file);
2974}
2975
2976static int ftrace_match(char *str, char *regex, int len, int type)
2977{
2978 int matched = 0;
2979 int slen;
2980
2981 switch (type) {
2982 case MATCH_FULL:
2983 if (strcmp(str, regex) == 0)
2984 matched = 1;
2985 break;
2986 case MATCH_FRONT_ONLY:
2987 if (strncmp(str, regex, len) == 0)
2988 matched = 1;
2989 break;
2990 case MATCH_MIDDLE_ONLY:
2991 if (strstr(str, regex))
2992 matched = 1;
2993 break;
2994 case MATCH_END_ONLY:
2995 slen = strlen(str);
2996 if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
2997 matched = 1;
2998 break;
2999 }
3000
3001 return matched;
3002}
3003
3004static int
3005enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
3006{
3007 struct ftrace_func_entry *entry;
3008 int ret = 0;
3009
3010 entry = ftrace_lookup_ip(hash, rec->ip);
3011 if (not) {
3012
3013 if (!entry)
3014 return 0;
3015
3016 free_hash_entry(hash, entry);
3017 } else {
3018
3019 if (entry)
3020 return 0;
3021
3022 ret = add_hash_entry(hash, rec->ip);
3023 }
3024 return ret;
3025}
3026
3027static int
3028ftrace_match_record(struct dyn_ftrace *rec, char *mod,
3029 char *regex, int len, int type)
3030{
3031 char str[KSYM_SYMBOL_LEN];
3032 char *modname;
3033
3034 kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
3035
3036 if (mod) {
3037
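		/* module lookup requires matching the module */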
3038 if (!modname || strcmp(modname, mod))
3039 return 0;
3040
3041
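		/* blank search means to match all funcs in the mod */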
3042 if (!len)
3043 return 1;
3044 }
3045
3046 return ftrace_match(str, regex, len, type);
3047}
3048
3049static int
3050match_records(struct ftrace_hash *hash, char *buff,
3051 int len, char *mod, int not)
3052{
3053 unsigned search_len = 0;
3054 struct ftrace_page *pg;
3055 struct dyn_ftrace *rec;
3056 int type = MATCH_FULL;
3057 char *search = buff;
3058 int found = 0;
3059 int ret;
3060
3061 if (len) {
3062 type = filter_parse_regex(buff, len, &search, &not);
3063 search_len = strlen(search);
3064 }
3065
3066 mutex_lock(&ftrace_lock);
3067
3068 if (unlikely(ftrace_disabled))
3069 goto out_unlock;
3070
3071 do_for_each_ftrace_rec(pg, rec) {
3072
3073 if (rec->flags & FTRACE_FL_DISABLED)
3074 continue;
3075
3076 if (ftrace_match_record(rec, mod, search, search_len, type)) {
3077 ret = enter_record(hash, rec, not);
3078 if (ret < 0) {
3079 found = ret;
3080 goto out_unlock;
3081 }
3082 found = 1;
3083 }
3084 } while_for_each_ftrace_rec();
3085 out_unlock:
3086 mutex_unlock(&ftrace_lock);
3087
3088 return found;
3089}
3090
3091static int
3092ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
3093{
3094 return match_records(hash, buff, len, NULL, 0);
3095}
3096
3097static int
3098ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
3099{
3100 int not = 0;
3101
3102
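	/* blank or '*' mean the same */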
3103 if (strcmp(buff, "*") == 0)
3104 buff[0] = 0;
3105
3106
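	/* handle the case of 'dont filter this module' */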
3107 if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
3108 buff[0] = 0;
3109 not = 1;
3110 }
3111
3112 return match_records(hash, buff, strlen(buff), mod, not);
3113}
3114
3115
3116
3117
3118
3119
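/*
 * We register the module command as a template to show others how
 * to register a command as well.
 */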
3120static int
3121ftrace_mod_callback(struct ftrace_hash *hash,
3122 char *func, char *cmd, char *param, int enable)
3123{
3124 char *mod;
3125 int ret = -EINVAL;
3126
3127
3128
3129
3130
3131
3132
3133
3134
3135
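	/*
	 * cmd == 'mod' because we only registered this func
	 * for the 'mod' ftrace_func_command.
	 * But if you register one func with multiple commands,
	 * you can tell which command was used by the cmd
	 * parameter.
	 */

	/* we must have a module name */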
3136 if (!param)
3137 return ret;
3138
3139 mod = strsep(&param, ":");
3140 if (!strlen(mod))
3141 return ret;
3142
3143 ret = ftrace_match_module_records(hash, func, mod);
3144 if (!ret)
3145 ret = -EINVAL;
3146 if (ret < 0)
3147 return ret;
3148
3149 return 0;
3150}
3151
3152static struct ftrace_func_command ftrace_mod_cmd = {
3153 .name = "mod",
3154 .func = ftrace_mod_callback,
3155};
3156
3157static int __init ftrace_mod_cmd_init(void)
3158{
3159 return register_ftrace_command(&ftrace_mod_cmd);
3160}
3161core_initcall(ftrace_mod_cmd_init);
3162
3163static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
3164 struct ftrace_ops *op, struct pt_regs *pt_regs)
3165{
3166 struct ftrace_func_probe *entry;
3167 struct hlist_head *hhd;
3168 unsigned long key;
3169
3170 key = hash_long(ip, FTRACE_HASH_BITS);
3171
3172 hhd = &ftrace_func_hash[key];
3173
3174 if (hlist_empty(hhd))
3175 return;
3176
3177
3178
3179
3180
3181
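	/*
	 * Disable preemption for these calls to prevent a RCU grace
	 * period. This syncs the hash iteration and freeing of items
	 * on the hash. rcu_read_lock is too dangerous here.
	 */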
3182 preempt_disable_notrace();
3183 hlist_for_each_entry_rcu_notrace(entry, hhd, node) {
3184 if (entry->ip == ip)
3185 entry->ops->func(ip, parent_ip, &entry->data);
3186 }
3187 preempt_enable_notrace();
3188}
3189
3190static struct ftrace_ops trace_probe_ops __read_mostly =
3191{
3192 .func = function_trace_probe_call,
3193 .flags = FTRACE_OPS_FL_INITIALIZED,
3194 INIT_REGEX_LOCK(trace_probe_ops)
3195};
3196
3197static int ftrace_probe_registered;
3198
3199static void __enable_ftrace_function_probe(void)
3200{
3201 int ret;
3202 int i;
3203
3204 if (ftrace_probe_registered) {
3205
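		/* still need to update the function call sites */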
3206 if (ftrace_enabled)
3207 ftrace_run_update_code(FTRACE_UPDATE_CALLS);
3208 return;
3209 }
3210
3211 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3212 struct hlist_head *hhd = &ftrace_func_hash[i];
3213 if (hhd->first)
3214 break;
3215 }
3216
3217 if (i == FTRACE_FUNC_HASHSIZE)
3218 return;
3219
3220 ret = ftrace_startup(&trace_probe_ops, 0);
3221
3222 ftrace_probe_registered = 1;
3223}
3224
3225static void __disable_ftrace_function_probe(void)
3226{
3227 int i;
3228
3229 if (!ftrace_probe_registered)
3230 return;
3231
3232 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3233 struct hlist_head *hhd = &ftrace_func_hash[i];
3234 if (hhd->first)
3235 return;
3236 }
3237
3238
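	/* no more funcs left */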
3239 ftrace_shutdown(&trace_probe_ops, 0);
3240
3241 ftrace_probe_registered = 0;
3242}
3243
3244
3245static void ftrace_free_entry(struct ftrace_func_probe *entry)
3246{
3247 if (entry->ops->free)
3248 entry->ops->free(entry->ops, entry->ip, &entry->data);
3249 kfree(entry);
3250}
3251
3252int
3253register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3254 void *data)
3255{
3256 struct ftrace_func_probe *entry;
3257 struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash;
3258 struct ftrace_hash *hash;
3259 struct ftrace_page *pg;
3260 struct dyn_ftrace *rec;
3261 int type, len, not;
3262 unsigned long key;
3263 int count = 0;
3264 char *search;
3265 int ret;
3266
3267 type = filter_parse_regex(glob, strlen(glob), &search, &not);
3268 len = strlen(search);
3269
3270
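	/* we do not support '!' for function probes */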
3271 if (WARN_ON(not))
3272 return -EINVAL;
3273
3274 mutex_lock(&trace_probe_ops.regex_lock);
3275
3276 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3277 if (!hash) {
3278 count = -ENOMEM;
3279 goto out;
3280 }
3281
3282 if (unlikely(ftrace_disabled)) {
3283 count = -ENODEV;
3284 goto out;
3285 }
3286
3287 mutex_lock(&ftrace_lock);
3288
3289 do_for_each_ftrace_rec(pg, rec) {
3290
3291 if (rec->flags & FTRACE_FL_DISABLED)
3292 continue;
3293
3294 if (!ftrace_match_record(rec, NULL, search, len, type))
3295 continue;
3296
3297 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
3298 if (!entry) {
3299
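			/* If we did not process any, then return error */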
3300 if (!count)
3301 count = -ENOMEM;
3302 goto out_unlock;
3303 }
3304
3305 count++;
3306
3307 entry->data = data;
3308
3309
3310
3311
3312
3313
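		/*
		 * The caller might want to do something special
		 * for each function we find. We call the callback
		 * to give the caller an opportunity to do so.
		 */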
3314 if (ops->init) {
3315 if (ops->init(ops, rec->ip, &entry->data) < 0) {
3316
3317 kfree(entry);
3318 continue;
3319 }
3320 }
3321
3322 ret = enter_record(hash, rec, 0);
3323 if (ret < 0) {
3324 kfree(entry);
3325 count = ret;
3326 goto out_unlock;
3327 }
3328
3329 entry->ops = ops;
3330 entry->ip = rec->ip;
3331
3332 key = hash_long(entry->ip, FTRACE_HASH_BITS);
3333 hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
3334
3335 } while_for_each_ftrace_rec();
3336
3337 ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
3338 if (ret < 0)
3339 count = ret;
3340
3341 __enable_ftrace_function_probe();
3342
3343 out_unlock:
3344 mutex_unlock(&ftrace_lock);
3345 out:
3346 mutex_unlock(&trace_probe_ops.regex_lock);
3347 free_ftrace_hash(hash);
3348
3349 return count;
3350}
3351
3352enum {
3353 PROBE_TEST_FUNC = 1,
3354 PROBE_TEST_DATA = 2
3355};
3356
3357static void
3358__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3359 void *data, int flags)
3360{
3361 struct ftrace_func_entry *rec_entry;
3362 struct ftrace_func_probe *entry;
3363 struct ftrace_func_probe *p;
3364 struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash;
3365 struct list_head free_list;
3366 struct ftrace_hash *hash;
3367 struct hlist_node *tmp;
3368 char str[KSYM_SYMBOL_LEN];
3369 int type = MATCH_FULL;
3370 int i, len = 0;
3371 char *search;
3372
3373 if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
3374 glob = NULL;
3375 else if (glob) {
3376 int not;
3377
3378 type = filter_parse_regex(glob, strlen(glob), &search, &not);
3379 len = strlen(search);
3380
3381
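		/* we do not support '!' for function probes */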
3382 if (WARN_ON(not))
3383 return;
3384 }
3385
3386 mutex_lock(&trace_probe_ops.regex_lock);
3387
3388 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3389 if (!hash)
3390
3391 goto out_unlock;
3392
3393 INIT_LIST_HEAD(&free_list);
3394
3395 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3396 struct hlist_head *hhd = &ftrace_func_hash[i];
3397
3398 hlist_for_each_entry_safe(entry, tmp, hhd, node) {
3399
3400
3401 if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
3402 continue;
3403
3404 if ((flags & PROBE_TEST_DATA) && entry->data != data)
3405 continue;
3406
3407
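			/* do this last, since it is the most expensive */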
3408 if (glob) {
3409 kallsyms_lookup(entry->ip, NULL, NULL,
3410 NULL, str);
3411 if (!ftrace_match(str, glob, len, type))
3412 continue;
3413 }
3414
3415 rec_entry = ftrace_lookup_ip(hash, entry->ip);
3416
3417 if (rec_entry)
3418 free_hash_entry(hash, rec_entry);
3419
3420 hlist_del_rcu(&entry->node);
3421 list_add(&entry->free_list, &free_list);
3422 }
3423 }
3424 mutex_lock(&ftrace_lock);
3425 __disable_ftrace_function_probe();
3426
3427
3428
3429
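	/*
	 * Remove after the disable is called. Otherwise, if the last
	 * probe is removed, a null hash means *all enabled*.
	 */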
3430 ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
3431 synchronize_sched();
3432 list_for_each_entry_safe(entry, p, &free_list, free_list) {
3433 list_del(&entry->free_list);
3434 ftrace_free_entry(entry);
3435 }
3436 mutex_unlock(&ftrace_lock);
3437
3438 out_unlock:
3439 mutex_unlock(&trace_probe_ops.regex_lock);
3440 free_ftrace_hash(hash);
3441}
3442
3443void
3444unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3445 void *data)
3446{
3447 __unregister_ftrace_function_probe(glob, ops, data,
3448 PROBE_TEST_FUNC | PROBE_TEST_DATA);
3449}
3450
3451void
3452unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
3453{
3454 __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
3455}
3456
3457void unregister_ftrace_function_probe_all(char *glob)
3458{
3459 __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
3460}
3461
3462static LIST_HEAD(ftrace_commands);
3463static DEFINE_MUTEX(ftrace_cmd_mutex);
3464
3465
3466
3467
3468
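/*
 * Currently we only register ftrace commands from __init, so mark this
 * __init too.
 */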
3469__init int register_ftrace_command(struct ftrace_func_command *cmd)
3470{
3471 struct ftrace_func_command *p;
3472 int ret = 0;
3473
3474 mutex_lock(&ftrace_cmd_mutex);
3475 list_for_each_entry(p, &ftrace_commands, list) {
3476 if (strcmp(cmd->name, p->name) == 0) {
3477 ret = -EBUSY;
3478 goto out_unlock;
3479 }
3480 }
3481 list_add(&cmd->list, &ftrace_commands);
3482 out_unlock:
3483 mutex_unlock(&ftrace_cmd_mutex);
3484
3485 return ret;
3486}
3487
3488
3489
3490
3491
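/*
 * Currently we only unregister ftrace commands from __init, so mark
 * this __init too.
 */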
3492__init int unregister_ftrace_command(struct ftrace_func_command *cmd)
3493{
3494 struct ftrace_func_command *p, *n;
3495 int ret = -ENODEV;
3496
3497 mutex_lock(&ftrace_cmd_mutex);
3498 list_for_each_entry_safe(p, n, &ftrace_commands, list) {
3499 if (strcmp(cmd->name, p->name) == 0) {
3500 ret = 0;
3501 list_del_init(&p->list);
3502 goto out_unlock;
3503 }
3504 }
3505 out_unlock:
3506 mutex_unlock(&ftrace_cmd_mutex);
3507
3508 return ret;
3509}
3510
3511static int ftrace_process_regex(struct ftrace_hash *hash,
3512 char *buff, int len, int enable)
3513{
3514 char *func, *command, *next = buff;
3515 struct ftrace_func_command *p;
3516 int ret = -EINVAL;
3517
3518 func = strsep(&next, ":");
3519
3520 if (!next) {
3521 ret = ftrace_match_records(hash, func, len);
3522 if (!ret)
3523 ret = -EINVAL;
3524 if (ret < 0)
3525 return ret;
3526 return 0;
3527 }
3528
3529
3530
3531 command = strsep(&next, ":");
3532
3533 mutex_lock(&ftrace_cmd_mutex);
3534 list_for_each_entry(p, &ftrace_commands, list) {
3535 if (strcmp(p->name, command) == 0) {
3536 ret = p->func(hash, func, command, next, enable);
3537 goto out_unlock;
3538 }
3539 }
3540 out_unlock:
3541 mutex_unlock(&ftrace_cmd_mutex);
3542
3543 return ret;
3544}
3545
3546static ssize_t
3547ftrace_regex_write(struct file *file, const char __user *ubuf,
3548 size_t cnt, loff_t *ppos, int enable)
3549{
3550 struct ftrace_iterator *iter;
3551 struct trace_parser *parser;
3552 ssize_t ret, read;
3553
3554 if (!cnt)
3555 return 0;
3556
3557 if (file->f_mode & FMODE_READ) {
3558 struct seq_file *m = file->private_data;
3559 iter = m->private;
3560 } else
3561 iter = file->private_data;
3562
3563 if (unlikely(ftrace_disabled))
3564 return -ENODEV;
3565
3566
3567
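	/* iter->hash is a local copy, so we don't need regex_lock */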
3568 parser = &iter->parser;
3569 read = trace_get_user(parser, ubuf, cnt, ppos);
3570
3571 if (read >= 0 && trace_parser_loaded(parser) &&
3572 !trace_parser_cont(parser)) {
3573 ret = ftrace_process_regex(iter->hash, parser->buffer,
3574 parser->idx, enable);
3575 trace_parser_clear(parser);
3576 if (ret < 0)
3577 goto out;
3578 }
3579
3580 ret = read;
3581 out:
3582 return ret;
3583}
3584
3585ssize_t
3586ftrace_filter_write(struct file *file, const char __user *ubuf,
3587 size_t cnt, loff_t *ppos)
3588{
3589 return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
3590}
3591
3592ssize_t
3593ftrace_notrace_write(struct file *file, const char __user *ubuf,
3594 size_t cnt, loff_t *ppos)
3595{
3596 return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
3597}
3598
3599static int
3600ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
3601{
3602 struct ftrace_func_entry *entry;
3603
3604 if (!ftrace_location(ip))
3605 return -EINVAL;
3606
3607 if (remove) {
3608 entry = ftrace_lookup_ip(hash, ip);
3609 if (!entry)
3610 return -ENOENT;
3611 free_hash_entry(hash, entry);
3612 return 0;
3613 }
3614
3615 return add_hash_entry(hash, ip);
3616}
3617
3618static int
3619ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
3620 unsigned long ip, int remove, int reset, int enable)
3621{
3622 struct ftrace_hash **orig_hash;
3623 struct ftrace_hash *hash;
3624 int ret;
3625
3626
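	/* All global ops use the global ops filters */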
3627 if (ops->flags & FTRACE_OPS_FL_GLOBAL)
3628 ops = &global_ops;
3629
3630 if (unlikely(ftrace_disabled))
3631 return -ENODEV;
3632
3633 mutex_lock(&ops->regex_lock);
3634
3635 if (enable)
3636 orig_hash = &ops->filter_hash;
3637 else
3638 orig_hash = &ops->notrace_hash;
3639
3640 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3641 if (!hash) {
3642 ret = -ENOMEM;
3643 goto out_regex_unlock;
3644 }
3645
3646 if (reset)
3647 ftrace_filter_reset(hash);
3648 if (buf && !ftrace_match_records(hash, buf, len)) {
3649 ret = -EINVAL;
3650 goto out_regex_unlock;
3651 }
3652 if (ip) {
3653 ret = ftrace_match_addr(hash, ip, remove);
3654 if (ret < 0)
3655 goto out_regex_unlock;
3656 }
3657
3658 mutex_lock(&ftrace_lock);
3659 ret = ftrace_hash_move(ops, enable, orig_hash, hash);
3660 if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED
3661 && ftrace_enabled)
3662 ftrace_run_update_code(FTRACE_UPDATE_CALLS);
3663
3664 mutex_unlock(&ftrace_lock);
3665
3666 out_regex_unlock:
3667 mutex_unlock(&ops->regex_lock);
3668
3669 free_ftrace_hash(hash);
3670 return ret;
3671}
3672
3673static int
3674ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
3675 int reset, int enable)
3676{
3677 return ftrace_set_hash(ops, 0, 0, ip, remove, reset, enable);
3678}
3679
3680
3681
3682
3683
3684
3685
3686
3687
3688
3689
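/**
 * ftrace_set_filter_ip - set a function to filter on in ftrace by address
 * @ops: the ops to set the filter with
 * @ip: the address to add to or remove from the filter.
 * @remove: non zero to remove the ip from the filter
 * @reset: non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @ip is NULL, it fails to update the filter.
 */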
3690int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
3691 int remove, int reset)
3692{
3693 ftrace_ops_init(ops);
3694 return ftrace_set_addr(ops, ip, remove, reset, 1);
3695}
3696EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
3697
3698static int
3699ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
3700 int reset, int enable)
3701{
3702 return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable);
3703}
3704
3705
3706
3707
3708
3709
3710
3711
3712
3713
3714
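/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @ops: the ops to set the filter with
 * @buf: the string that holds the function filter text.
 * @len: the length of the string.
 * @reset: non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */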
3715int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
3716 int len, int reset)
3717{
3718 ftrace_ops_init(ops);
3719 return ftrace_set_regex(ops, buf, len, reset, 1);
3720}
3721EXPORT_SYMBOL_GPL(ftrace_set_filter);
3722
3723
3724
3725
3726
3727
3728
3729
3730
3731
3732
3733
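/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @ops: the ops to set the notrace filter with
 * @buf: the string that holds the function notrace text.
 * @len: the length of the string.
 * @reset: non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */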
3734int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
3735 int len, int reset)
3736{
3737 ftrace_ops_init(ops);
3738 return ftrace_set_regex(ops, buf, len, reset, 0);
3739}
3740EXPORT_SYMBOL_GPL(ftrace_set_notrace);
3741
3742
3743
3744
3745
3746
3747
3748
3749
3750
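/**
 * ftrace_set_global_filter - set a function to filter on with global tracers
 * @buf: the string that holds the function filter text.
 * @len: the length of the string.
 * @reset: non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */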
3751void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
3752{
3753 ftrace_set_regex(&global_ops, buf, len, reset, 1);
3754}
3755EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
3756
3757
3758
3759
3760
3761
3762
3763
3764
3765
3766
3767
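/**
 * ftrace_set_global_notrace - set a function to not trace with global tracers
 * @buf: the string that holds the function notrace text.
 * @len: the length of the string.
 * @reset: non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */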
3768void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
3769{
3770 ftrace_set_regex(&global_ops, buf, len, reset, 0);
3771}
3772EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
3773
3774
3775
3776
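/*
 * command line interface to allow users to set filters on boot up.
 */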
3777#define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE
3778static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
3779static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
3780
3781static int __init set_ftrace_notrace(char *str)
3782{
3783 strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
3784 return 1;
3785}
3786__setup("ftrace_notrace=", set_ftrace_notrace);
3787
3788static int __init set_ftrace_filter(char *str)
3789{
3790 strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
3791 return 1;
3792}
3793__setup("ftrace_filter=", set_ftrace_filter);
3794
3795#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3796static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
3797static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
3798
3799static int __init set_graph_function(char *str)
3800{
3801 strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
3802 return 1;
3803}
3804__setup("ftrace_graph_filter=", set_graph_function);
3805
3806static int __init set_graph_max_depth_function(char *str)
3807{
3808 if (!str)
3809 return 0;
3810 fgraph_max_depth = simple_strtoul(str, NULL, 0);
3811 return 1;
3812}
3813__setup("ftrace_graph_max_depth=", set_graph_max_depth_function);
3814
3815static void __init set_ftrace_early_graph(char *buf)
3816{
3817 int ret;
3818 char *func;
3819
3820 while (buf) {
3821 func = strsep(&buf, ",");
3822
3823 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
3824 func);
3825 if (ret)
3826 printk(KERN_DEBUG "ftrace: function %s not traceable\n", func);
3828 }
3829}
3830#endif
3831
3832void __init
3833ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
3834{
3835 char *func;
3836
3837 ftrace_ops_init(ops);
3838
3839 while (buf) {
3840 func = strsep(&buf, ",");
3841 ftrace_set_regex(ops, func, strlen(func), 0, enable);
3842 }
3843}
3844
3845static void __init set_ftrace_early_filters(void)
3846{
3847 if (ftrace_filter_buf[0])
3848 ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
3849 if (ftrace_notrace_buf[0])
3850 ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
3851#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3852 if (ftrace_graph_buf[0])
3853 set_ftrace_early_graph(ftrace_graph_buf);
3854#endif
3855}
3856
3857int ftrace_regex_release(struct inode *inode, struct file *file)
3858{
3859 struct seq_file *m = (struct seq_file *)file->private_data;
3860 struct ftrace_iterator *iter;
3861 struct ftrace_hash **orig_hash;
3862 struct trace_parser *parser;
3863 int filter_hash;
3864 int ret;
3865
3866 if (file->f_mode & FMODE_READ) {
3867 iter = m->private;
3868 seq_release(inode, file);
3869 } else
3870 iter = file->private_data;
3871
3872 parser = &iter->parser;
3873 if (trace_parser_loaded(parser)) {
3874 parser->buffer[parser->idx] = 0;
3875 ftrace_match_records(iter->hash, parser->buffer, parser->idx);
3876 }
3877
3878 trace_parser_put(parser);
3879
3880 mutex_lock(&iter->ops->regex_lock);
3881
3882 if (file->f_mode & FMODE_WRITE) {
3883 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
3884
3885 if (filter_hash)
3886 orig_hash = &iter->ops->filter_hash;
3887 else
3888 orig_hash = &iter->ops->notrace_hash;
3889
3890 mutex_lock(&ftrace_lock);
3891 ret = ftrace_hash_move(iter->ops, filter_hash,
3892 orig_hash, iter->hash);
3893 if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
3894 && ftrace_enabled)
3895 ftrace_run_update_code(FTRACE_UPDATE_CALLS);
3896
3897 mutex_unlock(&ftrace_lock);
3898 }
3899
3900 mutex_unlock(&iter->ops->regex_lock);
3901 free_ftrace_hash(iter->hash);
3902 kfree(iter);
3903
3904 return 0;
3905}
3906
3907static const struct file_operations ftrace_avail_fops = {
3908 .open = ftrace_avail_open,
3909 .read = seq_read,
3910 .llseek = seq_lseek,
3911 .release = seq_release_private,
3912};
3913
3914static const struct file_operations ftrace_enabled_fops = {
3915 .open = ftrace_enabled_open,
3916 .read = seq_read,
3917 .llseek = seq_lseek,
3918 .release = seq_release_private,
3919};
3920
3921static const struct file_operations ftrace_filter_fops = {
3922 .open = ftrace_filter_open,
3923 .read = seq_read,
3924 .write = ftrace_filter_write,
3925 .llseek = ftrace_filter_lseek,
3926 .release = ftrace_regex_release,
3927};
3928
3929static const struct file_operations ftrace_notrace_fops = {
3930 .open = ftrace_notrace_open,
3931 .read = seq_read,
3932 .write = ftrace_notrace_write,
3933 .llseek = ftrace_filter_lseek,
3934 .release = ftrace_regex_release,
3935};
3936
3937#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3938
3939static DEFINE_MUTEX(graph_lock);
3940
3941int ftrace_graph_count;
3942int ftrace_graph_filter_enabled;
3943unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
3944
3945static void *
3946__g_next(struct seq_file *m, loff_t *pos)
3947{
3948 if (*pos >= ftrace_graph_count)
3949 return NULL;
3950 return &ftrace_graph_funcs[*pos];
3951}
3952
3953static void *
3954g_next(struct seq_file *m, void *v, loff_t *pos)
3955{
3956 (*pos)++;
3957 return __g_next(m, pos);
3958}
3959
3960static void *g_start(struct seq_file *m, loff_t *pos)
3961{
3962 mutex_lock(&graph_lock);
3963
3964
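	/* Nothing, tell g_show to print all functions are enabled */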
3965 if (!ftrace_graph_filter_enabled && !*pos)
3966 return (void *)1;
3967
3968 return __g_next(m, pos);
3969}
3970
3971static void g_stop(struct seq_file *m, void *p)
3972{
3973 mutex_unlock(&graph_lock);
3974}
3975
3976static int g_show(struct seq_file *m, void *v)
3977{
3978 unsigned long *ptr = v;
3979
3980 if (!ptr)
3981 return 0;
3982
3983 if (ptr == (unsigned long *)1) {
3984 seq_printf(m, "#### all functions enabled ####\n");
3985 return 0;
3986 }
3987
3988 seq_printf(m, "%ps\n", (void *)*ptr);
3989
3990 return 0;
3991}
3992
3993static const struct seq_operations ftrace_graph_seq_ops = {
3994 .start = g_start,
3995 .next = g_next,
3996 .stop = g_stop,
3997 .show = g_show,
3998};
3999
4000static int
4001ftrace_graph_open(struct inode *inode, struct file *file)
4002{
4003 int ret = 0;
4004
4005 if (unlikely(ftrace_disabled))
4006 return -ENODEV;
4007
4008 mutex_lock(&graph_lock);
4009 if ((file->f_mode & FMODE_WRITE) &&
4010 (file->f_flags & O_TRUNC)) {
4011 ftrace_graph_filter_enabled = 0;
4012 ftrace_graph_count = 0;
4013 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
4014 }
4015 mutex_unlock(&graph_lock);
4016
4017 if (file->f_mode & FMODE_READ)
4018 ret = seq_open(file, &ftrace_graph_seq_ops);
4019
4020 return ret;
4021}
4022
4023static int
4024ftrace_graph_release(struct inode *inode, struct file *file)
4025{
4026 if (file->f_mode & FMODE_READ)
4027 seq_release(inode, file);
4028 return 0;
4029}
4030
4031static int
4032ftrace_set_func(unsigned long *array, int *idx, char *buffer)
4033{
4034 struct dyn_ftrace *rec;
4035 struct ftrace_page *pg;
4036 int search_len;
4037 int fail = 1;
4038 int type, not;
4039 char *search;
4040 bool exists;
4041 int i;
4042
4043
4044 type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
4045 if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS)
4046 return -EBUSY;
4047
4048 search_len = strlen(search);
4049
4050 mutex_lock(&ftrace_lock);
4051
4052 if (unlikely(ftrace_disabled)) {
4053 mutex_unlock(&ftrace_lock);
4054 return -ENODEV;
4055 }
4056
4057 do_for_each_ftrace_rec(pg, rec) {
4058
4059 if (rec->flags & FTRACE_FL_DISABLED)
4060 continue;
4061
4062 if (ftrace_match_record(rec, NULL, search, search_len, type)) {
4063
4064 exists = false;
4065 for (i = 0; i < *idx; i++) {
4066 if (array[i] == rec->ip) {
4067 exists = true;
4068 break;
4069 }
4070 }
4071
4072 if (!not) {
4073 fail = 0;
4074 if (!exists) {
4075 array[(*idx)++] = rec->ip;
4076 if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
4077 goto out;
4078 }
4079 } else {
4080 if (exists) {
4081 array[i] = array[--(*idx)];
4082 array[*idx] = 0;
4083 fail = 0;
4084 }
4085 }
4086 }
4087 } while_for_each_ftrace_rec();
4088out:
4089 mutex_unlock(&ftrace_lock);
4090
4091 if (fail)
4092 return -EINVAL;
4093
4094 ftrace_graph_filter_enabled = !!(*idx);
4095
4096 return 0;
4097}
4098
4099static ssize_t
4100ftrace_graph_write(struct file *file, const char __user *ubuf,
4101 size_t cnt, loff_t *ppos)
4102{
4103 struct trace_parser parser;
4104 ssize_t read, ret;
4105
4106 if (!cnt)
4107 return 0;
4108
4109 mutex_lock(&graph_lock);
4110
4111 if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
4112 ret = -ENOMEM;
4113 goto out_unlock;
4114 }
4115
4116 read = trace_get_user(&parser, ubuf, cnt, ppos);
4117
4118 if (read >= 0 && trace_parser_loaded(&parser)) {
4119 parser.buffer[parser.idx] = 0;
4120
4121
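		/* we allow only one expression at a time */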
4122 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
4123 parser.buffer);
4124 if (ret)
4125 goto out_free;
4126 }
4127
4128 ret = read;
4129
4130out_free:
4131 trace_parser_put(&parser);
4132out_unlock:
4133 mutex_unlock(&graph_lock);
4134
4135 return ret;
4136}
4137
4138static const struct file_operations ftrace_graph_fops = {
4139 .open = ftrace_graph_open,
4140 .read = seq_read,
4141 .write = ftrace_graph_write,
4142 .llseek = ftrace_filter_lseek,
4143 .release = ftrace_graph_release,
4144};
4145#endif
4146
4147static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
4148{
4149
4150 trace_create_file("available_filter_functions", 0444,
4151 d_tracer, NULL, &ftrace_avail_fops);
4152
4153 trace_create_file("enabled_functions", 0444,
4154 d_tracer, NULL, &ftrace_enabled_fops);
4155
4156 trace_create_file("set_ftrace_filter", 0644, d_tracer,
4157 NULL, &ftrace_filter_fops);
4158
4159 trace_create_file("set_ftrace_notrace", 0644, d_tracer,
4160 NULL, &ftrace_notrace_fops);
4161
4162#ifdef CONFIG_FUNCTION_GRAPH_TRACER
4163 trace_create_file("set_graph_function", 0444, d_tracer,
4164 NULL,
4165 &ftrace_graph_fops);
4166#endif
4167
4168 return 0;
4169}
4170
4171static int ftrace_cmp_ips(const void *a, const void *b)
4172{
4173 const unsigned long *ipa = a;
4174 const unsigned long *ipb = b;
4175
4176 if (*ipa > *ipb)
4177 return 1;
4178 if (*ipa < *ipb)
4179 return -1;
4180 return 0;
4181}
4182
4183static void ftrace_swap_ips(void *a, void *b, int size)
4184{
4185 unsigned long *ipa = a;
4186 unsigned long *ipb = b;
4187 unsigned long t;
4188
4189 t = *ipa;
4190 *ipa = *ipb;
4191 *ipb = t;
4192}
4193
4194static int ftrace_process_locs(struct module *mod,
4195 unsigned long *start,
4196 unsigned long *end)
4197{
4198 struct ftrace_page *start_pg;
4199 struct ftrace_page *pg;
4200 struct dyn_ftrace *rec;
4201 unsigned long count;
4202 unsigned long *p;
4203 unsigned long addr;
4204 unsigned long flags = 0;
4205 int ret = -ENOMEM;
4206
4207 count = end - start;
4208
4209 if (!count)
4210 return 0;
4211
4212 sort(start, count, sizeof(*start),
4213 ftrace_cmp_ips, ftrace_swap_ips);
4214
4215 start_pg = ftrace_allocate_pages(count);
4216 if (!start_pg)
4217 return -ENOMEM;
4218
4219 mutex_lock(&ftrace_lock);
4220
4221
4222
4223
4224
4225
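	/*
	 * Core and each module needs their own pages, as
	 * modules will free them when they are removed.
	 * Force a new page to be allocated for modules.
	 */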
4226 if (!mod) {
4227 WARN_ON(ftrace_pages || ftrace_pages_start);
4228
4229 ftrace_pages = ftrace_pages_start = start_pg;
4230 } else {
4231 if (!ftrace_pages)
4232 goto out;
4233
4234 if (WARN_ON(ftrace_pages->next)) {
4235
4236 while (ftrace_pages->next)
4237 ftrace_pages = ftrace_pages->next;
4238 }
4239
4240 ftrace_pages->next = start_pg;
4241 }
4242
4243 p = start;
4244 pg = start_pg;
4245 while (p < end) {
4246 addr = ftrace_call_adjust(*p++);
4247
4248
4249
4250
4251
4252
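		/*
		 * Some architecture linkers will pad between
		 * the different mcount_loc sections of different
		 * object files to satisfy alignments.
		 * Skip any NULL pointers.
		 */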
4253 if (!addr)
4254 continue;
4255
4256 if (pg->index == pg->size) {
4257
4258 if (WARN_ON(!pg->next))
4259 break;
4260 pg = pg->next;
4261 }
4262
4263 rec = &pg->records[pg->index++];
4264 rec->ip = addr;
4265 }
4266
4267
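	/* We should have used all pages */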
4268 WARN_ON(pg->next);
4269
4270
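	/* Assign the last page to ftrace_pages */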
4271 ftrace_pages = pg;
4272
4273
4274
4275
4276
4277
4278
4279
4280
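	/*
	 * We only need to disable interrupts on start up
	 * because we are modifying code that an interrupt
	 * may execute, and the modification is not atomic.
	 * But for modules, nothing runs the code we modify
	 * until we are finished with it, and there's no
	 * reason to cause large interrupt latencies while we do it.
	 */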
4281 if (!mod)
4282 local_irq_save(flags);
4283 ftrace_update_code(mod, start_pg);
4284 if (!mod)
4285 local_irq_restore(flags);
4286 ret = 0;
4287 out:
4288 mutex_unlock(&ftrace_lock);
4289
4290 return ret;
4291}
4292
4293#ifdef CONFIG_MODULES
4294
4295#define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
4296
4297static int referenced_filters(struct dyn_ftrace *rec)
4298{
4299 struct ftrace_ops *ops;
4300 int cnt = 0;
4301
4302 for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
4303 if (ops_references_rec(ops, rec))
4304 cnt++;
4305 }
4306
4307 return cnt;
4308}
4309
4310void ftrace_release_mod(struct module *mod)
4311{
4312 struct dyn_ftrace *rec;
4313 struct ftrace_page **last_pg;
4314 struct ftrace_page *pg;
4315 int order;
4316
4317 mutex_lock(&ftrace_lock);
4318
4319 if (ftrace_disabled)
4320 goto out_unlock;
4321
4322
4323
4324
4325
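	/*
	 * Each module has its own ftrace_pages, remove
	 * them from the list.
	 */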
4326 last_pg = &ftrace_pages_start;
4327 for (pg = ftrace_pages_start; pg; pg = *last_pg) {
4328 rec = &pg->records[0];
4329 if (within_module_core(rec->ip, mod)) {
4330
4331
4332
4333
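			/*
			 * As core pages are first, the first
			 * page should never be a module page.
			 */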
4334 if (WARN_ON(pg == ftrace_pages_start))
4335 goto out_unlock;
4336
4337
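			/* Check if we are deleting the last page */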
4338 if (pg == ftrace_pages)
4339 ftrace_pages = next_to_ftrace_page(last_pg);
4340
4341 *last_pg = pg->next;
4342 order = get_count_order(pg->size / ENTRIES_PER_PAGE);
4343 free_pages((unsigned long)pg->records, order);
4344 kfree(pg);
4345 } else
4346 last_pg = &pg->next;
4347 }
4348 out_unlock:
4349 mutex_unlock(&ftrace_lock);
4350}
4351
4352void ftrace_module_enable(struct module *mod)
4353{
4354 struct dyn_ftrace *rec;
4355 struct ftrace_page *pg;
4356
4357 mutex_lock(&ftrace_lock);
4358
4359 if (ftrace_disabled)
4360 goto out_unlock;
4361
4362
4363
4364
4365
4366
4367
4368
4369
4370
4371
4372
4373
4374
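	/*
	 * If the tracing is enabled, go ahead and enable the record.
	 *
	 * The reason not to enable the record immediately is the
	 * inherent check of ftrace_make_nop/ftrace_make_call for
	 * correct previous instructions. Making ftrace modifiable
	 * is the enabling of the record.
	 */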
4375 if (ftrace_start_up)
4376 ftrace_arch_code_modify_prepare();
4377
4378 do_for_each_ftrace_rec(pg, rec) {
4379 int cnt;
4380
4381
4382
4383
4384
4385
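		/*
		 * do_for_each_ftrace_rec() is a double loop.
		 * module text shares the pg. If a record is
		 * not part of this module, then skip this pg,
		 * which the "break" will do.
		 */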
4386 if (!within_module_core(rec->ip, mod))
4387 break;
4388
4389 cnt = 0;
4390
4391
4392
4393
4394
4395
4396
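		/*
		 * When adding a module, we need to check if tracers are
		 * currently enabled and if they are, and can trace this record,
		 * we need to enable the module functions as well as update the
		 * reference counts for those function records.
		 */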
4397 if (ftrace_start_up)
4398 cnt += referenced_filters(rec);
4399
4400
4401 rec->flags = cnt;
4402
4403 if (ftrace_start_up && cnt) {
4404 int failed = __ftrace_replace_code(rec, 1);
4405 if (failed) {
4406 ftrace_bug(failed, rec->ip);
4407 goto out_loop;
4408 }
4409 }
4410
4411 } while_for_each_ftrace_rec();
4412
4413 out_loop:
4414 if (ftrace_start_up)
4415 ftrace_arch_code_modify_post_process();
4416
4417 out_unlock:
4418 mutex_unlock(&ftrace_lock);
4419}
4420
4421static void ftrace_init_module(struct module *mod,
4422 unsigned long *start, unsigned long *end)
4423{
4424 if (ftrace_disabled || start == end)
4425 return;
4426 ftrace_process_locs(mod, start, end);
4427}
4428
4429void ftrace_module_init(struct module *mod)
4430{
4431#ifdef CONFIG_S390
4432 struct module_ext *mod_ext;
4433
4434 mutex_lock(&module_ext_mutex);
4435 mod_ext = find_module_ext(mod);
4436 mutex_unlock(&module_ext_mutex);
4437
4438 ftrace_init_module(mod, mod_ext->ftrace_callsites,
4439 mod_ext->ftrace_callsites +
4440 mod_ext->num_ftrace_callsites);
4441#else
4442 ftrace_init_module(mod, mod->ftrace_callsites,
4443 mod->ftrace_callsites +
4444 mod->num_ftrace_callsites);
4445#endif
4446}
4447#endif
4448
4449void __init ftrace_init(void)
4450{
4451 extern unsigned long __start_mcount_loc[];
4452 extern unsigned long __stop_mcount_loc[];
4453 unsigned long count, addr, flags;
4454 int ret;
4455
4456
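	/* Keep the ftrace pointer to the stub */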
4457 addr = (unsigned long)ftrace_stub;
4458
4459 local_irq_save(flags);
4460 ftrace_dyn_arch_init(&addr);
4461 local_irq_restore(flags);
4462
4463
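	/* ftrace_dyn_arch_init places the return code in addr */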
4464 if (addr)
4465 goto failed;
4466
4467 count = __stop_mcount_loc - __start_mcount_loc;
4468
4469 ret = ftrace_dyn_table_alloc(count);
4470 if (ret)
4471 goto failed;
4472
4473 last_ftrace_enabled = ftrace_enabled = 1;
4474
4475 ret = ftrace_process_locs(NULL,
4476 __start_mcount_loc,
4477 __stop_mcount_loc);
4478
4479 set_ftrace_early_filters();
4480
4481 return;
4482 failed:
4483 ftrace_disabled = 1;
4484}
4485
4486#else
4487
4488static struct ftrace_ops global_ops = {
4489 .func = ftrace_stub,
4490 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
4491 INIT_REGEX_LOCK(global_ops)
4492};
4493
4494static int __init ftrace_nodyn_init(void)
4495{
4496 ftrace_enabled = 1;
4497 return 0;
4498}
4499core_initcall(ftrace_nodyn_init);
4500
4501static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
4502static inline void ftrace_startup_enable(int command) { }
4503
4504# define ftrace_startup(ops, command) \
4505 ({ \
4506 int ___ret = __register_ftrace_function(ops); \
4507 if (!___ret) \
4508 (ops)->flags |= FTRACE_OPS_FL_ENABLED; \
4509 ___ret; \
4510 })
4511# define ftrace_shutdown(ops, command) __unregister_ftrace_function(ops)
4512
4513# define ftrace_startup_sysctl() do { } while (0)
4514# define ftrace_shutdown_sysctl() do { } while (0)
4515
4516static inline int
4517ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
4518{
4519 return 1;
4520}
4521
4522#endif
4523
4524static void
4525ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
4526 struct ftrace_ops *op, struct pt_regs *regs)
4527{
4528 if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))
4529 return;
4530
4531
4532
4533
4534
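	/*
	 * Some of the ops may be dynamically allocated,
	 * they must be freed after a synchronize_sched().
	 */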
4535 preempt_disable_notrace();
4536 trace_recursion_set(TRACE_CONTROL_BIT);
4537
4538
4539
4540
4541
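	/*
	 * Control probes (such as perf) use RCU. Only trace
	 * if RCU is currently watching.
	 */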
4542 if (!rcu_is_watching())
4543 goto out;
4544
4545 do_for_each_ftrace_op(op, ftrace_control_list) {
4546 if (!(op->flags & FTRACE_OPS_FL_STUB) &&
4547 !ftrace_function_local_disabled(op) &&
4548 ftrace_ops_test(op, ip, regs))
4549 op->func(ip, parent_ip, op, regs);
4550 } while_for_each_ftrace_op(op);
4551 out:
4552 trace_recursion_clear(TRACE_CONTROL_BIT);
4553 preempt_enable_notrace();
4554}
4555
4556static struct ftrace_ops control_ops = {
4557 .func = ftrace_ops_control_func,
4558 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
4559 INIT_REGEX_LOCK(control_ops)
4560};
4561
4562static inline void
4563__ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
4564 struct ftrace_ops *ignored, struct pt_regs *regs)
4565{
4566 struct ftrace_ops *op;
4567 int bit;
4568
4569 bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
4570 if (bit < 0)
4571 return;
4572
4573
4574
4575
4576
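	/*
	 * Some of the ops may be dynamically allocated,
	 * they must be freed after a synchronize_sched().
	 */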
4577 preempt_disable_notrace();
4578 do_for_each_ftrace_op(op, ftrace_ops_list) {
4579 if (ftrace_ops_test(op, ip, regs))
4580 op->func(ip, parent_ip, op, regs);
4581 } while_for_each_ftrace_op(op);
4582 preempt_enable_notrace();
4583 trace_clear_recursion(bit);
4584}
4585
4586
4587
4588
4589
4590
4591
4592
4593
4594
4595
4596
4597
4598
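/*
 * Some archs only support passing ip and parent_ip. Even though
 * the list function ignores the op parameter, we do not want any
 * C side effects, where a function is called without the caller
 * knowing any of the arguments.
 */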
4599#if ARCH_SUPPORTS_FTRACE_OPS
4600static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
4601 struct ftrace_ops *op, struct pt_regs *regs)
4602{
4603 __ftrace_ops_list_func(ip, parent_ip, NULL, regs);
4604}
4605#else
4606static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
4607{
4608 __ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
4609}
4610#endif
4611
4612static void clear_ftrace_swapper(void)
4613{
4614 struct task_struct *p;
4615 int cpu;
4616
4617 get_online_cpus();
4618 for_each_online_cpu(cpu) {
4619 p = idle_task(cpu);
4620 clear_tsk_trace_trace(p);
4621 }
4622 put_online_cpus();
4623}
4624
4625static void set_ftrace_swapper(void)
4626{
4627 struct task_struct *p;
4628 int cpu;
4629
4630 get_online_cpus();
4631 for_each_online_cpu(cpu) {
4632 p = idle_task(cpu);
4633 set_tsk_trace_trace(p);
4634 }
4635 put_online_cpus();
4636}
4637
4638static void clear_ftrace_pid(struct pid *pid)
4639{
4640 struct task_struct *p;
4641
4642 rcu_read_lock();
4643 do_each_pid_task(pid, PIDTYPE_PID, p) {
4644 clear_tsk_trace_trace(p);
4645 } while_each_pid_task(pid, PIDTYPE_PID, p);
4646 rcu_read_unlock();
4647
4648 put_pid(pid);
4649}
4650
4651static void set_ftrace_pid(struct pid *pid)
4652{
4653 struct task_struct *p;
4654
4655 rcu_read_lock();
4656 do_each_pid_task(pid, PIDTYPE_PID, p) {
4657 set_tsk_trace_trace(p);
4658 } while_each_pid_task(pid, PIDTYPE_PID, p);
4659 rcu_read_unlock();
4660}
4661
4662static void clear_ftrace_pid_task(struct pid *pid)
4663{
4664 if (pid == ftrace_swapper_pid)
4665 clear_ftrace_swapper();
4666 else
4667 clear_ftrace_pid(pid);
4668}
4669
4670static void set_ftrace_pid_task(struct pid *pid)
4671{
4672 if (pid == ftrace_swapper_pid)
4673 set_ftrace_swapper();
4674 else
4675 set_ftrace_pid(pid);
4676}
4677
4678static int ftrace_pid_add(int p)
4679{
4680 struct pid *pid;
4681 struct ftrace_pid *fpid;
4682 int ret = -EINVAL;
4683
4684 mutex_lock(&ftrace_lock);
4685
4686 if (!p)
4687 pid = ftrace_swapper_pid;
4688 else
4689 pid = find_get_pid(p);
4690
4691 if (!pid)
4692 goto out;
4693
4694 ret = 0;
4695
4696 list_for_each_entry(fpid, &ftrace_pids, list)
4697 if (fpid->pid == pid)
4698 goto out_put;
4699
4700 ret = -ENOMEM;
4701
4702 fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
4703 if (!fpid)
4704 goto out_put;
4705
4706 list_add(&fpid->list, &ftrace_pids);
4707 fpid->pid = pid;
4708
4709 set_ftrace_pid_task(pid);
4710
4711 ftrace_update_pid_func();
4712 ftrace_startup_enable(0);
4713
4714 mutex_unlock(&ftrace_lock);
4715 return 0;
4716
4717out_put:
4718 if (pid != ftrace_swapper_pid)
4719 put_pid(pid);
4720
4721out:
4722 mutex_unlock(&ftrace_lock);
4723 return ret;
4724}
4725
4726static void ftrace_pid_reset(void)
4727{
4728 struct ftrace_pid *fpid, *safe;
4729
4730 mutex_lock(&ftrace_lock);
4731 list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
4732 struct pid *pid = fpid->pid;
4733
4734 clear_ftrace_pid_task(pid);
4735
4736 list_del(&fpid->list);
4737 kfree(fpid);
4738 }
4739
4740 ftrace_update_pid_func();
4741 ftrace_startup_enable(0);
4742
4743 mutex_unlock(&ftrace_lock);
4744}
4745
4746static void *fpid_start(struct seq_file *m, loff_t *pos)
4747{
4748 mutex_lock(&ftrace_lock);
4749
4750 if (list_empty(&ftrace_pids) && (!*pos))
4751 return (void *) 1;
4752
4753 return seq_list_start(&ftrace_pids, *pos);
4754}
4755
4756static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
4757{
4758 if (v == (void *)1)
4759 return NULL;
4760
4761 return seq_list_next(v, &ftrace_pids, pos);
4762}
4763
4764static void fpid_stop(struct seq_file *m, void *p)
4765{
4766 mutex_unlock(&ftrace_lock);
4767}
4768
4769static int fpid_show(struct seq_file *m, void *v)
4770{
4771 const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
4772
4773 if (v == (void *)1) {
4774 seq_printf(m, "no pid\n");
4775 return 0;
4776 }
4777
4778 if (fpid->pid == ftrace_swapper_pid)
4779 seq_printf(m, "swapper tasks\n");
4780 else
4781 seq_printf(m, "%u\n", pid_vnr(fpid->pid));
4782
4783 return 0;
4784}
4785
4786static const struct seq_operations ftrace_pid_sops = {
4787 .start = fpid_start,
4788 .next = fpid_next,
4789 .stop = fpid_stop,
4790 .show = fpid_show,
4791};
4792
4793static int
4794ftrace_pid_open(struct inode *inode, struct file *file)
4795{
4796 int ret = 0;
4797
4798 if ((file->f_mode & FMODE_WRITE) &&
4799 (file->f_flags & O_TRUNC))
4800 ftrace_pid_reset();
4801
4802 if (file->f_mode & FMODE_READ)
4803 ret = seq_open(file, &ftrace_pid_sops);
4804
4805 return ret;
4806}
4807
4808static ssize_t
4809ftrace_pid_write(struct file *filp, const char __user *ubuf,
4810 size_t cnt, loff_t *ppos)
4811{
4812 char buf[64], *tmp;
4813 long val;
4814 int ret;
4815
4816 if (cnt >= sizeof(buf))
4817 return -EINVAL;
4818
4819 if (copy_from_user(&buf, ubuf, cnt))
4820 return -EFAULT;
4821
4822 buf[cnt] = 0;
4823
4824
4825
4826
4827
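	/*
	 * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
	 * to clean the filter quietly.
	 */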
4828 tmp = strstrip(buf);
4829 if (strlen(tmp) == 0)
4830 return 1;
4831
4832 ret = kstrtol(tmp, 10, &val);
4833 if (ret < 0)
4834 return ret;
4835
4836 ret = ftrace_pid_add(val);
4837
4838 return ret ? ret : cnt;
4839}
4840
4841static int
4842ftrace_pid_release(struct inode *inode, struct file *file)
4843{
4844 if (file->f_mode & FMODE_READ)
4845 seq_release(inode, file);
4846
4847 return 0;
4848}
4849
4850static const struct file_operations ftrace_pid_fops = {
4851 .open = ftrace_pid_open,
4852 .write = ftrace_pid_write,
4853 .read = seq_read,
4854 .llseek = ftrace_filter_lseek,
4855 .release = ftrace_pid_release,
4856};
4857
4858static __init int ftrace_init_debugfs(void)
4859{
4860 struct dentry *d_tracer;
4861
4862 d_tracer = tracing_init_dentry();
4863 if (!d_tracer)
4864 return 0;
4865
4866 ftrace_init_dyn_debugfs(d_tracer);
4867
4868 trace_create_file("set_ftrace_pid", 0644, d_tracer,
4869 NULL, &ftrace_pid_fops);
4870
4871 ftrace_profile_debugfs(d_tracer);
4872
4873 return 0;
4874}
4875fs_initcall(ftrace_init_debugfs);
4876
4877
4878
4879
4880
4881
4882
4883
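/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way. If you need to simply kill ftrace
 * from a non-atomic section, use ftrace_kill.
 */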
4884void ftrace_kill(void)
4885{
4886 ftrace_disabled = 1;
4887 ftrace_enabled = 0;
4888 clear_ftrace_function();
4889}
4890
4891
4892
4893
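/**
 * Test if ftrace is dead or not.
 */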
4894int ftrace_is_dead(void)
4895{
4896 return ftrace_disabled;
4897}
4898
4899
4900
4901
4902
4903
4904
4905
4906
4907
4908
4909
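/**
 * register_ftrace_function - register a function for profiling
 * @ops: ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 *
 * A minimal usage sketch (my_ops, my_trace_func and
 * do_something_notrace are hypothetical example names, not symbols
 * defined in this file):
 *
 *	static void notrace my_trace_func(unsigned long ip,
 *					  unsigned long parent_ip,
 *					  struct ftrace_ops *op,
 *					  struct pt_regs *regs)
 *	{
 *		do_something_notrace(ip, parent_ip);
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_trace_func,
 *		.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */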
4910int register_ftrace_function(struct ftrace_ops *ops)
4911{
4912 int ret = -1;
4913
4914 ftrace_ops_init(ops);
4915
4916 mutex_lock(&ftrace_lock);
4917
4918 ret = ftrace_startup(ops, 0);
4919
4920 mutex_unlock(&ftrace_lock);
4921
4922 return ret;
4923}
4924EXPORT_SYMBOL_GPL(register_ftrace_function);
4925
4926
4927
4928
4929
4930
4931
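/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops: ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */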
4932int unregister_ftrace_function(struct ftrace_ops *ops)
4933{
4934 int ret;
4935
4936 mutex_lock(&ftrace_lock);
4937 ret = ftrace_shutdown(ops, 0);
4938 mutex_unlock(&ftrace_lock);
4939
4940 return ret;
4941}
4942EXPORT_SYMBOL_GPL(unregister_ftrace_function);
4943
4944int
4945ftrace_enable_sysctl(struct ctl_table *table, int write,
4946 void __user *buffer, size_t *lenp,
4947 loff_t *ppos)
4948{
4949 int ret = -ENODEV;
4950
4951 mutex_lock(&ftrace_lock);
4952
4953 if (unlikely(ftrace_disabled))
4954 goto out;
4955
4956 ret = proc_dointvec(table, write, buffer, lenp, ppos);
4957
4958 if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
4959 goto out;
4960
4961 last_ftrace_enabled = !!ftrace_enabled;
4962
4963 if (ftrace_enabled) {
4964
4965 ftrace_startup_sysctl();
4966
4967
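		/* we are starting ftrace again */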
4968 if (ftrace_ops_list != &ftrace_list_end)
4969 update_ftrace_function();
4970
4971 } else {
4972
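		/* stopping ftrace calls (just send to ftrace_stub) */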
4973 ftrace_trace_function = ftrace_stub;
4974
4975 ftrace_shutdown_sysctl();
4976 }
4977
4978 out:
4979 mutex_unlock(&ftrace_lock);
4980 return ret;
4981}
4982
4983#ifdef CONFIG_FUNCTION_GRAPH_TRACER
4984
4985static int ftrace_graph_active;
4986static struct notifier_block ftrace_suspend_notifier;
4987
4988int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
4989{
4990 return 0;
4991}
4992
4993
4994trace_func_graph_ret_t ftrace_graph_return =
4995 (trace_func_graph_ret_t)ftrace_stub;
4996trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
4997static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
4998
4999
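/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */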
5000static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
5001{
5002 int i;
5003 int ret = 0;
5004 unsigned long flags;
5005 int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
5006 struct task_struct *g, *t;
5007
5008 for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
5009 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
5010 * sizeof(struct ftrace_ret_stack),
5011 GFP_KERNEL);
5012 if (!ret_stack_list[i]) {
5013 start = 0;
5014 end = i;
5015 ret = -ENOMEM;
5016 goto free;
5017 }
5018 }
5019
5020 qread_lock_irqsave(&tasklist_lock, flags);
5021 do_each_thread(g, t) {
5022 if (start == end) {
5023 ret = -EAGAIN;
5024 goto unlock;
5025 }
5026
5027 if (t->ret_stack == NULL) {
5028 atomic_set(&t->tracing_graph_pause, 0);
5029 atomic_set(&t->trace_overrun, 0);
5030 t->curr_ret_stack = -1;
5031
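			/* Make sure the tasks see the -1 first: */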
5032 smp_wmb();
5033 t->ret_stack = ret_stack_list[start++];
5034 }
5035 } while_each_thread(g, t);
5036
5037unlock:
5038 qread_unlock_irqrestore(&tasklist_lock, flags);
5039free:
5040 for (i = start; i < end; i++)
5041 kfree(ret_stack_list[i]);
5042 return ret;
5043}
5044
5045static void
5046ftrace_graph_probe_sched_switch(void *ignore,
5047 struct task_struct *prev, struct task_struct *next)
5048{
5049 unsigned long long timestamp;
5050 int index;
5051
5052
5053
5054
5055
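	/*
	 * Does the user want to count the time a function was asleep.
	 * If so, do not update the time stamps.
	 */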
5056 if (trace_flags & TRACE_ITER_SLEEP_TIME)
5057 return;
5058
5059 timestamp = trace_clock_local();
5060
5061 prev->ftrace_timestamp = timestamp;
5062
5063
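	/* only process tasks that we timestamped */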
5064 if (!next->ftrace_timestamp)
5065 return;
5066
5067
5068
5069
5070
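	/*
	 * Update all the counters in next to make up for the
	 * time next was sleeping.
	 */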
5071 timestamp -= next->ftrace_timestamp;
5072
5073 for (index = next->curr_ret_stack; index >= 0; index--)
5074 next->ret_stack[index].calltime += timestamp;
5075}
5076
5077
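/* Allocate a return stack for each task */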
5078static int start_graph_tracing(void)
5079{
5080 struct ftrace_ret_stack **ret_stack_list;
5081 int ret, cpu;
5082
5083 ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
5084 sizeof(struct ftrace_ret_stack *),
5085 GFP_KERNEL);
5086
5087 if (!ret_stack_list)
5088 return -ENOMEM;
5089
5090
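	/* The cpu_boot init_task->ret_stack will never be freed */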
5091 for_each_online_cpu(cpu) {
5092 if (!idle_task(cpu)->ret_stack)
5093 ftrace_graph_init_idle_task(idle_task(cpu), cpu);
5094 }
5095
5096 do {
5097 ret = alloc_retstack_tasklist(ret_stack_list);
5098 } while (ret == -EAGAIN);
5099
5100 if (!ret) {
5101 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
5102 if (ret)
5103 pr_info("ftrace_graph: Couldn't activate tracepoint probe to kernel_sched_switch\n");
5105 }
5106
5107 kfree(ret_stack_list);
5108 return ret;
5109}
5110
5111
5112
5113
5114
5115
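/*
 * Hibernation protection.
 * The state of the current task is too unstable during
 * suspend/restore to disk. We want to protect against that.
 */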
5116static int
5117ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
5118 void *unused)
5119{
5120 switch (state) {
5121 case PM_HIBERNATION_PREPARE:
5122 pause_graph_tracing();
5123 break;
5124
5125 case PM_POST_HIBERNATION:
5126 unpause_graph_tracing();
5127 break;
5128 }
5129 return NOTIFY_DONE;
5130}
5131
5132
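/* Just a place holder for function graph */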
5133static struct ftrace_ops fgraph_ops __read_mostly = {
5134 .func = ftrace_stub,
5135 .flags = FTRACE_OPS_FL_STUB | FTRACE_OPS_FL_GLOBAL |
5136 FTRACE_OPS_FL_RECURSION_SAFE,
5137};
5138
5139static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
5140{
5141 if (!ftrace_ops_test(&global_ops, trace->func, NULL))
5142 return 0;
5143 return __ftrace_graph_entry(trace);
5144}
5145
5146
5147
5148
5149
5150
5151
5152
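/*
 * The function graph tracer should only trace the functions defined
 * by set_ftrace_filter and set_ftrace_notrace. If another function
 * tracer ops was registered, the graph tracer requires testing the
 * function against the global ops, and not just trace any function
 * that any ftrace_ops registered.
 */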
5153static void update_function_graph_func(void)
5154{
5155 if (ftrace_ops_list == &ftrace_list_end ||
5156 (ftrace_ops_list == &global_ops &&
5157 global_ops.next == &ftrace_list_end))
5158 ftrace_graph_entry = __ftrace_graph_entry;
5159 else
5160 ftrace_graph_entry = ftrace_graph_entry_test;
5161}
5162
5163int register_ftrace_graph(trace_func_graph_ret_t retfunc,
5164 trace_func_graph_ent_t entryfunc)
5165{
5166 int ret = 0;
5167
5168 mutex_lock(&ftrace_lock);
5169
5170
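	/* we currently allow only one tracer registered at a time */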
5171 if (ftrace_graph_active) {
5172 ret = -EBUSY;
5173 goto out;
5174 }
5175
5176 ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
5177 register_pm_notifier(&ftrace_suspend_notifier);
5178
5179 ftrace_graph_active++;
5180 ret = start_graph_tracing();
5181 if (ret) {
5182 ftrace_graph_active--;
5183 goto out;
5184 }
5185
5186 ftrace_graph_return = retfunc;
5187
5188
5189
5190
5191
5192
5193
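	/*
	 * Update the indirect function to the entryfunc, and the
	 * function that gets called to the entry_test first. Then
	 * call the update fgraph entry function to determine if
	 * the entryfunc should be called directly or not.
	 */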
5194 __ftrace_graph_entry = entryfunc;
5195 ftrace_graph_entry = ftrace_graph_entry_test;
5196 update_function_graph_func();
5197
5198 ret = ftrace_startup(&fgraph_ops, FTRACE_START_FUNC_RET);
5199
5200out:
5201 mutex_unlock(&ftrace_lock);
5202 return ret;
5203}
5204
5205void unregister_ftrace_graph(void)
5206{
5207 mutex_lock(&ftrace_lock);
5208
5209 if (unlikely(!ftrace_graph_active))
5210 goto out;
5211
5212 ftrace_graph_active--;
5213 ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
5214 ftrace_graph_entry = ftrace_graph_entry_stub;
5215 __ftrace_graph_entry = ftrace_graph_entry_stub;
5216 ftrace_shutdown(&fgraph_ops, FTRACE_STOP_FUNC_RET);
5217 unregister_pm_notifier(&ftrace_suspend_notifier);
5218 unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
5219
5220 out:
5221 mutex_unlock(&ftrace_lock);
5222}
5223
5224static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
5225
5226static void
5227graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
5228{
5229 atomic_set(&t->tracing_graph_pause, 0);
5230 atomic_set(&t->trace_overrun, 0);
5231 t->ftrace_timestamp = 0;
5232
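	/* make curr_ret_stack visible before we add the ret_stack */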
5233 smp_wmb();
5234 t->ret_stack = ret_stack;
5235}
5236
5237
5238
5239
5240
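/*
 * Allocate a return stack for the idle task. May be the first
 * time through, or it may be done by CPU hotplug online.
 */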
5241void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
5242{
5243 t->curr_ret_stack = -1;
5244
5245
5246
5247
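	/*
	 * The idle task has no parent, it either has its own
	 * stack or no stack at all.
	 */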
5248 if (t->ret_stack)
5249 WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
5250
5251 if (ftrace_graph_active) {
5252 struct ftrace_ret_stack *ret_stack;
5253
5254 ret_stack = per_cpu(idle_ret_stack, cpu);
5255 if (!ret_stack) {
5256 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
5257 * sizeof(struct ftrace_ret_stack),
5258 GFP_KERNEL);
5259 if (!ret_stack)
5260 return;
5261 per_cpu(idle_ret_stack, cpu) = ret_stack;
5262 }
5263 graph_init_task(t, ret_stack);
5264 }
5265}
5266
5267
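/* Allocate a return stack for newly created task */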
5268void ftrace_graph_init_task(struct task_struct *t)
5269{
5270
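	/* Make sure we do not use the parent ret_stack */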
5271 t->ret_stack = NULL;
5272 t->curr_ret_stack = -1;
5273
5274 if (ftrace_graph_active) {
5275 struct ftrace_ret_stack *ret_stack;
5276
5277 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
5278 * sizeof(struct ftrace_ret_stack),
5279 GFP_KERNEL);
5280 if (!ret_stack)
5281 return;
5282 graph_init_task(t, ret_stack);
5283 }
5284}
5285
5286void ftrace_graph_exit_task(struct task_struct *t)
5287{
5288 struct ftrace_ret_stack *ret_stack = t->ret_stack;
5289
5290 t->ret_stack = NULL;
5291
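	/* NULL must become visible to IRQs before we free it: */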
5292 barrier();
5293
5294 kfree(ret_stack);
5295}
5296#endif
5297