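/* ring buffer based function tracer */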
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"

static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
				     struct ftrace_ops *op,
				     struct ftrace_regs *fregs);
static struct tracer_flags func_flags;

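/* Option flags for the function tracer; they map to the entries in func_opts below. */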
enum {
	TRACE_FUNC_NO_OPTS		= 0x0, /* no flags set */
	TRACE_FUNC_OPT_STACK		= 0x1,
	TRACE_FUNC_OPT_NO_REPEATS	= 0x2,

	/* Update this to the next highest bit when adding a new flag. */
	TRACE_FUNC_OPT_HIGHEST_BIT	= 0x4
};

#define TRACE_FUNC_OPT_MASK	(TRACE_FUNC_OPT_HIGHEST_BIT - 1)

int ftrace_allocate_ftrace_ops(struct trace_array *tr)
{
	struct ftrace_ops *ops;

	/* The top level instance uses the global ftrace ops. */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	/* Start with the plain callback; func_set_flag() switches it later. */
	ops->func = function_trace_call;
	ops->flags = FTRACE_OPS_FL_PID;

	tr->ops = ops;
	ops->private = tr;

	return 0;
}

void ftrace_free_ftrace_ops(struct trace_array *tr)
{
	kfree(tr->ops);
	tr->ops = NULL;
}

int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent)
{
	/*
	 * The top level instance uses the global ftrace ops, and its
	 * filter files are created on boot up.
	 */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	if (!tr->ops)
		return -EINVAL;

	ftrace_create_filter_files(tr->ops, parent);

	return 0;
}

void ftrace_destroy_function_files(struct trace_array *tr)
{
	ftrace_destroy_filter_files(tr->ops);
	ftrace_free_ftrace_ops(tr);
}

static ftrace_func_t select_trace_function(u32 flags_val)
{
	switch (flags_val & TRACE_FUNC_OPT_MASK) {
	case TRACE_FUNC_NO_OPTS:
		return function_trace_call;
	case TRACE_FUNC_OPT_STACK:
		return function_stack_trace_call;
	case TRACE_FUNC_OPT_NO_REPEATS:
		return function_no_repeats_trace_call;
	case TRACE_FUNC_OPT_STACK | TRACE_FUNC_OPT_NO_REPEATS:
		return function_stack_no_repeats_trace_call;
	default:
		return NULL;
	}
}

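/*
 * Allocate the per-CPU "last function repeats" buffer lazily, the first
 * time the no-repeats option is enabled for this trace instance.
 */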
static bool handle_func_repeats(struct trace_array *tr, u32 flags_val)
{
	if (!tr->last_func_repeats &&
	    (flags_val & TRACE_FUNC_OPT_NO_REPEATS)) {
		tr->last_func_repeats = alloc_percpu(struct trace_func_repeats);
		if (!tr->last_func_repeats)
			return false;
	}

	return true;
}

static int function_trace_init(struct trace_array *tr)
{
	ftrace_func_t func;

	/*
	 * Instance trace_arrays get their ops allocated at instance
	 * creation, unless that allocation failed.
	 */
	if (!tr->ops)
		return -ENOMEM;

	func = select_trace_function(func_flags.val);
	if (!func)
		return -EINVAL;

	if (!handle_func_repeats(tr, func_flags.val))
		return -ENOMEM;

	ftrace_init_array_ops(tr, func);

	tr->array_buffer.cpu = raw_smp_processor_id();

	tracing_start_cmdline_record();
	tracing_start_function_trace(tr);
	return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace(tr);
	tracing_stop_cmdline_record();
	ftrace_reset_array_ops(tr);
}

static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->array_buffer);
}

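/*
 * The default per-instance callback: record one function entry in the
 * ring buffer, guarded by the ftrace recursion lock and the per-CPU
 * "disabled" counter.
 */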
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;
	int bit;
	int cpu;

	if (unlikely(!tr->function_enabled))
		return;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	trace_ctx = tracing_gen_ctx();
	preempt_disable_notrace();

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	if (!atomic_read(&data->disabled))
		trace_function(tr, ip, parent_ip, trace_ctx);

	ftrace_test_recursion_unlock(bit);
	preempt_enable_notrace();
}

#ifdef CONFIG_UNWINDER_ORC
/*
 * With the ORC unwinder, fewer internal frames show up on the stack,
 * so skip only the tracing callback itself and the ftrace trampoline.
 */
#define STACK_SKIP 2
#else
/*
 * Skip the internal frames (__trace_stack(), function_stack_trace_call()
 * and the ftrace trampoline) so the stack trace starts at the traced
 * function.
 */
#define STACK_SKIP 3
#endif

static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	unsigned int trace_ctx;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Interrupts are disabled for the whole update so that the
	 * per-CPU "disabled" counter also acts as recursion protection:
	 * a nested entry sees disabled > 1 and records nothing.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		trace_ctx = tracing_gen_ctx_flags(flags);
		trace_function(tr, ip, parent_ip, trace_ctx);
		__trace_stack(tr, trace_ctx, STACK_SKIP);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

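/*
 * is_repeat_check() returns true when this call repeats the previously
 * traced ip/parent_ip pair; in that case only the timestamp and the repeat
 * counter are updated. process_repeats() flushes a pending repeat count as
 * a single event and records the new ip/parent_ip pair.
 */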
static inline bool is_repeat_check(struct trace_array *tr,
				   struct trace_func_repeats *last_info,
				   unsigned long ip, unsigned long parent_ip)
{
	if (last_info->ip == ip &&
	    last_info->parent_ip == parent_ip &&
	    last_info->count < U16_MAX) {
		last_info->ts_last_call =
			ring_buffer_time_stamp(tr->array_buffer.buffer);
		last_info->count++;
		return true;
	}

	return false;
}

static inline void process_repeats(struct trace_array *tr,
				   unsigned long ip, unsigned long parent_ip,
				   struct trace_func_repeats *last_info,
				   unsigned int trace_ctx)
{
	if (last_info->count) {
		trace_last_func_repeats(tr, last_info, trace_ctx);
		last_info->count = 0;
	}

	last_info->ip = ip;
	last_info->parent_ip = parent_ip;
}

static void
function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *op,
			       struct ftrace_regs *fregs)
{
	struct trace_func_repeats *last_info;
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;
	unsigned long flags;
	int bit;
	int cpu;

	if (unlikely(!tr->function_enabled))
		return;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	preempt_disable_notrace();

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	if (atomic_read(&data->disabled))
		goto out;

	/*
	 * An interrupt may happen at any place here. But as far as we can
	 * tell, the only damage it can cause is to mess up the repetition
	 * counter, without any valuable data being lost.
	 */
	last_info = per_cpu_ptr(tr->last_func_repeats, cpu);
	if (is_repeat_check(tr, last_info, ip, parent_ip))
		goto out;

	local_save_flags(flags);
	trace_ctx = tracing_gen_ctx_flags(flags);
	process_repeats(tr, ip, parent_ip, last_info, trace_ctx);

	trace_function(tr, ip, parent_ip, trace_ctx);

out:
	ftrace_test_recursion_unlock(bit);
	preempt_enable_notrace();
}

static void
function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
				     struct ftrace_ops *op,
				     struct ftrace_regs *fregs)
{
	struct trace_func_repeats *last_info;
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	unsigned int trace_ctx;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Interrupts are disabled for the whole update so that the
	 * per-CPU "disabled" counter also acts as recursion protection.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		last_info = per_cpu_ptr(tr->last_func_repeats, cpu);
		if (is_repeat_check(tr, last_info, ip, parent_ip))
			goto out;

		trace_ctx = tracing_gen_ctx_flags(flags);
		process_repeats(tr, ip, parent_ip, last_info, trace_ctx);

		trace_function(tr, ip, parent_ip, trace_ctx);
		__trace_stack(tr, trace_ctx, STACK_SKIP);
	}

 out:
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ TRACER_OPT(func-no-repeats, TRACE_FUNC_OPT_NO_REPEATS) },
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = TRACE_FUNC_NO_OPTS, /* all options off by default */
	.opts = func_opts
};

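/*
 * Clear function_enabled before registering the ops so that the callback
 * does not record anything until registration has fully completed.
 */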
static void tracing_start_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	register_ftrace_function(tr->ops);
	tr->function_enabled = 1;
}

static void tracing_stop_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	unregister_ftrace_function(tr->ops);
}

static struct tracer function_trace;

static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	ftrace_func_t func;
	u32 new_flags;

	/* Do nothing if the flag is already in the requested state. */
	if (!!set == !!(func_flags.val & bit))
		return 0;

	/* We can change this flag only when not running. */
	if (tr->current_trace != &function_trace)
		return 0;

	new_flags = (func_flags.val & ~bit) | (set ? bit : 0);
	func = select_trace_function(new_flags);
	if (!func)
		return -EINVAL;

	/* Check if there's anything to change. */
	if (tr->ops->func == func)
		return 0;

	if (!handle_func_repeats(tr, new_flags))
		return -ENOMEM;

	unregister_ftrace_function(tr->ops);
	tr->ops->func = func;
	register_ftrace_function(tr->ops);

	return 0;
}

static struct tracer function_trace __tracer_data =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
	.allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};

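/*
 * The probes below back the traceon/traceoff/stacktrace/dump/cpudump
 * commands that can be attached to functions through set_ftrace_filter,
 * optionally limited by a ":count" argument.
 */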
#ifdef CONFIG_DYNAMIC_FTRACE
static void update_traceon_count(struct ftrace_probe_ops *ops,
				 unsigned long ip,
				 struct trace_array *tr, bool on,
				 void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;

	/*
	 * Tracing gets disabled (or enabled) once per count.
	 * This function can be called at the same time on multiple CPUs.
	 * It is fine if both disable (or enable) tracing, as disabling
	 * (or enabling) the second time doesn't do anything as the
	 * state of the tracer is already disabled (or enabled).
	 * What needs to be synchronized is that the count only gets
	 * decremented once, even if tracing is switched twice, as the
	 * second switch is really a nop.
	 *
	 * The memory barriers make that possible: the count is read
	 * before the tracer state is checked (smp_rmb()), and the new
	 * tracer state is made visible before the count is updated
	 * (smp_wmb()), so a CPU racing with us sees the new state and
	 * returns without touching the count again.
	 *
	 * Note, there is no synchronization between this and a user
	 * setting the tracing_on file. But we currently don't care
	 * about that.
	 */
	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
	old_count = *count;

	if (old_count <= 0)
		return;

	/* Make sure we see count before checking tracing state */
	smp_rmb();

	if (on == !!tracer_tracing_is_on(tr))
		return;

	if (on)
		tracer_tracing_on(tr);
	else
		tracer_tracing_off(tr);

	/* Make sure tracing state is visible before updating count */
	smp_wmb();

	*count = old_count - 1;
}

static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	update_traceon_count(ops, ip, tr, 1, data);
}

static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
		      struct trace_array *tr, struct ftrace_probe_ops *ops,
		      void *data)
{
	update_traceon_count(ops, ip, tr, 0, data);
}

static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip,
	       struct trace_array *tr, struct ftrace_probe_ops *ops,
	       void *data)
{
	if (tracer_tracing_is_on(tr))
		return;

	tracer_tracing_on(tr);
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
		struct trace_array *tr, struct ftrace_probe_ops *ops,
		void *data)
{
	if (!tracer_tracing_is_on(tr))
		return;

	tracer_tracing_off(tr);
}

#ifdef CONFIG_UNWINDER_ORC
/*
 * With the ORC unwinder, fewer internal ftrace/probe frames show up on
 * the stack, so skip fewer entries before the traced function's caller.
 */
#define FTRACE_STACK_SKIP 3
#else
/*
 * Skip the internal frames (__trace_stack(), ftrace_stacktrace(), the
 * probe dispatch and the ftrace trampoline) so the stack trace starts
 * at the function the probe was hit in.
 */
#define FTRACE_STACK_SKIP 5
#endif

static __always_inline void trace_stack(struct trace_array *tr)
{
	unsigned int trace_ctx;

	trace_ctx = tracing_gen_ctx();

	__trace_stack(tr, trace_ctx, FTRACE_STACK_SKIP);
}

static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	trace_stack(tr);
}

static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
			struct trace_array *tr, struct ftrace_probe_ops *ops,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;
	long new_count;

	if (!tracing_is_on())
		return;

	/* No mapper means no count was given: stacktrace every time. */
	if (!mapper) {
		trace_stack(tr);
		return;
	}

	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	/*
	 * Only trigger the number of stack traces the user asked for.
	 * The cmpxchg loop makes the check and the decrement race-free
	 * between CPUs.
	 */
	do {
		old_count = *count;

		if (!old_count)
			return;

		new_count = old_count - 1;
		new_count = cmpxchg(count, old_count, new_count);
		if (new_count == old_count)
			trace_stack(tr);

		if (!tracing_is_on())
			return;

	} while (new_count != old_count);
}

static int update_count(struct ftrace_probe_ops *ops, unsigned long ip,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count) {
		if (*count <= 0)
			return 0;
		(*count)--;
	}

	return 1;
}

static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ALL);
}

/* Only dump the buffer of the CPU that hit the probe. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ORIG);
}

static int
ftrace_probe_print(const char *name, struct seq_file *m,
		   unsigned long ip, struct ftrace_probe_ops *ops,
		   void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	seq_printf(m, "%ps:%s", (void *)ip, name);

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count)
		seq_printf(m, ":count=%ld\n", *count);
	else
		seq_puts(m, ":unlimited\n");

	return 0;
}

static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops,
		     void *data)
{
	return ftrace_probe_print("traceon", m, ip, ops, data);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceoff", m, ip, ops, data);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("stacktrace", m, ip, ops, data);
}

static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
		  struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("dump", m, ip, ops, data);
}

static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("cpudump", m, ip, ops, data);
}

static int
ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *init_data, void **data)
{
	struct ftrace_func_mapper *mapper = *data;

	if (!mapper) {
		mapper = allocate_ftrace_func_mapper();
		if (!mapper)
			return -ENOMEM;
		*data = mapper;
	}

	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
}

static void
ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *data)
{
	struct ftrace_func_mapper *mapper = data;

	if (!ip) {
		free_ftrace_func_mapper(mapper, NULL);
		return;
	}

	ftrace_func_mapper_remove_ip(mapper, ip);
}

static struct ftrace_probe_ops traceon_count_probe_ops = {
	.func			= ftrace_traceon_count,
	.print			= ftrace_traceon_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
	.func			= ftrace_traceoff_count,
	.print			= ftrace_traceoff_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
	.func			= ftrace_stacktrace_count,
	.print			= ftrace_stacktrace_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops dump_probe_ops = {
	.func			= ftrace_dump_probe,
	.print			= ftrace_dump_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops cpudump_probe_ops = {
	.func			= ftrace_cpudump_probe,
	.print			= ftrace_cpudump_print,
};

static struct ftrace_probe_ops traceon_probe_ops = {
	.func			= ftrace_traceon,
	.print			= ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func			= ftrace_traceoff,
	.print			= ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
	.func			= ftrace_stacktrace,
	.print			= ftrace_stacktrace_print,
};

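/*
 * Common handling for the probe commands above: a leading '!' removes an
 * existing probe, and an optional ":count" argument limits how many times
 * the probe fires.
 */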
static int
ftrace_trace_probe_callback(struct trace_array *tr,
			    struct ftrace_probe_ops *ops,
			    struct ftrace_hash *hash, char *glob,
			    char *cmd, char *param, int enable)
{
	void *count = (void *)-1;
	char *number;
	int ret;

	/* These probes only work with set_ftrace_filter, not set_ftrace_notrace. */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!')
		return unregister_ftrace_function_probe_func(glob+1, tr, ops);

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, tr, ops, count);

	return ret < 0 ? ret : 0;
}

static int
ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	/* Both the traceon and traceoff commands are registered to this callback. */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
	else
		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
		     char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &dump_probe_ops;

	/* Only dump once (a count of 1 is forced). */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}

static int
ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
			char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &cpudump_probe_ops;

	/* Only dump once (a count of 1 is forced). */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}

static struct ftrace_func_command ftrace_traceon_cmd = {
	.name			= "traceon",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name			= "traceoff",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
	.name			= "stacktrace",
	.func			= ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
	.name			= "dump",
	.func			= ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
	.name			= "cpudump",
	.func			= ftrace_cpudump_callback,
};

static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		goto out_free_traceoff;

	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
	if (ret)
		goto out_free_traceon;

	ret = register_ftrace_command(&ftrace_dump_cmd);
	if (ret)
		goto out_free_stacktrace;

	ret = register_ftrace_command(&ftrace_cpudump_cmd);
	if (ret)
		goto out_free_dump;

	return 0;

 out_free_dump:
	unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
	unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
	unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
	unregister_ftrace_command(&ftrace_traceoff_cmd);

	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

__init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}