1
2
3#include <linux/stringify.h>
4#include <linux/kthread.h>
5#include <linux/delay.h>
6
7static inline int trace_valid_entry(struct trace_entry *entry)
8{
9 switch (entry->type) {
10 case TRACE_FN:
11 case TRACE_CTX:
12 case TRACE_WAKE:
13 case TRACE_STACK:
14 case TRACE_PRINT:
15 case TRACE_SPECIAL:
16 case TRACE_BRANCH:
17 case TRACE_GRAPH_ENT:
18 case TRACE_GRAPH_RET:
19 case TRACE_HW_BRANCHES:
20 return 1;
21 }
22 return 0;
23}
24
25static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
26{
27 struct ring_buffer_event *event;
28 struct trace_entry *entry;
29 unsigned int loops = 0;
30
31 while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
32 entry = ring_buffer_event_data(event);
33
34
35
36
37
38
39 if (loops++ > trace_buf_size) {
40 printk(KERN_CONT ".. bad ring buffer ");
41 goto failed;
42 }
43 if (!trace_valid_entry(entry)) {
44 printk(KERN_CONT ".. invalid entry %d ",
45 entry->type);
46 goto failed;
47 }
48 }
49 return 0;
50
51 failed:
52
53 tracing_disabled = 1;
54 printk(KERN_CONT ".. corrupted trace buffer .. ");
55 return -1;
56}
57
58
59
60
61
/*
 * Sanity-check the contents of @tr's ring buffer across all possible
 * CPUs and optionally report the number of entries through @count.
 * Returns 0 if every entry is valid, -1 otherwise.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	__raw_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);

	/*
	 * trace_test_buffer_cpu() loops until the buffer is empty.
	 * If the calling tracer is broken and keeps filling the
	 * buffer, that loop would never terminate and hard-lock the
	 * box. Disable the ring buffer while consuming to prevent
	 * that.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	tracing_on();
	__raw_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}
95
/* Common warning emitted when a tracer's init callback fails. */
static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
	       trace->name, init_ret);
}
101#ifdef CONFIG_FUNCTION_TRACER
102
103#ifdef CONFIG_DYNAMIC_FTRACE
104
105
/*
 * Selftest for dynamic ftrace function filtering: set a filter that
 * matches only DYN_FTRACE_TEST_NAME, call @func (which must invoke
 * that function), and verify exactly one entry lands in the buffer.
 * Returns 0 on success, nonzero on failure.
 */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The previous (function tracer) test passed; continue its output. */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/*
	 * Call the test function once via the passed-in pointer so the
	 * compiler cannot optimize the call away.
	 */
	func();

	/*
	 * Prefix the name with '*' so the filter still matches on
	 * architectures that prepend characters to function names.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_filter(func_name, strlen(func_name), 1);

	/* start the tracer */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* Nothing but msleep ran, so the buffer should be empty. */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again, this time it should be traced */
	func();

	/* sleep again to let the trace be recorded */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	/* we should only have the one filtered call in the buffer */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Clear the filter: enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
189#else
190# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
191#endif
192
193
194
195
196
197
/*
 * Simple verification test of the ftrace function tracer: enable it,
 * sleep for 1/10 of a second, then check the trace buffer holds valid
 * entries. On success, chains into the dynamic-ftrace selftest.
 * Returns 0 on success, nonzero on failure.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded at least once */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
249#endif
250
251
252#ifdef CONFIG_FUNCTION_GRAPH_TRACER
253
254
255#define GRAPH_MAX_FUNC_TEST 100000000
256
257static void __ftrace_dump(bool disable_tracing);
258static unsigned int graph_hang_thresh;
259
260
261static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
262{
263
264 if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
265 ftrace_graph_stop();
266 printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
267 if (ftrace_dump_on_oops)
268 __ftrace_dump(false);
269 return 0;
270 }
271
272 return trace_graph_entry(trace);
273}
274
275
276
277
278
/*
 * Selftest for the function graph tracer; mirrors the function tracer
 * selftest, but registers a watchdog entry callback so a hang in the
 * graph tracer can be detected and recovered from.
 * Returns 0 on success, nonzero on failure.
 */
int
trace_selftest_startup_function_graph(struct tracer *trace,
					struct trace_array *tr)
{
	int ret;
	unsigned long count;

	/*
	 * Simulate the tracer's init() callback, but attach the
	 * watchdog callback to detect and recover from possible hangs.
	 */
	tracing_reset_online_cpus(tr);
	set_graph_array(tr);
	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry_watchdog);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for 1/10 of a second to record some entries */
	msleep(100);

	/* Did the watchdog trip while we slept? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		tracing_selftest_disabled = true;
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing; the function tracer selftest already did. */

out:
	/* Stop the graph tracer if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
333#endif
334
335
336#ifdef CONFIG_IRQSOFF_TRACER
/*
 * Selftest for the irqsoff tracer: disable interrupts for 100us and
 * verify both the live buffer and the max-latency buffer recorded
 * valid entries. Returns 0 on success, nonzero on failure.
 */
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency so our udelay window becomes the max */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/*
	 * Stop the tracer before tracing_stop(): tracing_stop()
	 * disables the tr and max buffers, which makes the buffer
	 * flipping done on a new max latency fail, and would trigger
	 * a warning if another irqs-off section raced with us here.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
383#endif
384
385#ifdef CONFIG_PREEMPT_TRACER
/*
 * Selftest for the preemptoff tracer: disable preemption for 100us
 * and verify both the live buffer and the max-latency buffer recorded
 * valid entries. Returns 0 on success, nonzero on failure.
 */
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * If preemption is already disabled on entry (e.g. we were
	 * called with a lock held), our preempt_disable() below would
	 * not open a fresh preempt-off section and the test cannot
	 * run meaningfully, so just pass it.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency so our udelay window becomes the max */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/*
	 * Stop the tracer before tracing_stop(): tracing_stop()
	 * disables the tr and max buffers, which makes the buffer
	 * flipping done on a new max latency fail, and would trigger
	 * a warning if another preempt-off section raced with us here.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
445#endif
446
447#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
448int
449trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
450{
451 unsigned long save_max = tracing_max_latency;
452 unsigned long count;
453 int ret;
454
455
456
457
458
459
460
461
462
463 if (preempt_count()) {
464 printk(KERN_CONT "can not test ... force ");
465 return 0;
466 }
467
468
469 ret = tracer_init(trace, tr);
470 if (ret) {
471 warn_failed_init_tracer(trace, ret);
472 goto out_no_start;
473 }
474
475
476 tracing_max_latency = 0;
477
478
479 preempt_disable();
480 local_irq_disable();
481 udelay(100);
482 preempt_enable();
483
484 local_irq_enable();
485
486
487
488
489
490
491
492 trace->stop(tr);
493
494 tracing_stop();
495
496 ret = trace_test_buffer(tr, NULL);
497 if (ret)
498 goto out;
499
500 ret = trace_test_buffer(&max_tr, &count);
501 if (ret)
502 goto out;
503
504 if (!ret && !count) {
505 printk(KERN_CONT ".. no entries found ..");
506 ret = -1;
507 goto out;
508 }
509
510
511 tracing_max_latency = 0;
512 tracing_start();
513 trace->start(tr);
514
515 preempt_disable();
516 local_irq_disable();
517 udelay(100);
518 preempt_enable();
519
520 local_irq_enable();
521
522 trace->stop(tr);
523
524 tracing_stop();
525
526 ret = trace_test_buffer(tr, NULL);
527 if (ret)
528 goto out;
529
530 ret = trace_test_buffer(&max_tr, &count);
531
532 if (!ret && !count) {
533 printk(KERN_CONT ".. no entries found ..");
534 ret = -1;
535 goto out;
536 }
537
538out:
539 tracing_start();
540out_no_start:
541 trace->reset(tr);
542 tracing_max_latency = save_max;
543
544 return ret;
545}
546#endif
547
548#ifdef CONFIG_NOP_TRACER
/* The nop tracer records nothing, so there is nothing to verify. */
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* Nothing to do, always pass. */
	return 0;
}
555#endif
556
557#ifdef CONFIG_SCHED_TRACER
558static int trace_wakeup_test_thread(void *data)
559{
560
561 struct sched_param param = { .sched_priority = 5 };
562 struct completion *x = data;
563
564 sched_setscheduler(current, SCHED_FIFO, ¶m);
565
566
567 complete(x);
568
569
570 set_current_state(TASK_INTERRUPTIBLE);
571 schedule();
572
573
574 while (!kthread_should_stop()) {
575
576
577
578
579 msleep(100);
580 }
581
582 return 0;
583}
584
/*
 * Selftest for the wakeup tracer: spawn an RT kthread, wake it while
 * tracing, and verify both the live buffer and the max-latency buffer
 * recorded valid entries. Returns 0 on success, nonzero on failure.
 */
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create the RT test thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* wait until the thread runs at RT priority */
	wait_for_completion(&isrt);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency so our wakeup becomes the max */
	tracing_max_latency = 0;

	/* sleep to let the test thread go back to sleep too */
	msleep(100);

	/*
	 * Slightly racy: if the thread somehow did not schedule within
	 * the 100ms above, we would wake a task that is already awake
	 * and record nothing. That is extremely unlikely, and the
	 * worst outcome is a failed selftest.
	 */
	wake_up_process(p);

	/* give the thread a little time to actually wake up */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();

	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);


	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the test thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
658#endif
659
660#ifdef CONFIG_CONTEXT_SWITCH_TRACER
/*
 * Selftest for the sched_switch tracer: trace for 1/10 of a second
 * (the msleep guarantees at least one context switch) and verify the
 * buffer holds valid entries. Returns 0 on success, nonzero on failure.
 */
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second; this forces a context switch */
	msleep(100);

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
690#endif
691
692#ifdef CONFIG_SYSPROF_TRACER
/*
 * Selftest for the sysprof tracer: trace for 1/10 of a second and
 * verify the buffer holds valid entries.
 * Returns 0 on success, nonzero on failure.
 */
int
trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second to collect some samples */
	msleep(100);

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
722#endif
723
724#ifdef CONFIG_BRANCH_TRACER
/*
 * Selftest for the branch tracer: trace for 1/10 of a second and
 * verify the buffer holds valid entries.
 * Returns 0 on success, nonzero on failure.
 */
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second to record some branches */
	msleep(100);

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
754#endif
755
756#ifdef CONFIG_HW_BRANCH_TRACER
757int
758trace_selftest_startup_hw_branches(struct tracer *trace,
759 struct trace_array *tr)
760{
761 struct trace_iterator *iter;
762 struct tracer tracer;
763 unsigned long count;
764 int ret;
765
766 if (!trace->open) {
767 printk(KERN_CONT "missing open function...");
768 return -1;
769 }
770
771 ret = tracer_init(trace, tr);
772 if (ret) {
773 warn_failed_init_tracer(trace, ret);
774 return ret;
775 }
776
777
778
779
780
781 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
782 if (!iter)
783 return -ENOMEM;
784
785 memcpy(&tracer, trace, sizeof(tracer));
786
787 iter->trace = &tracer;
788 iter->tr = tr;
789 iter->pos = -1;
790 mutex_init(&iter->mutex);
791
792 trace->open(iter);
793
794 mutex_destroy(&iter->mutex);
795 kfree(iter);
796
797 tracing_stop();
798
799 ret = trace_test_buffer(tr, &count);
800 trace->reset(tr);
801 tracing_start();
802
803 if (!ret && !count) {
804 printk(KERN_CONT "no entries found..");
805 ret = -1;
806 }
807
808 return ret;
809}
810#endif
811