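/*
 * Function graph tracer.
 *
 * Records both the entry and the exit of every traced function, so the
 * output can be rendered as an indented call graph annotated with timings.
 * (Descriptive header reconstructed; the original file comment was lost.)
 */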
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

static bool kill_ftrace_graph;
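
/**
 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
 *
 * ftrace_graph_stop() is called when a severe error is detected in
 * the function graph tracing. This function is called by the critical
 * paths of function graph to keep those paths from doing any more harm.
 */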
bool ftrace_graph_is_dead(void)
{
	return kill_ftrace_graph;
}
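
/**
 * ftrace_graph_stop - permanently disable function graph tracing
 *
 * Called when a severe error is detected in function graph tracing.
 * Tracing is stopped to keep the broken state from doing any more harm;
 * callers usually pair this with a WARN_ON() so the user at least gets
 * a warning about why.
 */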
void ftrace_graph_stop(void)
{
	kill_ftrace_graph = true;
}

/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

struct fgraph_cpu_data {
	pid_t		last_pid;
	int		depth;
	int		depth_irq;
	int		ignore;
	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
	struct fgraph_cpu_data __percpu *cpu_data;

	/* Place to preserve the last processed entry. */
	struct ftrace_graph_ent_entry	ent;
	struct ftrace_graph_ret_entry	ret;
	int				failed;
	int				cpu;
};

#define TRACE_GRAPH_INDENT	2

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN	0x1
#define TRACE_GRAPH_PRINT_CPU		0x2
#define TRACE_GRAPH_PRINT_OVERHEAD	0x4
#define TRACE_GRAPH_PRINT_PROC		0x8
#define TRACE_GRAPH_PRINT_DURATION	0x10
#define TRACE_GRAPH_PRINT_ABS_TIME	0x20
#define TRACE_GRAPH_PRINT_IRQS		0x40

unsigned int fgraph_max_depth;

static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debug purposes) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU ? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display Overhead ? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	/* Display interrupts */
	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	/* Don't display overruns and proc by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS,
	.opts = trace_opts
};

static struct trace_array *graph_array;
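
/*
 * DURATION column is being also used to display IRQ signs,
 * following values are used by print_graph_irq and others
 * to fill in space into DURATION column.
 */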
enum {
	DURATION_FILL_FULL  = -1,
	DURATION_FILL_START = -2,
	DURATION_FILL_END   = -3,
};

static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s,
		     u32 flags);
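
/* Add a function return address to the trace stack on thread info. */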
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer, unsigned long *retp)
{
	unsigned long long calltime;
	int index;

	if (unlikely(ftrace_graph_is_dead()))
		return -EBUSY;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	calltime = trace_clock_local();

	index = ++current->curr_ret_stack;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
	current->ret_stack[index].subtime = 0;
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	current->ret_stack[index].fp = frame_pointer;
#endif
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
	current->ret_stack[index].retp = retp;
#endif
	*depth = index;

	return 0;
}
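
/* Retrieve a function return address from the trace stack on thread info. */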
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	if (unlikely(index < 0)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have no where to go */
		*ret = (unsigned long)panic;
		return;
	}

#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the place holder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 with optimize for size (-Os) makes the latest
	 * gcc do the above.
	 *
	 * Note, -mfentry does not use frame pointers, and this test
	 * is not needed if CC_USING_FENTRY is set.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
}
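
/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */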
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
	trace.rettime = trace_clock_local();
	barrier();
	current->curr_ret_stack--;

	/*
	 * The entry has already been copied into @trace by
	 * ftrace_pop_return_trace(), so the stack slot can be released
	 * (curr_ret_stack decremented) before the return callback runs;
	 * an interrupt arriving here simply reuses the freed slot.
	 */
	ftrace_graph_return(&trace);

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}
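
/**
 * ftrace_graph_ret_addr - convert a potentially modified stack return address
 *			   to its original value
 *
 * This may be called by either the address conversion code or the stack
 * unwinder to convert a function graph tracer modified return address
 * (return_to_handler) on the stack back to its original value, so that
 * stack unwinding can proceed as normal.
 *
 * With HAVE_FUNCTION_GRAPH_RET_ADDR_PTR the lookup matches the recorded
 * location of the return address on the stack; otherwise the best we can
 * do is walk the return stack with a per-unwind index in @idx.
 */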
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp)
{
	int index = task->curr_ret_stack;
	int i;

	if (ret != (unsigned long)return_to_handler)
		return ret;

	if (index < 0)
		return ret;

	for (i = 0; i <= index; i++)
		if (task->ret_stack[i].retp == retp)
			return task->ret_stack[i].ret;

	return ret;
}
#else
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp)
{
	int task_idx;

	if (ret != (unsigned long)return_to_handler)
		return ret;

	task_idx = task->curr_ret_stack;

	if (!task->ret_stack || task_idx < *idx)
		return ret;

	task_idx -= *idx;
	(*idx)++;

	return task->ret_stack[task_idx].ret;
}
#endif

int __trace_graph_entry(struct trace_array *tr,
			struct ftrace_graph_ent *trace,
			unsigned long flags,
			int pc)
{
	struct ftrace_event_call *call = &event_funcgraph_entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ent_entry *entry;

	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return 0;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
					  sizeof(*entry), flags, pc);
	if (!event)
		return 0;
	entry = ring_buffer_event_data(event);
	entry->graph_ent = *trace;
	if (!filter_current_check_discard(buffer, call, entry, event))
		__buffer_unlock_commit(buffer, event);

	return 1;
}

static inline int ftrace_graph_ignore_irqs(void)
{
	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
		return 0;

	return in_irq();
}

int trace_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int ret;
	int cpu;
	int pc;

	if (!ftrace_trace_task(current))
		return 0;

	/* trace it when it is-nested-in or is a function enabled. */
	if ((!(trace->depth || ftrace_graph_addr(trace->func)) ||
	     ftrace_graph_ignore_irqs()) ||
	    (fgraph_max_depth && trace->depth >= fgraph_max_depth))
		return 0;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		ret = __trace_graph_entry(tr, trace, flags, pc);
	} else {
		ret = 0;
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return ret;
}

int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
{
	if (tracing_thresh)
		return 1;
	else
		return trace_graph_entry(trace);
}

static void
__trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long flags, int pc)
{
	u64 time = trace_clock_local();
	struct ftrace_graph_ent ent = {
		.func  = ip,
		.depth = 0,
	};
	struct ftrace_graph_ret ret = {
		.func     = ip,
		.depth    = 0,
		.calltime = time,
		.rettime  = time,
	};

	__trace_graph_entry(tr, &ent, flags, pc);
	__trace_graph_return(tr, &ret, flags, pc);
}

void
trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long parent_ip,
		unsigned long flags, int pc)
{
	__trace_graph_function(tr, ip, flags, pc);
}

void __trace_graph_return(struct trace_array *tr,
			struct ftrace_graph_ret *trace,
			unsigned long flags,
			int pc)
{
	struct ftrace_event_call *call = &event_funcgraph_exit;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ret_entry *entry;

	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ret = *trace;
	if (!filter_current_check_discard(buffer, call, entry, event))
		__buffer_unlock_commit(buffer, event);
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, trace, flags, pc);
	}
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
	graph_array = tr;

	/* Make graph_array visible before we start tracing */
	smp_mb();
}

void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
	if (tracing_thresh &&
	    (trace->rettime - trace->calltime < tracing_thresh))
		return;
	else
		trace_graph_return(trace);
}

static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	set_graph_array(tr);
	if (tracing_thresh)
		ret = register_ftrace_graph(&trace_graph_thresh_return,
					    &trace_graph_thresh_entry);
	else
		ret = register_ftrace_graph(&trace_graph_return,
					    &trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}

static int max_bytes_for_cpu;

static enum print_line_t
print_graph_cpu(struct trace_seq *s, int cpu)
{
	int ret;

	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	ret = trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

#define TRACE_GRAPH_PROCINFO_LENGTH	14

static enum print_line_t
print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int ret;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%s-%s", comm, pid_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	if (!trace_seq_putc(s, ' '))
		return 0;

	return trace_print_lat_fmt(s, entry);
}

/* If the pid changed since the last trace, output this event */
static enum print_line_t
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;
	int ret;

	if (!data)
		return TRACE_TYPE_HANDLED;

	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

	if (*last_pid == pid)
		return TRACE_TYPE_HANDLED;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return TRACE_TYPE_HANDLED;

	/*
	 * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

	 */
	ret = trace_seq_puts(s,
		" ------------------------------------------\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_cpu(s, cpu);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, prev_pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_puts(s, " => ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_puts(s,
		"\n ------------------------------------------\n\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent;
		next = &data->ret;
	} else {

		ring_iter = trace_buffer_iter(iter, iter->cpu);

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
					    NULL, NULL);
			event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
						 NULL, NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			data->ent = *curr;
			/*
			 * If the next event is not a return type, then
			 * we only care about what type it is. Otherwise we can
			 * safely copy the entire event.
			 */
			if (next->ent.type == TRACE_GRAPH_RET)
				data->ret = *next;
			else
				data->ret.ent.type = next->ent.type;
		}
	}

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
	    curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_read(ring_iter, NULL);

	return next;
}

static int print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	return trace_seq_printf(s, "%5lu.%06lu |  ",
			(unsigned long)t, usecs_rem);
}

static enum print_line_t
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid, u32 flags)
{
	int ret;
	struct trace_seq *s = &iter->seq;

	if (addr < (unsigned long)__irqentry_text_start ||
	    addr >= (unsigned long)__irqentry_text_end)
		return TRACE_TYPE_UNHANDLED;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		/* Absolute time */
		if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
			ret = print_graph_abs_time(iter->ts, s);
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}

		/* Cpu */
		if (flags & TRACE_GRAPH_PRINT_CPU) {
			ret = print_graph_cpu(s, cpu);
			if (ret == TRACE_TYPE_PARTIAL_LINE)
				return TRACE_TYPE_PARTIAL_LINE;
		}

		/* Proc */
		if (flags & TRACE_GRAPH_PRINT_PROC) {
			ret = print_graph_proc(s, pid);
			if (ret == TRACE_TYPE_PARTIAL_LINE)
				return TRACE_TYPE_PARTIAL_LINE;
			ret = trace_seq_puts(s, " | ");
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}
	}

	/* No overhead */
	ret = print_graph_duration(DURATION_FILL_START, s, flags);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	if (type == TRACE_GRAPH_ENT)
		ret = trace_seq_puts(s, "==========>");
	else
		ret = trace_seq_puts(s, "<==========");

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_duration(DURATION_FILL_END, s, flags);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	ret = trace_seq_putc(s, '\n');

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
	return TRACE_TYPE_HANDLED;
}

enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char msecs_str[21];
	char nsecs_str[5];
	int ret, len;
	int i;

	sprintf(msecs_str, "%lu", (unsigned long) duration);

	/* Print the integer part of the duration (in usecs) */
	ret = trace_seq_printf(s, "%s", msecs_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	len = strlen(msecs_str);

	/* Print nsecs (we don't want to exceed 7 numbers) */
	if (len < 7) {
		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
		ret = trace_seq_printf(s, ".%s", nsecs_str);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		len += strlen(nsecs_str);
	}

	ret = trace_seq_puts(s, " us ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 7; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s,
		     u32 flags)
{
	int ret = -1;

	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
	    !(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return TRACE_TYPE_HANDLED;

	/* No real data, just filling the column with spaces */
	switch (duration) {
	case DURATION_FILL_FULL:
		ret = trace_seq_puts(s, "              |  ");
		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
	case DURATION_FILL_START:
		ret = trace_seq_puts(s, "  ");
		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
	case DURATION_FILL_END:
		ret = trace_seq_puts(s, " |");
		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
	}

	/* Signal an overhead of time execution to the output */
	if (flags & TRACE_GRAPH_PRINT_OVERHEAD) {
		/* Duration exceeded 100 usecs */
		if (duration > 100000ULL)
			ret = trace_seq_puts(s, "! ");
		/* Duration exceeded 10 usecs */
		else if (duration > 10000ULL)
			ret = trace_seq_puts(s, "+ ");
	}

	/*
	 * The -1 means we either did not exceed the duration thresholds or
	 * we don't want to print the overhead. Either way we still need to
	 * fill out the space.
	 */
	if (ret == -1)
		ret = trace_seq_puts(s, "  ");

	/* Catch any failure that happened above */
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_print_graph_duration(duration, s);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	ret = trace_seq_puts(s, "|  ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
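
/* Case of a leaf function on its call entry */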
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry,
		struct trace_seq *s, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int ret;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
		cpu_data->depth = call->depth - 1;

		/* No need to keep this function around for this depth */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = 0;
	}

	/* Overhead and duration */
	ret = print_graph_duration(duration, s, flags);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%ps();\n", (void *)call->func);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu, u32 flags)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	int ret;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
		cpu_data->depth = call->depth;

		/* Save this function pointer to see if the exit matches it */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = call->func;
	}

	/* No time */
	ret = print_graph_duration(DURATION_FILL_FULL, s, flags);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%ps() {\n", (void *)call->func);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * we already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}

static enum print_line_t
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	int cpu = iter->cpu;
	int ret;

	/* Pid */
	if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	if (type) {
		/* Interrupt */
		ret = print_graph_irq(iter, addr, type, cpu, ent->pid, flags);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return 0;

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
		ret = print_graph_abs_time(iter->ts, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, cpu);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		ret = print_graph_proc(s, ent->pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;

		ret = trace_seq_puts(s, " | ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Latency format */
	if (trace_flags & TRACE_ITER_LATENCY_FMT) {
		ret = print_graph_lat_fmt(s, ent);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	return 0;
}
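
/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-interrupts option is set
 *  - we are not inside irq code
 */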
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
		unsigned long addr, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are inside the irq code
	 */
	if (*depth_irq >= 0)
		return 1;

	if ((addr < (unsigned long)__irqentry_text_start) ||
	    (addr >= (unsigned long)__irqentry_text_end))
		return 0;

	/*
	 * We are entering irq code.
	 */
	*depth_irq = depth;
	return 1;
}
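
/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-interrupts option is set
 *  - we are not inside irq code
 */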
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are not inside the irq code.
	 */
	if (*depth_irq == -1)
		return 0;

	/*
	 * We are inside the irq code, and this is returning entry.
	 * Let's not trace it and clear the entry depth, since
	 * we are out of irq code.
	 *
	 * This condition ensures that we 'leave' the irq code only
	 * when we return from the function that was marked as the
	 * irq entry, and not from any of its nested calls.
	 */
	if (*depth_irq >= depth) {
		*depth_irq = -1;
		return 1;
	}

	/*
	 * We are inside the irq code, and this is not the entry.
	 */
	return 1;
}

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;
	static enum print_line_t ret;
	int cpu = iter->cpu;

	if (check_irq_entry(iter, flags, call->func, call->depth))
		return TRACE_TYPE_HANDLED;

	if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
	else
		ret = print_graph_entry_nested(iter, field, s, cpu, flags);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * note of it, so that the saved entry (and its leaf return)
		 * can be printed again on the next call.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter,
		   u32 flags)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int func_match = 1;
	int ret;
	int i;

	if (check_irq_return(iter, flags, trace->depth))
		return TRACE_TYPE_HANDLED;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. This is the
		 * return from a function, we now want the comments
		 * to display at the same level of the bracket.
		 */
		cpu_data->depth = trace->depth - 1;

		if (trace->depth < FTRACE_RETFUNC_DEPTH) {
			if (cpu_data->enter_funcs[trace->depth] != trace->func)
				func_match = 0;
			cpu_data->enter_funcs[trace->depth] = 0;
		}
	}

	if (print_graph_prologue(iter, s, 0, 0, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	/* Overhead and duration */
	ret = print_graph_duration(duration, s, flags);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/*
	 * If the return function does not have a matching entry,
	 * then the entry was lost. Instead of just printing
	 * the '}' and letting the user guess what function this
	 * belongs to, write out the function name.
	 */
	if (func_match) {
		ret = trace_seq_puts(s, "}\n");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	} else {
		ret = trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Overrun */
	if (flags & TRACE_GRAPH_PRINT_OVERRUN) {
		ret = trace_seq_printf(s, " (Overruns: %lu)\n",
					trace->overrun);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
			      cpu, pid, flags);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter, u32 flags)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

	if (print_graph_prologue(iter, s, 0, 0, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	/* No time */
	ret = print_graph_duration(DURATION_FILL_FULL, s, flags);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	/* Indentation */
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) {
			ret = trace_seq_putc(s, ' ');
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}

	/* The comment */
	ret = trace_seq_puts(s, "/* ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	switch (iter->ent->type) {
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->funcs->trace(iter, sym_flags, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	/* Strip ending newline */
	if (s->buffer[s->len - 1] == '\n') {
		s->buffer[s->len - 1] = '\0';
		s->len--;
	}

	ret = trace_seq_puts(s, " */\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	struct ftrace_graph_ent_entry *field;
	struct fgraph_data *data = iter->private;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	int cpu = iter->cpu;
	int ret;

	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
		return TRACE_TYPE_HANDLED;
	}

	/*
	 * If the last output failed, there's a possibility we need
	 * to print out the missing entry which would never go out.
	 */
	if (data && data->failed) {
		field = &data->ent;
		iter->cpu = data->cpu;
		ret = print_graph_entry(field, s, iter, flags);
		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
			ret = TRACE_TYPE_NO_CONSUME;
		}
		iter->cpu = cpu;
		return ret;
	}

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		/*
		 * print_graph_entry() may consume the current event,
		 * thus @field may become invalid, so we need to save it.
		 * sizeof(struct ftrace_graph_ent_entry) is very small,
		 * it can be safely saved at the stack.
		 */
		struct ftrace_graph_ent_entry saved;
		trace_assign_type(field, entry);
		saved = *field;
		return print_graph_entry(&saved, s, iter, flags);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter, flags);
	}
	case TRACE_STACK:
	case TRACE_FN:
		/* dont trace stack and functions as comments */
		return TRACE_TYPE_UNHANDLED;

	default:
		return print_graph_comment(s, entry, iter, flags);
	}

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
			   struct trace_event *event)
{
	return print_graph_function(iter);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
	static const char spaces[] = "                "	/* 16 spaces */
		"    "					/* 4 spaces */
		"                 ";			/* 17 spaces */
	int size = 0;

	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_CPU)
		size += 4;
	if (flags & TRACE_GRAPH_PRINT_PROC)
		size += 17;

	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}

static void __print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	int lat = trace_flags & TRACE_ITER_LATENCY_FMT;

	if (lat)
		print_lat_header(s, flags);

	/* 1st line */
	seq_printf(s, "#");
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_printf(s, "     TIME       ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, " CPU");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "  TASK/PID       ");
	if (lat)
		seq_printf(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_printf(s, "  DURATION   ");
	seq_printf(s, "               FUNCTION CALLS\n");

	/* 2nd line */
	seq_printf(s, "#");
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_printf(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, " |  ");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "   |    |        ");
	if (lat)
		seq_printf(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_printf(s, "   |   |      ");
	seq_printf(s, "               |   |   |   |\n");
}

void print_graph_headers(struct seq_file *s)
{
	print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	struct trace_iterator *iter = s->private;

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (trace_flags & TRACE_ITER_LATENCY_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;

		print_trace_header(s, iter);
	}

	__print_graph_headers_flags(s, flags);
}

void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data;
	int cpu;

	iter->private = NULL;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		goto out_err;

	data->cpu_data = alloc_percpu(struct fgraph_cpu_data);
	if (!data->cpu_data)
		goto out_err_free;

	for_each_possible_cpu(cpu) {
		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

		*pid = -1;
		*depth = 0;
		*ignore = 0;
		*depth_irq = -1;
	}

	iter->private = data;

	return;

 out_err_free:
	kfree(data);
 out_err:
	pr_warning("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
	struct fgraph_data *data = iter->private;

	if (data) {
		free_percpu(data->cpu_data);
		kfree(data);
	}
}

static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_GRAPH_PRINT_IRQS)
		ftrace_graph_skip_irqs = !set;

	return 0;
}

static struct trace_event_functions graph_functions = {
	.trace		= print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
	.type		= TRACE_GRAPH_ENT,
	.funcs		= &graph_functions,
};

static struct trace_event graph_trace_ret_event = {
	.type		= TRACE_GRAPH_RET,
	.funcs		= &graph_functions
};

static struct tracer graph_trace __read_mostly = {
	.name		= "function_graph",
	.open		= graph_trace_open,
	.pipe_open	= graph_trace_open,
	.close		= graph_trace_close,
	.pipe_close	= graph_trace_close,
	.wait_pipe	= poll_wait_pipe,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.print_header	= print_graph_headers,
	.flags		= &tracer_flags,
	.set_flag	= func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function_graph,
#endif
};

static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	fgraph_max_depth = val;

	*ppos += cnt;

	return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
		 loff_t *ppos)
{
	char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
	int n;

	n = sprintf(buf, "%d\n", fgraph_max_depth);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}

static const struct file_operations graph_depth_fops = {
	.open		= tracing_open_generic,
	.write		= graph_depth_write,
	.read		= graph_depth_read,
	.llseek		= generic_file_llseek,
};
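
/*
 * Usage sketch for the file registered below (assuming debugfs is
 * mounted at /sys/kernel/debug):
 *
 *   # echo function_graph > /sys/kernel/debug/tracing/current_tracer
 *   # echo 5 > /sys/kernel/debug/tracing/max_graph_depth
 *   # cat /sys/kernel/debug/tracing/trace
 *
 * A depth of 0 (the default) means no limit; see the fgraph_max_depth
 * check in trace_graph_entry().
 */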

static __init int init_graph_debugfs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	trace_create_file("max_graph_depth", 0644, d_tracer,
			  NULL, &graph_depth_fops);

	return 0;
}
fs_initcall(init_graph_debugfs);

static __init int init_graph_trace(void)
{
	max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);

	if (!register_ftrace_event(&graph_trace_entry_event)) {
		pr_warning("Warning: could not register graph trace events\n");
		return 1;
	}

	if (!register_ftrace_event(&graph_trace_ret_event)) {
		pr_warning("Warning: could not register graph trace events\n");
		return 1;
	}

	return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);