// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

static bool kill_ftrace_graph;

/**
 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
 *
 * ftrace_graph_stop() is called when a severe error is detected in
 * the function graph tracing. This function is called by the critical
 * paths of function graph to keep those paths from doing any more harm.
 */
bool ftrace_graph_is_dead(void)
{
	return kill_ftrace_graph;
}

/**
 * ftrace_graph_stop - set to permanently disable function graph tracing
 *
 * In case of an error in function graph tracing, this is called
 * to try to keep function graph tracing from causing any more harm.
 * Usually this is pretty severe and this is called to try to at least
 * get a warning out to the user.
 */
void ftrace_graph_stop(void)
{
	kill_ftrace_graph = true;
}

/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

struct fgraph_cpu_data {
	pid_t		last_pid;
	int		depth;
	int		depth_irq;
	int		ignore;
	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
	struct fgraph_cpu_data __percpu *cpu_data;

	/* Place to preserve last processed entry */
	struct ftrace_graph_ent_entry	ent;
	struct ftrace_graph_ret_entry	ret;
	int				failed;
	int				cpu;
};

#define TRACE_GRAPH_INDENT	2

unsigned int fgraph_max_depth;
static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debug purpose) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU ? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display Overhead ? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	/* Display interrupts */
	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
	/* Display function name after trailing } */
	{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
	/* Include sleep time in function call duration */
	{ TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
	/* Include time within nested functions */
	{ TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	/* Don't display overruns, proc, or tail by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
	       TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
	.opts = trace_opts
};

static struct trace_array *graph_array;

/*
 * DURATION column is being also used to display IRQ signs,
 * following values are used by print_graph_irq and others
 * to fill in space into DURATION column.
 */
enum {
	FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags);

/* Add a function return address to the trace stack on thread info.*/
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer, unsigned long *retp)
{
	unsigned long long calltime;
	int index;

	if (unlikely(ftrace_graph_is_dead()))
		return -EBUSY;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	/*
	 * The curr_ret_stack is an index into the current task's ftrace
	 * return stack.  Its value should be in [0, FTRACE_RETFUNC_DEPTH)
	 * while the function graph tracer is in use.  To support filtering
	 * out specific functions (set_graph_notrace), the index is made
	 * negative by subtracting a huge value (FTRACE_NOTRACE_DEPTH), so
	 * that a negative index tells ftrace to ignore the record.  The
	 * index is recovered when returning from the filtered function by
	 * adding FTRACE_NOTRACE_DEPTH back, after which recording continues
	 * normally.
	 *
	 * The curr_ret_stack is initialized to -1 and only incremented in
	 * this function, so it can be less than -1 only if the function was
	 * filtered out via ftrace_graph_notrace_addr(), which can be set
	 * from the set_graph_notrace file in tracefs by the user.
	 */
	if (current->curr_ret_stack < -1)
		return -EBUSY;

	calltime = trace_clock_local();

	index = ++current->curr_ret_stack;
	if (ftrace_graph_notrace_addr(func))
		current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	current->ret_stack[index].fp = frame_pointer;
#endif
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
	current->ret_stack[index].retp = retp;
#endif
	*depth = current->curr_ret_stack;

	return 0;
}

/*
 * Not all archs define MCOUNT_INSN_SIZE, which is used to look up direct
 * functions. Those archs currently don't support direct calls anyway, so
 * ftrace_find_rec_direct() will never be reached there. Define
 * MCOUNT_INSN_SIZE only to keep the compiler happy.
 */
#ifndef MCOUNT_INSN_SIZE
/* Make sure this only works without direct calls */
# ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
#  error MCOUNT_INSN_SIZE not defined with direct calls enabled
# endif
# define MCOUNT_INSN_SIZE 0
#endif

int function_graph_enter(unsigned long ret, unsigned long func,
			 unsigned long frame_pointer, unsigned long *retp)
{
	struct ftrace_graph_ent trace;

	/*
	 * Skip graph tracing if the return location is served by a direct
	 * trampoline, since call sequence and return addresses are
	 * unpredictable anyway. For example, a BPF trampoline may call the
	 * original function and may skip frames depending on the type of
	 * BPF programs attached.
	 */
	if (ftrace_direct_func_count &&
	    ftrace_find_rec_direct(ret - MCOUNT_INSN_SIZE))
		return -EBUSY;
	trace.func = func;
	trace.depth = current->curr_ret_stack + 1;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace))
		return -EBUSY;

	return ftrace_push_return_trace(ret, func, &trace.depth,
					frame_pointer, retp);
}

/* Retrieve a function return address from the trace stack on thread info.*/
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	/*
	 * A negative index here means that it's just returned from a
	 * notrace'd function.  Recover the index to get the original
	 * return address.  See ftrace_push_return_trace().
	 *
	 * TODO: Need to check whether the stack gets corrupted.
	 */
	if (index < 0)
		index += FTRACE_NOTRACE_DEPTH;

	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return;
	}

#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the place holder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 with optimize for size (-Os) makes the latest
	 * gcc do the above.
	 *
	 * Note, -mfentry does not use frame pointers, and this test
	 * is not needed if CC_USING_FENTRY is set.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
	trace.rettime = trace_clock_local();
	barrier();
	current->curr_ret_stack--;

	/*
	 * The curr_ret_stack can be less than -1 only if it was
	 * filtered out and it's about to return from the function.
	 * Recover the index and continue to trace normal functions.
	 */
	if (current->curr_ret_stack < -1) {
		current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
		return ret;
	}

	/*
	 * The trace should run after decrementing the ret counter
	 * in case an interrupt were to come in. We don't want to
	 * lose the interrupt if max_depth is set.
	 */
	ftrace_graph_return(&trace);

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}

/**
 * ftrace_graph_ret_addr - convert a potentially modified stack return address
 *			   to its original value
 *
 * This function can be called by stack unwinding code to convert a found
 * stack return address ('ret') to its original value, in case the function
 * graph tracer has modified it to be 'return_to_handler'.  If the address
 * hasn't changed, then 'ret' is the original value.
 *
 * 'idx' is a state variable which should be initialized by the caller to
 * zero before the first call.
 *
 * 'retp' is a pointer to the return address on the stack.  It's ignored if
 * the arch doesn't have HAVE_FUNCTION_GRAPH_RET_ADDR_PTR defined.
 */
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp)
{
	int index = task->curr_ret_stack;
	int i;

	if (ret != (unsigned long)return_to_handler)
		return ret;

	if (index < -1)
		index += FTRACE_NOTRACE_DEPTH;

	if (index < 0)
		return ret;

	for (i = 0; i <= index; i++)
		if (task->ret_stack[i].retp == retp)
			return task->ret_stack[i].ret;

	return ret;
}
#else
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp)
{
	int task_idx;

	if (ret != (unsigned long)return_to_handler)
		return ret;

	task_idx = task->curr_ret_stack;

	if (!task->ret_stack || task_idx < *idx)
		return ret;

	task_idx -= *idx;
	(*idx)++;

	return task->ret_stack[task_idx].ret;
}
#endif

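/*
 * Write a function-entry event for @trace into the trace array's ring
 * buffer. Returns 1 if the event was recorded, 0 if the ring buffer had
 * no room for it.
 */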
int __trace_graph_entry(struct trace_array *tr,
				struct ftrace_graph_ent *trace,
				unsigned long flags,
				int pc)
{
	struct trace_event_call *call = &event_funcgraph_entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ent_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
					  sizeof(*entry), flags, pc);
	if (!event)
		return 0;
	entry = ring_buffer_event_data(event);
	entry->graph_ent = *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit_nostack(buffer, event);

	return 1;
}

static inline int ftrace_graph_ignore_irqs(void)
{
	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
		return 0;

	return in_irq();
}

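/*
 * Entry callback registered with the function graph infrastructure.
 * Filters out tasks, functions and irqs that should not be traced and
 * then records the function entry in the ring buffer.
 */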
int trace_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int ret;
	int cpu;
	int pc;

	if (!ftrace_trace_task(tr))
		return 0;

	if (ftrace_graph_ignore_func(trace))
		return 0;

	if (ftrace_graph_ignore_irqs())
		return 0;

	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions.  But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	/*
	 * Stop here if tracing_threshold is set. We only write function
	 * return events to the ring buffer.
	 */
	if (tracing_thresh)
		return 1;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		ret = __trace_graph_entry(tr, trace, flags, pc);
	} else {
		ret = 0;
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return ret;
}

static void
__trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long flags, int pc)
{
	u64 time = trace_clock_local();
	struct ftrace_graph_ent ent = {
		.func  = ip,
		.depth = 0,
	};
	struct ftrace_graph_ret ret = {
		.func     = ip,
		.depth    = 0,
		.calltime = time,
		.rettime  = time,
	};

	__trace_graph_entry(tr, &ent, flags, pc);
	__trace_graph_return(tr, &ret, flags, pc);
}

void
trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long parent_ip,
		unsigned long flags, int pc)
{
	__trace_graph_function(tr, ip, flags, pc);
}

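/*
 * Write a function-return event for @trace into the trace array's ring
 * buffer, unless the ring buffer is full or the event is filtered out.
 */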
void __trace_graph_return(struct trace_array *tr,
				struct ftrace_graph_ret *trace,
				unsigned long flags,
				int pc)
{
	struct trace_event_call *call = &event_funcgraph_exit;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ret_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ret = *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit_nostack(buffer, event);
}

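/*
 * Return callback registered with the function graph infrastructure.
 * Records the function exit in the ring buffer unless tracing is
 * disabled on this CPU.
 */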
void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, trace, flags, pc);
	}
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
	graph_array = tr;

	/* Make graph_array visible before we start tracing */

	smp_mb();
}

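/*
 * Return callback used when tracing_thresh is set: only functions whose
 * duration is at least tracing_thresh are written to the ring buffer.
 */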
static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
	if (tracing_thresh &&
	    (trace->rettime - trace->calltime < tracing_thresh))
		return;
	else
		trace_graph_return(trace);
}

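/*
 * Register the entry/return callbacks with ftrace and start recording
 * cmdlines. The threshold variant of the return handler is used when
 * tracing_thresh is set.
 */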
static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	set_graph_array(tr);
	if (tracing_thresh)
		ret = register_ftrace_graph(&trace_graph_thresh_return,
					    &trace_graph_entry);
	else
		ret = register_ftrace_graph(&trace_graph_return,
					    &trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}

static int graph_trace_update_thresh(struct trace_array *tr)
{
	graph_trace_reset(tr);
	return graph_trace_init(tr);
}

static int max_bytes_for_cpu;

static void print_graph_cpu(struct trace_seq *s, int cpu)
{
	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
}

#define TRACE_GRAPH_PROCINFO_LENGTH	14

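/*
 * Print the "comm-pid" pair for @pid, centered within
 * TRACE_GRAPH_PROCINFO_LENGTH columns.
 */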
static void print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%s-%s", comm, pid_str);

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++)
		trace_seq_putc(s, ' ');
}

static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	trace_seq_putc(s, ' ');
	trace_print_lat_fmt(s, entry);
}

/* If the pid changed since the last trace, output this event */
static void
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;

	if (!data)
		return;

	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

	if (*last_pid == pid)
		return;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return;

	/*
	 * Context-switch trace line:
	 *
	 *  ------------------------------------------
	 *  | 1)  migration/0--1  =>  sshd-1755
	 *  ------------------------------------------
	 */
	trace_seq_puts(s, " ------------------------------------------\n");
	print_graph_cpu(s, cpu);
	print_graph_proc(s, prev_pid);
	trace_seq_puts(s, " => ");
	print_graph_proc(s, pid);
	trace_seq_puts(s, "\n ------------------------------------------\n\n");
}

static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent;
		next = &data->ret;
	} else {
		ring_iter = trace_buffer_iter(iter, iter->cpu);

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
					    NULL, NULL);
			event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
						 NULL, NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			data->ent = *curr;
			/*
			 * If the next event is not a return type, then
			 * we only care about what type it is. Otherwise we can
			 * safely copy the entire event.
			 */
			if (next->ent.type == TRACE_GRAPH_RET)
				data->ret = *next;
			else
				data->ret.ent.type = next->ent.type;
		}
	}

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
	    curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_read(ring_iter, NULL);

	return next;
}

static void print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	trace_seq_printf(s, "%5lu.%06lu |  ",
			(unsigned long)t, usecs_rem);
}

static void
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid, u32 flags)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;

	if (addr < (unsigned long)__irqentry_text_start ||
	    addr >= (unsigned long)__irqentry_text_end)
		return;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		/* Absolute time */
		if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
			print_graph_abs_time(iter->ts, s);

		/* Cpu */
		if (flags & TRACE_GRAPH_PRINT_CPU)
			print_graph_cpu(s, cpu);

		/* Proc */
		if (flags & TRACE_GRAPH_PRINT_PROC) {
			print_graph_proc(s, pid);
			trace_seq_puts(s, " | ");
		}

		/* Latency format */
		if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
			print_graph_lat_fmt(s, ent);
	}

	/* No overhead */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);

	if (type == TRACE_GRAPH_ENT)
		trace_seq_puts(s, "==========>");
	else
		trace_seq_puts(s, "<==========");

	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
	trace_seq_putc(s, '\n');
}

void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char usecs_str[21];
	char nsecs_str[5];
	int len;
	int i;

	sprintf(usecs_str, "%lu", (unsigned long) duration);

	/* Print usecs */
	trace_seq_printf(s, "%s", usecs_str);

	len = strlen(usecs_str);

	/* Print nsecs (we don't want to exceed 7 numbers) */
	if (len < 7) {
		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
		trace_seq_printf(s, ".%s", nsecs_str);
		len += strlen(nsecs_str) + 1;
	}

	trace_seq_puts(s, " us ");

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 8; i++)
		trace_seq_putc(s, ' ');
}

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags)
{
	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
	    !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* No real data, just filling the column with spaces */
	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
	case FLAGS_FILL_FULL:
		trace_seq_puts(s, "              |  ");
		return;
	case FLAGS_FILL_START:
		trace_seq_puts(s, "  ");
		return;
	case FLAGS_FILL_END:
		trace_seq_puts(s, " |");
		return;
	}

	/* Signal the overhead of time execution to the output */
	if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
		trace_seq_printf(s, "%c ", trace_find_mark(duration));
	else
		trace_seq_puts(s, "  ");

	trace_print_graph_duration(duration, s);
	trace_seq_puts(s, "|  ");
}

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry,
		struct trace_seq *s, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int cpu = iter->cpu;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		struct fgraph_cpu_data *cpu_data;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/* If a graph tracer ignored set_graph_notrace */
		if (call->depth < -1)
			call->depth += FTRACE_NOTRACE_DEPTH;

		/*
		 * Comments display at + 1 to depth. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
		cpu_data->depth = call->depth - 1;

		/* No need to keep this function around for this depth */
		if (call->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(call->depth < 0))
			cpu_data->enter_funcs[call->depth] = 0;
	}

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%ps();\n", (void *)call->func);

	print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET,
			cpu, iter->ent->pid, flags);

	return trace_handle_return(s);
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu, u32 flags)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		/* If a graph tracer ignored set_graph_notrace */
		if (call->depth < -1)
			call->depth += FTRACE_NOTRACE_DEPTH;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
		cpu_data->depth = call->depth;

		/* Save this function pointer to see if the exit matches */
		if (call->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(call->depth < 0))
			cpu_data->enter_funcs[call->depth] = call->func;
	}

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%ps() {\n", (void *)call->func);

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * we already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}

static void
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	struct trace_array *tr = iter->tr;
	int cpu = iter->cpu;

	/* Pid */
	verif_pid(s, ent->pid, cpu, data);

	if (type)
		/* Interrupt */
		print_graph_irq(iter, addr, type, cpu, ent->pid, flags);

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		print_graph_abs_time(iter->ts, s);

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU)
		print_graph_cpu(s, cpu);

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		print_graph_proc(s, ent->pid);
		trace_seq_puts(s, " | ");
	}

	/* Latency format */
	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
		print_graph_lat_fmt(s, ent);

	return;
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-interrupts option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
		unsigned long addr, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are inside the irq code
	 */
	if (*depth_irq >= 0)
		return 1;

	if ((addr < (unsigned long)__irqentry_text_start) ||
	    (addr >= (unsigned long)__irqentry_text_end))
		return 0;

	/*
	 * We are entering irq code.
	 */
	*depth_irq = depth;
	return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-interrupts option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are not inside the irq code.
	 */
	if (*depth_irq == -1)
		return 0;

	/*
	 * We are inside the irq code, and this is the returning entry.
	 * Let's not trace it and clear the entry depth, since
	 * we are out of irq code.
	 *
	 * This condition ensures that we 'leave the irq code' once
	 * we are out of the entry depth. Thus protecting us from
	 * the RETURN entry loss.
	 */
	if (*depth_irq >= depth) {
		*depth_irq = -1;
		return 1;
	}

	/*
	 * We are inside the irq code, and this is not the entry.
	 */
	return 1;
}

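/*
 * Print a function entry event. If the matching return event immediately
 * follows, the call is printed as a leaf ("func();"), otherwise as the
 * opening of a nested block ("func() {").
 */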
static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;
	static enum print_line_t ret;
	int cpu = iter->cpu;

	if (check_irq_entry(iter, flags, call->func, call->depth))
		return TRACE_TYPE_HANDLED;

	print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
	else
		ret = print_graph_entry_nested(iter, field, s, cpu, flags);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * note of it. Because we already consumed our entry.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}

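/*
 * Print a function return event: the closing brace at the right depth,
 * its duration, and optionally the function name and overrun count.
 */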
static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter,
		   u32 flags)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int func_match = 1;
	int i;

	if (check_irq_return(iter, flags, trace->depth))
		return TRACE_TYPE_HANDLED;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. This is the
		 * return from a function, we now want the comments
		 * to display at the same level of the bracket.
		 */
		cpu_data->depth = trace->depth - 1;

		if (trace->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(trace->depth < 0)) {
			if (cpu_data->enter_funcs[trace->depth] != trace->func)
				func_match = 0;
			cpu_data->enter_funcs[trace->depth] = 0;
		}
	}

	print_graph_prologue(iter, s, 0, 0, flags);

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	/*
	 * If the return function does not have a matching entry,
	 * then the entry was lost. Instead of just printing
	 * the '}' and letting the user guess what function this
	 * belongs to, write out the function name. Always do
	 * that if the funcgraph-tail option is enabled.
	 */
	if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
		trace_seq_puts(s, "}\n");
	else
		trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);

	/* Overrun */
	if (flags & TRACE_GRAPH_PRINT_OVERRUN)
		trace_seq_printf(s, " (Overruns: %lu)\n",
				 trace->overrun);

	print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
			cpu, pid, flags);

	return trace_handle_return(s);
}

static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter, u32 flags)
{
	struct trace_array *tr = iter->tr;
	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

	print_graph_prologue(iter, s, 0, 0, flags);

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Indentation */
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
			trace_seq_putc(s, ' ');

	/* The comment */
	trace_seq_puts(s, "/* ");

	switch (iter->ent->type) {
	case TRACE_BPUTS:
		ret = trace_print_bputs_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->funcs->trace(iter, sym_flags, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	if (trace_seq_has_overflowed(s))
		goto out;

	/* Strip ending newline */
	if (s->buffer[s->seq.len - 1] == '\n') {
		s->buffer[s->seq.len - 1] = '\0';
		s->seq.len--;
	}

	trace_seq_puts(s, " */\n");
 out:
	return trace_handle_return(s);
}

enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	struct ftrace_graph_ent_entry *field;
	struct fgraph_data *data = iter->private;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	int cpu = iter->cpu;
	int ret;

	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
		return TRACE_TYPE_HANDLED;
	}

	/*
	 * If the last output failed, there's a possibility we need
	 * to print out the missing entry which would never go out.
	 */
	if (data && data->failed) {
		field = &data->ent;
		iter->cpu = data->cpu;
		ret = print_graph_entry(field, s, iter, flags);
		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
			ret = TRACE_TYPE_NO_CONSUME;
		}
		iter->cpu = cpu;
		return ret;
	}

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		/*
		 * print_graph_entry() may consume the current event,
		 * thus @field may become invalid, so we need to save it.
		 * sizeof(struct ftrace_graph_ent_entry) is very small,
		 * it can be safely saved at the stack.
		 */
		struct ftrace_graph_ent_entry saved;
		trace_assign_type(field, entry);
		saved = *field;
		return print_graph_entry(&saved, s, iter, flags);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter, flags);
	}
	case TRACE_STACK:
	case TRACE_FN:
		/* dont trace stack and functions as comments */
		return TRACE_TYPE_UNHANDLED;

	default:
		return print_graph_comment(s, entry, iter, flags);
	}

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
			   struct trace_event *event)
{
	return print_graph_function(iter);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
	static const char spaces[] = "                "	/* 16 spaces */
		"    "					/* 4 spaces */
		"                 ";			/* 17 spaces */
	int size = 0;

	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_CPU)
		size += 4;
	if (flags & TRACE_GRAPH_PRINT_PROC)
		size += 17;

	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}

static void __print_graph_headers_flags(struct trace_array *tr,
					struct seq_file *s, u32 flags)
{
	int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;

	if (lat)
		print_lat_header(s, flags);

	/* 1st line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "     TIME       ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " CPU");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "  TASK/PID       ");
	if (lat)
		seq_puts(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "  DURATION   ");
	seq_puts(s, "               FUNCTION CALLS\n");

	/* 2nd line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " |  ");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "   |    |        ");
	if (lat)
		seq_puts(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "   |   |      ");
	seq_puts(s, "               |   |   |   |\n");
}

static void print_graph_headers(struct seq_file *s)
{
	print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	struct trace_iterator *iter = s->private;
	struct trace_array *tr = iter->tr;

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;

		print_trace_header(s, iter);
	}

	__print_graph_headers_flags(tr, s, flags);
}

void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data;
	gfp_t gfpflags;
	int cpu;

	iter->private = NULL;

	/* We can be called in atomic context via ftrace_dump() */
	gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;

	data = kzalloc(sizeof(*data), gfpflags);
	if (!data)
		goto out_err;

	data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
	if (!data->cpu_data)
		goto out_err_free;

	for_each_possible_cpu(cpu) {
		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

		*pid = -1;
		*depth = 0;
		*ignore = 0;
		*depth_irq = -1;
	}

	iter->private = data;

	return;

 out_err_free:
	kfree(data);
 out_err:
	pr_warn("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
	struct fgraph_data *data = iter->private;

	if (data) {
		free_percpu(data->cpu_data);
		kfree(data);
	}
}

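/*
 * Apply a tracer option change: toggle irq tracing, sleep-time
 * accounting or graph-time accounting to match the new flag value.
 */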
static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_GRAPH_PRINT_IRQS)
		ftrace_graph_skip_irqs = !set;

	if (bit == TRACE_GRAPH_SLEEP_TIME)
		ftrace_graph_sleep_time_control(set);

	if (bit == TRACE_GRAPH_GRAPH_TIME)
		ftrace_graph_graph_time_control(set);

	return 0;
}

static struct trace_event_functions graph_functions = {
	.trace		= print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
	.type		= TRACE_GRAPH_ENT,
	.funcs		= &graph_functions,
};

static struct trace_event graph_trace_ret_event = {
	.type		= TRACE_GRAPH_RET,
	.funcs		= &graph_functions
};

static struct tracer graph_trace __tracer_data = {
	.name		= "function_graph",
	.update_thresh	= graph_trace_update_thresh,
	.open		= graph_trace_open,
	.pipe_open	= graph_trace_open,
	.close		= graph_trace_close,
	.pipe_close	= graph_trace_close,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.print_header	= print_graph_headers,
	.flags		= &tracer_flags,
	.set_flag	= func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function_graph,
#endif
};

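/*
 * "max_graph_depth" tracefs file: read or set fgraph_max_depth, which
 * limits how deep the function graph tracer follows a call chain
 * (0 means no limit).
 */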
static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	fgraph_max_depth = val;

	*ppos += cnt;

	return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
		 loff_t *ppos)
{
	char buf[15];
	int n;

	n = sprintf(buf, "%d\n", fgraph_max_depth);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}

static const struct file_operations graph_depth_fops = {
	.open		= tracing_open_generic,
	.write		= graph_depth_write,
	.read		= graph_depth_read,
	.llseek		= generic_file_llseek,
};

static __init int init_graph_tracefs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("max_graph_depth", 0644, d_tracer,
			  NULL, &graph_depth_fops);

	return 0;
}
fs_initcall(init_graph_tracefs);

static __init int init_graph_trace(void)
{
	max_bytes_for_cpu = snprintf(NULL, 0, "%u", nr_cpu_ids - 1);

	if (!register_trace_event(&graph_trace_entry_event)) {
		pr_warn("Warning: could not register graph trace events\n");
		return 1;
	}

	if (!register_trace_event(&graph_trace_ret_event)) {
		pr_warn("Warning: could not register graph trace events\n");
		return 1;
	}

	return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);