// SPDX-License-Identifier: GPL-2.0-only
/*
 * thread-stack.c: Synthesize a thread's stack using call / return events
 * Copyright (c) 2014-2015 Intel Corporation.
 */
#include <linux/rbtree.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/zalloc.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include "thread.h"
#include "event.h"
#include "machine.h"
#include "env.h"
#include "debug.h"
#include "symbol.h"
#include "comm.h"
#include "call-path.h"
#include "thread-stack.h"

#define STACK_GROWTH 2048

/*
 * State of retpoline detection.
 *
 * RETPOLINE_NONE: no retpoline detection
 * X86_RETPOLINE_POSSIBLE: x86 retpoline possible
 * X86_RETPOLINE_DETECTED: x86 retpoline detected
 */
enum retpoline_state_t {
	RETPOLINE_NONE,
	X86_RETPOLINE_POSSIBLE,
	X86_RETPOLINE_DETECTED,
};

/**
 * struct thread_stack_entry - thread stack entry.
 * @ret_addr: return address
 * @timestamp: timestamp (if known)
 * @ref: external reference (e.g. db_id of sample)
 * @branch_count: the branch count when the entry was created
 * @insn_count: the instruction count when the entry was created
 * @cyc_count: the cycle count when the entry was created
 * @db_id: id used for db-export
 * @cp: call path
 * @no_call: a 'call' was not seen
 * @trace_end: a 'call' but trace ended
 * @non_call: a branch but not a 'call' to the start of a different symbol
 */
struct thread_stack_entry {
	u64 ret_addr;
	u64 timestamp;
	u64 ref;
	u64 branch_count;
	u64 insn_count;
	u64 cyc_count;
	u64 db_id;
	struct call_path *cp;
	bool no_call;
	bool trace_end;
	bool non_call;
};

/**
 * struct thread_stack - thread stack constructed from 'call' and 'return'
 *                       branch samples.
 * @stack: array that holds the stack
 * @cnt: number of entries in the stack
 * @sz: current maximum stack size
 * @trace_nr: current trace number
 * @branch_count: running branch count
 * @insn_count: running instruction count
 * @cyc_count: running cycle count
 * @kernel_start: kernel start address
 * @last_time: last timestamp
 * @crp: call/return processor
 * @comm: current comm
 * @arr_sz: size of array if this is the first element of an array
 * @rstate: used to detect retpolines
 */
struct thread_stack {
	struct thread_stack_entry *stack;
	size_t cnt;
	size_t sz;
	u64 trace_nr;
	u64 branch_count;
	u64 insn_count;
	u64 cyc_count;
	u64 kernel_start;
	u64 last_time;
	struct call_return_processor *crp;
	struct comm *comm;
	unsigned int arr_sz;
	enum retpoline_state_t rstate;
};

/*
 * Assume pid == tid == 0 identifies the idle task as defined by
 * perf_session__register_idle_thread(). The idle task is really 1 task per
 * cpu, so it needs a stack per cpu.
 */
static inline bool thread_stack__per_cpu(struct thread *thread)
{
	return !(thread->tid || thread->pid_);
}

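/* Grow the stack array by STACK_GROWTH entries, preserving existing entries */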
static int thread_stack__grow(struct thread_stack *ts)
{
	struct thread_stack_entry *new_stack;
	size_t sz, new_sz;

	new_sz = ts->sz + STACK_GROWTH;
	sz = new_sz * sizeof(struct thread_stack_entry);

	new_stack = realloc(ts->stack, sz);
	if (!new_stack)
		return -ENOMEM;

	ts->stack = new_stack;
	ts->sz = new_sz;

	return 0;
}

static int thread_stack__init(struct thread_stack *ts, struct thread *thread,
			      struct call_return_processor *crp)
{
	int err;

	err = thread_stack__grow(ts);
	if (err)
		return err;

	if (thread->mg && thread->mg->machine) {
		struct machine *machine = thread->mg->machine;
		const char *arch = perf_env__arch(machine->env);

		ts->kernel_start = machine__kernel_start(machine);
		if (!strcmp(arch, "x86"))
			ts->rstate = X86_RETPOLINE_POSSIBLE;
	} else {
		ts->kernel_start = 1ULL << 63;
	}
	ts->crp = crp;

	return 0;
}

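/*
 * Allocate, or enlarge, the thread stack array. The idle task gets one stack
 * per cpu, so for it the array is sized by cpu number; otherwise a single
 * entry is used.
 */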
static struct thread_stack *thread_stack__new(struct thread *thread, int cpu,
					      struct call_return_processor *crp)
{
	struct thread_stack *ts = thread->ts, *new_ts;
	unsigned int old_sz = ts ? ts->arr_sz : 0;
	unsigned int new_sz = 1;

	if (thread_stack__per_cpu(thread) && cpu > 0)
		new_sz = roundup_pow_of_two(cpu + 1);

	if (!ts || new_sz > old_sz) {
		new_ts = calloc(new_sz, sizeof(*ts));
		if (!new_ts)
			return NULL;
		if (ts)
			memcpy(new_ts, ts, old_sz * sizeof(*ts));
		new_ts->arr_sz = new_sz;
		zfree(&thread->ts);
		thread->ts = new_ts;
		ts = new_ts;
	}

	if (thread_stack__per_cpu(thread) && cpu > 0 &&
	    (unsigned int)cpu < ts->arr_sz)
		ts += cpu;

	if (!ts->stack &&
	    thread_stack__init(ts, thread, crp))
		return NULL;

	return ts;
}

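/* Look up the idle task's stack for the given cpu, if it has been allocated */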
static struct thread_stack *thread__cpu_stack(struct thread *thread, int cpu)
{
	struct thread_stack *ts = thread->ts;

	if (cpu < 0)
		cpu = 0;

	if (!ts || (unsigned int)cpu >= ts->arr_sz)
		return NULL;

	ts += cpu;

	if (!ts->stack)
		return NULL;

	return ts;
}

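/* Select the stack: per-cpu for the idle task, per-thread otherwise */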
static inline struct thread_stack *thread__stack(struct thread *thread,
						 int cpu)
{
	if (!thread)
		return NULL;

	if (thread_stack__per_cpu(thread))
		return thread__cpu_stack(thread, cpu);

	return thread->ts;
}

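/*
 * Push a return address. Used via thread_stack__event() when there is no
 * call/return processor.
 */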
static int thread_stack__push(struct thread_stack *ts, u64 ret_addr,
			      bool trace_end)
{
	int err = 0;

	if (ts->cnt == ts->sz) {
		err = thread_stack__grow(ts);
		if (err) {
			pr_warning("Out of memory: discarding thread stack\n");
			ts->cnt = 0;
		}
	}

	ts->stack[ts->cnt].trace_end = trace_end;
	ts->stack[ts->cnt++].ret_addr = ret_addr;

	return err;
}

static void thread_stack__pop(struct thread_stack *ts, u64 ret_addr)
{
	size_t i;

	/*
	 * In some cases there may be functions which are not seen to return.
	 * For example when setjmp / longjmp has been used.  Or the perf
	 * context switch in the kernel which doesn't stop and start tracing
	 * in exactly the same code path.  When that happens the return
	 * address will be further down the stack.  If the return address is
	 * not found at all, we assume the opposite (i.e. this is a return
	 * for a call that wasn't seen for some reason) and leave the stack
	 * alone.
	 */
	for (i = ts->cnt; i; ) {
		if (ts->stack[--i].ret_addr == ret_addr) {
			ts->cnt = i;
			return;
		}
	}
}

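/* Pop entries that were pushed by calls made just before the trace ended */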
static void thread_stack__pop_trace_end(struct thread_stack *ts)
{
	size_t i;

	for (i = ts->cnt; i; ) {
		if (ts->stack[--i].trace_end)
			ts->cnt = i;
		else
			return;
	}
}

static bool thread_stack__in_kernel(struct thread_stack *ts)
{
	if (!ts->cnt)
		return false;

	return ts->stack[ts->cnt - 1].cp->in_kernel;
}

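/*
 * Report one completed call/return pair to the call/return processor, with
 * branch/instruction/cycle counts accumulated between the call and the return.
 */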
static int thread_stack__call_return(struct thread *thread,
				     struct thread_stack *ts, size_t idx,
				     u64 timestamp, u64 ref, bool no_return)
{
	struct call_return_processor *crp = ts->crp;
	struct thread_stack_entry *tse;
	struct call_return cr = {
		.thread = thread,
		.comm = ts->comm,
		.db_id = 0,
	};
	u64 *parent_db_id;

	tse = &ts->stack[idx];
	cr.cp = tse->cp;
	cr.call_time = tse->timestamp;
	cr.return_time = timestamp;
	cr.branch_count = ts->branch_count - tse->branch_count;
	cr.insn_count = ts->insn_count - tse->insn_count;
	cr.cyc_count = ts->cyc_count - tse->cyc_count;
	cr.db_id = tse->db_id;
	cr.call_ref = tse->ref;
	cr.return_ref = ref;
	if (tse->no_call)
		cr.flags |= CALL_RETURN_NO_CALL;
	if (no_return)
		cr.flags |= CALL_RETURN_NO_RETURN;
	if (tse->non_call)
		cr.flags |= CALL_RETURN_NON_CALL;

	/*
	 * The parent db_id must be assigned before exporting the child. Note
	 * it is not possible to export the parent first because its
	 * information is not yet complete because its 'return' has not yet
	 * been processed.
	 */
	parent_db_id = idx ? &(tse - 1)->db_id : NULL;

	return crp->process(&cr, parent_db_id, crp->data);
}

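/*
 * Empty the stack. If a call/return processor is in use, report remaining
 * entries as calls that were never seen to return.
 */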
static int __thread_stack__flush(struct thread *thread, struct thread_stack *ts)
{
	struct call_return_processor *crp = ts->crp;
	int err;

	if (!crp) {
		ts->cnt = 0;
		return 0;
	}

	while (ts->cnt) {
		err = thread_stack__call_return(thread, ts, --ts->cnt,
						ts->last_time, 0, true);
		if (err) {
			pr_err("Error flushing thread stack!\n");
			ts->cnt = 0;
			return err;
		}
	}

	return 0;
}

int thread_stack__flush(struct thread *thread)
{
	struct thread_stack *ts = thread->ts;
	unsigned int pos;
	int err = 0;

	if (ts) {
		for (pos = 0; pos < ts->arr_sz; pos++) {
			int ret = __thread_stack__flush(thread, ts + pos);

			if (ret)
				err = ret;
		}
	}

	return err;
}

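/*
 * Keep the stack up to date from 'call' and 'return' branch events. When a
 * call/return processor is in use, only trace discontinuities are handled
 * here; thread_stack__process() maintains the stack instead.
 */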
int thread_stack__event(struct thread *thread, int cpu, u32 flags, u64 from_ip,
			u64 to_ip, u16 insn_len, u64 trace_nr)
{
	struct thread_stack *ts = thread__stack(thread, cpu);

	if (!thread)
		return -EINVAL;

	if (!ts) {
		ts = thread_stack__new(thread, cpu, NULL);
		if (!ts) {
			pr_warning("Out of memory: no thread stack\n");
			return -ENOMEM;
		}
		ts->trace_nr = trace_nr;
	}

	/*
	 * When the trace is discontinuous, the trace_nr changes.  In that case
	 * we cannot rely on the stack being valid, as the stack might not
	 * match the trace; so flush it.
	 */
	if (trace_nr != ts->trace_nr) {
		if (ts->trace_nr)
			__thread_stack__flush(thread, ts);
		ts->trace_nr = trace_nr;
	}

	/* Stop here if thread_stack__process() is in use */
	if (ts->crp)
		return 0;

	if (flags & PERF_IP_FLAG_CALL) {
		u64 ret_addr;

		if (!to_ip)
			return 0;
		ret_addr = from_ip + insn_len;
		if (ret_addr == to_ip)
			return 0; /* Zero-length calls are excluded */
		return thread_stack__push(ts, ret_addr,
					  flags & PERF_IP_FLAG_TRACE_END);
	} else if (flags & PERF_IP_FLAG_TRACE_BEGIN) {
		/*
		 * If the caller did not change the trace number (which would
		 * have flushed the stack) then try to make sense of the stack.
		 * Possibly, tracing began after returning to the current
		 * address, so try to pop that. Also, do not expect a call made
		 * when the trace ended, to return, so pop that.
		 */
		thread_stack__pop(ts, to_ip);
		thread_stack__pop_trace_end(ts);
	} else if ((flags & PERF_IP_FLAG_RETURN) && from_ip) {
		thread_stack__pop(ts, to_ip);
	}

	return 0;
}

void thread_stack__set_trace_nr(struct thread *thread, int cpu, u64 trace_nr)
{
	struct thread_stack *ts = thread__stack(thread, cpu);

	if (!ts)
		return;

	if (trace_nr != ts->trace_nr) {
		if (ts->trace_nr)
			__thread_stack__flush(thread, ts);
		ts->trace_nr = trace_nr;
	}
}

static void __thread_stack__free(struct thread *thread, struct thread_stack *ts)
{
	__thread_stack__flush(thread, ts);
	zfree(&ts->stack);
}

static void thread_stack__reset(struct thread *thread, struct thread_stack *ts)
{
	unsigned int arr_sz = ts->arr_sz;

	__thread_stack__free(thread, ts);
	memset(ts, 0, sizeof(*ts));
	ts->arr_sz = arr_sz;
}

void thread_stack__free(struct thread *thread)
{
	struct thread_stack *ts = thread->ts;
	unsigned int pos;

	if (ts) {
		for (pos = 0; pos < ts->arr_sz; pos++)
			__thread_stack__free(thread, ts + pos);
		zfree(&thread->ts);
	}
}

static inline u64 callchain_context(u64 ip, u64 kernel_start)
{
	return ip < kernel_start ? PERF_CONTEXT_USER : PERF_CONTEXT_KERNEL;
}

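/*
 * Copy the stack into a callchain, starting with the sampled ip and inserting
 * a PERF_CONTEXT marker whenever the addresses cross between user space and
 * the kernel.
 */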
void thread_stack__sample(struct thread *thread, int cpu,
			  struct ip_callchain *chain,
			  size_t sz, u64 ip, u64 kernel_start)
{
	struct thread_stack *ts = thread__stack(thread, cpu);
	u64 context = callchain_context(ip, kernel_start);
	u64 last_context;
	size_t i, j;

	if (sz < 2) {
		chain->nr = 0;
		return;
	}

	chain->ips[0] = context;
	chain->ips[1] = ip;

	if (!ts) {
		chain->nr = 2;
		return;
	}

	last_context = context;

	for (i = 2, j = 1; i < sz && j <= ts->cnt; i++, j++) {
		ip = ts->stack[ts->cnt - j].ret_addr;
		context = callchain_context(ip, kernel_start);
		if (context != last_context) {
			if (i >= sz - 1)
				break;
			chain->ips[i++] = context;
			last_context = context;
		}
		chain->ips[i] = ip;
	}

	chain->nr = i;
}

struct call_return_processor *
call_return_processor__new(int (*process)(struct call_return *cr, u64 *parent_db_id, void *data),
			   void *data)
{
	struct call_return_processor *crp;

	crp = zalloc(sizeof(struct call_return_processor));
	if (!crp)
		return NULL;
	crp->cpr = call_path_root__new();
	if (!crp->cpr)
		goto out_free;
	crp->process = process;
	crp->data = data;
	return crp;

out_free:
	free(crp);
	return NULL;
}

void call_return_processor__free(struct call_return_processor *crp)
{
	if (crp) {
		call_path_root__free(crp->cpr);
		free(crp);
	}
}

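/* Push an entry, recording its call path and the current running counts */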
static int thread_stack__push_cp(struct thread_stack *ts, u64 ret_addr,
				 u64 timestamp, u64 ref, struct call_path *cp,
				 bool no_call, bool trace_end)
{
	struct thread_stack_entry *tse;
	int err;

	if (!cp)
		return -ENOMEM;

	if (ts->cnt == ts->sz) {
		err = thread_stack__grow(ts);
		if (err)
			return err;
	}

	tse = &ts->stack[ts->cnt++];
	tse->ret_addr = ret_addr;
	tse->timestamp = timestamp;
	tse->ref = ref;
	tse->branch_count = ts->branch_count;
	tse->insn_count = ts->insn_count;
	tse->cyc_count = ts->cyc_count;
	tse->cp = cp;
	tse->no_call = no_call;
	tse->trace_end = trace_end;
	tse->non_call = false;
	tse->db_id = 0;

	return 0;
}

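/*
 * Pop entries until one matching the return address is found, reporting a
 * call/return for each entry popped. Returns 1 if there is no match, so the
 * caller can handle it as a return without a matching call.
 */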
static int thread_stack__pop_cp(struct thread *thread, struct thread_stack *ts,
				u64 ret_addr, u64 timestamp, u64 ref,
				struct symbol *sym)
{
	int err;

	if (!ts->cnt)
		return 1;

	if (ts->cnt == 1) {
		struct thread_stack_entry *tse = &ts->stack[0];

		if (tse->cp->sym == sym)
			return thread_stack__call_return(thread, ts, --ts->cnt,
							 timestamp, ref, false);
	}

	if (ts->stack[ts->cnt - 1].ret_addr == ret_addr &&
	    !ts->stack[ts->cnt - 1].non_call) {
		return thread_stack__call_return(thread, ts, --ts->cnt,
						 timestamp, ref, false);
	} else {
		size_t i = ts->cnt - 1;

		while (i--) {
			if (ts->stack[i].ret_addr != ret_addr ||
			    ts->stack[i].non_call)
				continue;
			i += 1;
			while (ts->cnt > i) {
				err = thread_stack__call_return(thread, ts,
								--ts->cnt,
								timestamp, ref,
								true);
				if (err)
					return err;
			}
			return thread_stack__call_return(thread, ts, --ts->cnt,
							 timestamp, ref, false);
		}
	}

	return 1;
}

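/* Put the current location at the bottom of an otherwise empty stack */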
static int thread_stack__bottom(struct thread_stack *ts,
				struct perf_sample *sample,
				struct addr_location *from_al,
				struct addr_location *to_al, u64 ref)
{
	struct call_path_root *cpr = ts->crp->cpr;
	struct call_path *cp;
	struct symbol *sym;
	u64 ip;

	if (sample->ip) {
		ip = sample->ip;
		sym = from_al->sym;
	} else if (sample->addr) {
		ip = sample->addr;
		sym = to_al->sym;
	} else {
		return 0;
	}

	cp = call_path__findnew(cpr, &cpr->call_path, sym, ip,
				ts->kernel_start);

	return thread_stack__push_cp(ts, ip, sample->time, ref, cp,
				     true, false);
}

static int thread_stack__pop_ks(struct thread *thread, struct thread_stack *ts,
				struct perf_sample *sample, u64 ref)
{
	u64 tm = sample->time;
	int err;

	/* Return to userspace, so pop all kernel addresses */
	while (thread_stack__in_kernel(ts)) {
		err = thread_stack__call_return(thread, ts, --ts->cnt,
						tm, ref, true);
		if (err)
			return err;
	}

	return 0;
}

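/*
 * Handle a 'return' for which no matching 'call' was seen, synthesizing call
 * paths as needed to keep the call graph consistent.
 */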
static int thread_stack__no_call_return(struct thread *thread,
					struct thread_stack *ts,
					struct perf_sample *sample,
					struct addr_location *from_al,
					struct addr_location *to_al, u64 ref)
{
	struct call_path_root *cpr = ts->crp->cpr;
	struct call_path *root = &cpr->call_path;
	struct symbol *fsym = from_al->sym;
	struct symbol *tsym = to_al->sym;
	struct call_path *cp, *parent;
	u64 ks = ts->kernel_start;
	u64 addr = sample->addr;
	u64 tm = sample->time;
	u64 ip = sample->ip;
	int err;

	if (ip >= ks && addr < ks) {
		/* Return to userspace, so pop all kernel addresses */
		err = thread_stack__pop_ks(thread, ts, sample, ref);
		if (err)
			return err;

		/* If the stack is empty, push the userspace address */
		if (!ts->cnt) {
			cp = call_path__findnew(cpr, root, tsym, addr, ks);
			return thread_stack__push_cp(ts, 0, tm, ref, cp, true,
						     false);
		}
	} else if (thread_stack__in_kernel(ts) && ip < ks) {
		/* Return to userspace, so pop all kernel addresses */
		err = thread_stack__pop_ks(thread, ts, sample, ref);
		if (err)
			return err;
	}

	if (ts->cnt)
		parent = ts->stack[ts->cnt - 1].cp;
	else
		parent = root;

	if (parent->sym == from_al->sym) {
		/*
		 * At the bottom of the stack, assume the missing 'call' was
		 * before the trace started. So, pop the current symbol and
		 * push the 'to' symbol.
		 */
		if (ts->cnt == 1) {
			err = thread_stack__call_return(thread, ts, --ts->cnt,
							tm, ref, false);
			if (err)
				return err;
		}

		if (!ts->cnt) {
			cp = call_path__findnew(cpr, root, tsym, addr, ks);

			return thread_stack__push_cp(ts, addr, tm, ref, cp,
						     true, false);
		}

		/*
		 * Otherwise assume the 'return' is being used as a jump (e.g.
		 * retpoline) and just push the 'to' symbol.
		 */
		cp = call_path__findnew(cpr, parent, tsym, addr, ks);

		err = thread_stack__push_cp(ts, 0, tm, ref, cp, true, false);
		if (!err)
			ts->stack[ts->cnt - 1].non_call = true;

		return err;
	}

	/*
	 * Assume 'parent' has not yet returned, so push 'to', and then push
	 * and pop 'from'.
	 */

	cp = call_path__findnew(cpr, parent, tsym, addr, ks);

	err = thread_stack__push_cp(ts, addr, tm, ref, cp, true, false);
	if (err)
		return err;

	cp = call_path__findnew(cpr, cp, fsym, ip, ks);

	err = thread_stack__push_cp(ts, ip, tm, ref, cp, true, false);
	if (err)
		return err;

	return thread_stack__call_return(thread, ts, --ts->cnt, tm, ref, false);
}

static int thread_stack__trace_begin(struct thread *thread,
				     struct thread_stack *ts, u64 timestamp,
				     u64 ref)
{
	struct thread_stack_entry *tse;
	int err;

	if (!ts->cnt)
		return 0;

	/* Pop trace end */
	tse = &ts->stack[ts->cnt - 1];
	if (tse->trace_end) {
		err = thread_stack__call_return(thread, ts, --ts->cnt,
						timestamp, ref, false);
		if (err)
			return err;
	}

	return 0;
}

static int thread_stack__trace_end(struct thread_stack *ts,
				   struct perf_sample *sample, u64 ref)
{
	struct call_path_root *cpr = ts->crp->cpr;
	struct call_path *cp;
	u64 ret_addr;

	/* No point having 'trace end' on the bottom of the stack */
	if (!ts->cnt || (ts->cnt == 1 && ts->stack[0].ref == ref))
		return 0;

	cp = call_path__findnew(cpr, ts->stack[ts->cnt - 1].cp, NULL, 0,
				ts->kernel_start);

	ret_addr = sample->ip + sample->insn_len;

	return thread_stack__push_cp(ts, ret_addr, sample->time, ref, cp,
				     false, true);
}

static bool is_x86_retpoline(const char *name)
{
	const char *p = strstr(name, "__x86_indirect_thunk_");

	return p == name || !strcmp(name, "__indirect_thunk_start");
}

/*
 * x86 retpoline functions pollute the call graph. This function removes them.
 * This does not handle function return thunks, nor is there any improvement
 * for the handling of inline thunks or extern thunks.
 */
static int thread_stack__x86_retpoline(struct thread_stack *ts,
				       struct perf_sample *sample,
				       struct addr_location *to_al)
{
	struct thread_stack_entry *tse = &ts->stack[ts->cnt - 1];
	struct call_path_root *cpr = ts->crp->cpr;
	struct symbol *sym = tse->cp->sym;
	struct symbol *tsym = to_al->sym;
	struct call_path *cp;

	if (sym && is_x86_retpoline(sym->name)) {
		/*
		 * This is a x86 retpoline fn. It pollutes the call graph by
		 * showing up everywhere there is an indirect branch, but does
		 * not itself mean anything. Here the top-of-stack is removed,
		 * by decrementing the stack count, and then further down, the
		 * resulting top-of-stack is replaced with the actual target.
		 * The result is that the retpoline functions will no longer
		 * appear in the call graph. Note this only affects the call
		 * graph, since all the original branches are left unchanged.
		 */
		ts->cnt -= 1;
		sym = ts->stack[ts->cnt - 2].cp->sym;
		if (sym && sym == tsym && to_al->addr != tsym->start) {
			/*
			 * Target is back to the middle of the symbol we came
			 * from so assume it is an indirect jmp and forget it
			 * altogether.
			 */
			ts->cnt -= 1;
			return 0;
		}
	} else if (sym && sym == tsym) {
		/*
		 * Target is back to the symbol we came from so assume it is
		 * an indirect jmp and forget it altogether.
		 */
		ts->cnt -= 1;
		return 0;
	}

	cp = call_path__findnew(cpr, ts->stack[ts->cnt - 2].cp, tsym,
				sample->addr, ts->kernel_start);
	if (!cp)
		return -ENOMEM;

	/* Replace the top-of-stack with the actual target */
	ts->stack[ts->cnt - 1].cp = cp;

	return 0;
}

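/*
 * Process one branch sample: update the stack for calls, returns, trace
 * begin/end and call-like jumps, reporting completed call/return pairs via
 * the call/return processor.
 */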
int thread_stack__process(struct thread *thread, struct comm *comm,
			  struct perf_sample *sample,
			  struct addr_location *from_al,
			  struct addr_location *to_al, u64 ref,
			  struct call_return_processor *crp)
{
	struct thread_stack *ts = thread__stack(thread, sample->cpu);
	enum retpoline_state_t rstate;
	int err = 0;

	if (ts && !ts->crp) {
		/* Supersede thread_stack__event() */
		thread_stack__reset(thread, ts);
		ts = NULL;
	}

	if (!ts) {
		ts = thread_stack__new(thread, sample->cpu, crp);
		if (!ts)
			return -ENOMEM;
		ts->comm = comm;
	}

	rstate = ts->rstate;
	if (rstate == X86_RETPOLINE_DETECTED)
		ts->rstate = X86_RETPOLINE_POSSIBLE;

	/* Flush stack on exec */
	if (ts->comm != comm && thread->pid_ == thread->tid) {
		err = __thread_stack__flush(thread, ts);
		if (err)
			return err;
		ts->comm = comm;
	}

	/* If the stack is empty, put the current symbol on the stack */
	if (!ts->cnt) {
		err = thread_stack__bottom(ts, sample, from_al, to_al, ref);
		if (err)
			return err;
	}

	ts->branch_count += 1;
	ts->insn_count += sample->insn_cnt;
	ts->cyc_count += sample->cyc_cnt;
	ts->last_time = sample->time;

	if (sample->flags & PERF_IP_FLAG_CALL) {
		bool trace_end = sample->flags & PERF_IP_FLAG_TRACE_END;
		struct call_path_root *cpr = ts->crp->cpr;
		struct call_path *cp;
		u64 ret_addr;

		if (!sample->ip || !sample->addr)
			return 0;

		ret_addr = sample->ip + sample->insn_len;
		if (ret_addr == sample->addr)
			return 0; /* Zero-length calls are excluded */

		cp = call_path__findnew(cpr, ts->stack[ts->cnt - 1].cp,
					to_al->sym, sample->addr,
					ts->kernel_start);
		err = thread_stack__push_cp(ts, ret_addr, sample->time, ref,
					    cp, false, trace_end);

		/*
		 * A call to the same symbol but not the start of the symbol,
		 * may be the start of a x86 retpoline.
		 */
		if (!err && rstate == X86_RETPOLINE_POSSIBLE && to_al->sym &&
		    from_al->sym == to_al->sym &&
		    to_al->addr != to_al->sym->start)
			ts->rstate = X86_RETPOLINE_DETECTED;

	} else if (sample->flags & PERF_IP_FLAG_RETURN) {
		if (!sample->addr) {
			u32 return_from_kernel = PERF_IP_FLAG_SYSCALLRET |
						 PERF_IP_FLAG_INTERRUPT;

			if (!(sample->flags & return_from_kernel))
				return 0;

			/* Pop kernel stack */
			return thread_stack__pop_ks(thread, ts, sample, ref);
		}

		if (!sample->ip)
			return 0;

		/* x86 retpoline 'return' doesn't match the stack */
		if (rstate == X86_RETPOLINE_DETECTED && ts->cnt > 2 &&
		    ts->stack[ts->cnt - 1].ret_addr != sample->addr)
			return thread_stack__x86_retpoline(ts, sample, to_al);

		err = thread_stack__pop_cp(thread, ts, sample->addr,
					   sample->time, ref, from_al->sym);
		if (err) {
			if (err < 0)
				return err;
			err = thread_stack__no_call_return(thread, ts, sample,
							   from_al, to_al, ref);
		}
	} else if (sample->flags & PERF_IP_FLAG_TRACE_BEGIN) {
		err = thread_stack__trace_begin(thread, ts, sample->time, ref);
	} else if (sample->flags & PERF_IP_FLAG_TRACE_END) {
		err = thread_stack__trace_end(ts, sample, ref);
	} else if (sample->flags & PERF_IP_FLAG_BRANCH &&
		   from_al->sym != to_al->sym && to_al->sym &&
		   to_al->addr == to_al->sym->start) {
		struct call_path_root *cpr = ts->crp->cpr;
		struct call_path *cp;

		/*
		 * The compiler might optimize a call/ret combination by making
		 * it a jmp. Make it look like a call.
		 */
		cp = call_path__findnew(cpr, ts->stack[ts->cnt - 1].cp,
					to_al->sym, sample->addr,
					ts->kernel_start);
		err = thread_stack__push_cp(ts, 0, sample->time, ref, cp,
					    false, false);
		if (!err)
			ts->stack[ts->cnt - 1].non_call = true;
	}

	return err;
}

size_t thread_stack__depth(struct thread *thread, int cpu)
{
	struct thread_stack *ts = thread__stack(thread, cpu);

	if (!ts)
		return 0;
	return ts->cnt;
}