1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16#include <inttypes.h>
17#include <sys/types.h>
18#include <sys/mman.h>
19#include <stdbool.h>
20#include <string.h>
21#include <limits.h>
22#include <errno.h>
23
24#include <linux/kernel.h>
25#include <linux/perf_event.h>
26#include <linux/types.h>
27#include <linux/bitops.h>
28#include <linux/log2.h>
29#include <linux/string.h>
30#include <linux/time64.h>
31
32#include <sys/param.h>
33#include <stdlib.h>
34#include <stdio.h>
35#include <linux/list.h>
36#include <linux/zalloc.h>
37
38#include "evlist.h"
39#include "dso.h"
40#include "map.h"
41#include "pmu.h"
42#include "evsel.h"
43#include "evsel_config.h"
44#include "symbol.h"
45#include "util/perf_api_probe.h"
46#include "util/synthetic-events.h"
47#include "thread_map.h"
48#include "asm/bug.h"
49#include "auxtrace.h"
50
51#include <linux/hash.h>
52
53#include "event.h"
54#include "record.h"
55#include "session.h"
56#include "debug.h"
57#include <subcmd/parse-options.h>
58
59#include "cs-etm.h"
60#include "intel-pt.h"
61#include "intel-bts.h"
62#include "arm-spe.h"
63#include "s390-cpumsf.h"
64#include "util/mmap.h"
65
66#include <linux/ctype.h>
67#include "symbol/kallsyms.h"
68#include <internal/lib.h>
69
70
71
72
73
/*
 * Re-parent all events between @leader and @last (inclusive) so they become
 * group members of @leader.  Fails with -EINVAL, leaving the evlist
 * untouched, if @leader is not itself a group leader or if any event in the
 * range already belongs to some other group (each must either already have
 * @leader as leader, or be a lone event leading only itself).
 */
static int evlist__regroup(struct evlist *evlist, struct evsel *leader, struct evsel *last)
{
        struct evsel *evsel;
        bool grp;

        if (!evsel__is_group_leader(leader))
                return -EINVAL;

        /* First pass: validate only, so a failure has no side effects */
        grp = false;
        evlist__for_each_entry(evlist, evsel) {
                if (grp) {
                        if (!(evsel__leader(evsel) == leader ||
                              (evsel__leader(evsel) == evsel &&
                               evsel->core.nr_members <= 1)))
                                return -EINVAL;
                } else if (evsel == leader) {
                        grp = true;
                }
                if (evsel == last)
                        break;
        }

        /* Second pass: move the events into @leader's group */
        grp = false;
        evlist__for_each_entry(evlist, evsel) {
                if (grp) {
                        if (!evsel__has_leader(evsel, leader)) {
                                evsel__set_leader(evsel, leader);
                                /* nr_members includes the leader itself */
                                if (leader->core.nr_members < 1)
                                        leader->core.nr_members = 1;
                                leader->core.nr_members += 1;
                        }
                } else if (evsel == leader) {
                        grp = true;
                }
                if (evsel == last)
                        break;
        }

        return 0;
}
114
115static bool auxtrace__dont_decode(struct perf_session *session)
116{
117 return !session->itrace_synth_opts ||
118 session->itrace_synth_opts->dont_decode;
119}
120
/*
 * Set up the AUX area mmap described by @mp against perf event fd @fd.
 * @userpg is the already-mapped perf_event_mmap_page; the AUX offset and
 * size are written into it before the mmap() call (presumably the kernel
 * reads them to size the AUX buffer — see perf_event_open(2)).  A zero
 * @mp->len means AUX tracing is not in use: only the bookkeeping fields
 * are filled in.  Returns 0 on success, -1 on failure with mm->base NULL.
 */
int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
                        struct auxtrace_mmap_params *mp,
                        void *userpg, int fd)
{
        struct perf_event_mmap_page *pc = userpg;

        /* Caller must zero the struct; a non-NULL base means it did not */
        WARN_ONCE(mm->base, "Uninitialized auxtrace_mmap\n");

        mm->userpg = userpg;
        mm->mask = mp->mask;
        mm->len = mp->len;
        mm->prev = 0;
        mm->idx = mp->idx;
        mm->tid = mp->tid;
        mm->cpu = mp->cpu;

        if (!mp->len) {
                mm->base = NULL;
                return 0;
        }

#if BITS_PER_LONG != 64 && !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
        /* Updating the 64-bit AUX head/tail needs a 64-bit cmpxchg */
        pr_err("Cannot use AUX area tracing mmaps\n");
        return -1;
#endif

        pc->aux_offset = mp->offset;
        pc->aux_size = mp->len;

        mm->base = mmap(NULL, mp->len, mp->prot, MAP_SHARED, fd, mp->offset);
        if (mm->base == MAP_FAILED) {
                pr_debug2("failed to mmap AUX area\n");
                mm->base = NULL;
                return -1;
        }

        return 0;
}
159
160void auxtrace_mmap__munmap(struct auxtrace_mmap *mm)
161{
162 if (mm->base) {
163 munmap(mm->base, mm->len);
164 mm->base = NULL;
165 }
166}
167
168void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
169 off_t auxtrace_offset,
170 unsigned int auxtrace_pages,
171 bool auxtrace_overwrite)
172{
173 if (auxtrace_pages) {
174 mp->offset = auxtrace_offset;
175 mp->len = auxtrace_pages * (size_t)page_size;
176 mp->mask = is_power_of_2(mp->len) ? mp->len - 1 : 0;
177 mp->prot = PROT_READ | (auxtrace_overwrite ? 0 : PROT_WRITE);
178 pr_debug2("AUX area mmap length %zu\n", mp->len);
179 } else {
180 mp->len = 0;
181 }
182}
183
/*
 * Fill in the mmap index, cpu and tid for mmap @idx.  Per-cpu mmaps take
 * the cpu from the evlist cpu map and, when a thread map exists, the first
 * thread's pid; per-thread mmaps use cpu -1 and the idx'th thread's pid.
 */
void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
                                   struct evlist *evlist, int idx,
                                   bool per_cpu)
{
        mp->idx = idx;

        if (per_cpu) {
                mp->cpu = evlist->core.cpus->map[idx];
                if (evlist->core.threads)
                        mp->tid = perf_thread_map__pid(evlist->core.threads, 0);
                else
                        mp->tid = -1;
        } else {
                mp->cpu = -1;
                mp->tid = perf_thread_map__pid(evlist->core.threads, idx);
        }
}
201
202#define AUXTRACE_INIT_NR_QUEUES 32
203
204static struct auxtrace_queue *auxtrace_alloc_queue_array(unsigned int nr_queues)
205{
206 struct auxtrace_queue *queue_array;
207 unsigned int max_nr_queues, i;
208
209 max_nr_queues = UINT_MAX / sizeof(struct auxtrace_queue);
210 if (nr_queues > max_nr_queues)
211 return NULL;
212
213 queue_array = calloc(nr_queues, sizeof(struct auxtrace_queue));
214 if (!queue_array)
215 return NULL;
216
217 for (i = 0; i < nr_queues; i++) {
218 INIT_LIST_HEAD(&queue_array[i].head);
219 queue_array[i].priv = NULL;
220 }
221
222 return queue_array;
223}
224
225int auxtrace_queues__init(struct auxtrace_queues *queues)
226{
227 queues->nr_queues = AUXTRACE_INIT_NR_QUEUES;
228 queues->queue_array = auxtrace_alloc_queue_array(queues->nr_queues);
229 if (!queues->queue_array)
230 return -ENOMEM;
231 return 0;
232}
233
234static int auxtrace_queues__grow(struct auxtrace_queues *queues,
235 unsigned int new_nr_queues)
236{
237 unsigned int nr_queues = queues->nr_queues;
238 struct auxtrace_queue *queue_array;
239 unsigned int i;
240
241 if (!nr_queues)
242 nr_queues = AUXTRACE_INIT_NR_QUEUES;
243
244 while (nr_queues && nr_queues < new_nr_queues)
245 nr_queues <<= 1;
246
247 if (nr_queues < queues->nr_queues || nr_queues < new_nr_queues)
248 return -EINVAL;
249
250 queue_array = auxtrace_alloc_queue_array(nr_queues);
251 if (!queue_array)
252 return -ENOMEM;
253
254 for (i = 0; i < queues->nr_queues; i++) {
255 list_splice_tail(&queues->queue_array[i].head,
256 &queue_array[i].head);
257 queue_array[i].tid = queues->queue_array[i].tid;
258 queue_array[i].cpu = queues->queue_array[i].cpu;
259 queue_array[i].set = queues->queue_array[i].set;
260 queue_array[i].priv = queues->queue_array[i].priv;
261 }
262
263 queues->nr_queues = nr_queues;
264 queues->queue_array = queue_array;
265
266 return 0;
267}
268
269static void *auxtrace_copy_data(u64 size, struct perf_session *session)
270{
271 int fd = perf_data__fd(session->data);
272 void *p;
273 ssize_t ret;
274
275 if (size > SSIZE_MAX)
276 return NULL;
277
278 p = malloc(size);
279 if (!p)
280 return NULL;
281
282 ret = readn(fd, p, size);
283 if (ret != (ssize_t)size) {
284 free(p);
285 return NULL;
286 }
287
288 return p;
289}
290
/*
 * Append @buffer to queue @idx, growing the queue array on demand.  The
 * first buffer queued on a queue pins the queue's tid/cpu.  Buffers get a
 * globally increasing buffer_nr so overall ordering can be recovered.
 * Takes ownership of @buffer on success.
 */
static int auxtrace_queues__queue_buffer(struct auxtrace_queues *queues,
                                         unsigned int idx,
                                         struct auxtrace_buffer *buffer)
{
        struct auxtrace_queue *queue;
        int err;

        if (idx >= queues->nr_queues) {
                err = auxtrace_queues__grow(queues, idx + 1);
                if (err)
                        return err;
        }

        queue = &queues->queue_array[idx];

        if (!queue->set) {
                queue->set = true;
                queue->tid = buffer->tid;
                queue->cpu = buffer->cpu;
        }

        buffer->buffer_nr = queues->next_buffer_nr++;

        list_add_tail(&buffer->list, &queue->head);

        queues->new_data = true;
        queues->populated = true;

        return 0;
}
321
322
/* Cap on per-buffer mmap size used on 32-bit builds (limited address space) */
#define BUFFER_LIMIT_FOR_32_BIT (32 * 1024 * 1024)

/*
 * Split an oversized buffer into BUFFER_LIMIT_FOR_32_BIT sized chunks and
 * queue each chunk separately.  Chunks after the first are flagged
 * 'consecutive', i.e. they continue the preceding chunk's data.  @buffer
 * itself is left holding (and describing) the final remainder, which the
 * caller is expected to queue.
 */
static int auxtrace_queues__split_buffer(struct auxtrace_queues *queues,
                                         unsigned int idx,
                                         struct auxtrace_buffer *buffer)
{
        u64 sz = buffer->size;
        bool consecutive = false;
        struct auxtrace_buffer *b;
        int err;

        while (sz > BUFFER_LIMIT_FOR_32_BIT) {
                b = memdup(buffer, sizeof(struct auxtrace_buffer));
                if (!b)
                        return -ENOMEM;
                b->size = BUFFER_LIMIT_FOR_32_BIT;
                b->consecutive = consecutive;
                err = auxtrace_queues__queue_buffer(queues, idx, b);
                if (err) {
                        auxtrace_buffer__free(b);
                        return err;
                }
                buffer->data_offset += BUFFER_LIMIT_FOR_32_BIT;
                sz -= BUFFER_LIMIT_FOR_32_BIT;
                consecutive = true;
        }

        buffer->size = sz;
        buffer->consecutive = consecutive;

        return 0;
}
355
356static bool filter_cpu(struct perf_session *session, int cpu)
357{
358 unsigned long *cpu_bitmap = session->itrace_synth_opts->cpu_bitmap;
359
360 return cpu_bitmap && cpu != -1 && !test_bit(cpu, cpu_bitmap);
361}
362
/*
 * Duplicate @buffer and queue it on queue @idx, resolving how its data will
 * be accessed: directly via the single file mmap, copied out of a pipe, or
 * split into chunks small enough to mmap on 32-bit.  Buffers on a filtered
 * cpu are silently dropped.  On success, *@buffer_ptr (if given) receives
 * the queued copy.
 */
static int auxtrace_queues__add_buffer(struct auxtrace_queues *queues,
                                       struct perf_session *session,
                                       unsigned int idx,
                                       struct auxtrace_buffer *buffer,
                                       struct auxtrace_buffer **buffer_ptr)
{
        int err = -ENOMEM;

        if (filter_cpu(session, buffer->cpu))
                return 0;

        buffer = memdup(buffer, sizeof(*buffer));
        if (!buffer)
                return -ENOMEM;

        if (session->one_mmap) {
                /* Whole file is mapped: point straight into the mapping */
                buffer->data = buffer->data_offset - session->one_mmap_offset +
                               session->one_mmap_addr;
        } else if (perf_data__is_pipe(session->data)) {
                /* Pipes cannot be re-read later: copy the data out now */
                buffer->data = auxtrace_copy_data(buffer->size, session);
                if (!buffer->data)
                        goto out_free;
                buffer->data_needs_freeing = true;
        } else if (BITS_PER_LONG == 32 &&
                   buffer->size > BUFFER_LIMIT_FOR_32_BIT) {
                err = auxtrace_queues__split_buffer(queues, idx, buffer);
                if (err)
                        goto out_free;
        }

        err = auxtrace_queues__queue_buffer(queues, idx, buffer);
        if (err)
                goto out_free;

        /* FIXME: Must dedupe if buffer pointer is used after queueing */
        if (buffer_ptr)
                *buffer_ptr = buffer;

        return 0;

out_free:
        auxtrace_buffer__free(buffer);
        return err;
}
407
/*
 * Queue the AUX data carried by a PERF_RECORD_AUXTRACE event.
 * @data_offset is the file offset of the AUX data itself (the payload
 * follows the event record).  *@buffer_ptr, if non-NULL, receives the
 * queued buffer.
 */
int auxtrace_queues__add_event(struct auxtrace_queues *queues,
                               struct perf_session *session,
                               union perf_event *event, off_t data_offset,
                               struct auxtrace_buffer **buffer_ptr)
{
        struct auxtrace_buffer buffer = {
                .pid = -1,
                .tid = event->auxtrace.tid,
                .cpu = event->auxtrace.cpu,
                .data_offset = data_offset,
                .offset = event->auxtrace.offset,
                .reference = event->auxtrace.reference,
                .size = event->auxtrace.size,
        };
        unsigned int idx = event->auxtrace.idx;

        return auxtrace_queues__add_buffer(queues, session, idx, &buffer,
                                           buffer_ptr);
}
427
/*
 * Queue the AUX data referenced by one auxtrace index entry: peek the
 * PERF_RECORD_AUXTRACE event at @file_offset, validate its size against
 * the index entry's @sz, and queue it.  The AUX data follows the event
 * record, hence the header.size adjustment before queuing.  Non-AUXTRACE
 * records are silently ignored (err stays 0).
 */
static int auxtrace_queues__add_indexed_event(struct auxtrace_queues *queues,
                                              struct perf_session *session,
                                              off_t file_offset, size_t sz)
{
        union perf_event *event;
        int err;
        char buf[PERF_SAMPLE_MAX_SIZE];

        err = perf_session__peek_event(session, file_offset, buf,
                                       PERF_SAMPLE_MAX_SIZE, &event, NULL);
        if (err)
                return err;

        if (event->header.type == PERF_RECORD_AUXTRACE) {
                if (event->header.size < sizeof(struct perf_record_auxtrace) ||
                    event->header.size != sz) {
                        err = -EINVAL;
                        goto out;
                }
                file_offset += event->header.size;
                err = auxtrace_queues__add_event(queues, session, event,
                                                 file_offset, NULL);
        }
out:
        return err;
}
454
455void auxtrace_queues__free(struct auxtrace_queues *queues)
456{
457 unsigned int i;
458
459 for (i = 0; i < queues->nr_queues; i++) {
460 while (!list_empty(&queues->queue_array[i].head)) {
461 struct auxtrace_buffer *buffer;
462
463 buffer = list_entry(queues->queue_array[i].head.next,
464 struct auxtrace_buffer, list);
465 list_del_init(&buffer->list);
466 auxtrace_buffer__free(buffer);
467 }
468 }
469
470 zfree(&queues->queue_array);
471 queues->nr_queues = 0;
472}
473
/*
 * Insert (@queue_nr, @ordinal) into the min-heap starting at slot @pos:
 * sift upward, moving parents with larger ordinals down, until the heap
 * property (parent ordinal <= child ordinal) is restored, then place the
 * new entry in the hole.
 */
static void auxtrace_heapify(struct auxtrace_heap_item *heap_array,
                             unsigned int pos, unsigned int queue_nr,
                             u64 ordinal)
{
        unsigned int parent;

        while (pos) {
                parent = (pos - 1) >> 1;
                if (heap_array[parent].ordinal <= ordinal)
                        break;
                heap_array[pos] = heap_array[parent];
                pos = parent;
        }
        heap_array[pos].queue_nr = queue_nr;
        heap_array[pos].ordinal = ordinal;
}
490
/*
 * Add @queue_nr keyed by @ordinal to the heap, growing the backing array
 * by doubling so that it can always be indexed by queue number.  Returns
 * 0 or -ENOMEM; on allocation failure the existing heap is untouched.
 */
int auxtrace_heap__add(struct auxtrace_heap *heap, unsigned int queue_nr,
                       u64 ordinal)
{
        struct auxtrace_heap_item *heap_array;

        if (queue_nr >= heap->heap_sz) {
                unsigned int heap_sz = AUXTRACE_INIT_NR_QUEUES;

                while (heap_sz <= queue_nr)
                        heap_sz <<= 1;
                heap_array = realloc(heap->heap_array,
                                     heap_sz * sizeof(struct auxtrace_heap_item));
                if (!heap_array)
                        return -ENOMEM;
                heap->heap_array = heap_array;
                heap->heap_sz = heap_sz;
        }

        auxtrace_heapify(heap->heap_array, heap->heap_cnt++, queue_nr, ordinal);

        return 0;
}
513
514void auxtrace_heap__free(struct auxtrace_heap *heap)
515{
516 zfree(&heap->heap_array);
517 heap->heap_cnt = 0;
518 heap->heap_sz = 0;
519}
520
/*
 * Remove the root (smallest ordinal) from the min-heap.  Walk down from
 * the root promoting the smaller child at each level; if a node has a
 * left child but no right child, the left child fills the hole and we are
 * done.  Otherwise the hole sinks to the bottom and the former last
 * element is re-inserted there via auxtrace_heapify().
 */
void auxtrace_heap__pop(struct auxtrace_heap *heap)
{
        unsigned int pos, last, heap_cnt = heap->heap_cnt;
        struct auxtrace_heap_item *heap_array;

        if (!heap_cnt)
                return;

        heap->heap_cnt -= 1;

        heap_array = heap->heap_array;

        pos = 0;
        while (1) {
                unsigned int left, right;

                left = (pos << 1) + 1;
                if (left >= heap_cnt)
                        break;
                right = left + 1;
                if (right >= heap_cnt) {
                        /* Only a left child: it fills the hole directly */
                        heap_array[pos] = heap_array[left];
                        return;
                }
                if (heap_array[left].ordinal < heap_array[right].ordinal) {
                        heap_array[pos] = heap_array[left];
                        pos = left;
                } else {
                        heap_array[pos] = heap_array[right];
                        pos = right;
                }
        }

        last = heap_cnt - 1;
        auxtrace_heapify(heap_array, pos, heap_array[last].queue_nr,
                         heap_array[last].ordinal);
}
558
559size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr,
560 struct evlist *evlist)
561{
562 if (itr)
563 return itr->info_priv_size(itr, evlist);
564 return 0;
565}
566
/* Common error path for operations needing arch AUX tracing support. */
static int auxtrace_not_supported(void)
{
        pr_err("AUX area tracing is not supported on this architecture\n");
        return -EINVAL;
}
572
573int auxtrace_record__info_fill(struct auxtrace_record *itr,
574 struct perf_session *session,
575 struct perf_record_auxtrace_info *auxtrace_info,
576 size_t priv_size)
577{
578 if (itr)
579 return itr->info_fill(itr, session, auxtrace_info, priv_size);
580 return auxtrace_not_supported();
581}
582
583void auxtrace_record__free(struct auxtrace_record *itr)
584{
585 if (itr)
586 itr->free(itr);
587}
588
589int auxtrace_record__snapshot_start(struct auxtrace_record *itr)
590{
591 if (itr && itr->snapshot_start)
592 return itr->snapshot_start(itr);
593 return 0;
594}
595
596int auxtrace_record__snapshot_finish(struct auxtrace_record *itr, bool on_exit)
597{
598 if (!on_exit && itr && itr->snapshot_finish)
599 return itr->snapshot_finish(itr);
600 return 0;
601}
602
603int auxtrace_record__find_snapshot(struct auxtrace_record *itr, int idx,
604 struct auxtrace_mmap *mm,
605 unsigned char *data, u64 *head, u64 *old)
606{
607 if (itr && itr->find_snapshot)
608 return itr->find_snapshot(itr, idx, mm, data, head, old);
609 return 0;
610}
611
612int auxtrace_record__options(struct auxtrace_record *itr,
613 struct evlist *evlist,
614 struct record_opts *opts)
615{
616 if (itr) {
617 itr->evlist = evlist;
618 return itr->recording_options(itr, evlist, opts);
619 }
620 return 0;
621}
622
623u64 auxtrace_record__reference(struct auxtrace_record *itr)
624{
625 if (itr)
626 return itr->reference(itr);
627 return 0;
628}
629
/*
 * Parse the --snapshot option string.  A NULL @str means the option was
 * not given.  PMU-agnostic prefixes are consumed here ('e' = snapshot on
 * exit) before handing the remainder to the recorder's own parser.  It is
 * an error to request snapshotting with no AUX recorder present.
 */
int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
                                    struct record_opts *opts, const char *str)
{
        if (!str)
                return 0;

        /* PMU-agnostic options */
        switch (*str) {
        case 'e':
                opts->auxtrace_snapshot_on_exit = true;
                str++;
                break;
        default:
                break;
        }

        if (itr && itr->parse_snapshot_options)
                return itr->parse_snapshot_options(itr, opts, str);

        pr_err("No AUX area tracing to snapshot\n");
        return -EINVAL;
}
652
/*
 * Called when reading of AUX data from mmap @idx has finished: re-enable
 * the AUX area event (the first event matching the recorder's PMU type) so
 * tracing resumes.  If the event was deliberately disabled, do nothing.
 */
int auxtrace_record__read_finish(struct auxtrace_record *itr, int idx)
{
        struct evsel *evsel;

        if (!itr->evlist || !itr->pmu)
                return -EINVAL;

        evlist__for_each_entry(itr->evlist, evsel) {
                if (evsel->core.attr.type == itr->pmu->type) {
                        if (evsel->disabled)
                                return 0;
                        return evlist__enable_event_idx(itr->evlist, evsel, idx);
                }
        }
        return -EINVAL;
}
669
670
671
672
673
674
/* Upper bound on per-sample AUX data (leaves headroom in a 64KB record) */
#define MAX_AUX_SAMPLE_SIZE (60 * 1024)

/* Default AUX sample size when none was requested or configured */
#define DEFAULT_AUX_SAMPLE_SIZE (4 * 1024)

/*
 * Validate AUX sampling configuration across the evlist: sample sizes may
 * only be set on non-leader events whose group leader is an AUX area
 * event, and must not exceed MAX_AUX_SAMPLE_SIZE.  Sets the AUX sample bit
 * on each sampling event and flags sample mode in @opts.  Finally checks
 * the kernel actually supports AUX sampling.
 */
static int auxtrace_validate_aux_sample_size(struct evlist *evlist,
                                             struct record_opts *opts)
{
        struct evsel *evsel;
        bool has_aux_leader = false;
        u32 sz;

        evlist__for_each_entry(evlist, evsel) {
                sz = evsel->core.attr.aux_sample_size;
                if (evsel__is_group_leader(evsel)) {
                        has_aux_leader = evsel__is_aux_event(evsel);
                        if (sz) {
                                if (has_aux_leader)
                                        pr_err("Cannot add AUX area sampling to an AUX area event\n");
                                else
                                        pr_err("Cannot add AUX area sampling to a group leader\n");
                                return -EINVAL;
                        }
                }
                if (sz > MAX_AUX_SAMPLE_SIZE) {
                        pr_err("AUX area sample size %u too big, max. %d\n",
                               sz, MAX_AUX_SAMPLE_SIZE);
                        return -EINVAL;
                }
                if (sz) {
                        if (!has_aux_leader) {
                                pr_err("Cannot add AUX area sampling because group leader is not an AUX area event\n");
                                return -EINVAL;
                        }
                        evsel__set_sample_bit(evsel, AUX);
                        opts->auxtrace_sample_mode = true;
                } else {
                        evsel__reset_sample_bit(evsel, AUX);
                }
        }

        if (!opts->auxtrace_sample_mode) {
                pr_err("AUX area sampling requires an AUX area event group leader plus other events to which to add samples\n");
                return -EINVAL;
        }

        if (!perf_can_aux_sample()) {
                pr_err("AUX area sampling is not supported by kernel\n");
                return -EINVAL;
        }

        return 0;
}
728
/*
 * Parse the --aux-sample option (@str, NULL when not given) and apply AUX
 * sample sizes.  First the command-line size (or a default) is applied to
 * every group member whose leader is an AUX area event; then per-event
 * aux-sample-size config terms override it, regrouping such events with
 * the nearest preceding AUX event if necessary.  Validation happens only
 * when sampling was requested by either mechanism.
 */
int auxtrace_parse_sample_options(struct auxtrace_record *itr,
                                  struct evlist *evlist,
                                  struct record_opts *opts, const char *str)
{
        struct evsel_config_term *term;
        struct evsel *aux_evsel;
        bool has_aux_sample_size = false;
        bool has_aux_leader = false;
        struct evsel *evsel;
        char *endptr;
        unsigned long sz;

        if (!str)
                goto no_opt;

        if (!itr) {
                pr_err("No AUX area event to sample\n");
                return -EINVAL;
        }

        sz = strtoul(str, &endptr, 0);
        if (*endptr || sz > UINT_MAX) {
                pr_err("Bad AUX area sampling option: '%s'\n", str);
                return -EINVAL;
        }

        if (!sz)
                sz = itr->default_aux_sample_size;

        if (!sz)
                sz = DEFAULT_AUX_SAMPLE_SIZE;

        /* Set aux_sample_size based on --aux-sample option */
        evlist__for_each_entry(evlist, evsel) {
                if (evsel__is_group_leader(evsel)) {
                        has_aux_leader = evsel__is_aux_event(evsel);
                } else if (has_aux_leader) {
                        evsel->core.attr.aux_sample_size = sz;
                }
        }
no_opt:
        aux_evsel = NULL;
        /* Override with aux_sample_size from config term */
        evlist__for_each_entry(evlist, evsel) {
                if (evsel__is_aux_event(evsel))
                        aux_evsel = evsel;
                term = evsel__get_config_term(evsel, AUX_SAMPLE_SIZE);
                if (term) {
                        has_aux_sample_size = true;
                        evsel->core.attr.aux_sample_size = term->val.aux_sample_size;
                        /* If possible, group with the AUX event */
                        if (aux_evsel && evsel->core.attr.aux_sample_size)
                                evlist__regroup(evlist, aux_evsel, evsel);
                }
        }

        if (!str && !has_aux_sample_size)
                return 0;

        if (!itr) {
                pr_err("No AUX area event to sample\n");
                return -EINVAL;
        }

        return auxtrace_validate_aux_sample_size(evlist, opts);
}
795
/*
 * Events carrying the aux-output config term must be in the same group as
 * the AUX area event they attach to: regroup each such event with the most
 * recently seen AUX event in evlist order.
 */
void auxtrace_regroup_aux_output(struct evlist *evlist)
{
        struct evsel *evsel, *aux_evsel = NULL;
        struct evsel_config_term *term;

        evlist__for_each_entry(evlist, evsel) {
                if (evsel__is_aux_event(evsel))
                        aux_evsel = evsel;
                term = evsel__get_config_term(evsel, AUX_OUTPUT);
                /* If possible, group with the AUX event */
                if (term && aux_evsel)
                        evlist__regroup(evlist, aux_evsel, evsel);
        }
}
810
/*
 * Weak default for architectures without AUX area tracing support:
 * success, but no recorder.  Arch-specific code overrides this.
 */
struct auxtrace_record *__weak
auxtrace_record__init(struct evlist *evlist __maybe_unused, int *err)
{
        *err = 0;
        return NULL;
}
817
818static int auxtrace_index__alloc(struct list_head *head)
819{
820 struct auxtrace_index *auxtrace_index;
821
822 auxtrace_index = malloc(sizeof(struct auxtrace_index));
823 if (!auxtrace_index)
824 return -ENOMEM;
825
826 auxtrace_index->nr = 0;
827 INIT_LIST_HEAD(&auxtrace_index->list);
828
829 list_add_tail(&auxtrace_index->list, head);
830
831 return 0;
832}
833
834void auxtrace_index__free(struct list_head *head)
835{
836 struct auxtrace_index *auxtrace_index, *n;
837
838 list_for_each_entry_safe(auxtrace_index, n, head, list) {
839 list_del_init(&auxtrace_index->list);
840 free(auxtrace_index);
841 }
842}
843
/*
 * Return the index node that the next entry should be written into: the
 * last node on the list, allocating a fresh one if the list is empty or
 * the last node is already full.  Returns NULL on allocation failure.
 */
static struct auxtrace_index *auxtrace_index__last(struct list_head *head)
{
        struct auxtrace_index *auxtrace_index;
        int err;

        if (list_empty(head)) {
                err = auxtrace_index__alloc(head);
                if (err)
                        return NULL;
        }

        auxtrace_index = list_entry(head->prev, struct auxtrace_index, list);

        if (auxtrace_index->nr >= PERF_AUXTRACE_INDEX_ENTRY_COUNT) {
                err = auxtrace_index__alloc(head);
                if (err)
                        return NULL;
                auxtrace_index = list_entry(head->prev, struct auxtrace_index,
                                            list);
        }

        return auxtrace_index;
}
867
868int auxtrace_index__auxtrace_event(struct list_head *head,
869 union perf_event *event, off_t file_offset)
870{
871 struct auxtrace_index *auxtrace_index;
872 size_t nr;
873
874 auxtrace_index = auxtrace_index__last(head);
875 if (!auxtrace_index)
876 return -ENOMEM;
877
878 nr = auxtrace_index->nr;
879 auxtrace_index->entries[nr].file_offset = file_offset;
880 auxtrace_index->entries[nr].sz = event->header.size;
881 auxtrace_index->nr += 1;
882
883 return 0;
884}
885
886static int auxtrace_index__do_write(int fd,
887 struct auxtrace_index *auxtrace_index)
888{
889 struct auxtrace_index_entry ent;
890 size_t i;
891
892 for (i = 0; i < auxtrace_index->nr; i++) {
893 ent.file_offset = auxtrace_index->entries[i].file_offset;
894 ent.sz = auxtrace_index->entries[i].sz;
895 if (writen(fd, &ent, sizeof(ent)) != sizeof(ent))
896 return -errno;
897 }
898 return 0;
899}
900
/*
 * Serialize the whole auxtrace index to @fd: a u64 total entry count
 * followed by the entries of each node in list order.  Returns 0, or
 * -errno / the node writer's error on failure.
 */
int auxtrace_index__write(int fd, struct list_head *head)
{
        struct auxtrace_index *auxtrace_index;
        u64 total = 0;
        int err;

        list_for_each_entry(auxtrace_index, head, list)
                total += auxtrace_index->nr;

        if (writen(fd, &total, sizeof(total)) != sizeof(total))
                return -errno;

        list_for_each_entry(auxtrace_index, head, list) {
                err = auxtrace_index__do_write(fd, auxtrace_index);
                if (err)
                        return err;
        }

        return 0;
}
921
/*
 * Read one on-disk index entry from @fd and append it to the in-memory
 * index, byte-swapping if the file's endianness differs from the host's.
 * Returns 0 on success, -1 on read or allocation failure.
 */
static int auxtrace_index__process_entry(int fd, struct list_head *head,
                                         bool needs_swap)
{
        struct auxtrace_index *auxtrace_index;
        struct auxtrace_index_entry ent;
        size_t nr;

        if (readn(fd, &ent, sizeof(ent)) != sizeof(ent))
                return -1;

        auxtrace_index = auxtrace_index__last(head);
        if (!auxtrace_index)
                return -1;

        nr = auxtrace_index->nr;
        if (needs_swap) {
                auxtrace_index->entries[nr].file_offset =
                                                bswap_64(ent.file_offset);
                auxtrace_index->entries[nr].sz = bswap_64(ent.sz);
        } else {
                auxtrace_index->entries[nr].file_offset = ent.file_offset;
                auxtrace_index->entries[nr].sz = ent.sz;
        }

        auxtrace_index->nr = nr + 1;

        return 0;
}
950
951int auxtrace_index__process(int fd, u64 size, struct perf_session *session,
952 bool needs_swap)
953{
954 struct list_head *head = &session->auxtrace_index;
955 u64 nr;
956
957 if (readn(fd, &nr, sizeof(u64)) != sizeof(u64))
958 return -1;
959
960 if (needs_swap)
961 nr = bswap_64(nr);
962
963 if (sizeof(u64) + nr * sizeof(struct auxtrace_index_entry) > size)
964 return -1;
965
966 while (nr--) {
967 int err;
968
969 err = auxtrace_index__process_entry(fd, head, needs_swap);
970 if (err)
971 return -1;
972 }
973
974 return 0;
975}
976
977static int auxtrace_queues__process_index_entry(struct auxtrace_queues *queues,
978 struct perf_session *session,
979 struct auxtrace_index_entry *ent)
980{
981 return auxtrace_queues__add_indexed_event(queues, session,
982 ent->file_offset, ent->sz);
983}
984
/*
 * Populate the queues from the session's auxtrace index, queuing the AUX
 * data of every indexed event.  Skipped entirely when decoding is
 * disabled.  Returns 0 or the first entry's error.
 */
int auxtrace_queues__process_index(struct auxtrace_queues *queues,
                                   struct perf_session *session)
{
        struct auxtrace_index *auxtrace_index;
        struct auxtrace_index_entry *ent;
        size_t i;
        int err;

        if (auxtrace__dont_decode(session))
                return 0;

        list_for_each_entry(auxtrace_index, &session->auxtrace_index, list) {
                for (i = 0; i < auxtrace_index->nr; i++) {
                        ent = &auxtrace_index->entries[i];
                        err = auxtrace_queues__process_index_entry(queues,
                                                                   session,
                                                                   ent);
                        if (err)
                                return err;
                }
        }
        return 0;
}
1008
1009struct auxtrace_buffer *auxtrace_buffer__next(struct auxtrace_queue *queue,
1010 struct auxtrace_buffer *buffer)
1011{
1012 if (buffer) {
1013 if (list_is_last(&buffer->list, &queue->head))
1014 return NULL;
1015 return list_entry(buffer->list.next, struct auxtrace_buffer,
1016 list);
1017 } else {
1018 if (list_empty(&queue->head))
1019 return NULL;
1020 return list_entry(queue->head.next, struct auxtrace_buffer,
1021 list);
1022 }
1023}
1024
1025struct auxtrace_queue *auxtrace_queues__sample_queue(struct auxtrace_queues *queues,
1026 struct perf_sample *sample,
1027 struct perf_session *session)
1028{
1029 struct perf_sample_id *sid;
1030 unsigned int idx;
1031 u64 id;
1032
1033 id = sample->id;
1034 if (!id)
1035 return NULL;
1036
1037 sid = evlist__id2sid(session->evlist, id);
1038 if (!sid)
1039 return NULL;
1040
1041 idx = sid->idx;
1042
1043 if (idx >= queues->nr_queues)
1044 return NULL;
1045
1046 return &queues->queue_array[idx];
1047}
1048
/*
 * Queue AUX data that arrived embedded in a PERF_RECORD_SAMPLE (AUX
 * sampling mode).  The queue index, tid and cpu are resolved from the
 * sample id.  @data_offset is the file offset of the AUX sample data.
 * Returns 0, -EINVAL if the sample carries no id, or -ENOENT for an
 * unknown id.
 */
int auxtrace_queues__add_sample(struct auxtrace_queues *queues,
                                struct perf_session *session,
                                struct perf_sample *sample, u64 data_offset,
                                u64 reference)
{
        struct auxtrace_buffer buffer = {
                .pid = -1,
                .data_offset = data_offset,
                .reference = reference,
                .size = sample->aux_sample.size,
        };
        struct perf_sample_id *sid;
        u64 id = sample->id;
        unsigned int idx;

        if (!id)
                return -EINVAL;

        sid = evlist__id2sid(session->evlist, id);
        if (!sid)
                return -ENOENT;

        idx = sid->idx;
        buffer.tid = sid->tid;
        buffer.cpu = sid->cpu;

        return auxtrace_queues__add_buffer(queues, session, idx, &buffer, NULL);
}
1077
/* Selects which record types auxtrace_queue_data_cb() queues. */
struct queue_data {
        bool samples;   /* queue AUX data carried by PERF_RECORD_SAMPLE */
        bool events;    /* queue AUX data of PERF_RECORD_AUXTRACE events */
};
1082
/*
 * perf_session__peek_events() callback: hand AUXTRACE events and/or AUX
 * sample data (per the queue_data selection in @data) to the session's
 * queue_data handler, translating the event's file @offset into the file
 * offset of the AUX data itself.
 */
static int auxtrace_queue_data_cb(struct perf_session *session,
                                  union perf_event *event, u64 offset,
                                  void *data)
{
        struct queue_data *qd = data;
        struct perf_sample sample;
        int err;

        if (qd->events && event->header.type == PERF_RECORD_AUXTRACE) {
                if (event->header.size < sizeof(struct perf_record_auxtrace))
                        return -EINVAL;
                /* AUX data follows the event record */
                offset += event->header.size;
                return session->auxtrace->queue_data(session, NULL, event,
                                                     offset);
        }

        if (!qd->samples || event->header.type != PERF_RECORD_SAMPLE)
                return 0;

        err = evlist__parse_sample(session->evlist, event, &sample);
        if (err)
                return err;

        if (!sample.aux_sample.size)
                return 0;

        /* Offset of the AUX data within the sample record */
        offset += sample.aux_sample.data - (void *)event;

        return session->auxtrace->queue_data(session, &sample, NULL, offset);
}
1113
/*
 * Walk all events in the perf.data file and queue AUX data from samples
 * and/or AUXTRACE events, as selected by @samples / @events.  No-op when
 * decoding is disabled; -EINVAL if the session has no queue_data handler.
 */
int auxtrace_queue_data(struct perf_session *session, bool samples, bool events)
{
        struct queue_data qd = {
                .samples = samples,
                .events = events,
        };

        if (auxtrace__dont_decode(session))
                return 0;

        if (!session->auxtrace || !session->auxtrace->queue_data)
                return -EINVAL;

        return perf_session__peek_events(session, session->header.data_offset,
                                         session->header.data_size,
                                         auxtrace_queue_data_cb, &qd);
}
1131
/*
 * Return a pointer to the buffer's data, mmap'ing it from the file if it
 * is not already available.  mmap() requires page alignment, so the map
 * starts at the page boundary below data_offset and the returned pointer
 * is adjusted by the remainder.  @rw selects a writable mapping.  The
 * mapping is cached on the buffer until auxtrace_buffer__put_data().
 * Returns NULL on mmap failure.
 */
void *auxtrace_buffer__get_data_rw(struct auxtrace_buffer *buffer, int fd, bool rw)
{
        int prot = rw ? PROT_READ | PROT_WRITE : PROT_READ;
        size_t adj = buffer->data_offset & (page_size - 1);
        size_t size = buffer->size + adj;
        off_t file_offset = buffer->data_offset - adj;
        void *addr;

        if (buffer->data)
                return buffer->data;

        addr = mmap(NULL, size, prot, MAP_SHARED, fd, file_offset);
        if (addr == MAP_FAILED)
                return NULL;

        buffer->mmap_addr = addr;
        buffer->mmap_size = size;

        buffer->data = addr + adj;

        return buffer->data;
}
1154
/*
 * Release a mapping created by auxtrace_buffer__get_data_rw().  Does
 * nothing if the data was not mmap'd (e.g. copied from a pipe).
 */
void auxtrace_buffer__put_data(struct auxtrace_buffer *buffer)
{
        if (!buffer->data || !buffer->mmap_addr)
                return;
        munmap(buffer->mmap_addr, buffer->mmap_size);
        buffer->mmap_addr = NULL;
        buffer->mmap_size = 0;
        buffer->data = NULL;
        buffer->use_data = NULL;
}
1165
1166void auxtrace_buffer__drop_data(struct auxtrace_buffer *buffer)
1167{
1168 auxtrace_buffer__put_data(buffer);
1169 if (buffer->data_needs_freeing) {
1170 buffer->data_needs_freeing = false;
1171 zfree(&buffer->data);
1172 buffer->use_data = NULL;
1173 buffer->size = 0;
1174 }
1175}
1176
/* Drop any data the buffer holds and free the buffer itself. */
void auxtrace_buffer__free(struct auxtrace_buffer *buffer)
{
        auxtrace_buffer__drop_data(buffer);
        free(buffer);
}
1182
/*
 * Fill in a PERF_RECORD_AUXTRACE_ERROR event describing a decoder error.
 * fmt is set to 1 to indicate the extended layout that carries the ip and
 * timestamp fields.  The record size is trimmed to the actual message
 * length, aligned to 8 bytes.
 */
void auxtrace_synth_error(struct perf_record_auxtrace_error *auxtrace_error, int type,
                          int code, int cpu, pid_t pid, pid_t tid, u64 ip,
                          const char *msg, u64 timestamp)
{
        size_t size;

        memset(auxtrace_error, 0, sizeof(struct perf_record_auxtrace_error));

        auxtrace_error->header.type = PERF_RECORD_AUXTRACE_ERROR;
        auxtrace_error->type = type;
        auxtrace_error->code = code;
        auxtrace_error->cpu = cpu;
        auxtrace_error->pid = pid;
        auxtrace_error->tid = tid;
        auxtrace_error->fmt = 1;
        auxtrace_error->ip = ip;
        auxtrace_error->time = timestamp;
        strlcpy(auxtrace_error->msg, msg, MAX_AUXTRACE_ERROR_MSG);

        /* Size up to and including the NUL-terminated message */
        size = (void *)auxtrace_error->msg - (void *)auxtrace_error +
               strlen(auxtrace_error->msg) + 1;
        auxtrace_error->header.size = PERF_ALIGN(size, sizeof(u64));
}
1206
/*
 * Synthesize a PERF_RECORD_AUXTRACE_INFO event (fixed part plus the
 * recorder's private data) and deliver it through @process.  Returns 0 on
 * success, -ENOMEM or the fill/process error otherwise.
 */
int perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr,
                                         struct perf_tool *tool,
                                         struct perf_session *session,
                                         perf_event__handler_t process)
{
        union perf_event *ev;
        size_t priv_size;
        int err;

        pr_debug2("Synthesizing auxtrace information\n");
        priv_size = auxtrace_record__info_priv_size(itr, session->evlist);
        ev = zalloc(sizeof(struct perf_record_auxtrace_info) + priv_size);
        if (!ev)
                return -ENOMEM;

        ev->auxtrace_info.header.type = PERF_RECORD_AUXTRACE_INFO;
        ev->auxtrace_info.header.size = sizeof(struct perf_record_auxtrace_info) +
                                        priv_size;
        err = auxtrace_record__info_fill(itr, session, &ev->auxtrace_info,
                                         priv_size);
        if (err)
                goto out_free;

        err = process(tool, ev, NULL, NULL);
out_free:
        free(ev);
        return err;
}
1235
/*
 * Remove @leader from its group: the first remaining member becomes the
 * new leader of the others, inheriting the group name, and member counts
 * are fixed up (@leader is left leading only itself).
 */
static void unleader_evsel(struct evlist *evlist, struct evsel *leader)
{
        struct evsel *new_leader = NULL;
        struct evsel *evsel;

        /* Find new leader for the group */
        evlist__for_each_entry(evlist, evsel) {
                if (!evsel__has_leader(evsel, leader) || evsel == leader)
                        continue;
                if (!new_leader)
                        new_leader = evsel;
                evsel__set_leader(evsel, new_leader);
        }

        /* Update group information */
        if (new_leader) {
                zfree(&new_leader->group_name);
                new_leader->group_name = leader->group_name;
                leader->group_name = NULL;

                new_leader->core.nr_members = leader->core.nr_members - 1;
                leader->core.nr_members = 1;
        }
}
1260
1261static void unleader_auxtrace(struct perf_session *session)
1262{
1263 struct evsel *evsel;
1264
1265 evlist__for_each_entry(session->evlist, evsel) {
1266 if (auxtrace__evsel_is_auxtrace(session, evsel) &&
1267 evsel__is_group_leader(evsel)) {
1268 unleader_evsel(session->evlist, evsel);
1269 }
1270 }
1271}
1272
/*
 * Handle a PERF_RECORD_AUXTRACE_INFO event: dispatch to the matching
 * trace-type initializer, then detach AUX events from their groups (AUX
 * data is decoded separately, so grouping is not wanted at report time).
 * Returns -EINVAL for unknown trace types.
 */
int perf_event__process_auxtrace_info(struct perf_session *session,
                                      union perf_event *event)
{
        enum auxtrace_type type = event->auxtrace_info.type;
        int err;

        if (dump_trace)
                fprintf(stdout, " type: %u\n", type);

        switch (type) {
        case PERF_AUXTRACE_INTEL_PT:
                err = intel_pt_process_auxtrace_info(event, session);
                break;
        case PERF_AUXTRACE_INTEL_BTS:
                err = intel_bts_process_auxtrace_info(event, session);
                break;
        case PERF_AUXTRACE_ARM_SPE:
                err = arm_spe_process_auxtrace_info(event, session);
                break;
        case PERF_AUXTRACE_CS_ETM:
                err = cs_etm__process_auxtrace_info(event, session);
                break;
        case PERF_AUXTRACE_S390_CPUMSF:
                err = s390_cpumsf_process_auxtrace_info(event, session);
                break;
        case PERF_AUXTRACE_UNKNOWN:
        default:
                return -EINVAL;
        }

        if (err)
                return err;

        unleader_auxtrace(session);

        return 0;
}
1310
1311s64 perf_event__process_auxtrace(struct perf_session *session,
1312 union perf_event *event)
1313{
1314 s64 err;
1315
1316 if (dump_trace)
1317 fprintf(stdout, " size: %#"PRI_lx64" offset: %#"PRI_lx64" ref: %#"PRI_lx64" idx: %u tid: %d cpu: %d\n",
1318 event->auxtrace.size, event->auxtrace.offset,
1319 event->auxtrace.reference, event->auxtrace.idx,
1320 event->auxtrace.tid, event->auxtrace.cpu);
1321
1322 if (auxtrace__dont_decode(session))
1323 return event->auxtrace.size;
1324
1325 if (!session->auxtrace || event->header.type != PERF_RECORD_AUXTRACE)
1326 return -EINVAL;
1327
1328 err = session->auxtrace->process_auxtrace_event(session, event, session->tool);
1329 if (err < 0)
1330 return err;
1331
1332 return event->auxtrace.size;
1333}
1334
/* Defaults and limits for the --itrace option (see itrace_do_parse_synth_opts()) */
#define PERF_ITRACE_DEFAULT_PERIOD_TYPE		PERF_ITRACE_PERIOD_NANOSECS
#define PERF_ITRACE_DEFAULT_PERIOD		100000
#define PERF_ITRACE_DEFAULT_CALLCHAIN_SZ	16
#define PERF_ITRACE_MAX_CALLCHAIN_SZ		1024
#define PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ	64
#define PERF_ITRACE_MAX_LAST_BRANCH_SZ		1024
1341
1342void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts,
1343 bool no_sample)
1344{
1345 synth_opts->branches = true;
1346 synth_opts->transactions = true;
1347 synth_opts->ptwrites = true;
1348 synth_opts->pwr_events = true;
1349 synth_opts->other_events = true;
1350 synth_opts->errors = true;
1351 synth_opts->flc = true;
1352 synth_opts->llc = true;
1353 synth_opts->tlb = true;
1354 synth_opts->mem = true;
1355 synth_opts->remote_access = true;
1356
1357 if (no_sample) {
1358 synth_opts->period_type = PERF_ITRACE_PERIOD_INSTRUCTIONS;
1359 synth_opts->period = 1;
1360 synth_opts->calls = true;
1361 } else {
1362 synth_opts->instructions = true;
1363 synth_opts->period_type = PERF_ITRACE_DEFAULT_PERIOD_TYPE;
1364 synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
1365 }
1366 synth_opts->callchain_sz = PERF_ITRACE_DEFAULT_CALLCHAIN_SZ;
1367 synth_opts->last_branch_sz = PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ;
1368 synth_opts->initial_skip = 0;
1369}
1370
/*
 * Consume one lower-case flag letter from *ptr (skipping spaces) and set
 * the corresponding bit ('a' == bit 0) in *flags.  Returns 0 on success,
 * -1 if the next non-space character is not a-z.
 */
static int get_flag(const char **ptr, unsigned int *flags)
{
	for (;;) {
		char ch = **ptr;

		if (ch == ' ') {
			++*ptr;
			continue;
		}
		if (ch < 'a' || ch > 'z')
			return -1;
		*flags |= 1 << (ch - 'a');
		++*ptr;
		return 0;
	}
}
1388
/*
 * Parse a run of "+<flag>" / "-<flag>" tokens, accumulating into
 * *plus_flags and *minus_flags respectively.  Stops (returning 0) at the
 * first character that is neither '+', '-' nor ' '; returns -1 if a
 * sign is not followed by a valid flag letter.
 */
static int get_flags(const char **ptr, unsigned int *plus_flags, unsigned int *minus_flags)
{
	for (;;) {
		char ch = **ptr;

		if (ch == '+') {
			++*ptr;
			if (get_flag(ptr, plus_flags))
				return -1;
		} else if (ch == '-') {
			++*ptr;
			if (get_flag(ptr, minus_flags))
				return -1;
		} else if (ch == ' ') {
			++*ptr;
		} else {
			return 0;
		}
	}
}
1411
1412
1413
1414
1415
1416
/*
 * Parse the --itrace option string into @synth_opts.  Each letter enables
 * a class of synthesized event; 'i', 'g'/'G' and 'l'/'L' accept a numeric
 * argument, and 'e'/'d' accept +/- flag modifiers.  With a NULL string the
 * defaults are used; with @unset, decoding is disabled entirely.
 */
int itrace_do_parse_synth_opts(struct itrace_synth_opts *synth_opts,
			       const char *str, int unset)
{
	const char *p;
	char *endptr;
	bool period_type_set = false;
	bool period_set = false;

	synth_opts->set = true;

	if (unset) {
		synth_opts->dont_decode = true;
		return 0;
	}

	if (!str) {
		itrace_synth_opts__set_default(synth_opts,
					       synth_opts->default_no_sample);
		return 0;
	}

	for (p = str; *p;) {
		switch (*p++) {
		case 'i':
			synth_opts->instructions = true;
			while (*p == ' ' || *p == ',')
				p += 1;
			/* Optional period, e.g. "i100us" */
			if (isdigit(*p)) {
				synth_opts->period = strtoull(p, &endptr, 10);
				period_set = true;
				p = endptr;
				while (*p == ' ' || *p == ',')
					p += 1;
				switch (*p++) {
				case 'i':
					synth_opts->period_type =
						PERF_ITRACE_PERIOD_INSTRUCTIONS;
					period_type_set = true;
					break;
				case 't':
					synth_opts->period_type =
						PERF_ITRACE_PERIOD_TICKS;
					period_type_set = true;
					break;
				case 'm':
					synth_opts->period *= 1000;
					/* Fall through: ms -> us -> ns */
				case 'u':
					synth_opts->period *= 1000;
					/* Fall through */
				case 'n':
					if (*p++ != 's')
						goto out_err;
					synth_opts->period_type =
						PERF_ITRACE_PERIOD_NANOSECS;
					period_type_set = true;
					break;
				case '\0':
					goto out;
				default:
					goto out_err;
				}
			}
			break;
		case 'b':
			synth_opts->branches = true;
			break;
		case 'x':
			synth_opts->transactions = true;
			break;
		case 'w':
			synth_opts->ptwrites = true;
			break;
		case 'p':
			synth_opts->pwr_events = true;
			break;
		case 'o':
			synth_opts->other_events = true;
			break;
		case 'e':
			synth_opts->errors = true;
			if (get_flags(&p, &synth_opts->error_plus_flags,
				      &synth_opts->error_minus_flags))
				goto out_err;
			break;
		case 'd':
			synth_opts->log = true;
			if (get_flags(&p, &synth_opts->log_plus_flags,
				      &synth_opts->log_minus_flags))
				goto out_err;
			break;
		case 'c':
			synth_opts->branches = true;
			synth_opts->calls = true;
			break;
		case 'r':
			synth_opts->branches = true;
			synth_opts->returns = true;
			break;
		case 'G':
		case 'g':
			/* 'G' adds callchains to existing samples, 'g' synthesizes them */
			if (p[-1] == 'G')
				synth_opts->add_callchain = true;
			else
				synth_opts->callchain = true;
			synth_opts->callchain_sz =
					PERF_ITRACE_DEFAULT_CALLCHAIN_SZ;
			while (*p == ' ' || *p == ',')
				p += 1;
			if (isdigit(*p)) {
				unsigned int val;

				val = strtoul(p, &endptr, 10);
				p = endptr;
				if (!val || val > PERF_ITRACE_MAX_CALLCHAIN_SZ)
					goto out_err;
				synth_opts->callchain_sz = val;
			}
			break;
		case 'L':
		case 'l':
			/* As above: 'L' adds to samples, 'l' synthesizes */
			if (p[-1] == 'L')
				synth_opts->add_last_branch = true;
			else
				synth_opts->last_branch = true;
			synth_opts->last_branch_sz =
					PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ;
			while (*p == ' ' || *p == ',')
				p += 1;
			if (isdigit(*p)) {
				unsigned int val;

				val = strtoul(p, &endptr, 10);
				p = endptr;
				if (!val ||
				    val > PERF_ITRACE_MAX_LAST_BRANCH_SZ)
					goto out_err;
				synth_opts->last_branch_sz = val;
			}
			break;
		case 's':
			synth_opts->initial_skip = strtoul(p, &endptr, 10);
			if (p == endptr)
				goto out_err;
			p = endptr;
			break;
		case 'f':
			synth_opts->flc = true;
			break;
		case 'm':
			synth_opts->llc = true;
			break;
		case 't':
			synth_opts->tlb = true;
			break;
		case 'a':
			synth_opts->remote_access = true;
			break;
		case 'M':
			synth_opts->mem = true;
			break;
		case 'q':
			synth_opts->quick += 1;
			break;
		case 'Z':
			synth_opts->timeless_decoding = true;
			break;
		case ' ':
		case ',':
			break;
		default:
			goto out_err;
		}
	}
out:
	/* Apply period defaults only if 'i' was given without them */
	if (synth_opts->instructions) {
		if (!period_type_set)
			synth_opts->period_type =
					PERF_ITRACE_DEFAULT_PERIOD_TYPE;
		if (!period_set)
			synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
	}

	return 0;

out_err:
	pr_err("Bad Instruction Tracing options '%s'\n", str);
	return -EINVAL;
}
1606
1607int itrace_parse_synth_opts(const struct option *opt, const char *str, int unset)
1608{
1609 return itrace_do_parse_synth_opts(opt->value, str, unset);
1610}
1611
/* Human-readable names indexed by AUX trace error type */
static const char * const auxtrace_error_type_name[] = {
	[PERF_AUXTRACE_ERROR_ITRACE] = "instruction trace",
};
1615
1616static const char *auxtrace_error_name(int type)
1617{
1618 const char *error_type_name = NULL;
1619
1620 if (type < PERF_AUXTRACE_ERROR_MAX)
1621 error_type_name = auxtrace_error_type_name[type];
1622 if (!error_type_name)
1623 error_type_name = "unknown AUX";
1624 return error_type_name;
1625}
1626
1627size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp)
1628{
1629 struct perf_record_auxtrace_error *e = &event->auxtrace_error;
1630 unsigned long long nsecs = e->time;
1631 const char *msg = e->msg;
1632 int ret;
1633
1634 ret = fprintf(fp, " %s error type %u",
1635 auxtrace_error_name(e->type), e->type);
1636
1637 if (e->fmt && nsecs) {
1638 unsigned long secs = nsecs / NSEC_PER_SEC;
1639
1640 nsecs -= secs * NSEC_PER_SEC;
1641 ret += fprintf(fp, " time %lu.%09llu", secs, nsecs);
1642 } else {
1643 ret += fprintf(fp, " time 0");
1644 }
1645
1646 if (!e->fmt)
1647 msg = (const char *)&e->time;
1648
1649 ret += fprintf(fp, " cpu %d pid %d tid %d ip %#"PRI_lx64" code %u: %s\n",
1650 e->cpu, e->pid, e->tid, e->ip, e->code, msg);
1651 return ret;
1652}
1653
1654void perf_session__auxtrace_error_inc(struct perf_session *session,
1655 union perf_event *event)
1656{
1657 struct perf_record_auxtrace_error *e = &event->auxtrace_error;
1658
1659 if (e->type < PERF_AUXTRACE_ERROR_MAX)
1660 session->evlist->stats.nr_auxtrace_errors[e->type] += 1;
1661}
1662
1663void events_stats__auxtrace_error_warn(const struct events_stats *stats)
1664{
1665 int i;
1666
1667 for (i = 0; i < PERF_AUXTRACE_ERROR_MAX; i++) {
1668 if (!stats->nr_auxtrace_errors[i])
1669 continue;
1670 ui__warning("%u %s errors\n",
1671 stats->nr_auxtrace_errors[i],
1672 auxtrace_error_name(i));
1673 }
1674}
1675
1676int perf_event__process_auxtrace_error(struct perf_session *session,
1677 union perf_event *event)
1678{
1679 if (auxtrace__dont_decode(session))
1680 return 0;
1681
1682 perf_event__fprintf_auxtrace_error(event, stdout);
1683 return 0;
1684}
1685
/*
 * Read new data from the AUX area ring buffer between the previously seen
 * position (mm->prev) and the current head, and hand it to @fn as one
 * PERF_RECORD_AUXTRACE event (in up to two chunks if the data wraps).
 * Returns 1 if data was consumed, 0 if there was none, negative on error.
 */
static int __auxtrace_mmap__read(struct mmap *map,
				 struct auxtrace_record *itr,
				 struct perf_tool *tool, process_auxtrace_t fn,
				 bool snapshot, size_t snapshot_size)
{
	struct auxtrace_mmap *mm = &map->auxtrace_mmap;
	u64 head, old = mm->prev, offset, ref;
	unsigned char *data = mm->base;
	size_t size, head_off, old_off, len1, len2, padding;
	union perf_event ev;
	void *data1, *data2;

	if (snapshot) {
		/* In snapshot mode the PMU driver decides the window to copy */
		head = auxtrace_mmap__read_snapshot_head(mm);
		if (auxtrace_record__find_snapshot(itr, mm->idx, mm, data,
						   &head, &old))
			return -1;
	} else {
		head = auxtrace_mmap__read_head(mm);
	}

	if (old == head)
		return 0;

	pr_debug3("auxtrace idx %d old %#"PRIx64" head %#"PRIx64" diff %#"PRIx64"\n",
		  mm->idx, old, head, head - old);

	/* mm->mask is set only for power-of-2 buffer sizes */
	if (mm->mask) {
		head_off = head & mm->mask;
		old_off = old & mm->mask;
	} else {
		head_off = head % mm->len;
		old_off = old % mm->len;
	}

	if (head_off > old_off)
		size = head_off - old_off;
	else
		size = mm->len - (old_off - head_off);

	if (snapshot && size > snapshot_size)
		size = snapshot_size;

	ref = auxtrace_record__reference(itr);

	if (head > old || size <= head || mm->mask) {
		offset = head - size;
	} else {
		/*
		 * When the buffer size is not a power of 2, 'head' wraps at
		 * the highest multiple of the buffer size, so subtract the
		 * remainder here.
		 */
		u64 rem = (0ULL - mm->len) % mm->len;

		offset = head - size - rem;
	}

	/* Split the read into at most two chunks around the buffer wrap */
	if (size > head_off) {
		len1 = size - head_off;
		data1 = &data[mm->len - len1];
		len2 = head_off;
		data2 = &data[0];
	} else {
		len1 = size;
		data1 = &data[head_off - len1];
		len2 = 0;
		data2 = NULL;
	}

	if (itr->alignment) {
		unsigned int unwanted = len1 % itr->alignment;

		len1 -= unwanted;
		size -= unwanted;
	}

	/* padding must be written by fn() e.g. record__process_auxtrace() */
	padding = size & (PERF_AUXTRACE_RECORD_ALIGNMENT - 1);
	if (padding)
		padding = PERF_AUXTRACE_RECORD_ALIGNMENT - padding;

	memset(&ev, 0, sizeof(ev));
	ev.auxtrace.header.type = PERF_RECORD_AUXTRACE;
	ev.auxtrace.header.size = sizeof(ev.auxtrace);
	ev.auxtrace.size = size + padding;
	ev.auxtrace.offset = offset;
	ev.auxtrace.reference = ref;
	ev.auxtrace.idx = mm->idx;
	ev.auxtrace.tid = mm->tid;
	ev.auxtrace.cpu = mm->cpu;

	if (fn(tool, map, &ev, data1, len1, data2, len2))
		return -1;

	mm->prev = head;

	if (!snapshot) {
		/* Tell the kernel the data has been consumed */
		auxtrace_mmap__write_tail(mm, head);
		if (itr->read_finish) {
			int err;

			err = itr->read_finish(itr, mm->idx);
			if (err < 0)
				return err;
		}
	}

	return 1;
}
1796
/* Read all new AUX data from @map (non-snapshot mode) */
int auxtrace_mmap__read(struct mmap *map, struct auxtrace_record *itr,
			struct perf_tool *tool, process_auxtrace_t fn)
{
	return __auxtrace_mmap__read(map, itr, tool, fn, false, 0);
}
1802
/* Read up to @snapshot_size bytes of AUX data from @map in snapshot mode */
int auxtrace_mmap__read_snapshot(struct mmap *map,
				 struct auxtrace_record *itr,
				 struct perf_tool *tool, process_auxtrace_t fn,
				 size_t snapshot_size)
{
	return __auxtrace_mmap__read(map, itr, tool, fn, true, snapshot_size);
}
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
/*
 * Simple hash-table cache of decoded data.  When the entry count exceeds
 * 'limit' the whole cache is dropped rather than evicting individually.
 */
struct auxtrace_cache {
	struct hlist_head *hashtable;	/* array of 2^bits buckets */
	size_t sz;			/* number of buckets */
	size_t entry_size;		/* allocation size per entry */
	size_t limit;			/* entry count that triggers a drop */
	size_t cnt;			/* current number of entries */
	unsigned int bits;		/* log2 of bucket count, for hash_32() */
};
1829
1830struct auxtrace_cache *auxtrace_cache__new(unsigned int bits, size_t entry_size,
1831 unsigned int limit_percent)
1832{
1833 struct auxtrace_cache *c;
1834 struct hlist_head *ht;
1835 size_t sz, i;
1836
1837 c = zalloc(sizeof(struct auxtrace_cache));
1838 if (!c)
1839 return NULL;
1840
1841 sz = 1UL << bits;
1842
1843 ht = calloc(sz, sizeof(struct hlist_head));
1844 if (!ht)
1845 goto out_free;
1846
1847 for (i = 0; i < sz; i++)
1848 INIT_HLIST_HEAD(&ht[i]);
1849
1850 c->hashtable = ht;
1851 c->sz = sz;
1852 c->entry_size = entry_size;
1853 c->limit = (c->sz * limit_percent) / 100;
1854 c->bits = bits;
1855
1856 return c;
1857
1858out_free:
1859 free(c);
1860 return NULL;
1861}
1862
/* Free every entry in the cache but keep the bucket array itself */
static void auxtrace_cache__drop(struct auxtrace_cache *c)
{
	struct auxtrace_cache_entry *entry;
	struct hlist_node *tmp;
	size_t i;

	if (!c)
		return;

	for (i = 0; i < c->sz; i++) {
		/* _safe variant: entries are deleted while iterating */
		hlist_for_each_entry_safe(entry, tmp, &c->hashtable[i], hash) {
			hlist_del(&entry->hash);
			auxtrace_cache__free_entry(c, entry);
		}
	}

	c->cnt = 0;
}
1881
1882void auxtrace_cache__free(struct auxtrace_cache *c)
1883{
1884 if (!c)
1885 return;
1886
1887 auxtrace_cache__drop(c);
1888 zfree(&c->hashtable);
1889 free(c);
1890}
1891
1892void *auxtrace_cache__alloc_entry(struct auxtrace_cache *c)
1893{
1894 return malloc(c->entry_size);
1895}
1896
/* Free an entry; entries are plain malloc'd blocks, the cache is untouched */
void auxtrace_cache__free_entry(struct auxtrace_cache *c __maybe_unused,
				void *entry)
{
	free(entry);
}
1902
1903int auxtrace_cache__add(struct auxtrace_cache *c, u32 key,
1904 struct auxtrace_cache_entry *entry)
1905{
1906 if (c->limit && ++c->cnt > c->limit)
1907 auxtrace_cache__drop(c);
1908
1909 entry->key = key;
1910 hlist_add_head(&entry->hash, &c->hashtable[hash_32(key, c->bits)]);
1911
1912 return 0;
1913}
1914
1915static struct auxtrace_cache_entry *auxtrace_cache__rm(struct auxtrace_cache *c,
1916 u32 key)
1917{
1918 struct auxtrace_cache_entry *entry;
1919 struct hlist_head *hlist;
1920 struct hlist_node *n;
1921
1922 if (!c)
1923 return NULL;
1924
1925 hlist = &c->hashtable[hash_32(key, c->bits)];
1926 hlist_for_each_entry_safe(entry, n, hlist, hash) {
1927 if (entry->key == key) {
1928 hlist_del(&entry->hash);
1929 return entry;
1930 }
1931 }
1932
1933 return NULL;
1934}
1935
1936void auxtrace_cache__remove(struct auxtrace_cache *c, u32 key)
1937{
1938 struct auxtrace_cache_entry *entry = auxtrace_cache__rm(c, key);
1939
1940 auxtrace_cache__free_entry(c, entry);
1941}
1942
1943void *auxtrace_cache__lookup(struct auxtrace_cache *c, u32 key)
1944{
1945 struct auxtrace_cache_entry *entry;
1946 struct hlist_head *hlist;
1947
1948 if (!c)
1949 return NULL;
1950
1951 hlist = &c->hashtable[hash_32(key, c->bits)];
1952 hlist_for_each_entry(entry, hlist, hash) {
1953 if (entry->key == key)
1954 return entry;
1955 }
1956
1957 return NULL;
1958}
1959
1960static void addr_filter__free_str(struct addr_filter *filt)
1961{
1962 zfree(&filt->str);
1963 filt->action = NULL;
1964 filt->sym_from = NULL;
1965 filt->sym_to = NULL;
1966 filt->filename = NULL;
1967}
1968
1969static struct addr_filter *addr_filter__new(void)
1970{
1971 struct addr_filter *filt = zalloc(sizeof(*filt));
1972
1973 if (filt)
1974 INIT_LIST_HEAD(&filt->list);
1975
1976 return filt;
1977}
1978
/* Free a filter and its string; NULL is tolerated */
static void addr_filter__free(struct addr_filter *filt)
{
	if (filt) {
		addr_filter__free_str(filt);
		free(filt);
	}
}
1985
1986static void addr_filters__add(struct addr_filters *filts,
1987 struct addr_filter *filt)
1988{
1989 list_add_tail(&filt->list, &filts->head);
1990 filts->cnt += 1;
1991}
1992
1993static void addr_filters__del(struct addr_filters *filts,
1994 struct addr_filter *filt)
1995{
1996 list_del_init(&filt->list);
1997 filts->cnt -= 1;
1998}
1999
2000void addr_filters__init(struct addr_filters *filts)
2001{
2002 INIT_LIST_HEAD(&filts->head);
2003 filts->cnt = 0;
2004}
2005
2006void addr_filters__exit(struct addr_filters *filts)
2007{
2008 struct addr_filter *filt, *n;
2009
2010 list_for_each_entry_safe(filt, n, &filts->head, list) {
2011 addr_filters__del(filts, filt);
2012 addr_filter__free(filt);
2013 }
2014}
2015
2016static int parse_num_or_str(char **inp, u64 *num, const char **str,
2017 const char *str_delim)
2018{
2019 *inp += strspn(*inp, " ");
2020
2021 if (isdigit(**inp)) {
2022 char *endptr;
2023
2024 if (!num)
2025 return -EINVAL;
2026 errno = 0;
2027 *num = strtoull(*inp, &endptr, 0);
2028 if (errno)
2029 return -errno;
2030 if (endptr == *inp)
2031 return -EINVAL;
2032 *inp = endptr;
2033 } else {
2034 size_t n;
2035
2036 if (!str)
2037 return -EINVAL;
2038 *inp += strspn(*inp, " ");
2039 *str = *inp;
2040 n = strcspn(*inp, str_delim);
2041 if (!n)
2042 return -EINVAL;
2043 *inp += n;
2044 if (**inp) {
2045 **inp = '\0';
2046 *inp += 1;
2047 }
2048 }
2049 return 0;
2050}
2051
2052static int parse_action(struct addr_filter *filt)
2053{
2054 if (!strcmp(filt->action, "filter")) {
2055 filt->start = true;
2056 filt->range = true;
2057 } else if (!strcmp(filt->action, "start")) {
2058 filt->start = true;
2059 } else if (!strcmp(filt->action, "stop")) {
2060 filt->start = false;
2061 } else if (!strcmp(filt->action, "tracestop")) {
2062 filt->start = false;
2063 filt->range = true;
2064 filt->action += 5;
2065 } else {
2066 return -EINVAL;
2067 }
2068 return 0;
2069}
2070
/*
 * Parse an optional "#n" / "#g" / "#G" symbol-index suffix.  *idx becomes
 * -1 (none), 0 (global) or n (the n'th occurrence).  Advances *inp.
 */
static int parse_sym_idx(char **inp, int *idx)
{
	unsigned long num;
	char *endptr;

	*idx = -1;

	*inp += strspn(*inp, " ");

	if (**inp != '#')
		return 0;

	*inp += 1;

	if (**inp == 'g' || **inp == 'G') {
		*inp += 1;
		*idx = 0;
		return 0;
	}

	errno = 0;
	num = strtoul(*inp, &endptr, 0);
	if (errno)
		return -errno;
	if (endptr == *inp || num > INT_MAX)
		return -EINVAL;
	*inp = endptr;
	*idx = num;

	return 0;
}
2101
2102static int parse_addr_size(char **inp, u64 *num, const char **str, int *idx)
2103{
2104 int err = parse_num_or_str(inp, num, str, " ");
2105
2106 if (!err && *str)
2107 err = parse_sym_idx(inp, idx);
2108
2109 return err;
2110}
2111
/*
 * Parse one filter of the form:
 *   <action> <addr-or-sym>[#idx] [/ <size-or-sym>[#idx]] [@<filename>] [,]
 * filt->str owns a copy of the input; action/sym_from/sym_to/filename all
 * point into that buffer.  *filter_inp is advanced past the parsed text.
 */
static int parse_one_filter(struct addr_filter *filt, const char **filter_inp)
{
	char *fstr;
	int err;

	filt->str = fstr = strdup(*filter_inp);
	if (!fstr)
		return -ENOMEM;

	err = parse_num_or_str(&fstr, NULL, &filt->action, " ");
	if (err)
		goto out_err;

	err = parse_action(filt);
	if (err)
		goto out_err;

	err = parse_addr_size(&fstr, &filt->addr, &filt->sym_from,
			      &filt->sym_from_idx);
	if (err)
		goto out_err;

	fstr += strspn(fstr, " ");

	/* Optional "/ <size or end symbol>" makes it a range filter */
	if (*fstr == '/') {
		fstr += 1;
		err = parse_addr_size(&fstr, &filt->size, &filt->sym_to,
				      &filt->sym_to_idx);
		if (err)
			goto out_err;
		filt->range = true;
	}

	fstr += strspn(fstr, " ");

	/* Optional "@<file>" restricts the filter to an object file */
	if (*fstr == '@') {
		fstr += 1;
		err = parse_num_or_str(&fstr, NULL, &filt->filename, " ,");
		if (err)
			goto out_err;
	}

	fstr += strspn(fstr, " ,");

	/* Advance the caller's cursor by how much of the copy we consumed */
	*filter_inp += fstr - filt->str;

	return 0;

out_err:
	addr_filter__free_str(filt);

	return err;
}
2165
2166int addr_filters__parse_bare_filter(struct addr_filters *filts,
2167 const char *filter)
2168{
2169 struct addr_filter *filt;
2170 const char *fstr = filter;
2171 int err;
2172
2173 while (*fstr) {
2174 filt = addr_filter__new();
2175 err = parse_one_filter(filt, &fstr);
2176 if (err) {
2177 addr_filter__free(filt);
2178 addr_filters__exit(filts);
2179 return err;
2180 }
2181 addr_filters__add(filts, filt);
2182 }
2183
2184 return 0;
2185}
2186
/* State shared with the kallsyms__parse() callbacks below */
struct sym_args {
	const char	*name;		/* symbol name to match */
	u64		start;		/* start address of matched symbol */
	u64		size;		/* size (to the next symbol) */
	int		idx;		/* requested occurrence (#n) */
	int		cnt;		/* occurrences seen so far */
	bool		started;	/* a match has been found */
	bool		global;		/* match only global symbols (#0/#g) */
	bool		selected;	/* match the idx'th occurrence */
	bool		duplicate;	/* more than one match found */
	bool		near;		/* printing helper: last sym matched */
};
2199
static bool kern_sym_match(struct sym_args *args, const char *name, char type)
{
	/*
	 * A function symbol with the right name matches if: we want a global
	 * symbol and it is one (upper-case type), or we want the n'th
	 * occurrence and this is it (note ++cnt side effect), or we take any.
	 */
	return kallsyms__is_function(type) &&
	       !strcmp(name, args->name) &&
	       ((args->global && isupper(type)) ||
		(args->selected && ++(args->cnt) == args->idx) ||
		(!args->global && !args->selected));
}
2209
/*
 * kallsyms__parse() callback: find the matching symbol, then use the next
 * symbol's start to derive its size.  Returning 1 stops the parse.
 */
static int find_kern_sym_cb(void *arg, const char *name, char type, u64 start)
{
	struct sym_args *args = arg;

	if (args->started) {
		/* First symbol after the match bounds its size */
		if (!args->size)
			args->size = start - args->start;
		if (args->selected) {
			if (args->size)
				return 1;
		} else if (kern_sym_match(args, name, type)) {
			/* A second match is ambiguous without an index */
			args->duplicate = true;
			return 1;
		}
	} else if (kern_sym_match(args, name, type)) {
		args->started = true;
		args->start = start;
	}

	return 0;
}
2231
2232static int print_kern_sym_cb(void *arg, const char *name, char type, u64 start)
2233{
2234 struct sym_args *args = arg;
2235
2236 if (kern_sym_match(args, name, type)) {
2237 pr_err("#%d\t0x%"PRIx64"\t%c\t%s\n",
2238 ++args->cnt, start, type, name);
2239 args->near = true;
2240 } else if (args->near) {
2241 args->near = false;
2242 pr_err("\t\twhich is near\t\t%s\n", name);
2243 }
2244
2245 return 0;
2246}
2247
2248static int sym_not_found_error(const char *sym_name, int idx)
2249{
2250 if (idx > 0) {
2251 pr_err("N'th occurrence (N=%d) of symbol '%s' not found.\n",
2252 idx, sym_name);
2253 } else if (!idx) {
2254 pr_err("Global symbol '%s' not found.\n", sym_name);
2255 } else {
2256 pr_err("Symbol '%s' not found.\n", sym_name);
2257 }
2258 pr_err("Note that symbols must be functions.\n");
2259
2260 return -EINVAL;
2261}
2262
/*
 * Resolve a kernel symbol to (start, size) via /proc/kallsyms.  @idx
 * selects among multiple occurrences (0 = global, >0 = n'th, <0 = any);
 * an unindexed ambiguous match is reported and rejected.
 */
static int find_kern_sym(const char *sym_name, u64 *start, u64 *size, int idx)
{
	struct sym_args args = {
		.name = sym_name,
		.idx = idx,
		.global = !idx,
		.selected = idx > 0,
	};
	int err;

	*start = 0;
	*size = 0;

	err = kallsyms__parse("/proc/kallsyms", &args, find_kern_sym_cb);
	if (err < 0) {
		pr_err("Failed to parse /proc/kallsyms\n");
		return err;
	}

	if (args.duplicate) {
		pr_err("Multiple kernel symbols with name '%s'\n", sym_name);
		args.cnt = 0;
		/* Second pass just lists the candidates for the user */
		kallsyms__parse("/proc/kallsyms", &args, print_kern_sym_cb);
		pr_err("Disambiguate symbol name by inserting #n after the name e.g. %s #2\n",
		       sym_name);
		pr_err("Or select a global symbol by inserting #0 or #g or #G\n");
		return -EINVAL;
	}

	if (!args.started) {
		pr_err("Kernel symbol lookup: ");
		return sym_not_found_error(sym_name, idx);
	}

	*start = args.start;
	*size = args.size;

	return 0;
}
2302
/*
 * kallsyms__parse() callback: track the address range spanned by all
 * kernel function symbols, rounding the end up to a page boundary.
 */
static int find_entire_kern_cb(void *arg, const char *name __maybe_unused,
			       char type, u64 start)
{
	struct sym_args *args = arg;

	if (!kallsyms__is_function(type))
		return 0;

	/* First function symbol marks the start of the range */
	if (!args->started) {
		args->started = true;
		args->start = start;
	}

	/* Size extends to the page after the last symbol seen so far */
	args->size = round_up(start, page_size) + page_size - args->start;

	return 0;
}
2320
2321static int addr_filter__entire_kernel(struct addr_filter *filt)
2322{
2323 struct sym_args args = { .started = false };
2324 int err;
2325
2326 err = kallsyms__parse("/proc/kallsyms", &args, find_entire_kern_cb);
2327 if (err < 0 || !args.started) {
2328 pr_err("Failed to parse /proc/kallsyms\n");
2329 return err;
2330 }
2331
2332 filt->addr = args.start;
2333 filt->size = args.size;
2334
2335 return 0;
2336}
2337
2338static int check_end_after_start(struct addr_filter *filt, u64 start, u64 size)
2339{
2340 if (start + size >= filt->addr)
2341 return 0;
2342
2343 if (filt->sym_from) {
2344 pr_err("Symbol '%s' (0x%"PRIx64") comes before '%s' (0x%"PRIx64")\n",
2345 filt->sym_to, start, filt->sym_from, filt->addr);
2346 } else {
2347 pr_err("Symbol '%s' (0x%"PRIx64") comes before address 0x%"PRIx64")\n",
2348 filt->sym_to, start, filt->addr);
2349 }
2350
2351 return -EINVAL;
2352}
2353
/*
 * Resolve any symbol names in @filt against kernel symbols, filling in
 * filt->addr and filt->size.  "*" as the start symbol selects the entire
 * kernel text range.
 */
static int addr_filter__resolve_kernel_syms(struct addr_filter *filt)
{
	bool no_size = false;
	u64 start, size;
	int err;

	if (symbol_conf.kptr_restrict) {
		pr_err("Kernel addresses are restricted. Unable to resolve kernel symbols.\n");
		return -EINVAL;
	}

	if (filt->sym_from && !strcmp(filt->sym_from, "*"))
		return addr_filter__entire_kernel(filt);

	if (filt->sym_from) {
		err = find_kern_sym(filt->sym_from, &start, &size,
				    filt->sym_from_idx);
		if (err)
			return err;
		filt->addr = start;
		/* Range with no explicit size/end: use the symbol's size */
		if (filt->range && !filt->size && !filt->sym_to) {
			filt->size = size;
			no_size = !size;
		}
	}

	if (filt->sym_to) {
		err = find_kern_sym(filt->sym_to, &start, &size,
				    filt->sym_to_idx);
		if (err)
			return err;

		err = check_end_after_start(filt, start, size);
		if (err)
			return err;
		filt->size = start + size - filt->addr;
		no_size = !size;
	}

	/* The very last symbol in kallsyms has no size, so it fails here */
	if (no_size) {
		pr_err("Cannot determine size of symbol '%s'\n",
		       filt->sym_to ? filt->sym_to : filt->sym_from);
		return -EINVAL;
	}

	return 0;
}
2402
/*
 * Load the DSO for file @name and return a reference to it (caller must
 * dso__put()), or NULL.  A failed symbol load is reported but the dso is
 * still returned - lookups against it will simply find nothing.
 */
static struct dso *load_dso(const char *name)
{
	struct map *map;
	struct dso *dso;

	map = dso__new_map(name);
	if (!map)
		return NULL;

	if (map__load(map) < 0)
		pr_err("File '%s' not found or has no symbols.\n", name);

	/* Take our own reference before dropping the map's */
	dso = dso__get(map->dso);

	map__put(map);

	return dso;
}
2421
static bool dso_sym_match(struct symbol *sym, const char *name, int *cnt,
			  int idx)
{
	/*
	 * A name match qualifies if: a global symbol was requested (idx == 0)
	 * and this is one, or the idx'th occurrence was requested and this is
	 * it (note the ++*cnt side effect), or any occurrence is acceptable
	 * (idx < 0).
	 */
	return !arch__compare_symbol_names(name, sym->name) &&
	       ((!idx && sym->binding == STB_GLOBAL) ||
		(idx > 0 && ++*cnt == idx) ||
		idx < 0);
}
2431
/*
 * List every symbol in @dso matching @sym_name (plus the symbol after
 * each match) so the user can pick one with a #n index.
 */
static void print_duplicate_syms(struct dso *dso, const char *sym_name)
{
	struct symbol *sym;
	bool near = false;
	int cnt = 0;

	pr_err("Multiple symbols with name '%s'\n", sym_name);

	sym = dso__first_symbol(dso);
	while (sym) {
		/* idx == -1: match every occurrence; cnt is only for display */
		if (dso_sym_match(sym, sym_name, &cnt, -1)) {
			pr_err("#%d\t0x%"PRIx64"\t%c\t%s\n",
			       ++cnt, sym->start,
			       sym->binding == STB_GLOBAL ? 'g' :
			       sym->binding == STB_LOCAL  ? 'l' : 'w',
			       sym->name);
			near = true;
		} else if (near) {
			near = false;
			pr_err("\t\twhich is near\t\t%s\n", sym->name);
		}
		sym = dso__next_symbol(sym);
	}

	pr_err("Disambiguate symbol name by inserting #n after the name e.g. %s #2\n",
	       sym_name);
	pr_err("Or select a global symbol by inserting #0 or #g or #G\n");
}
2460
/*
 * Resolve @sym_name within @dso to (start, size).  @idx selects among
 * multiple occurrences as in find_kern_sym(); an unindexed ambiguous
 * match lists the candidates and fails.
 */
static int find_dso_sym(struct dso *dso, const char *sym_name, u64 *start,
			u64 *size, int idx)
{
	struct symbol *sym;
	int cnt = 0;

	*start = 0;
	*size = 0;

	sym = dso__first_symbol(dso);
	while (sym) {
		if (*start) {
			/* Zero-sized match: bound it by the next symbol */
			if (!*size)
				*size = sym->start - *start;
			if (idx > 0) {
				if (*size)
					return 1;
			} else if (dso_sym_match(sym, sym_name, &cnt, idx)) {
				/* Second match without an index: ambiguous */
				print_duplicate_syms(dso, sym_name);
				return -EINVAL;
			}
		} else if (dso_sym_match(sym, sym_name, &cnt, idx)) {
			*start = sym->start;
			*size = sym->end - sym->start;
		}
		sym = dso__next_symbol(sym);
	}

	if (!*start)
		return sym_not_found_error(sym_name, idx);

	return 0;
}
2494
2495static int addr_filter__entire_dso(struct addr_filter *filt, struct dso *dso)
2496{
2497 if (dso__data_file_size(dso, NULL)) {
2498 pr_err("Failed to determine filter for %s\nCannot determine file size.\n",
2499 filt->filename);
2500 return -EINVAL;
2501 }
2502
2503 filt->addr = 0;
2504 filt->size = dso->data.file_size;
2505
2506 return 0;
2507}
2508
2509static int addr_filter__resolve_syms(struct addr_filter *filt)
2510{
2511 u64 start, size;
2512 struct dso *dso;
2513 int err = 0;
2514
2515 if (!filt->sym_from && !filt->sym_to)
2516 return 0;
2517
2518 if (!filt->filename)
2519 return addr_filter__resolve_kernel_syms(filt);
2520
2521 dso = load_dso(filt->filename);
2522 if (!dso) {
2523 pr_err("Failed to load symbols from: %s\n", filt->filename);
2524 return -EINVAL;
2525 }
2526
2527 if (filt->sym_from && !strcmp(filt->sym_from, "*")) {
2528 err = addr_filter__entire_dso(filt, dso);
2529 goto put_dso;
2530 }
2531
2532 if (filt->sym_from) {
2533 err = find_dso_sym(dso, filt->sym_from, &start, &size,
2534 filt->sym_from_idx);
2535 if (err)
2536 goto put_dso;
2537 filt->addr = start;
2538 if (filt->range && !filt->size && !filt->sym_to)
2539 filt->size = size;
2540 }
2541
2542 if (filt->sym_to) {
2543 err = find_dso_sym(dso, filt->sym_to, &start, &size,
2544 filt->sym_to_idx);
2545 if (err)
2546 goto put_dso;
2547
2548 err = check_end_after_start(filt, start, size);
2549 if (err)
2550 return err;
2551
2552 filt->size = start + size - filt->addr;
2553 }
2554
2555put_dso:
2556 dso__put(dso);
2557
2558 return err;
2559}
2560
2561static char *addr_filter__to_str(struct addr_filter *filt)
2562{
2563 char filename_buf[PATH_MAX];
2564 const char *at = "";
2565 const char *fn = "";
2566 char *filter;
2567 int err;
2568
2569 if (filt->filename) {
2570 at = "@";
2571 fn = realpath(filt->filename, filename_buf);
2572 if (!fn)
2573 return NULL;
2574 }
2575
2576 if (filt->range) {
2577 err = asprintf(&filter, "%s 0x%"PRIx64"/0x%"PRIx64"%s%s",
2578 filt->action, filt->addr, filt->size, at, fn);
2579 } else {
2580 err = asprintf(&filter, "%s 0x%"PRIx64"%s%s",
2581 filt->action, filt->addr, at, fn);
2582 }
2583
2584 return err < 0 ? NULL : filter;
2585}
2586
/*
 * Parse a user-supplied filter string, resolve its symbols, and append
 * each resulting kernel-format filter string to @evsel.  @max_nr is the
 * PMU's address filter capacity.
 */
static int parse_addr_filter(struct evsel *evsel, const char *filter,
			     int max_nr)
{
	struct addr_filters filts;
	struct addr_filter *filt;
	int err;

	addr_filters__init(&filts);

	err = addr_filters__parse_bare_filter(&filts, filter);
	if (err)
		goto out_exit;

	if (filts.cnt > max_nr) {
		pr_err("Error: number of address filters (%d) exceeds maximum (%d)\n",
		       filts.cnt, max_nr);
		err = -EINVAL;
		goto out_exit;
	}

	list_for_each_entry(filt, &filts.head, list) {
		char *new_filter;

		err = addr_filter__resolve_syms(filt);
		if (err)
			goto out_exit;

		new_filter = addr_filter__to_str(filt);
		if (!new_filter) {
			err = -ENOMEM;
			goto out_exit;
		}

		/* Ownership of new_filter passes to the evsel */
		if (evsel__append_addr_filter(evsel, new_filter)) {
			err = -ENOMEM;
			goto out_exit;
		}
	}

out_exit:
	addr_filters__exit(&filts);

	if (err) {
		pr_err("Failed to parse address filter: '%s'\n", filter);
		pr_err("Filter format is: filter|start|stop|tracestop <start symbol or address> [/ <end symbol or size>] [@<file name>]\n");
		pr_err("Where multiple filters are separated by space or comma.\n");
	}

	return err;
}
2637
/* Number of address filters the evsel's PMU supports (0 if none/unknown) */
static int evsel__nr_addr_filter(struct evsel *evsel)
{
	struct perf_pmu *pmu = evsel__find_pmu(evsel);
	int nr_addr_filters = 0;

	if (pmu)
		perf_pmu__scan_file(pmu, "nr_addr_filters", "%d",
				    &nr_addr_filters);

	return nr_addr_filters;
}
2650
2651int auxtrace_parse_filters(struct evlist *evlist)
2652{
2653 struct evsel *evsel;
2654 char *filter;
2655 int err, max_nr;
2656
2657 evlist__for_each_entry(evlist, evsel) {
2658 filter = evsel->filter;
2659 max_nr = evsel__nr_addr_filter(evsel);
2660 if (!filter || !max_nr)
2661 continue;
2662 evsel->filter = NULL;
2663 err = parse_addr_filter(evsel, filter, max_nr);
2664 free(filter);
2665 if (err)
2666 return err;
2667 pr_debug("Address filter: %s\n", evsel->filter);
2668 }
2669
2670 return 0;
2671}
2672
2673int auxtrace__process_event(struct perf_session *session, union perf_event *event,
2674 struct perf_sample *sample, struct perf_tool *tool)
2675{
2676 if (!session->auxtrace)
2677 return 0;
2678
2679 return session->auxtrace->process_event(session, event, sample, tool);
2680}
2681
2682void auxtrace__dump_auxtrace_sample(struct perf_session *session,
2683 struct perf_sample *sample)
2684{
2685 if (!session->auxtrace || !session->auxtrace->dump_auxtrace_sample ||
2686 auxtrace__dont_decode(session))
2687 return;
2688
2689 session->auxtrace->dump_auxtrace_sample(session, sample);
2690}
2691
2692int auxtrace__flush_events(struct perf_session *session, struct perf_tool *tool)
2693{
2694 if (!session->auxtrace)
2695 return 0;
2696
2697 return session->auxtrace->flush_events(session, tool);
2698}
2699
2700void auxtrace__free_events(struct perf_session *session)
2701{
2702 if (!session->auxtrace)
2703 return;
2704
2705 return session->auxtrace->free_events(session);
2706}
2707
2708void auxtrace__free(struct perf_session *session)
2709{
2710 if (!session->auxtrace)
2711 return;
2712
2713 return session->auxtrace->free(session);
2714}
2715
2716bool auxtrace__evsel_is_auxtrace(struct perf_session *session,
2717 struct evsel *evsel)
2718{
2719 if (!session->auxtrace || !session->auxtrace->evsel_is_auxtrace)
2720 return false;
2721
2722 return session->auxtrace->evsel_is_auxtrace(session, evsel);
2723}
2724