#include <errno.h>
#include <inttypes.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <traceevent/event-parse.h>
#include <api/fs/fs.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "evlist.h"
#include "evsel.h"
#include "memswap.h"
#include "map.h"
#include "symbol.h"
#include "session.h"
#include "tool.h"
#include "sort.h"
#include "cpumap.h"
#include "perf_regs.h"
#include "asm/bug.h"
#include "auxtrace.h"
#include "thread.h"
#include "thread-stack.h"
#include "sample-raw.h"
#include "stat.h"
#include "arch/common.h"

#ifdef HAVE_ZSTD_SUPPORT
static int perf_session__process_compressed_event(struct perf_session *session,
						  union perf_event *event, u64 file_offset)
{
	void *src;
	size_t decomp_size, src_size;
	u64 decomp_last_rem = 0;
	size_t mmap_len, decomp_len = session->header.env.comp_mmap_len;
	struct decomp *decomp, *decomp_last = session->decomp_last;

	if (decomp_last) {
		decomp_last_rem = decomp_last->size - decomp_last->head;
		decomp_len += decomp_last_rem;
	}

	mmap_len = sizeof(struct decomp) + decomp_len;
	decomp = mmap(NULL, mmap_len, PROT_READ|PROT_WRITE,
		      MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
	if (decomp == MAP_FAILED) {
		pr_err("Couldn't allocate memory for decompression\n");
		return -1;
	}

	decomp->file_pos = file_offset;
	decomp->mmap_len = mmap_len;
	decomp->head = 0;

	if (decomp_last_rem) {
		memcpy(decomp->data, &(decomp_last->data[decomp_last->head]), decomp_last_rem);
		decomp->size = decomp_last_rem;
	}

	src = (void *)event + sizeof(struct compressed_event);
	src_size = event->pack.header.size - sizeof(struct compressed_event);

	decomp_size = zstd_decompress_stream(&(session->zstd_data), src, src_size,
				&(decomp->data[decomp_last_rem]), decomp_len - decomp_last_rem);
	if (!decomp_size) {
		munmap(decomp, mmap_len);
		pr_err("Couldn't decompress data\n");
		return -1;
	}

	decomp->size += decomp_size;

	if (session->decomp == NULL) {
		session->decomp = decomp;
		session->decomp_last = decomp;
	} else {
		session->decomp_last->next = decomp;
		session->decomp_last = decomp;
	}

	pr_debug("decomp (B): %zd to %zd\n", src_size, decomp_size);

	return 0;
}
#else
#define perf_session__process_compressed_event perf_session__process_compressed_event_stub
#endif

static int perf_session__deliver_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_tool *tool,
				       u64 file_offset);

static int perf_session__open(struct perf_session *session)
{
	struct perf_data *data = session->data;

	if (perf_session__read_header(session) < 0) {
		pr_err("incompatible file format (rerun with -v to learn more)\n");
		return -1;
	}

	if (perf_data__is_pipe(data))
		return 0;

	if (perf_header__has_feat(&session->header, HEADER_STAT))
		return 0;

	if (!perf_evlist__valid_sample_type(session->evlist)) {
		pr_err("non matching sample_type\n");
		return -1;
	}

	if (!perf_evlist__valid_sample_id_all(session->evlist)) {
		pr_err("non matching sample_id_all\n");
		return -1;
	}

	if (!perf_evlist__valid_read_format(session->evlist)) {
		pr_err("non matching read_format\n");
		return -1;
	}

	return 0;
}

void perf_session__set_id_hdr_size(struct perf_session *session)
{
	u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);

	machines__set_id_hdr_size(&session->machines, id_hdr_size);
}

int perf_session__create_kernel_maps(struct perf_session *session)
{
	int ret = machine__create_kernel_maps(&session->machines.host);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&session->machines);
	return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *session)
{
	machines__destroy_kernel_maps(&session->machines);
}

static bool perf_session__has_comm_exec(struct perf_session *session)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->attr.comm_exec)
			return true;
	}

	return false;
}

static void perf_session__set_comm_exec(struct perf_session *session)
{
	bool comm_exec = perf_session__has_comm_exec(session);

	machines__set_comm_exec(&session->machines, comm_exec);
}

static int ordered_events__deliver_event(struct ordered_events *oe,
					 struct ordered_event *event)
{
	struct perf_session *session = container_of(oe, struct perf_session,
						    ordered_events);

	return perf_session__deliver_event(session, event->event,
					   session->tool, event->file_offset);
}

struct perf_session *perf_session__new(struct perf_data *data,
				       bool repipe, struct perf_tool *tool)
{
	struct perf_session *session = zalloc(sizeof(*session));

	if (!session)
		goto out;

	session->repipe = repipe;
	session->tool = tool;
	INIT_LIST_HEAD(&session->auxtrace_index);
	machines__init(&session->machines);
	ordered_events__init(&session->ordered_events,
			     ordered_events__deliver_event, NULL);

	perf_env__init(&session->header.env);
	if (data) {
		if (perf_data__open(data))
			goto out_delete;

		session->data = data;

		if (perf_data__is_read(data)) {
			if (perf_session__open(session) < 0)
				goto out_delete;

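			/*
			 * Set session attributes that are present in perf.data
			 * but not in pipe-mode.
			 */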
			if (!data->is_pipe) {
				perf_session__set_id_hdr_size(session);
				perf_session__set_comm_exec(session);
			}

			perf_evlist__init_trace_event_sample_raw(session->evlist);

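			/* Open the directory data. */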
			if (data->is_dir && perf_data__open_dir(data))
				goto out_delete;
		}
	} else {
		session->machines.host.env = &perf_env;
	}

	session->machines.host.single_address_space =
		perf_env__single_address_space(session->machines.host.env);

	if (!data || perf_data__is_write(data)) {
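		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in perf_event__process_mmap().
		 */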
		if (perf_session__create_kernel_maps(session) < 0)
			pr_warning("Cannot read kernel map\n");
	}

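	/*
	 * In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is
	 * processed, so perf_evlist__sample_id_all(session->evlist) is not
	 * meaningful here.
	 */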
	if ((!data || !data->is_pipe) && tool && tool->ordering_requires_timestamps &&
	    tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		tool->ordered_events = false;
	}

	return session;

 out_delete:
	perf_session__delete(session);
 out:
	return NULL;
}

static void perf_session__delete_threads(struct perf_session *session)
{
	machine__delete_threads(&session->machines.host);
}

static void perf_session__release_decomp_events(struct perf_session *session)
{
	struct decomp *next, *decomp;
	size_t mmap_len;
	next = session->decomp;
	do {
		decomp = next;
		if (decomp == NULL)
			break;
		next = decomp->next;
		mmap_len = decomp->mmap_len;
		munmap(decomp, mmap_len);
	} while (1);
}

void perf_session__delete(struct perf_session *session)
{
	if (session == NULL)
		return;
	auxtrace__free(session);
	auxtrace_index__free(&session->auxtrace_index);
	perf_session__destroy_kernel_maps(session);
	perf_session__delete_threads(session);
	perf_session__release_decomp_events(session);
	perf_env__exit(&session->header.env);
	machines__exit(&session->machines);
	if (session->data)
		perf_data__close(session->data);
	free(session);
}

static int process_event_synth_tracing_data_stub(struct perf_session *session
						 __maybe_unused,
						 union perf_event *event
						 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
					 union perf_event *event __maybe_unused,
					 struct perf_evlist **pevlist
					 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_event_update_stub(struct perf_tool *tool __maybe_unused,
						 union perf_event *event __maybe_unused,
						 struct perf_evlist **pevlist
						 __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_event_update(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
				     union perf_event *event __maybe_unused,
				     struct perf_sample *sample __maybe_unused,
				     struct perf_evsel *evsel __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_stub(struct perf_tool *tool __maybe_unused,
			      union perf_event *event __maybe_unused,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
				       union perf_event *event __maybe_unused,
				       struct ordered_events *oe __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event,
				  struct ordered_events *oe);

static int skipn(int fd, off_t n)
{
	char buf[4096];
	ssize_t ret;

	while (n > 0) {
		ret = read(fd, buf, min(n, (off_t)sizeof(buf)));
		if (ret <= 0)
			return ret;
		n -= ret;
	}

	return 0;
}

static s64 process_event_auxtrace_stub(struct perf_session *session __maybe_unused,
				       union perf_event *event)
{
	dump_printf(": unhandled!\n");
	if (perf_data__is_pipe(session->data))
		skipn(perf_data__fd(session->data), event->auxtrace.size);
	return event->auxtrace.size;
}

static int process_event_op2_stub(struct perf_session *session __maybe_unused,
				  union perf_event *event __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static
int process_event_thread_map_stub(struct perf_session *session __maybe_unused,
				  union perf_event *event __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_thread_map(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static
int process_event_cpu_map_stub(struct perf_session *session __maybe_unused,
			       union perf_event *event __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_cpu_map(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static
int process_event_stat_config_stub(struct perf_session *session __maybe_unused,
				   union perf_event *event __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_stat_config(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int process_stat_stub(struct perf_session *perf_session __maybe_unused,
			     union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_stat(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int process_stat_round_stub(struct perf_session *perf_session __maybe_unused,
				   union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_stat_round(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int perf_session__process_compressed_event_stub(struct perf_session *session __maybe_unused,
						       union perf_event *event __maybe_unused,
						       u64 file_offset __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

void perf_tool__fill_defaults(struct perf_tool *tool)
{
	if (tool->sample == NULL)
		tool->sample = process_event_sample_stub;
	if (tool->mmap == NULL)
		tool->mmap = process_event_stub;
	if (tool->mmap2 == NULL)
		tool->mmap2 = process_event_stub;
	if (tool->comm == NULL)
		tool->comm = process_event_stub;
	if (tool->namespaces == NULL)
		tool->namespaces = process_event_stub;
	if (tool->fork == NULL)
		tool->fork = process_event_stub;
	if (tool->exit == NULL)
		tool->exit = process_event_stub;
	if (tool->lost == NULL)
		tool->lost = perf_event__process_lost;
	if (tool->lost_samples == NULL)
		tool->lost_samples = perf_event__process_lost_samples;
	if (tool->aux == NULL)
		tool->aux = perf_event__process_aux;
	if (tool->itrace_start == NULL)
		tool->itrace_start = perf_event__process_itrace_start;
	if (tool->context_switch == NULL)
		tool->context_switch = perf_event__process_switch;
	if (tool->ksymbol == NULL)
		tool->ksymbol = perf_event__process_ksymbol;
	if (tool->bpf_event == NULL)
		tool->bpf_event = perf_event__process_bpf_event;
	if (tool->read == NULL)
		tool->read = process_event_sample_stub;
	if (tool->throttle == NULL)
		tool->throttle = process_event_stub;
	if (tool->unthrottle == NULL)
		tool->unthrottle = process_event_stub;
	if (tool->attr == NULL)
		tool->attr = process_event_synth_attr_stub;
	if (tool->event_update == NULL)
		tool->event_update = process_event_synth_event_update_stub;
	if (tool->tracing_data == NULL)
		tool->tracing_data = process_event_synth_tracing_data_stub;
	if (tool->build_id == NULL)
		tool->build_id = process_event_op2_stub;
	if (tool->finished_round == NULL) {
		if (tool->ordered_events)
			tool->finished_round = process_finished_round;
		else
			tool->finished_round = process_finished_round_stub;
	}
	if (tool->id_index == NULL)
		tool->id_index = process_event_op2_stub;
	if (tool->auxtrace_info == NULL)
		tool->auxtrace_info = process_event_op2_stub;
	if (tool->auxtrace == NULL)
		tool->auxtrace = process_event_auxtrace_stub;
	if (tool->auxtrace_error == NULL)
		tool->auxtrace_error = process_event_op2_stub;
	if (tool->thread_map == NULL)
		tool->thread_map = process_event_thread_map_stub;
	if (tool->cpu_map == NULL)
		tool->cpu_map = process_event_cpu_map_stub;
	if (tool->stat_config == NULL)
		tool->stat_config = process_event_stat_config_stub;
	if (tool->stat == NULL)
		tool->stat = process_stat_stub;
	if (tool->stat_round == NULL)
		tool->stat_round = process_stat_round_stub;
	if (tool->time_conv == NULL)
		tool->time_conv = process_event_op2_stub;
	if (tool->feature == NULL)
		tool->feature = process_event_op2_stub;
	if (tool->compressed == NULL)
		tool->compressed = perf_session__process_compressed_event;
}

static void swap_sample_id_all(union perf_event *event, void *data)
{
	void *end = (void *) event + event->header.size;
	int size = end - data;

	BUG_ON(size % sizeof(u64));
	mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
				   bool sample_id_all __maybe_unused)
{
	struct perf_event_header *hdr = &event->header;
	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
	event->comm.pid = bswap_32(event->comm.pid);
	event->comm.tid = bswap_32(event->comm.tid);

	if (sample_id_all) {
		void *data = &event->comm.comm;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap_swap(union perf_event *event,
				  bool sample_id_all)
{
	event->mmap.pid = bswap_32(event->mmap.pid);
	event->mmap.tid = bswap_32(event->mmap.tid);
	event->mmap.start = bswap_64(event->mmap.start);
	event->mmap.len = bswap_64(event->mmap.len);
	event->mmap.pgoff = bswap_64(event->mmap.pgoff);

	if (sample_id_all) {
		void *data = &event->mmap.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap2_swap(union perf_event *event,
				   bool sample_id_all)
{
	event->mmap2.pid = bswap_32(event->mmap2.pid);
	event->mmap2.tid = bswap_32(event->mmap2.tid);
	event->mmap2.start = bswap_64(event->mmap2.start);
	event->mmap2.len = bswap_64(event->mmap2.len);
	event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
	event->mmap2.maj = bswap_32(event->mmap2.maj);
	event->mmap2.min = bswap_32(event->mmap2.min);
	event->mmap2.ino = bswap_64(event->mmap2.ino);

	if (sample_id_all) {
		void *data = &event->mmap2.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
	event->fork.pid = bswap_32(event->fork.pid);
	event->fork.tid = bswap_32(event->fork.tid);
	event->fork.ppid = bswap_32(event->fork.ppid);
	event->fork.ptid = bswap_32(event->fork.ptid);
	event->fork.time = bswap_64(event->fork.time);

	if (sample_id_all)
		swap_sample_id_all(event, &event->fork + 1);
}

static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
	event->read.pid = bswap_32(event->read.pid);
	event->read.tid = bswap_32(event->read.tid);
	event->read.value = bswap_64(event->read.value);
	event->read.time_enabled = bswap_64(event->read.time_enabled);
	event->read.time_running = bswap_64(event->read.time_running);
	event->read.id = bswap_64(event->read.id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->read + 1);
}

static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
{
	event->aux.aux_offset = bswap_64(event->aux.aux_offset);
	event->aux.aux_size = bswap_64(event->aux.aux_size);
	event->aux.flags = bswap_64(event->aux.flags);

	if (sample_id_all)
		swap_sample_id_all(event, &event->aux + 1);
}

static void perf_event__itrace_start_swap(union perf_event *event,
					  bool sample_id_all)
{
	event->itrace_start.pid = bswap_32(event->itrace_start.pid);
	event->itrace_start.tid = bswap_32(event->itrace_start.tid);

	if (sample_id_all)
		swap_sample_id_all(event, &event->itrace_start + 1);
}

static void perf_event__switch_swap(union perf_event *event, bool sample_id_all)
{
	if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
		event->context_switch.next_prev_pid =
				bswap_32(event->context_switch.next_prev_pid);
		event->context_switch.next_prev_tid =
				bswap_32(event->context_switch.next_prev_tid);
	}

	if (sample_id_all)
		swap_sample_id_all(event, &event->context_switch + 1);
}

static void perf_event__throttle_swap(union perf_event *event,
				      bool sample_id_all)
{
	event->throttle.time = bswap_64(event->throttle.time);
	event->throttle.id = bswap_64(event->throttle.id);
	event->throttle.stream_id = bswap_64(event->throttle.stream_id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->throttle + 1);
}

static void perf_event__namespaces_swap(union perf_event *event,
					bool sample_id_all)
{
	u64 i;

	event->namespaces.pid = bswap_32(event->namespaces.pid);
	event->namespaces.tid = bswap_32(event->namespaces.tid);
	event->namespaces.nr_namespaces = bswap_64(event->namespaces.nr_namespaces);

	for (i = 0; i < event->namespaces.nr_namespaces; i++) {
		struct perf_ns_link_info *ns = &event->namespaces.link_info[i];

		ns->dev = bswap_64(ns->dev);
		ns->ino = bswap_64(ns->ino);
	}

	if (sample_id_all)
		swap_sample_id_all(event, &event->namespaces.link_info[i]);
}

static u8 revbyte(u8 b)
{
	int rev = (b >> 4) | ((b & 0xf) << 4);
	rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
	rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
	return (u8) rev;
}

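/*
 * XXX this is a hack to carry the flags bitfield through the endian
 * swap: bitfields cannot be byte-swapped field by field, so reverse the
 * bit order of each byte instead (see revbyte() above).
 */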
static void swap_bitfield(u8 *p, unsigned len)
{
	unsigned i;

	for (i = 0; i < len; i++) {
		*p = revbyte(*p);
		p++;
	}
}

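/* exported for swapping attributes in file header */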
void perf_event__attr_swap(struct perf_event_attr *attr)
{
	attr->type = bswap_32(attr->type);
	attr->size = bswap_32(attr->size);

#define bswap_safe(f, n) \
	(attr->size > (offsetof(struct perf_event_attr, f) + \
		       sizeof(attr->f) * (n)))
#define bswap_field(f, sz) \
do { \
	if (bswap_safe(f, 0)) \
		attr->f = bswap_##sz(attr->f); \
} while(0)
#define bswap_field_16(f) bswap_field(f, 16)
#define bswap_field_32(f) bswap_field(f, 32)
#define bswap_field_64(f) bswap_field(f, 64)

	bswap_field_64(config);
	bswap_field_64(sample_period);
	bswap_field_64(sample_type);
	bswap_field_64(read_format);
	bswap_field_32(wakeup_events);
	bswap_field_32(bp_type);
	bswap_field_64(bp_addr);
	bswap_field_64(bp_len);
	bswap_field_64(branch_sample_type);
	bswap_field_64(sample_regs_user);
	bswap_field_32(sample_stack_user);
	bswap_field_32(aux_watermark);
	bswap_field_16(sample_max_stack);

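	/*
	 * After read_format are bitfields. They cannot be swapped as a
	 * unit, so reverse the bit order of each byte in the following
	 * u64 via swap_bitfield() instead.
	 */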
	if (bswap_safe(read_format, 1))
		swap_bitfield((u8 *) (&attr->read_format + 1),
			      sizeof(u64));
#undef bswap_field_64
#undef bswap_field_32
#undef bswap_field
#undef bswap_safe
}

static void perf_event__hdr_attr_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	size_t size;

	perf_event__attr_swap(&event->attr.attr);

	size = event->header.size;
	size -= (void *)&event->attr.id - (void *)event;
	mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_update_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->event_update.type = bswap_64(event->event_update.type);
	event->event_update.id = bswap_64(event->event_update.id);
}

static void perf_event__event_type_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->event_type.event_type.event_id =
		bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->tracing_data.size = bswap_32(event->tracing_data.size);
}

static void perf_event__auxtrace_info_swap(union perf_event *event,
					   bool sample_id_all __maybe_unused)
{
	size_t size;

	event->auxtrace_info.type = bswap_32(event->auxtrace_info.type);

	size = event->header.size;
	size -= (void *)&event->auxtrace_info.priv - (void *)event;
	mem_bswap_64(event->auxtrace_info.priv, size);
}

static void perf_event__auxtrace_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	event->auxtrace.size = bswap_64(event->auxtrace.size);
	event->auxtrace.offset = bswap_64(event->auxtrace.offset);
	event->auxtrace.reference = bswap_64(event->auxtrace.reference);
	event->auxtrace.idx = bswap_32(event->auxtrace.idx);
	event->auxtrace.tid = bswap_32(event->auxtrace.tid);
	event->auxtrace.cpu = bswap_32(event->auxtrace.cpu);
}

static void perf_event__auxtrace_error_swap(union perf_event *event,
					    bool sample_id_all __maybe_unused)
{
	event->auxtrace_error.type = bswap_32(event->auxtrace_error.type);
	event->auxtrace_error.code = bswap_32(event->auxtrace_error.code);
	event->auxtrace_error.cpu = bswap_32(event->auxtrace_error.cpu);
	event->auxtrace_error.pid = bswap_32(event->auxtrace_error.pid);
	event->auxtrace_error.tid = bswap_32(event->auxtrace_error.tid);
	event->auxtrace_error.fmt = bswap_32(event->auxtrace_error.fmt);
	event->auxtrace_error.ip = bswap_64(event->auxtrace_error.ip);
	if (event->auxtrace_error.fmt)
		event->auxtrace_error.time = bswap_64(event->auxtrace_error.time);
}

static void perf_event__thread_map_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	unsigned i;

	event->thread_map.nr = bswap_64(event->thread_map.nr);

	for (i = 0; i < event->thread_map.nr; i++)
		event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid);
}

static void perf_event__cpu_map_swap(union perf_event *event,
				     bool sample_id_all __maybe_unused)
{
	struct cpu_map_data *data = &event->cpu_map.data;
	struct cpu_map_entries *cpus;
	struct cpu_map_mask *mask;
	unsigned i;

	data->type = bswap_64(data->type);

	switch (data->type) {
	case PERF_CPU_MAP__CPUS:
		cpus = (struct cpu_map_entries *)data->data;

		cpus->nr = bswap_16(cpus->nr);

		for (i = 0; i < cpus->nr; i++)
			cpus->cpu[i] = bswap_16(cpus->cpu[i]);
		break;
	case PERF_CPU_MAP__MASK:
		mask = (struct cpu_map_mask *) data->data;

		mask->nr = bswap_16(mask->nr);
		mask->long_size = bswap_16(mask->long_size);

		switch (mask->long_size) {
		case 4: mem_bswap_32(&mask->mask, mask->nr); break;
		case 8: mem_bswap_64(&mask->mask, mask->nr); break;
		default:
			pr_err("cpu_map swap: unsupported long size\n");
		}
	default:
		break;
	}
}

static void perf_event__stat_config_swap(union perf_event *event,
					 bool sample_id_all __maybe_unused)
{
	u64 size;

	size = event->stat_config.nr * sizeof(event->stat_config.data[0]);
	size += 1; /* nr item itself */
	mem_bswap_64(&event->stat_config.nr, size);
}

static void perf_event__stat_swap(union perf_event *event,
				  bool sample_id_all __maybe_unused)
{
	event->stat.id = bswap_64(event->stat.id);
	event->stat.thread = bswap_32(event->stat.thread);
	event->stat.cpu = bswap_32(event->stat.cpu);
	event->stat.val = bswap_64(event->stat.val);
	event->stat.ena = bswap_64(event->stat.ena);
	event->stat.run = bswap_64(event->stat.run);
}

static void perf_event__stat_round_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->stat_round.type = bswap_64(event->stat_round.type);
	event->stat_round.time = bswap_64(event->stat_round.time);
}

typedef void (*perf_event__swap_op)(union perf_event *event,
				    bool sample_id_all);

static perf_event__swap_op perf_event__swap_ops[] = {
	[PERF_RECORD_MMAP] = perf_event__mmap_swap,
	[PERF_RECORD_MMAP2] = perf_event__mmap2_swap,
	[PERF_RECORD_COMM] = perf_event__comm_swap,
	[PERF_RECORD_FORK] = perf_event__task_swap,
	[PERF_RECORD_EXIT] = perf_event__task_swap,
	[PERF_RECORD_LOST] = perf_event__all64_swap,
	[PERF_RECORD_READ] = perf_event__read_swap,
	[PERF_RECORD_THROTTLE] = perf_event__throttle_swap,
	[PERF_RECORD_UNTHROTTLE] = perf_event__throttle_swap,
	[PERF_RECORD_SAMPLE] = perf_event__all64_swap,
	[PERF_RECORD_AUX] = perf_event__aux_swap,
	[PERF_RECORD_ITRACE_START] = perf_event__itrace_start_swap,
	[PERF_RECORD_LOST_SAMPLES] = perf_event__all64_swap,
	[PERF_RECORD_SWITCH] = perf_event__switch_swap,
	[PERF_RECORD_SWITCH_CPU_WIDE] = perf_event__switch_swap,
	[PERF_RECORD_NAMESPACES] = perf_event__namespaces_swap,
	[PERF_RECORD_HEADER_ATTR] = perf_event__hdr_attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID] = NULL,
	[PERF_RECORD_ID_INDEX] = perf_event__all64_swap,
	[PERF_RECORD_AUXTRACE_INFO] = perf_event__auxtrace_info_swap,
	[PERF_RECORD_AUXTRACE] = perf_event__auxtrace_swap,
	[PERF_RECORD_AUXTRACE_ERROR] = perf_event__auxtrace_error_swap,
	[PERF_RECORD_THREAD_MAP] = perf_event__thread_map_swap,
	[PERF_RECORD_CPU_MAP] = perf_event__cpu_map_swap,
	[PERF_RECORD_STAT_CONFIG] = perf_event__stat_config_swap,
	[PERF_RECORD_STAT] = perf_event__stat_swap,
	[PERF_RECORD_STAT_ROUND] = perf_event__stat_round_swap,
	[PERF_RECORD_EVENT_UPDATE] = perf_event__event_update_swap,
	[PERF_RECORD_TIME_CONV] = perf_event__all64_swap,
	[PERF_RECORD_HEADER_MAX] = NULL,
};

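/*
 * When perf record finishes a pass on every buffers, it records this pseudo
 * event.
 * We record the max timestamp t found in the pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will be all
 * available and then read in the pass n + 1.
 * Hence when we start to read the pass n + 2, we can safely flush every
 * buffers with timestamps below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush every events below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush every events below timestamp 7
 *      etc...
 */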
static int process_finished_round(struct perf_tool *tool __maybe_unused,
				  union perf_event *event __maybe_unused,
				  struct ordered_events *oe)
{
	if (dump_trace)
		fprintf(stdout, "\n");
	return ordered_events__flush(oe, OE_FLUSH__ROUND);
}

int perf_session__queue_event(struct perf_session *s, union perf_event *event,
			      u64 timestamp, u64 file_offset)
{
	return ordered_events__queue(&s->ordered_events, event, timestamp, file_offset);
}

static void callchain__lbr_callstack_printf(struct perf_sample *sample)
{
	struct ip_callchain *callchain = sample->callchain;
	struct branch_stack *lbr_stack = sample->branch_stack;
	u64 kernel_callchain_nr = callchain->nr;
	unsigned int i;

	for (i = 0; i < kernel_callchain_nr; i++) {
		if (callchain->ips[i] == PERF_CONTEXT_USER)
			break;
	}

	if ((i != kernel_callchain_nr) && lbr_stack->nr) {
		u64 total_nr;
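		/*
		 * LBR callstack can only get user call chain,
		 * i is kernel call chain number,
		 * 1 is PERF_CONTEXT_USER.
		 *
		 * The user call chain is stored in LBR registers.
		 * LBR are pair registers. The caller is stored
		 * in "from" register, while the callee is stored
		 * in "to" register.
		 * For example, there is a call stack
		 * "A"->"B"->"C"->"D".
		 * The LBR registers will be recorded like
		 * "C"->"D", "B"->"C", "A"->"B".
		 * So only the first "to" register and all "from"
		 * registers are needed to construct the whole stack.
		 */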
		total_nr = i + 1 + lbr_stack->nr + 1;
		kernel_callchain_nr = i + 1;

		printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr);

		for (i = 0; i < kernel_callchain_nr; i++)
			printf("..... %2d: %016" PRIx64 "\n",
			       i, callchain->ips[i]);

		printf("..... %2d: %016" PRIx64 "\n",
		       (int)(kernel_callchain_nr), lbr_stack->entries[0].to);
		for (i = 0; i < lbr_stack->nr; i++)
			printf("..... %2d: %016" PRIx64 "\n",
			       (int)(i + kernel_callchain_nr + 1), lbr_stack->entries[i].from);
	}
}

static void callchain__printf(struct perf_evsel *evsel,
			      struct perf_sample *sample)
{
	unsigned int i;
	struct ip_callchain *callchain = sample->callchain;

	if (perf_evsel__has_branch_callstack(evsel))
		callchain__lbr_callstack_printf(sample);

	printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);

	for (i = 0; i < callchain->nr; i++)
		printf("..... %2d: %016" PRIx64 "\n",
		       i, callchain->ips[i]);
}

static void branch_stack__printf(struct perf_sample *sample)
{
	uint64_t i;

	printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr);

	for (i = 0; i < sample->branch_stack->nr; i++) {
		struct branch_entry *e = &sample->branch_stack->entries[i];

		printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x\n",
		       i, e->from, e->to,
		       (unsigned short)e->flags.cycles,
		       e->flags.mispred ? "M" : " ",
		       e->flags.predicted ? "P" : " ",
		       e->flags.abort ? "A" : " ",
		       e->flags.in_tx ? "T" : " ",
		       (unsigned)e->flags.reserved);
	}
}

static void regs_dump__printf(u64 mask, u64 *regs)
{
	unsigned rid, i = 0;

	for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
		u64 val = regs[i++];

		printf(".... %-5s 0x%" PRIx64 "\n",
		       perf_reg_name(rid), val);
	}
}

static const char *regs_abi[] = {
	[PERF_SAMPLE_REGS_ABI_NONE] = "none",
	[PERF_SAMPLE_REGS_ABI_32] = "32-bit",
	[PERF_SAMPLE_REGS_ABI_64] = "64-bit",
};

static inline const char *regs_dump_abi(struct regs_dump *d)
{
	if (d->abi > PERF_SAMPLE_REGS_ABI_64)
		return "unknown";

	return regs_abi[d->abi];
}

static void regs__printf(const char *type, struct regs_dump *regs)
{
	u64 mask = regs->mask;

	printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n",
	       type,
	       mask,
	       regs_dump_abi(regs));

	regs_dump__printf(mask, regs->regs);
}

static void regs_user__printf(struct perf_sample *sample)
{
	struct regs_dump *user_regs = &sample->user_regs;

	if (user_regs->regs)
		regs__printf("user", user_regs);
}

static void regs_intr__printf(struct perf_sample *sample)
{
	struct regs_dump *intr_regs = &sample->intr_regs;

	if (intr_regs->regs)
		regs__printf("intr", intr_regs);
}

static void stack_user__printf(struct stack_dump *dump)
{
	printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
	       dump->size, dump->offset);
}

static void perf_evlist__print_tstamp(struct perf_evlist *evlist,
				      union perf_event *event,
				      struct perf_sample *sample)
{
	u64 sample_type = __perf_evlist__combined_sample_type(evlist);

	if (event->header.type != PERF_RECORD_SAMPLE &&
	    !perf_evlist__sample_id_all(evlist)) {
		fputs("-1 -1 ", stdout);
		return;
	}

	if (sample_type & PERF_SAMPLE_CPU)
		printf("%u ", sample->cpu);

	if (sample_type & PERF_SAMPLE_TIME)
		printf("%" PRIu64 " ", sample->time);
}

static void sample_read__printf(struct perf_sample *sample, u64 read_format)
{
	printf("... sample_read:\n");

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("...... time enabled %016" PRIx64 "\n",
		       sample->read.time_enabled);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("...... time running %016" PRIx64 "\n",
		       sample->read.time_running);

	if (read_format & PERF_FORMAT_GROUP) {
		u64 i;

		printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);

		for (i = 0; i < sample->read.group.nr; i++) {
			struct sample_read_value *value;

			value = &sample->read.group.values[i];
			printf("..... id %016" PRIx64
			       ", value %016" PRIx64 "\n",
			       value->id, value->value);
		}
	} else
		printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
		       sample->read.one.id, sample->read.one.value);
}

static void dump_event(struct perf_evlist *evlist, union perf_event *event,
		       u64 file_offset, struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("\n%#" PRIx64 " [%#x]: event: %d\n",
	       file_offset, event->header.size, event->header.type);

	trace_event(event);
	if (event->header.type == PERF_RECORD_SAMPLE && evlist->trace_event_sample_raw)
		evlist->trace_event_sample_raw(evlist, event, sample);

	if (sample)
		perf_evlist__print_tstamp(evlist, event, sample);

	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
	       event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
			struct perf_sample *sample)
{
	u64 sample_type;

	if (!dump_trace)
		return;

	printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
	       event->header.misc, sample->pid, sample->tid, sample->ip,
	       sample->period, sample->addr);

	sample_type = evsel->attr.sample_type;

	if (evsel__has_callchain(evsel))
		callchain__printf(evsel, sample);

	if ((sample_type & PERF_SAMPLE_BRANCH_STACK) && !perf_evsel__has_branch_callstack(evsel))
		branch_stack__printf(sample);

	if (sample_type & PERF_SAMPLE_REGS_USER)
		regs_user__printf(sample);

	if (sample_type & PERF_SAMPLE_REGS_INTR)
		regs_intr__printf(sample);

	if (sample_type & PERF_SAMPLE_STACK_USER)
		stack_user__printf(&sample->user_stack);

	if (sample_type & PERF_SAMPLE_WEIGHT)
		printf("... weight: %" PRIu64 "\n", sample->weight);

	if (sample_type & PERF_SAMPLE_DATA_SRC)
		printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);

	if (sample_type & PERF_SAMPLE_PHYS_ADDR)
		printf(" .. phys_addr: 0x%"PRIx64"\n", sample->phys_addr);

	if (sample_type & PERF_SAMPLE_TRANSACTION)
		printf("... transaction: %" PRIx64 "\n", sample->transaction);

	if (sample_type & PERF_SAMPLE_READ)
		sample_read__printf(sample, evsel->attr.read_format);
}

static void dump_read(struct perf_evsel *evsel, union perf_event *event)
{
	struct read_event *read_event = &event->read;
	u64 read_format;

	if (!dump_trace)
		return;

	printf(": %d %d %s %" PRIu64 "\n", event->read.pid, event->read.tid,
	       perf_evsel__name(evsel),
	       event->read.value);

	if (!evsel)
		return;

	read_format = evsel->attr.read_format;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("... time enabled : %" PRIu64 "\n", read_event->time_enabled);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("... time running : %" PRIu64 "\n", read_event->time_running);

	if (read_format & PERF_FORMAT_ID)
		printf("... id : %" PRIu64 "\n", read_event->id);
}

static struct machine *machines__find_for_cpumode(struct machines *machines,
						  union perf_event *event,
						  struct perf_sample *sample)
{
	struct machine *machine;

	if (perf_guest &&
	    ((sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
	     (sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) {
		u32 pid;

		if (event->header.type == PERF_RECORD_MMAP
		    || event->header.type == PERF_RECORD_MMAP2)
			pid = event->mmap.pid;
		else
			pid = sample->pid;

		machine = machines__find(machines, pid);
		if (!machine)
			machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
		return machine;
	}

	return &machines->host;
}

static int deliver_sample_value(struct perf_evlist *evlist,
				struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct sample_read_value *v,
				struct machine *machine)
{
	struct perf_sample_id *sid = perf_evlist__id2sid(evlist, v->id);

	if (sid) {
		sample->id = v->id;
		sample->period = v->value - sid->period;
		sid->period = v->value;
	}

	if (!sid || sid->evsel == NULL) {
		++evlist->stats.nr_unknown_id;
		return 0;
	}

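	/*
	 * There's no reason to deliver sample
	 * for zero period, bail out.
	 */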
	if (!sample->period)
		return 0;

	return tool->sample(tool, event, sample, sid->evsel, machine);
}

static int deliver_sample_group(struct perf_evlist *evlist,
				struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct machine *machine)
{
	int ret = -EINVAL;
	u64 i;

	for (i = 0; i < sample->read.group.nr; i++) {
		ret = deliver_sample_value(evlist, tool, event, sample,
					   &sample->read.group.values[i],
					   machine);
		if (ret)
			break;
	}

	return ret;
}

static int
perf_evlist__deliver_sample(struct perf_evlist *evlist,
			    struct perf_tool *tool,
			    union perf_event *event,
			    struct perf_sample *sample,
			    struct perf_evsel *evsel,
			    struct machine *machine)
{
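	/* We know evsel != NULL. */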
	u64 sample_type = evsel->attr.sample_type;
	u64 read_format = evsel->attr.read_format;

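	/* Standard sample delivery. */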
	if (!(sample_type & PERF_SAMPLE_READ))
		return tool->sample(tool, event, sample, evsel, machine);

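	/* For PERF_SAMPLE_READ we have either single or group mode. */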
	if (read_format & PERF_FORMAT_GROUP)
		return deliver_sample_group(evlist, tool, event, sample,
					    machine);
	else
		return deliver_sample_value(evlist, tool, event, sample,
					    &sample->read.one, machine);
}

static int machines__deliver_event(struct machines *machines,
				   struct perf_evlist *evlist,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct perf_tool *tool, u64 file_offset)
{
	struct perf_evsel *evsel;
	struct machine *machine;

	dump_event(evlist, event, file_offset, sample);

	evsel = perf_evlist__id2evsel(evlist, sample->id);

	machine = machines__find_for_cpumode(machines, event, sample);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		if (evsel == NULL) {
			++evlist->stats.nr_unknown_id;
			return 0;
		}
		dump_sample(evsel, event, sample);
		if (machine == NULL) {
			++evlist->stats.nr_unprocessable_samples;
			return 0;
		}
		return perf_evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
	case PERF_RECORD_MMAP:
		return tool->mmap(tool, event, sample, machine);
	case PERF_RECORD_MMAP2:
		if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT)
			++evlist->stats.nr_proc_map_timeout;
		return tool->mmap2(tool, event, sample, machine);
	case PERF_RECORD_COMM:
		return tool->comm(tool, event, sample, machine);
	case PERF_RECORD_NAMESPACES:
		return tool->namespaces(tool, event, sample, machine);
	case PERF_RECORD_FORK:
		return tool->fork(tool, event, sample, machine);
	case PERF_RECORD_EXIT:
		return tool->exit(tool, event, sample, machine);
	case PERF_RECORD_LOST:
		if (tool->lost == perf_event__process_lost)
			evlist->stats.total_lost += event->lost.lost;
		return tool->lost(tool, event, sample, machine);
	case PERF_RECORD_LOST_SAMPLES:
		if (tool->lost_samples == perf_event__process_lost_samples)
			evlist->stats.total_lost_samples += event->lost_samples.lost;
		return tool->lost_samples(tool, event, sample, machine);
	case PERF_RECORD_READ:
		dump_read(evsel, event);
		return tool->read(tool, event, sample, evsel, machine);
	case PERF_RECORD_THROTTLE:
		return tool->throttle(tool, event, sample, machine);
	case PERF_RECORD_UNTHROTTLE:
		return tool->unthrottle(tool, event, sample, machine);
	case PERF_RECORD_AUX:
		if (tool->aux == perf_event__process_aux) {
			if (event->aux.flags & PERF_AUX_FLAG_TRUNCATED)
				evlist->stats.total_aux_lost += 1;
			if (event->aux.flags & PERF_AUX_FLAG_PARTIAL)
				evlist->stats.total_aux_partial += 1;
		}
		return tool->aux(tool, event, sample, machine);
	case PERF_RECORD_ITRACE_START:
		return tool->itrace_start(tool, event, sample, machine);
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		return tool->context_switch(tool, event, sample, machine);
	case PERF_RECORD_KSYMBOL:
		return tool->ksymbol(tool, event, sample, machine);
	case PERF_RECORD_BPF_EVENT:
		return tool->bpf_event(tool, event, sample, machine);
	default:
		++evlist->stats.nr_unknown_events;
		return -1;
	}
}

static int perf_session__deliver_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_tool *tool,
				       u64 file_offset)
{
	struct perf_sample sample;
	int ret;

	ret = perf_evlist__parse_sample(session->evlist, event, &sample);
	if (ret) {
		pr_err("Can't parse sample, err = %d\n", ret);
		return ret;
	}

	ret = auxtrace__process_event(session, event, &sample, tool);
	if (ret < 0)
		return ret;
	if (ret > 0)
		return 0;

	return machines__deliver_event(&session->machines, session->evlist,
				       event, &sample, tool, file_offset);
}

static s64 perf_session__process_user_event(struct perf_session *session,
					    union perf_event *event,
					    u64 file_offset)
{
	struct ordered_events *oe = &session->ordered_events;
	struct perf_tool *tool = session->tool;
	struct perf_sample sample = { .time = 0, };
	int fd = perf_data__fd(session->data);
	int err;

	if (event->header.type != PERF_RECORD_COMPRESSED ||
	    tool->compressed == perf_session__process_compressed_event_stub)
		dump_event(session->evlist, event, file_offset, &sample);

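	/* These events are processed right away */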
	switch (event->header.type) {
	case PERF_RECORD_HEADER_ATTR:
		err = tool->attr(tool, event, &session->evlist);
		if (err == 0) {
			perf_session__set_id_hdr_size(session);
			perf_session__set_comm_exec(session);
		}
		return err;
	case PERF_RECORD_EVENT_UPDATE:
		return tool->event_update(tool, event, &session->evlist);
	case PERF_RECORD_HEADER_EVENT_TYPE:
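		/*
		 * Deprecated, but we need to handle it for the sake of old
		 * data files created in pipe mode.
		 */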
		return 0;
	case PERF_RECORD_HEADER_TRACING_DATA:
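		/* setup for reading amidst mmap */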
		lseek(fd, file_offset, SEEK_SET);
		return tool->tracing_data(session, event);
	case PERF_RECORD_HEADER_BUILD_ID:
		return tool->build_id(session, event);
	case PERF_RECORD_FINISHED_ROUND:
		return tool->finished_round(tool, event, oe);
	case PERF_RECORD_ID_INDEX:
		return tool->id_index(session, event);
	case PERF_RECORD_AUXTRACE_INFO:
		return tool->auxtrace_info(session, event);
	case PERF_RECORD_AUXTRACE:
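		/* setup for reading amidst mmap */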
		lseek(fd, file_offset + event->header.size, SEEK_SET);
		return tool->auxtrace(session, event);
	case PERF_RECORD_AUXTRACE_ERROR:
		perf_session__auxtrace_error_inc(session, event);
		return tool->auxtrace_error(session, event);
	case PERF_RECORD_THREAD_MAP:
		return tool->thread_map(session, event);
	case PERF_RECORD_CPU_MAP:
		return tool->cpu_map(session, event);
	case PERF_RECORD_STAT_CONFIG:
		return tool->stat_config(session, event);
	case PERF_RECORD_STAT:
		return tool->stat(session, event);
	case PERF_RECORD_STAT_ROUND:
		return tool->stat_round(session, event);
	case PERF_RECORD_TIME_CONV:
		session->time_conv = event->time_conv;
		return tool->time_conv(session, event);
	case PERF_RECORD_HEADER_FEATURE:
		return tool->feature(session, event);
	case PERF_RECORD_COMPRESSED:
		err = tool->compressed(session, event, file_offset);
		if (err)
			dump_event(session->evlist, event, file_offset, &sample);
		return err;
	default:
		return -EINVAL;
	}
}

int perf_session__deliver_synth_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample)
{
	struct perf_evlist *evlist = session->evlist;
	struct perf_tool *tool = session->tool;

	events_stats__inc(&evlist->stats, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, 0);

	return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0);
}

static void event_swap(union perf_event *event, bool sample_id_all)
{
	perf_event__swap_op swap;

	swap = perf_event__swap_ops[event->header.type];
	if (swap)
		swap(event, sample_id_all);
}

int perf_session__peek_event(struct perf_session *session, off_t file_offset,
			     void *buf, size_t buf_sz,
			     union perf_event **event_ptr,
			     struct perf_sample *sample)
{
	union perf_event *event;
	size_t hdr_sz, rest;
	int fd;

	if (session->one_mmap && !session->header.needs_swap) {
		event = file_offset - session->one_mmap_offset +
			session->one_mmap_addr;
		goto out_parse_sample;
	}

	if (perf_data__is_pipe(session->data))
		return -1;

	fd = perf_data__fd(session->data);
	hdr_sz = sizeof(struct perf_event_header);

	if (buf_sz < hdr_sz)
		return -1;

	if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
	    readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)
		return -1;

	event = (union perf_event *)buf;

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (event->header.size < hdr_sz || event->header.size > buf_sz)
		return -1;

	buf += hdr_sz;
	rest = event->header.size - hdr_sz;

	if (readn(fd, buf, rest) != (ssize_t)rest)
		return -1;

	if (session->header.needs_swap)
		event_swap(event, perf_evlist__sample_id_all(session->evlist));

out_parse_sample:

	if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
	    perf_evlist__parse_sample(session->evlist, event, sample))
		return -1;

	*event_ptr = event;

	return 0;
}

static s64 perf_session__process_event(struct perf_session *session,
				       union perf_event *event, u64 file_offset)
{
	struct perf_evlist *evlist = session->evlist;
	struct perf_tool *tool = session->tool;
	int ret;

	if (session->header.needs_swap)
		event_swap(event, perf_evlist__sample_id_all(evlist));

	if (event->header.type >= PERF_RECORD_HEADER_MAX)
		return -EINVAL;

	events_stats__inc(&evlist->stats, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, file_offset);

	if (tool->ordered_events) {
		u64 timestamp = -1ULL;

		ret = perf_evlist__parse_sample_timestamp(evlist, event, &timestamp);
		if (ret && ret != -1)
			return ret;

		ret = perf_session__queue_event(session, event, timestamp, file_offset);
		if (ret != -ETIME)
			return ret;
	}

	return perf_session__deliver_event(session, event, tool, file_offset);
}

void perf_event_header__bswap(struct perf_event_header *hdr)
{
	hdr->type = bswap_32(hdr->type);
	hdr->misc = bswap_16(hdr->misc);
	hdr->size = bswap_16(hdr->size);
}

struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
{
	return machine__findnew_thread(&session->machines.host, -1, pid);
}

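/*
 * The idle task (swapper) has pid == tid == 0, so register it up front
 * and give it its comm, so that samples attributed to it resolve to a
 * named thread.
 */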
int perf_session__register_idle_thread(struct perf_session *session)
{
	struct thread *thread;
	int err = 0;

	thread = machine__findnew_thread(&session->machines.host, 0, 0);
	if (thread == NULL || thread__set_comm(thread, "swapper", 0)) {
		pr_err("problem inserting idle task.\n");
		err = -1;
	}

	if (thread == NULL || thread__set_namespaces(thread, 0, NULL)) {
		pr_err("problem inserting idle task.\n");
		err = -1;
	}

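	/* machine__findnew_thread() got the thread, so put it */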
	thread__put(thread);
	return err;
}

static void
perf_session__warn_order(const struct perf_session *session)
{
	const struct ordered_events *oe = &session->ordered_events;
	struct perf_evsel *evsel;
	bool should_warn = true;

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->attr.write_backward)
			should_warn = false;
	}

	if (!should_warn)
		return;
	if (oe->nr_unordered_events != 0)
		ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events);
}

static void perf_session__warn_about_errors(const struct perf_session *session)
{
	const struct events_stats *stats = &session->evlist->stats;

	if (session->tool->lost == perf_event__process_lost &&
	    stats->nr_events[PERF_RECORD_LOST] != 0) {
		ui__warning("Processed %d events and lost %d chunks!\n\n"
			    "Check IO/CPU overload!\n\n",
			    stats->nr_events[0],
			    stats->nr_events[PERF_RECORD_LOST]);
	}

	if (session->tool->lost_samples == perf_event__process_lost_samples) {
		double drop_rate;

		drop_rate = (double)stats->total_lost_samples /
			    (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples);
		if (drop_rate > 0.05) {
			ui__warning("Processed %" PRIu64 " samples and lost %3.2f%%!\n\n",
				    stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples,
				    drop_rate * 100.0);
		}
	}

	if (session->tool->aux == perf_event__process_aux &&
	    stats->total_aux_lost != 0) {
		ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n",
			    stats->total_aux_lost,
			    stats->nr_events[PERF_RECORD_AUX]);
	}

	if (session->tool->aux == perf_event__process_aux &&
	    stats->total_aux_partial != 0) {
		bool vmm_exclusive = false;

		(void)sysfs__read_bool("module/kvm_intel/parameters/vmm_exclusive",
				       &vmm_exclusive);

		ui__warning("AUX data had gaps in it %" PRIu64 " times out of %u!\n\n"
			    "Are you running a KVM guest in the background?%s\n\n",
			    stats->total_aux_partial,
			    stats->nr_events[PERF_RECORD_AUX],
			    vmm_exclusive ?
			    "\nReloading kvm_intel module with vmm_exclusive=0\n"
			    "will reduce the gaps to only guest's timeslices." :
			    "");
	}

	if (stats->nr_unknown_events != 0) {
		ui__warning("Found %u unknown events!\n\n"
			    "Is this an older tool processing a perf.data "
			    "file generated by a more recent tool?\n\n"
			    "If that is not the case, consider "
			    "reporting to linux-kernel@vger.kernel.org.\n\n",
			    stats->nr_unknown_events);
	}

	if (stats->nr_unknown_id != 0) {
		ui__warning("%u samples with id not present in the header\n",
			    stats->nr_unknown_id);
	}

	if (stats->nr_invalid_chains != 0) {
		ui__warning("Found invalid callchains!\n\n"
			    "%u out of %u events were discarded for this reason.\n\n"
			    "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
			    stats->nr_invalid_chains,
			    stats->nr_events[PERF_RECORD_SAMPLE]);
	}

	if (stats->nr_unprocessable_samples != 0) {
		ui__warning("%u unprocessable samples recorded.\n"
			    "Do you have a KVM guest running and not using 'perf kvm'?\n",
			    stats->nr_unprocessable_samples);
	}

	perf_session__warn_order(session);

	events_stats__auxtrace_error_warn(stats);

	if (stats->nr_proc_map_timeout != 0) {
		ui__warning("%d map information files for pre-existing threads were\n"
			    "not processed, if there are samples for addresses they\n"
			    "will not be resolved, you may find out which are these\n"
			    "threads by running with -v and redirecting the output\n"
			    "to a file.\n"
			    "The time limit to process proc map is too short?\n"
			    "Increase it by --proc-map-timeout\n",
			    stats->nr_proc_map_timeout);
	}
}

static int perf_session__flush_thread_stack(struct thread *thread,
					    void *p __maybe_unused)
{
	return thread_stack__flush(thread);
}

static int perf_session__flush_thread_stacks(struct perf_session *session)
{
	return machines__for_each_thread(&session->machines,
					 perf_session__flush_thread_stack,
					 NULL);
}

volatile int session_done;

static int __perf_session__process_decomp_events(struct perf_session *session);

static int __perf_session__process_pipe_events(struct perf_session *session)
{
	struct ordered_events *oe = &session->ordered_events;
	struct perf_tool *tool = session->tool;
	int fd = perf_data__fd(session->data);
	union perf_event *event;
	uint32_t size, cur_size = 0;
	void *buf = NULL;
	s64 skip = 0;
	u64 head;
	ssize_t err;
	void *p;

	perf_tool__fill_defaults(tool);

	head = 0;
	cur_size = sizeof(union perf_event);

	buf = malloc(cur_size);
	if (!buf)
		return -errno;
	ordered_events__set_copy_on_queue(oe, true);
more:
	event = buf;
	err = readn(fd, event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	size = event->header.size;
	if (size < sizeof(struct perf_event_header)) {
		pr_err("bad event header size\n");
		goto out_err;
	}

	if (size > cur_size) {
		void *new = realloc(buf, size);
		if (!new) {
			pr_err("failed to allocate memory to read event\n");
			goto out_err;
		}
		buf = new;
		cur_size = size;
		event = buf;
	}
	p = event;
	p += sizeof(struct perf_event_header);

	if (size - sizeof(struct perf_event_header)) {
		err = readn(fd, p, size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if ((skip = perf_session__process_event(session, event, head)) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       head, event->header.size, event->header.type);
		err = -EINVAL;
		goto out_err;
	}

	head += size;

	if (skip > 0)
		head += skip;

	err = __perf_session__process_decomp_events(session);
	if (err)
		goto out_err;

	if (!session_done())
		goto more;
done:
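	/* do the final flush for ordered samples */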
	err = ordered_events__flush(oe, OE_FLUSH__FINAL);
	if (err)
		goto out_err;
	err = auxtrace__flush_events(session, tool);
	if (err)
		goto out_err;
	err = perf_session__flush_thread_stacks(session);
out_err:
	free(buf);
	if (!tool->no_warn)
		perf_session__warn_about_errors(session);
	ordered_events__free(&session->ordered_events);
	auxtrace__free_events(session);
	return err;
}

static union perf_event *
fetch_mmaped_event(struct perf_session *session,
		   u64 head, size_t mmap_size, char *buf)
{
	union perf_event *event;

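	/*
	 * Ensure we have enough space remaining to read
	 * the size of the event in the headers.
	 */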
1945 if (head + sizeof(event->header) > mmap_size)
1946 return NULL;
1947
1948 event = (union perf_event *)(buf + head);
1949
1950 if (session->header.needs_swap)
1951 perf_event_header__bswap(&event->header);
1952
1953 if (head + event->header.size > mmap_size) {
		/* We're not fetching the event so swap back again */
		if (session->header.needs_swap)
			perf_event_header__bswap(&event->header);
		return NULL;
	}

	return event;
}

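/*
 * Drain the events accumulated in the most recently decompressed buffer
 * (session->decomp_last), advancing decomp->head past each event that is
 * successfully processed.
 */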
static int __perf_session__process_decomp_events(struct perf_session *session)
{
	s64 skip;
	u64 size, file_pos = 0;
	struct decomp *decomp = session->decomp_last;

	if (!decomp)
		return 0;

	while (decomp->head < decomp->size && !session_done()) {
		union perf_event *event = fetch_mmaped_event(session, decomp->head, decomp->size, decomp->data);

		if (!event)
			break;

		size = event->header.size;

		if (size < sizeof(struct perf_event_header) ||
		    (skip = perf_session__process_event(session, event, file_pos)) < 0) {
			pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
			       decomp->file_pos + decomp->head, event->header.size,
			       event->header.type);
			return -EINVAL;
		}

		if (skip)
			size += skip;

		decomp->head += size;
	}

	return 0;
}

/*
 * On 64bit we can mmap the data file in one go. No need for tiny mmap
 * slices. On 32bit make sure we use a reasonable amount of address space.
 */
#if BITS_PER_LONG == 64
#define MMAP_SIZE ULLONG_MAX
#define NUM_MMAPS 1
#else
#define MMAP_SIZE (32 * 1024 * 1024ULL)
#define NUM_MMAPS 128
#endif

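/*
 * A reader describes one span of the data file (fd, size, offset) plus
 * the callback used to process each event found in it.
 */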
struct reader;

typedef s64 (*reader_cb_t)(struct perf_session *session,
			   union perf_event *event,
			   u64 file_offset);

struct reader {
	int		 fd;
	u64		 data_size;
	u64		 data_offset;
	reader_cb_t	 process;
};

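/*
 * mmap the data file in windows of mmap_size bytes and feed every event
 * found to rd->process(). When an event straddles the end of the current
 * window, unmap the oldest of the NUM_MMAPS slots and remap at a
 * page-aligned offset further into the file.
 */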
static int
reader__process_events(struct reader *rd, struct perf_session *session,
		       struct ui_progress *prog)
{
	u64 data_size = rd->data_size;
	u64 head, page_offset, file_offset, file_pos, size;
	int err = 0, mmap_prot, mmap_flags, map_idx = 0;
	size_t	mmap_size;
	char *buf, *mmaps[NUM_MMAPS];
	union perf_event *event;
	s64 skip;

	page_offset = page_size * (rd->data_offset / page_size);
	file_offset = page_offset;
	head = rd->data_offset - page_offset;

	ui_progress__init_size(prog, data_size, "Processing events...");

	data_size += rd->data_offset;

	mmap_size = MMAP_SIZE;
	if (mmap_size > data_size) {
		mmap_size = data_size;
		session->one_mmap = true;
	}

	memset(mmaps, 0, sizeof(mmaps));

	mmap_prot  = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (session->header.needs_swap) {
		mmap_prot  |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, rd->fd,
		   file_offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out;
	}
	mmaps[map_idx] = buf;
	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
	file_pos = file_offset + head;
	if (session->one_mmap) {
		session->one_mmap_addr = buf;
		session->one_mmap_offset = file_offset;
	}

more:
	event = fetch_mmaped_event(session, head, mmap_size, buf);
	if (!event) {
		if (mmaps[map_idx]) {
			munmap(mmaps[map_idx], mmap_size);
			mmaps[map_idx] = NULL;
		}

		page_offset = page_size * (head / page_size);
		file_offset += page_offset;
		head -= page_offset;
		goto remap;
	}

	size = event->header.size;

	skip = -EINVAL;

	if (size < sizeof(struct perf_event_header) ||
	    (skip = rd->process(session, event, file_pos)) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d [%s]\n",
		       file_offset + head, event->header.size,
		       event->header.type, strerror(-skip));
		err = skip;
		goto out;
	}

	if (skip)
		size += skip;

	head += size;
	file_pos += size;

	err = __perf_session__process_decomp_events(session);
	if (err)
		goto out;

	ui_progress__update(prog, size);

	if (session_done())
		goto out;

	if (file_pos < data_size)
		goto more;

out:
	return err;
}

static s64 process_simple(struct perf_session *session,
			  union perf_event *event,
			  u64 file_offset)
{
	return perf_session__process_event(session, event, file_offset);
}

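/*
 * Process a seekable perf.data file via the reader abstraction, then do
 * the final flushes of ordered events, AUX area data and per-thread
 * call stacks.
 */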
static int __perf_session__process_events(struct perf_session *session)
{
	struct reader rd = {
		.fd		= perf_data__fd(session->data),
		.data_size	= session->header.data_size,
		.data_offset	= session->header.data_offset,
		.process	= process_simple,
	};
	struct ordered_events *oe = &session->ordered_events;
	struct perf_tool *tool = session->tool;
	struct ui_progress prog;
	int err;

	perf_tool__fill_defaults(tool);

	if (rd.data_size == 0)
		return -1;

	ui_progress__init_size(&prog, rd.data_size, "Processing events...");

	err = reader__process_events(&rd, session, &prog);
	if (err)
		goto out_err;

	/* do the final flush for ordered samples */
	err = ordered_events__flush(oe, OE_FLUSH__FINAL);
	if (err)
		goto out_err;
	err = auxtrace__flush_events(session, tool);
	if (err)
		goto out_err;
	err = perf_session__flush_thread_stacks(session);
out_err:
	ui_progress__finish();
	if (!tool->no_warn)
		perf_session__warn_about_errors(session);
	/*
	 * We may be switching perf.data output; make ordered_events
	 * reusable.
	 */
	ordered_events__reinit(&session->ordered_events);
	auxtrace__free_events(session);
	session->one_mmap = false;
	return err;
}

int perf_session__process_events(struct perf_session *session)
{
	if (perf_session__register_idle_thread(session) < 0)
		return -ENOMEM;

	if (perf_data__is_pipe(session->data))
		return __perf_session__process_pipe_events(session);

	return __perf_session__process_events(session);
}

bool perf_session__has_traces(struct perf_session *session, const char *msg)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT)
			return true;
	}

	pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
	return false;
}

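/*
 * Remember a reference symbol and its address on the kernel map; this is
 * what later lets perf work out whether the kernel was relocated with
 * respect to the addresses recorded in the data file. The name is
 * truncated at the first ']', if any.
 */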
int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name, u64 addr)
{
	char *bracket;
	struct ref_reloc_sym *ref;
	struct kmap *kmap;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	kmap = map__kmap(map);
	if (kmap)
		kmap->ref_reloc_sym = ref;

	return 0;
}

size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
{
	return machines__fprintf_dsos(&session->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
					  bool (skip)(struct dso *dso, int parm), int parm)
{
	return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
}

size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
{
	size_t ret;
	const char *msg = "";

	if (perf_header__has_feat(&session->header, HEADER_AUXTRACE))
		msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)";

	ret = fprintf(fp, "\nAggregated stats:%s\n", msg);

	ret += events_stats__fprintf(&session->evlist->stats, fp);
	return ret;
}

size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
{
	/*
	 * FIXME: Here we have to actually print all the machines in this
	 * session, not just the host...
	 */
	return machine__fprintf(&session->machines.host, fp);
}

struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
						   unsigned int type)
{
	struct perf_evsel *pos;

	evlist__for_each_entry(session->evlist, pos) {
		if (pos->attr.type == type)
			return pos;
	}
	return NULL;
}

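/*
 * Build a bitmap from a user-supplied cpu list (e.g. "0-3,8"): all event
 * types in the session must sample PERF_SAMPLE_CPU, and any CPU at or
 * beyond MAX_NR_CPUS is rejected.
 */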
int perf_session__cpu_bitmap(struct perf_session *session,
			     const char *cpu_list, unsigned long *cpu_bitmap)
{
	int i, err = -1;
	struct cpu_map *map;

	for (i = 0; i < PERF_TYPE_MAX; ++i) {
		struct perf_evsel *evsel;

		evsel = perf_session__find_first_evtype(session, i);
		if (!evsel)
			continue;

		if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
			pr_err("File does not contain CPU events. "
			       "Remove -C option to proceed.\n");
			return -1;
		}
	}

	map = cpu_map__new(cpu_list);
	if (map == NULL) {
		pr_err("Invalid cpu_list\n");
		return -1;
	}

	for (i = 0; i < map->nr; i++) {
		int cpu = map->map[i];

		if (cpu >= MAX_NR_CPUS) {
			pr_err("Requested CPU %d too large. "
			       "Consider raising MAX_NR_CPUS\n", cpu);
			goto out_delete_map;
		}

		set_bit(cpu, cpu_bitmap);
	}

	err = 0;

out_delete_map:
	cpu_map__put(map);
	return err;
}

void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
				bool full)
{
	if (session == NULL || fp == NULL)
		return;

	fprintf(fp, "# ========\n");
	perf_header__fprintf_info(session, fp, full);
	fprintf(fp, "# ========\n#\n");
}
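/*
 * Wire each { tracepoint name, handler } pair in 'assocs' to the
 * matching evsel in the session; pairs naming events absent from the
 * session are skipped, and an evsel that already has a handler makes
 * the whole call fail with -EEXIST.
 */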
int __perf_session__set_tracepoints_handlers(struct perf_session *session,
					     const struct perf_evsel_str_handler *assocs,
					     size_t nr_assocs)
{
	struct perf_evsel *evsel;
	size_t i;
	int err;

	for (i = 0; i < nr_assocs; i++) {
		/*
		 * Adding a handler for an event not in the session,
		 * just ignore it.
		 */
		evsel = perf_evlist__find_tracepoint_by_name(session->evlist, assocs[i].name);
		if (evsel == NULL)
			continue;

		err = -EEXIST;
		if (evsel->handler != NULL)
			goto out;
		evsel->handler = assocs[i].handler;
	}

	err = 0;
out:
	return err;
}

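/*
 * Consume a PERF_RECORD_ID_INDEX event: copy each entry's idx/cpu/tid
 * into the perf_sample_id matching its id, validating that the entry
 * count fits in the event's declared size.
 */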
int perf_event__process_id_index(struct perf_session *session,
				 union perf_event *event)
{
	struct perf_evlist *evlist = session->evlist;
	struct id_index_event *ie = &event->id_index;
	size_t i, nr, max_nr;

	max_nr = (ie->header.size - sizeof(struct id_index_event)) /
		 sizeof(struct id_index_entry);
	nr = ie->nr;
	if (nr > max_nr)
		return -EINVAL;

	if (dump_trace)
		fprintf(stdout, " nr: %zu\n", nr);

	for (i = 0; i < nr; i++) {
		struct id_index_entry *e = &ie->entries[i];
		struct perf_sample_id *sid;

		if (dump_trace) {
			fprintf(stdout, " ... id: %"PRIu64, e->id);
			fprintf(stdout, " idx: %"PRIu64, e->idx);
			fprintf(stdout, " cpu: %"PRId64, e->cpu);
			fprintf(stdout, " tid: %"PRId64"\n", e->tid);
		}

		sid = perf_evlist__id2sid(evlist, e->id);
		if (!sid)
			return -ENOENT;
		sid->idx = e->idx;
		sid->cpu = e->cpu;
		sid->tid = e->tid;
	}
	return 0;
}

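/*
 * Counterpart of perf_event__process_id_index(): emit PERF_RECORD_ID_INDEX
 * events covering every sample id in the evlist, splitting into several
 * events when the total would not fit in the u16 header size.
 */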
int perf_event__synthesize_id_index(struct perf_tool *tool,
				    perf_event__handler_t process,
				    struct perf_evlist *evlist,
				    struct machine *machine)
{
	union perf_event *ev;
	struct perf_evsel *evsel;
	size_t nr = 0, i = 0, sz, max_nr, n;
	int err;

	pr_debug2("Synthesizing id index\n");

	max_nr = (UINT16_MAX - sizeof(struct id_index_event)) /
		 sizeof(struct id_index_entry);

	evlist__for_each_entry(evlist, evsel)
		nr += evsel->ids;

	n = nr > max_nr ? max_nr : nr;
	sz = sizeof(struct id_index_event) + n * sizeof(struct id_index_entry);
	ev = zalloc(sz);
	if (!ev)
		return -ENOMEM;

	ev->id_index.header.type = PERF_RECORD_ID_INDEX;
	ev->id_index.header.size = sz;
	ev->id_index.nr = n;

	evlist__for_each_entry(evlist, evsel) {
		u32 j;

		for (j = 0; j < evsel->ids; j++) {
			struct id_index_entry *e;
			struct perf_sample_id *sid;

			if (i >= n) {
				err = process(tool, ev, NULL, machine);
				if (err)
					goto out_err;
				nr -= n;
				i = 0;
			}

			e = &ev->id_index.entries[i++];

			e->id = evsel->id[j];

			sid = perf_evlist__id2sid(evlist, e->id);
			if (!sid) {
				free(ev);
				return -ENOENT;
			}

			e->idx = sid->idx;
			e->cpu = sid->cpu;
			e->tid = sid->tid;
		}
	}

	sz = sizeof(struct id_index_event) + nr * sizeof(struct id_index_entry);
	ev->id_index.header.size = sz;
	ev->id_index.nr = nr;

	err = process(tool, ev, NULL, machine);
out_err:
	free(ev);

	return err;
}
