// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <api/fs/fs.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <perf/cpumap.h>

#include "map_symbol.h"
#include "branch.h"
#include "debug.h"
#include "evlist.h"
#include "evsel.h"
#include "memswap.h"
#include "map.h"
#include "symbol.h"
#include "session.h"
#include "tool.h"
#include "perf_regs.h"
#include "asm/bug.h"
#include "auxtrace.h"
#include "thread.h"
#include "thread-stack.h"
#include "sample-raw.h"
#include "stat.h"
#include "ui/progress.h"
#include "../perf.h"
#include "arch/common.h"
#include <internal/lib.h>

#ifdef HAVE_ZSTD_SUPPORT
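/*
 * PERF_RECORD_COMPRESSED payloads are decompressed into anonymous mmap'd
 * buffers chained off session->decomp. Any partial event left at the tail
 * of the previous buffer is copied to the front of the new one, so events
 * never straddle a buffer boundary.
 */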
static int perf_session__process_compressed_event(struct perf_session *session,
						  union perf_event *event, u64 file_offset)
{
	void *src;
	size_t decomp_size, src_size;
	u64 decomp_last_rem = 0;
	size_t mmap_len, decomp_len = session->header.env.comp_mmap_len;
	struct decomp *decomp, *decomp_last = session->decomp_last;

	if (decomp_last) {
		decomp_last_rem = decomp_last->size - decomp_last->head;
		decomp_len += decomp_last_rem;
	}

	mmap_len = sizeof(struct decomp) + decomp_len;
	decomp = mmap(NULL, mmap_len, PROT_READ|PROT_WRITE,
		      MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
	if (decomp == MAP_FAILED) {
		pr_err("Couldn't allocate memory for decompression\n");
		return -1;
	}

	decomp->file_pos = file_offset;
	decomp->mmap_len = mmap_len;
	decomp->head = 0;

	if (decomp_last_rem) {
		memcpy(decomp->data, &(decomp_last->data[decomp_last->head]), decomp_last_rem);
		decomp->size = decomp_last_rem;
	}

	src = (void *)event + sizeof(struct perf_record_compressed);
	src_size = event->pack.header.size - sizeof(struct perf_record_compressed);

	decomp_size = zstd_decompress_stream(&(session->zstd_data), src, src_size,
				&(decomp->data[decomp_last_rem]), decomp_len - decomp_last_rem);
	if (!decomp_size) {
		munmap(decomp, mmap_len);
		pr_err("Couldn't decompress data\n");
		return -1;
	}

	decomp->size += decomp_size;

	if (session->decomp == NULL) {
		session->decomp = decomp;
		session->decomp_last = decomp;
	} else {
		session->decomp_last->next = decomp;
		session->decomp_last = decomp;
	}

	pr_debug("decomp (B): %zd to %zd\n", src_size, decomp_size);

	return 0;
}
#else /* !HAVE_ZSTD_SUPPORT */
#define perf_session__process_compressed_event perf_session__process_compressed_event_stub
#endif

static int perf_session__deliver_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_tool *tool,
				       u64 file_offset);

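/*
 * Read and validate the file header. For non-pipe data, also check that
 * all events agree on sample_type, sample_id_all and read_format, since
 * per-event parsing relies on those being consistent across the evlist.
 */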
static int perf_session__open(struct perf_session *session)
{
	struct perf_data *data = session->data;

	if (perf_session__read_header(session) < 0) {
		pr_err("incompatible file format (rerun with -v to learn more)\n");
		return -1;
	}

	if (perf_data__is_pipe(data))
		return 0;

	if (perf_header__has_feat(&session->header, HEADER_STAT))
		return 0;

	if (!perf_evlist__valid_sample_type(session->evlist)) {
		pr_err("non matching sample_type\n");
		return -1;
	}

	if (!perf_evlist__valid_sample_id_all(session->evlist)) {
		pr_err("non matching sample_id_all\n");
		return -1;
	}

	if (!perf_evlist__valid_read_format(session->evlist)) {
		pr_err("non matching read_format\n");
		return -1;
	}

	return 0;
}

void perf_session__set_id_hdr_size(struct perf_session *session)
{
	u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);

	machines__set_id_hdr_size(&session->machines, id_hdr_size);
}

int perf_session__create_kernel_maps(struct perf_session *session)
{
	int ret = machine__create_kernel_maps(&session->machines.host);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&session->machines);
	return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *session)
{
	machines__destroy_kernel_maps(&session->machines);
}

static bool perf_session__has_comm_exec(struct perf_session *session)
{
	struct evsel *evsel;

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->core.attr.comm_exec)
			return true;
	}

	return false;
}

static void perf_session__set_comm_exec(struct perf_session *session)
{
	bool comm_exec = perf_session__has_comm_exec(session);

	machines__set_comm_exec(&session->machines, comm_exec);
}

static int ordered_events__deliver_event(struct ordered_events *oe,
					 struct ordered_event *event)
{
	struct perf_session *session = container_of(oe, struct perf_session,
						    ordered_events);

	return perf_session__deliver_event(session, event->event,
					   session->tool, event->file_offset);
}

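/*
 * Allocate a session and, when a perf_data is supplied, open it: read mode
 * validates the header and sets up the evlist, write mode creates kernel
 * maps up front. On failure the session is torn down and an ERR_PTR is
 * returned, so callers must check with IS_ERR() rather than for NULL.
 */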
struct perf_session *perf_session__new(struct perf_data *data,
				       bool repipe, struct perf_tool *tool)
{
	int ret = -ENOMEM;
	struct perf_session *session = zalloc(sizeof(*session));

	if (!session)
		goto out;

	session->repipe = repipe;
	session->tool = tool;
	INIT_LIST_HEAD(&session->auxtrace_index);
	machines__init(&session->machines);
	ordered_events__init(&session->ordered_events,
			     ordered_events__deliver_event, NULL);

	perf_env__init(&session->header.env);
	if (data) {
		ret = perf_data__open(data);
		if (ret < 0)
			goto out_delete;

		session->data = data;

		if (perf_data__is_read(data)) {
			ret = perf_session__open(session);
			if (ret < 0)
				goto out_delete;

			/*
			 * set session attributes that are present in perf.data
			 * but not in pipe-mode.
			 */
			if (!data->is_pipe) {
				perf_session__set_id_hdr_size(session);
				perf_session__set_comm_exec(session);
			}

			perf_evlist__init_trace_event_sample_raw(session->evlist);

			/* Open the directory data. */
			if (data->is_dir) {
				ret = perf_data__open_dir(data);
				if (ret)
					goto out_delete;
			}

			if (!symbol_conf.kallsyms_name &&
			    !symbol_conf.vmlinux_name)
				symbol_conf.kallsyms_name = perf_data__kallsyms_name(data);
		}
	} else {
		session->machines.host.env = &perf_env;
	}

	session->machines.host.single_address_space =
		perf_env__single_address_space(session->machines.host.env);

	if (!data || perf_data__is_write(data)) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in perf_event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(session) < 0)
			pr_warning("Cannot read kernel map\n");
	}

	/*
	 * In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is
	 * processed, so perf_evlist__sample_id_all is not meaningful here.
	 */
	if ((!data || !data->is_pipe) && tool && tool->ordering_requires_timestamps &&
	    tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		tool->ordered_events = false;
	}

	return session;

 out_delete:
	perf_session__delete(session);
 out:
	return ERR_PTR(ret);
}

static void perf_session__delete_threads(struct perf_session *session)
{
	machine__delete_threads(&session->machines.host);
}

static void perf_session__release_decomp_events(struct perf_session *session)
{
	struct decomp *next, *decomp;
	size_t mmap_len;
	next = session->decomp;
	do {
		decomp = next;
		if (decomp == NULL)
			break;
		next = decomp->next;
		mmap_len = decomp->mmap_len;
		munmap(decomp, mmap_len);
	} while (1);
}

void perf_session__delete(struct perf_session *session)
{
	if (session == NULL)
		return;
	auxtrace__free(session);
	auxtrace_index__free(&session->auxtrace_index);
	perf_session__destroy_kernel_maps(session);
	perf_session__delete_threads(session);
	perf_session__release_decomp_events(session);
	perf_env__exit(&session->header.env);
	machines__exit(&session->machines);
	if (session->data)
		perf_data__close(session->data);
	free(session);
}

static int process_event_synth_tracing_data_stub(struct perf_session *session
						 __maybe_unused,
						 union perf_event *event
						 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
					 union perf_event *event __maybe_unused,
					 struct evlist **pevlist
					 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_event_update_stub(struct perf_tool *tool __maybe_unused,
						 union perf_event *event __maybe_unused,
						 struct evlist **pevlist
						 __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_event_update(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
				     union perf_event *event __maybe_unused,
				     struct perf_sample *sample __maybe_unused,
				     struct evsel *evsel __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_stub(struct perf_tool *tool __maybe_unused,
			      union perf_event *event __maybe_unused,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
				       union perf_event *event __maybe_unused,
				       struct ordered_events *oe __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event,
				  struct ordered_events *oe);

static int skipn(int fd, off_t n)
{
	char buf[4096];
	ssize_t ret;

	while (n > 0) {
		ret = read(fd, buf, min(n, (off_t)sizeof(buf)));
		if (ret <= 0)
			return ret;
		n -= ret;
	}

	return 0;
}

static s64 process_event_auxtrace_stub(struct perf_session *session,
				       union perf_event *event)
{
	dump_printf(": unhandled!\n");
	if (perf_data__is_pipe(session->data))
		skipn(perf_data__fd(session->data), event->auxtrace.size);
	return event->auxtrace.size;
}

static int process_event_op2_stub(struct perf_session *session __maybe_unused,
				  union perf_event *event __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static
int process_event_thread_map_stub(struct perf_session *session __maybe_unused,
				  union perf_event *event __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_thread_map(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static
int process_event_cpu_map_stub(struct perf_session *session __maybe_unused,
			       union perf_event *event __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_cpu_map(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static
int process_event_stat_config_stub(struct perf_session *session __maybe_unused,
				   union perf_event *event __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_stat_config(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int process_stat_stub(struct perf_session *perf_session __maybe_unused,
			     union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_stat(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int process_stat_round_stub(struct perf_session *perf_session __maybe_unused,
				   union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_stat_round(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int perf_session__process_compressed_event_stub(struct perf_session *session __maybe_unused,
						       union perf_event *event __maybe_unused,
						       u64 file_offset __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

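/*
 * Point every callback the tool did not set at a default handler or a
 * do-nothing stub, so that event dispatch never has to check for NULL.
 */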
void perf_tool__fill_defaults(struct perf_tool *tool)
{
	if (tool->sample == NULL)
		tool->sample = process_event_sample_stub;
	if (tool->mmap == NULL)
		tool->mmap = process_event_stub;
	if (tool->mmap2 == NULL)
		tool->mmap2 = process_event_stub;
	if (tool->comm == NULL)
		tool->comm = process_event_stub;
	if (tool->namespaces == NULL)
		tool->namespaces = process_event_stub;
	if (tool->cgroup == NULL)
		tool->cgroup = process_event_stub;
	if (tool->fork == NULL)
		tool->fork = process_event_stub;
	if (tool->exit == NULL)
		tool->exit = process_event_stub;
	if (tool->lost == NULL)
		tool->lost = perf_event__process_lost;
	if (tool->lost_samples == NULL)
		tool->lost_samples = perf_event__process_lost_samples;
	if (tool->aux == NULL)
		tool->aux = perf_event__process_aux;
	if (tool->itrace_start == NULL)
		tool->itrace_start = perf_event__process_itrace_start;
	if (tool->context_switch == NULL)
		tool->context_switch = perf_event__process_switch;
	if (tool->ksymbol == NULL)
		tool->ksymbol = perf_event__process_ksymbol;
	if (tool->bpf == NULL)
		tool->bpf = perf_event__process_bpf;
	if (tool->read == NULL)
		tool->read = process_event_sample_stub;
	if (tool->throttle == NULL)
		tool->throttle = process_event_stub;
	if (tool->unthrottle == NULL)
		tool->unthrottle = process_event_stub;
	if (tool->attr == NULL)
		tool->attr = process_event_synth_attr_stub;
	if (tool->event_update == NULL)
		tool->event_update = process_event_synth_event_update_stub;
	if (tool->tracing_data == NULL)
		tool->tracing_data = process_event_synth_tracing_data_stub;
	if (tool->build_id == NULL)
		tool->build_id = process_event_op2_stub;
	if (tool->finished_round == NULL) {
		if (tool->ordered_events)
			tool->finished_round = process_finished_round;
		else
			tool->finished_round = process_finished_round_stub;
	}
	if (tool->id_index == NULL)
		tool->id_index = process_event_op2_stub;
	if (tool->auxtrace_info == NULL)
		tool->auxtrace_info = process_event_op2_stub;
	if (tool->auxtrace == NULL)
		tool->auxtrace = process_event_auxtrace_stub;
	if (tool->auxtrace_error == NULL)
		tool->auxtrace_error = process_event_op2_stub;
	if (tool->thread_map == NULL)
		tool->thread_map = process_event_thread_map_stub;
	if (tool->cpu_map == NULL)
		tool->cpu_map = process_event_cpu_map_stub;
	if (tool->stat_config == NULL)
		tool->stat_config = process_event_stat_config_stub;
	if (tool->stat == NULL)
		tool->stat = process_stat_stub;
	if (tool->stat_round == NULL)
		tool->stat_round = process_stat_round_stub;
	if (tool->time_conv == NULL)
		tool->time_conv = process_event_op2_stub;
	if (tool->feature == NULL)
		tool->feature = process_event_op2_stub;
	if (tool->compressed == NULL)
		tool->compressed = perf_session__process_compressed_event;
}

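/*
 * The *_swap() helpers below convert events recorded on a machine of the
 * opposite endianness. Fixed-width fields are byte-swapped individually;
 * the trailing sample_id_all block, when present, is swapped as an array
 * of u64s.
 */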
static void swap_sample_id_all(union perf_event *event, void *data)
{
	void *end = (void *) event + event->header.size;
	int size = end - data;

	BUG_ON(size % sizeof(u64));
	mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
				   bool sample_id_all __maybe_unused)
{
	struct perf_event_header *hdr = &event->header;
	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
	event->comm.pid = bswap_32(event->comm.pid);
	event->comm.tid = bswap_32(event->comm.tid);

	if (sample_id_all) {
		void *data = &event->comm.comm;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap_swap(union perf_event *event,
				  bool sample_id_all)
{
	event->mmap.pid = bswap_32(event->mmap.pid);
	event->mmap.tid = bswap_32(event->mmap.tid);
	event->mmap.start = bswap_64(event->mmap.start);
	event->mmap.len = bswap_64(event->mmap.len);
	event->mmap.pgoff = bswap_64(event->mmap.pgoff);

	if (sample_id_all) {
		void *data = &event->mmap.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap2_swap(union perf_event *event,
				   bool sample_id_all)
{
	event->mmap2.pid = bswap_32(event->mmap2.pid);
	event->mmap2.tid = bswap_32(event->mmap2.tid);
	event->mmap2.start = bswap_64(event->mmap2.start);
	event->mmap2.len = bswap_64(event->mmap2.len);
	event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
	event->mmap2.maj = bswap_32(event->mmap2.maj);
	event->mmap2.min = bswap_32(event->mmap2.min);
	event->mmap2.ino = bswap_64(event->mmap2.ino);
	event->mmap2.ino_generation = bswap_64(event->mmap2.ino_generation);

	if (sample_id_all) {
		void *data = &event->mmap2.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
	event->fork.pid = bswap_32(event->fork.pid);
	event->fork.tid = bswap_32(event->fork.tid);
	event->fork.ppid = bswap_32(event->fork.ppid);
	event->fork.ptid = bswap_32(event->fork.ptid);
	event->fork.time = bswap_64(event->fork.time);

	if (sample_id_all)
		swap_sample_id_all(event, &event->fork + 1);
}

static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
	event->read.pid = bswap_32(event->read.pid);
	event->read.tid = bswap_32(event->read.tid);
	event->read.value = bswap_64(event->read.value);
	event->read.time_enabled = bswap_64(event->read.time_enabled);
	event->read.time_running = bswap_64(event->read.time_running);
	event->read.id = bswap_64(event->read.id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->read + 1);
}

static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
{
	event->aux.aux_offset = bswap_64(event->aux.aux_offset);
	event->aux.aux_size = bswap_64(event->aux.aux_size);
	event->aux.flags = bswap_64(event->aux.flags);

	if (sample_id_all)
		swap_sample_id_all(event, &event->aux + 1);
}

static void perf_event__itrace_start_swap(union perf_event *event,
					  bool sample_id_all)
{
	event->itrace_start.pid = bswap_32(event->itrace_start.pid);
	event->itrace_start.tid = bswap_32(event->itrace_start.tid);

	if (sample_id_all)
		swap_sample_id_all(event, &event->itrace_start + 1);
}

static void perf_event__switch_swap(union perf_event *event, bool sample_id_all)
{
	if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
		event->context_switch.next_prev_pid =
				bswap_32(event->context_switch.next_prev_pid);
		event->context_switch.next_prev_tid =
				bswap_32(event->context_switch.next_prev_tid);
	}

	if (sample_id_all)
		swap_sample_id_all(event, &event->context_switch + 1);
}

static void perf_event__throttle_swap(union perf_event *event,
				      bool sample_id_all)
{
	event->throttle.time = bswap_64(event->throttle.time);
	event->throttle.id = bswap_64(event->throttle.id);
	event->throttle.stream_id = bswap_64(event->throttle.stream_id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->throttle + 1);
}

static void perf_event__namespaces_swap(union perf_event *event,
					bool sample_id_all)
{
	u64 i;

	event->namespaces.pid = bswap_32(event->namespaces.pid);
	event->namespaces.tid = bswap_32(event->namespaces.tid);
	event->namespaces.nr_namespaces = bswap_64(event->namespaces.nr_namespaces);

	for (i = 0; i < event->namespaces.nr_namespaces; i++) {
		struct perf_ns_link_info *ns = &event->namespaces.link_info[i];

		ns->dev = bswap_64(ns->dev);
		ns->ino = bswap_64(ns->ino);
	}

	if (sample_id_all)
		swap_sample_id_all(event, &event->namespaces.link_info[i]);
}

static u8 revbyte(u8 b)
{
	int rev = (b >> 4) | ((b & 0xf) << 4);
	rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
	rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
	return (u8) rev;
}

/*
 * XXX this is hack in attempt to carry flags bitfield
 * through endian village. ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above seems to be byte specific, so we need to reverse each
 * byte of the bitfield. 'Internet' also says this might be implementation
 * specific and we probably need proper fix and carry perf_event_attr
 * bitfield flags in separate data file FEAT_ section. Thought this seems
 * to work for now.
 */
static void swap_bitfield(u8 *p, unsigned len)
{
	unsigned i;

	for (i = 0; i < len; i++) {
		*p = revbyte(*p);
		p++;
	}
}

/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
	attr->type = bswap_32(attr->type);
	attr->size = bswap_32(attr->size);

#define bswap_safe(f, n) 					\
	(attr->size > (offsetof(struct perf_event_attr, f) + 	\
		       sizeof(attr->f) * (n)))
#define bswap_field(f, sz) 			\
do { 						\
	if (bswap_safe(f, 0))			\
		attr->f = bswap_##sz(attr->f);	\
} while(0)
#define bswap_field_16(f) bswap_field(f, 16)
#define bswap_field_32(f) bswap_field(f, 32)
#define bswap_field_64(f) bswap_field(f, 64)

	bswap_field_64(config);
	bswap_field_64(sample_period);
	bswap_field_64(sample_type);
	bswap_field_64(read_format);
	bswap_field_32(wakeup_events);
	bswap_field_32(bp_type);
	bswap_field_64(bp_addr);
	bswap_field_64(bp_len);
	bswap_field_64(branch_sample_type);
	bswap_field_64(sample_regs_user);
	bswap_field_32(sample_stack_user);
	bswap_field_32(aux_watermark);
	bswap_field_16(sample_max_stack);
	bswap_field_32(aux_sample_size);

	/*
	 * After read_format are bitfields. Check read_format because
	 * we are unable to use the bit_width routine.
	 */
	if (bswap_safe(read_format, 1))
		swap_bitfield((u8 *) (&attr->read_format + 1),
			      sizeof(u64));
#undef bswap_field_64
#undef bswap_field_32
#undef bswap_field
#undef bswap_safe
}

static void perf_event__hdr_attr_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	size_t size;

	perf_event__attr_swap(&event->attr.attr);

	size = event->header.size;
	size -= (void *)&event->attr.id - (void *)event;
	mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_update_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->event_update.type = bswap_64(event->event_update.type);
	event->event_update.id = bswap_64(event->event_update.id);
}

static void perf_event__event_type_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->event_type.event_type.event_id =
		bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->tracing_data.size = bswap_32(event->tracing_data.size);
}

static void perf_event__auxtrace_info_swap(union perf_event *event,
					   bool sample_id_all __maybe_unused)
{
	size_t size;

	event->auxtrace_info.type = bswap_32(event->auxtrace_info.type);

	size = event->header.size;
	size -= (void *)&event->auxtrace_info.priv - (void *)event;
	mem_bswap_64(event->auxtrace_info.priv, size);
}

static void perf_event__auxtrace_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	event->auxtrace.size = bswap_64(event->auxtrace.size);
	event->auxtrace.offset = bswap_64(event->auxtrace.offset);
	event->auxtrace.reference = bswap_64(event->auxtrace.reference);
	event->auxtrace.idx = bswap_32(event->auxtrace.idx);
	event->auxtrace.tid = bswap_32(event->auxtrace.tid);
	event->auxtrace.cpu = bswap_32(event->auxtrace.cpu);
}

static void perf_event__auxtrace_error_swap(union perf_event *event,
					    bool sample_id_all __maybe_unused)
{
	event->auxtrace_error.type = bswap_32(event->auxtrace_error.type);
	event->auxtrace_error.code = bswap_32(event->auxtrace_error.code);
	event->auxtrace_error.cpu = bswap_32(event->auxtrace_error.cpu);
	event->auxtrace_error.pid = bswap_32(event->auxtrace_error.pid);
	event->auxtrace_error.tid = bswap_32(event->auxtrace_error.tid);
	event->auxtrace_error.fmt = bswap_32(event->auxtrace_error.fmt);
	event->auxtrace_error.ip = bswap_64(event->auxtrace_error.ip);
	if (event->auxtrace_error.fmt)
		event->auxtrace_error.time = bswap_64(event->auxtrace_error.time);
}

static void perf_event__thread_map_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	unsigned i;

	event->thread_map.nr = bswap_64(event->thread_map.nr);

	for (i = 0; i < event->thread_map.nr; i++)
		event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid);
}

static void perf_event__cpu_map_swap(union perf_event *event,
				     bool sample_id_all __maybe_unused)
{
	struct perf_record_cpu_map_data *data = &event->cpu_map.data;
	struct cpu_map_entries *cpus;
	struct perf_record_record_cpu_map *mask;
	unsigned i;

	data->type = bswap_64(data->type);

	switch (data->type) {
	case PERF_CPU_MAP__CPUS:
		cpus = (struct cpu_map_entries *)data->data;

		cpus->nr = bswap_16(cpus->nr);

		for (i = 0; i < cpus->nr; i++)
			cpus->cpu[i] = bswap_16(cpus->cpu[i]);
		break;
	case PERF_CPU_MAP__MASK:
		mask = (struct perf_record_record_cpu_map *)data->data;

		mask->nr = bswap_16(mask->nr);
		mask->long_size = bswap_16(mask->long_size);

		switch (mask->long_size) {
		case 4: mem_bswap_32(&mask->mask, mask->nr); break;
		case 8: mem_bswap_64(&mask->mask, mask->nr); break;
		default:
			pr_err("cpu_map swap: unsupported long size\n");
		}
	default:
		break;
	}
}

static void perf_event__stat_config_swap(union perf_event *event,
					 bool sample_id_all __maybe_unused)
{
	u64 size;

	size = event->stat_config.nr * sizeof(event->stat_config.data[0]);
	size += 1; /* nr item itself */
	mem_bswap_64(&event->stat_config.nr, size);
}

static void perf_event__stat_swap(union perf_event *event,
				  bool sample_id_all __maybe_unused)
{
	event->stat.id = bswap_64(event->stat.id);
	event->stat.thread = bswap_32(event->stat.thread);
	event->stat.cpu = bswap_32(event->stat.cpu);
	event->stat.val = bswap_64(event->stat.val);
	event->stat.ena = bswap_64(event->stat.ena);
	event->stat.run = bswap_64(event->stat.run);
}

static void perf_event__stat_round_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->stat_round.type = bswap_64(event->stat_round.type);
	event->stat_round.time = bswap_64(event->stat_round.time);
}

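/*
 * Table mapping each event type to its byteswap handler; a NULL entry
 * means there is nothing to swap beyond the header. event_swap() indexes
 * this table by event->header.type.
 */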
typedef void (*perf_event__swap_op)(union perf_event *event,
				    bool sample_id_all);

static perf_event__swap_op perf_event__swap_ops[] = {
	[PERF_RECORD_MMAP] = perf_event__mmap_swap,
	[PERF_RECORD_MMAP2] = perf_event__mmap2_swap,
	[PERF_RECORD_COMM] = perf_event__comm_swap,
	[PERF_RECORD_FORK] = perf_event__task_swap,
	[PERF_RECORD_EXIT] = perf_event__task_swap,
	[PERF_RECORD_LOST] = perf_event__all64_swap,
	[PERF_RECORD_READ] = perf_event__read_swap,
	[PERF_RECORD_THROTTLE] = perf_event__throttle_swap,
	[PERF_RECORD_UNTHROTTLE] = perf_event__throttle_swap,
	[PERF_RECORD_SAMPLE] = perf_event__all64_swap,
	[PERF_RECORD_AUX] = perf_event__aux_swap,
	[PERF_RECORD_ITRACE_START] = perf_event__itrace_start_swap,
	[PERF_RECORD_LOST_SAMPLES] = perf_event__all64_swap,
	[PERF_RECORD_SWITCH] = perf_event__switch_swap,
	[PERF_RECORD_SWITCH_CPU_WIDE] = perf_event__switch_swap,
	[PERF_RECORD_NAMESPACES] = perf_event__namespaces_swap,
	[PERF_RECORD_HEADER_ATTR] = perf_event__hdr_attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID] = NULL,
	[PERF_RECORD_ID_INDEX] = perf_event__all64_swap,
	[PERF_RECORD_AUXTRACE_INFO] = perf_event__auxtrace_info_swap,
	[PERF_RECORD_AUXTRACE] = perf_event__auxtrace_swap,
	[PERF_RECORD_AUXTRACE_ERROR] = perf_event__auxtrace_error_swap,
	[PERF_RECORD_THREAD_MAP] = perf_event__thread_map_swap,
	[PERF_RECORD_CPU_MAP] = perf_event__cpu_map_swap,
	[PERF_RECORD_STAT_CONFIG] = perf_event__stat_config_swap,
	[PERF_RECORD_STAT] = perf_event__stat_swap,
	[PERF_RECORD_STAT_ROUND] = perf_event__stat_round_swap,
	[PERF_RECORD_EVENT_UPDATE] = perf_event__event_update_swap,
	[PERF_RECORD_TIME_CONV] = perf_event__all64_swap,
	[PERF_RECORD_HEADER_MAX] = NULL,
};

/*
 * When perf record finishes a pass on every buffers, it records this pseudo
 * event.
 * We record the max timestamp t found in the pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will be all
 * available and then read in the pass n + 1.
 * Hence when we start to read the pass n + 2, we can safely flush every
 * events with timestamps below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush every events below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush every events below timestamp 7
 *      etc...
 */
static int process_finished_round(struct perf_tool *tool __maybe_unused,
				  union perf_event *event __maybe_unused,
				  struct ordered_events *oe)
{
	if (dump_trace)
		fprintf(stdout, "\n");
	return ordered_events__flush(oe, OE_FLUSH__ROUND);
}

int perf_session__queue_event(struct perf_session *s, union perf_event *event,
			      u64 timestamp, u64 file_offset)
{
	return ordered_events__queue(&s->ordered_events, event, timestamp, file_offset);
}

static void callchain__lbr_callstack_printf(struct perf_sample *sample)
{
	struct ip_callchain *callchain = sample->callchain;
	struct branch_stack *lbr_stack = sample->branch_stack;
	struct branch_entry *entries = perf_sample__branch_entries(sample);
	u64 kernel_callchain_nr = callchain->nr;
	unsigned int i;

	for (i = 0; i < kernel_callchain_nr; i++) {
		if (callchain->ips[i] == PERF_CONTEXT_USER)
			break;
	}

	if ((i != kernel_callchain_nr) && lbr_stack->nr) {
		u64 total_nr;
		/*
		 * LBR callstack can only get user call chain,
		 * i.e. kernel_callchain_nr == 1.
		 *
		 * The user call chain is stored in LBR registers.
		 * LBR are pair registers. The caller is stored
		 * in "from" register, while the callee is stored
		 * in "to" register.
		 * For example, there is a call stack
		 * "A"->"B"->"C"->"D".
		 * The LBR registers will be recorded like
		 * "C"->"D", "B"->"C", "A"->"B".
		 * So only the first "to" register and all "from"
		 * registers are needed to construct the whole stack.
		 */
		total_nr = i + 1 + lbr_stack->nr + 1;
		kernel_callchain_nr = i + 1;

		printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr);

		for (i = 0; i < kernel_callchain_nr; i++)
			printf("..... %2d: %016" PRIx64 "\n",
			       i, callchain->ips[i]);

		printf("..... %2d: %016" PRIx64 "\n",
		       (int)(kernel_callchain_nr), entries[0].to);
		for (i = 0; i < lbr_stack->nr; i++)
			printf("..... %2d: %016" PRIx64 "\n",
			       (int)(i + kernel_callchain_nr + 1), entries[i].from);
	}
}

static void callchain__printf(struct evsel *evsel,
			      struct perf_sample *sample)
{
	unsigned int i;
	struct ip_callchain *callchain = sample->callchain;

	if (perf_evsel__has_branch_callstack(evsel))
		callchain__lbr_callstack_printf(sample);

	printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);

	for (i = 0; i < callchain->nr; i++)
		printf("..... %2d: %016" PRIx64 "\n",
		       i, callchain->ips[i]);
}

static void branch_stack__printf(struct perf_sample *sample, bool callstack)
{
	struct branch_entry *entries = perf_sample__branch_entries(sample);
	uint64_t i;

	printf("%s: nr:%" PRIu64 "\n",
	       !callstack ? "... branch stack" : "... branch callstack",
	       sample->branch_stack->nr);

	for (i = 0; i < sample->branch_stack->nr; i++) {
		struct branch_entry *e = &entries[i];

		if (!callstack) {
			printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x\n",
			       i, e->from, e->to,
			       (unsigned short)e->flags.cycles,
			       e->flags.mispred ? "M" : " ",
			       e->flags.predicted ? "P" : " ",
			       e->flags.abort ? "A" : " ",
			       e->flags.in_tx ? "T" : " ",
			       (unsigned)e->flags.reserved);
		} else {
			printf("..... %2"PRIu64": %016" PRIx64 "\n",
			       i, i > 0 ? e->from : e->to);
		}
	}
}

static void regs_dump__printf(u64 mask, u64 *regs)
{
	unsigned rid, i = 0;

	for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
		u64 val = regs[i++];

		printf(".... %-5s 0x%" PRIx64 "\n",
		       perf_reg_name(rid), val);
	}
}

static const char *regs_abi[] = {
	[PERF_SAMPLE_REGS_ABI_NONE] = "none",
	[PERF_SAMPLE_REGS_ABI_32] = "32-bit",
	[PERF_SAMPLE_REGS_ABI_64] = "64-bit",
};

static inline const char *regs_dump_abi(struct regs_dump *d)
{
	if (d->abi > PERF_SAMPLE_REGS_ABI_64)
		return "unknown";

	return regs_abi[d->abi];
}

static void regs__printf(const char *type, struct regs_dump *regs)
{
	u64 mask = regs->mask;

	printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n",
	       type,
	       mask,
	       regs_dump_abi(regs));

	regs_dump__printf(mask, regs->regs);
}

static void regs_user__printf(struct perf_sample *sample)
{
	struct regs_dump *user_regs = &sample->user_regs;

	if (user_regs->regs)
		regs__printf("user", user_regs);
}

static void regs_intr__printf(struct perf_sample *sample)
{
	struct regs_dump *intr_regs = &sample->intr_regs;

	if (intr_regs->regs)
		regs__printf("intr", intr_regs);
}

static void stack_user__printf(struct stack_dump *dump)
{
	printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
	       dump->size, dump->offset);
}

static void perf_evlist__print_tstamp(struct evlist *evlist,
				      union perf_event *event,
				      struct perf_sample *sample)
{
	u64 sample_type = __perf_evlist__combined_sample_type(evlist);

	if (event->header.type != PERF_RECORD_SAMPLE &&
	    !perf_evlist__sample_id_all(evlist)) {
		fputs("-1 -1 ", stdout);
		return;
	}

	if ((sample_type & PERF_SAMPLE_CPU))
		printf("%u ", sample->cpu);

	if (sample_type & PERF_SAMPLE_TIME)
		printf("%" PRIu64 " ", sample->time);
}

static void sample_read__printf(struct perf_sample *sample, u64 read_format)
{
	printf("... sample_read:\n");

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("...... time enabled %016" PRIx64 "\n",
		       sample->read.time_enabled);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("...... time running %016" PRIx64 "\n",
		       sample->read.time_running);

	if (read_format & PERF_FORMAT_GROUP) {
		u64 i;

		printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);

		for (i = 0; i < sample->read.group.nr; i++) {
			struct sample_read_value *value;

			value = &sample->read.group.values[i];
			printf("..... id %016" PRIx64
			       ", value %016" PRIx64 "\n",
			       value->id, value->value);
		}
	} else
		printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
		       sample->read.one.id, sample->read.one.value);
}

static void dump_event(struct evlist *evlist, union perf_event *event,
		       u64 file_offset, struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("\n%#" PRIx64 " [%#x]: event: %d\n",
	       file_offset, event->header.size, event->header.type);

	trace_event(event);
	if (event->header.type == PERF_RECORD_SAMPLE && evlist->trace_event_sample_raw)
		evlist->trace_event_sample_raw(evlist, event, sample);

	if (sample)
		perf_evlist__print_tstamp(evlist, event, sample);

	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
	       event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct evsel *evsel, union perf_event *event,
			struct perf_sample *sample)
{
	u64 sample_type;

	if (!dump_trace)
		return;

	printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
	       event->header.misc, sample->pid, sample->tid, sample->ip,
	       sample->period, sample->addr);

	sample_type = evsel->core.attr.sample_type;

	if (evsel__has_callchain(evsel))
		callchain__printf(evsel, sample);

	if (sample_type & PERF_SAMPLE_BRANCH_STACK)
		branch_stack__printf(sample, perf_evsel__has_branch_callstack(evsel));

	if (sample_type & PERF_SAMPLE_REGS_USER)
		regs_user__printf(sample);

	if (sample_type & PERF_SAMPLE_REGS_INTR)
		regs_intr__printf(sample);

	if (sample_type & PERF_SAMPLE_STACK_USER)
		stack_user__printf(&sample->user_stack);

	if (sample_type & PERF_SAMPLE_WEIGHT)
		printf("... weight: %" PRIu64 "\n", sample->weight);

	if (sample_type & PERF_SAMPLE_DATA_SRC)
		printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);

	if (sample_type & PERF_SAMPLE_PHYS_ADDR)
		printf(" .. phys_addr: 0x%"PRIx64"\n", sample->phys_addr);

	if (sample_type & PERF_SAMPLE_TRANSACTION)
		printf("... transaction: %" PRIx64 "\n", sample->transaction);

	if (sample_type & PERF_SAMPLE_READ)
		sample_read__printf(sample, evsel->core.attr.read_format);
}

static void dump_read(struct evsel *evsel, union perf_event *event)
{
	struct perf_record_read *read_event = &event->read;
	u64 read_format;

	if (!dump_trace)
		return;

	printf(": %d %d %s %" PRI_lu64 "\n", event->read.pid, event->read.tid,
	       perf_evsel__name(evsel),
	       event->read.value);

	if (!evsel)
		return;

	read_format = evsel->core.attr.read_format;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("... time enabled : %" PRI_lu64 "\n", read_event->time_enabled);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("... time running : %" PRI_lu64 "\n", read_event->time_running);

	if (read_format & PERF_FORMAT_ID)
		printf("... id : %" PRI_lu64 "\n", read_event->id);
}

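/*
 * Route an event to the right machine: guest samples go to the machine
 * keyed by the guest pid, falling back to the default guest machine;
 * everything else is accounted to the host.
 */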
static struct machine *machines__find_for_cpumode(struct machines *machines,
						  union perf_event *event,
						  struct perf_sample *sample)
{
	struct machine *machine;

	if (perf_guest &&
	    ((sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
	     (sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) {
		u32 pid;

		if (event->header.type == PERF_RECORD_MMAP
		    || event->header.type == PERF_RECORD_MMAP2)
			pid = event->mmap.pid;
		else
			pid = sample->pid;

		machine = machines__find(machines, pid);
		if (!machine)
			machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
		return machine;
	}

	return &machines->host;
}

static int deliver_sample_value(struct evlist *evlist,
				struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct sample_read_value *v,
				struct machine *machine)
{
	struct perf_sample_id *sid = perf_evlist__id2sid(evlist, v->id);
	struct evsel *evsel;

	if (sid) {
		sample->id = v->id;
		sample->period = v->value - sid->period;
		sid->period = v->value;
	}

	if (!sid || sid->evsel == NULL) {
		++evlist->stats.nr_unknown_id;
		return 0;
	}

	/*
	 * There's no reason to deliver sample
	 * for zero period, bail out.
	 */
	if (!sample->period)
		return 0;

	evsel = container_of(sid->evsel, struct evsel, core);
	return tool->sample(tool, event, sample, evsel, machine);
}

static int deliver_sample_group(struct evlist *evlist,
				struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct machine *machine)
{
	int ret = -EINVAL;
	u64 i;

	for (i = 0; i < sample->read.group.nr; i++) {
		ret = deliver_sample_value(evlist, tool, event, sample,
					   &sample->read.group.values[i],
					   machine);
		if (ret)
			break;
	}

	return ret;
}

static int perf_evlist__deliver_sample(struct evlist *evlist,
				       struct perf_tool *tool,
				       union perf_event *event,
				       struct perf_sample *sample,
				       struct evsel *evsel,
				       struct machine *machine)
{
	/* We know evsel != NULL. */
	u64 sample_type = evsel->core.attr.sample_type;
	u64 read_format = evsel->core.attr.read_format;

	/* Standard sample delivery. */
	if (!(sample_type & PERF_SAMPLE_READ))
		return tool->sample(tool, event, sample, evsel, machine);

	/* For PERF_SAMPLE_READ we have either single or group mode. */
	if (read_format & PERF_FORMAT_GROUP)
		return deliver_sample_group(evlist, tool, event, sample,
					    machine);
	else
		return deliver_sample_value(evlist, tool, event, sample,
					    &sample->read.one, machine);
}

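/*
 * Central dispatch for kernel events: look up the evsel by sample id,
 * pick the destination machine, then hand the event to the tool callback
 * matching its type. Unknown types bump a stat and return an error.
 */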
static int machines__deliver_event(struct machines *machines,
				   struct evlist *evlist,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct perf_tool *tool, u64 file_offset)
{
	struct evsel *evsel;
	struct machine *machine;

	dump_event(evlist, event, file_offset, sample);

	evsel = perf_evlist__id2evsel(evlist, sample->id);

	machine = machines__find_for_cpumode(machines, event, sample);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		if (evsel == NULL) {
			++evlist->stats.nr_unknown_id;
			return 0;
		}
		dump_sample(evsel, event, sample);
		if (machine == NULL) {
			++evlist->stats.nr_unprocessable_samples;
			return 0;
		}
		return perf_evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
	case PERF_RECORD_MMAP:
		return tool->mmap(tool, event, sample, machine);
	case PERF_RECORD_MMAP2:
		if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT)
			++evlist->stats.nr_proc_map_timeout;
		return tool->mmap2(tool, event, sample, machine);
	case PERF_RECORD_COMM:
		return tool->comm(tool, event, sample, machine);
	case PERF_RECORD_NAMESPACES:
		return tool->namespaces(tool, event, sample, machine);
	case PERF_RECORD_CGROUP:
		return tool->cgroup(tool, event, sample, machine);
	case PERF_RECORD_FORK:
		return tool->fork(tool, event, sample, machine);
	case PERF_RECORD_EXIT:
		return tool->exit(tool, event, sample, machine);
	case PERF_RECORD_LOST:
		if (tool->lost == perf_event__process_lost)
			evlist->stats.total_lost += event->lost.lost;
		return tool->lost(tool, event, sample, machine);
	case PERF_RECORD_LOST_SAMPLES:
		if (tool->lost_samples == perf_event__process_lost_samples)
			evlist->stats.total_lost_samples += event->lost_samples.lost;
		return tool->lost_samples(tool, event, sample, machine);
	case PERF_RECORD_READ:
		dump_read(evsel, event);
		return tool->read(tool, event, sample, evsel, machine);
	case PERF_RECORD_THROTTLE:
		return tool->throttle(tool, event, sample, machine);
	case PERF_RECORD_UNTHROTTLE:
		return tool->unthrottle(tool, event, sample, machine);
	case PERF_RECORD_AUX:
		if (tool->aux == perf_event__process_aux) {
			if (event->aux.flags & PERF_AUX_FLAG_TRUNCATED)
				evlist->stats.total_aux_lost += 1;
			if (event->aux.flags & PERF_AUX_FLAG_PARTIAL)
				evlist->stats.total_aux_partial += 1;
		}
		return tool->aux(tool, event, sample, machine);
	case PERF_RECORD_ITRACE_START:
		return tool->itrace_start(tool, event, sample, machine);
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		return tool->context_switch(tool, event, sample, machine);
	case PERF_RECORD_KSYMBOL:
		return tool->ksymbol(tool, event, sample, machine);
	case PERF_RECORD_BPF_EVENT:
		return tool->bpf(tool, event, sample, machine);
	default:
		++evlist->stats.nr_unknown_events;
		return -1;
	}
}

static int perf_session__deliver_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_tool *tool,
				       u64 file_offset)
{
	struct perf_sample sample;
	int ret;

	ret = perf_evlist__parse_sample(session->evlist, event, &sample);
	if (ret) {
		pr_err("Can't parse sample, err = %d\n", ret);
		return ret;
	}

	ret = auxtrace__process_event(session, event, &sample, tool);
	if (ret < 0)
		return ret;
	if (ret > 0)
		return 0;

	ret = machines__deliver_event(&session->machines, session->evlist,
				      event, &sample, tool, file_offset);

	if (dump_trace && sample.aux_sample.size)
		auxtrace__dump_auxtrace_sample(session, &sample);

	return ret;
}

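/*
 * Synthetic/user events (attr, tracing data, auxtrace, stat, ...) carry
 * file-format metadata rather than profiling data. They are handled
 * immediately and never go through the ordered-events queue.
 */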
static s64 perf_session__process_user_event(struct perf_session *session,
					    union perf_event *event,
					    u64 file_offset)
{
	struct ordered_events *oe = &session->ordered_events;
	struct perf_tool *tool = session->tool;
	struct perf_sample sample = { .time = 0, };
	int fd = perf_data__fd(session->data);
	int err;

	if (event->header.type != PERF_RECORD_COMPRESSED ||
	    tool->compressed == perf_session__process_compressed_event_stub)
		dump_event(session->evlist, event, file_offset, &sample);

	/* These events are processed right away */
	switch (event->header.type) {
	case PERF_RECORD_HEADER_ATTR:
		err = tool->attr(tool, event, &session->evlist);
		if (err == 0) {
			perf_session__set_id_hdr_size(session);
			perf_session__set_comm_exec(session);
		}
		return err;
	case PERF_RECORD_EVENT_UPDATE:
		return tool->event_update(tool, event, &session->evlist);
	case PERF_RECORD_HEADER_EVENT_TYPE:
		/*
		 * Deprecated, but we need to handle it for sure.
		 */
		return 0;
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* setup for reading amidst mmap */
		lseek(fd, file_offset, SEEK_SET);
		return tool->tracing_data(session, event);
	case PERF_RECORD_HEADER_BUILD_ID:
		return tool->build_id(session, event);
	case PERF_RECORD_FINISHED_ROUND:
		return tool->finished_round(tool, event, oe);
	case PERF_RECORD_ID_INDEX:
		return tool->id_index(session, event);
	case PERF_RECORD_AUXTRACE_INFO:
		return tool->auxtrace_info(session, event);
	case PERF_RECORD_AUXTRACE:
		/* setup for reading amidst mmap */
		lseek(fd, file_offset + event->header.size, SEEK_SET);
		return tool->auxtrace(session, event);
	case PERF_RECORD_AUXTRACE_ERROR:
		perf_session__auxtrace_error_inc(session, event);
		return tool->auxtrace_error(session, event);
	case PERF_RECORD_THREAD_MAP:
		return tool->thread_map(session, event);
	case PERF_RECORD_CPU_MAP:
		return tool->cpu_map(session, event);
	case PERF_RECORD_STAT_CONFIG:
		return tool->stat_config(session, event);
	case PERF_RECORD_STAT:
		return tool->stat(session, event);
	case PERF_RECORD_STAT_ROUND:
		return tool->stat_round(session, event);
	case PERF_RECORD_TIME_CONV:
		session->time_conv = event->time_conv;
		return tool->time_conv(session, event);
	case PERF_RECORD_HEADER_FEATURE:
		return tool->feature(session, event);
	case PERF_RECORD_COMPRESSED:
		err = tool->compressed(session, event, file_offset);
		if (err)
			dump_event(session->evlist, event, file_offset, &sample);
		return err;
	default:
		return -EINVAL;
	}
}

int perf_session__deliver_synth_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample)
{
	struct evlist *evlist = session->evlist;
	struct perf_tool *tool = session->tool;

	events_stats__inc(&evlist->stats, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, 0);

	return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0);
}

static void event_swap(union perf_event *event, bool sample_id_all)
{
	perf_event__swap_op swap;

	swap = perf_event__swap_ops[event->header.type];
	if (swap)
		swap(event, sample_id_all);
}

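/*
 * Read a single event at file_offset without disturbing the main read
 * position: use the single mmap directly when possible, otherwise
 * lseek()+read() into the caller's buffer. Not usable on pipes.
 */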
int perf_session__peek_event(struct perf_session *session, off_t file_offset,
			     void *buf, size_t buf_sz,
			     union perf_event **event_ptr,
			     struct perf_sample *sample)
{
	union perf_event *event;
	size_t hdr_sz, rest;
	int fd;

	if (session->one_mmap && !session->header.needs_swap) {
		event = file_offset - session->one_mmap_offset +
			session->one_mmap_addr;
		goto out_parse_sample;
	}

	if (perf_data__is_pipe(session->data))
		return -1;

	fd = perf_data__fd(session->data);
	hdr_sz = sizeof(struct perf_event_header);

	if (buf_sz < hdr_sz)
		return -1;

	if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
	    readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)
		return -1;

	event = (union perf_event *)buf;

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (event->header.size < hdr_sz || event->header.size > buf_sz)
		return -1;

	rest = event->header.size - hdr_sz;

	if (readn(fd, buf + hdr_sz, rest) != (ssize_t)rest)
		return -1;

	if (session->header.needs_swap)
		event_swap(event, perf_evlist__sample_id_all(session->evlist));

out_parse_sample:

	if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
	    perf_evlist__parse_sample(session->evlist, event, sample))
		return -1;

	*event_ptr = event;

	return 0;
}

int perf_session__peek_events(struct perf_session *session, u64 offset,
			      u64 size, peek_events_cb_t cb, void *data)
{
	u64 max_offset = offset + size;
	char buf[PERF_SAMPLE_MAX_SIZE];
	union perf_event *event;
	int err;

	do {
		err = perf_session__peek_event(session, offset, buf,
					       PERF_SAMPLE_MAX_SIZE, &event,
					       NULL);
		if (err)
			return err;

		err = cb(session, event, offset, data);
		if (err)
			return err;

		offset += event->header.size;
		if (event->header.type == PERF_RECORD_AUXTRACE)
			offset += event->auxtrace.size;

	} while (offset < max_offset);

	return err;
}

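/*
 * Main per-event entry point: byte-swap if needed, account the event,
 * route user events for immediate handling, and otherwise queue samples
 * on the ordered-events machinery when the tool asked for time ordering,
 * falling back to direct delivery (e.g. when the event has no timestamp).
 */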
static s64 perf_session__process_event(struct perf_session *session,
				       union perf_event *event, u64 file_offset)
{
	struct evlist *evlist = session->evlist;
	struct perf_tool *tool = session->tool;
	int ret;

	if (session->header.needs_swap)
		event_swap(event, perf_evlist__sample_id_all(evlist));

	if (event->header.type >= PERF_RECORD_HEADER_MAX)
		return -EINVAL;

	events_stats__inc(&evlist->stats, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, file_offset);

	if (tool->ordered_events) {
		u64 timestamp = -1ULL;

		ret = perf_evlist__parse_sample_timestamp(evlist, event, &timestamp);
		if (ret && ret != -1)
			return ret;

		ret = perf_session__queue_event(session, event, timestamp, file_offset);
		if (ret != -ETIME)
			return ret;
	}

	return perf_session__deliver_event(session, event, tool, file_offset);
}

void perf_event_header__bswap(struct perf_event_header *hdr)
{
	hdr->type = bswap_32(hdr->type);
	hdr->misc = bswap_16(hdr->misc);
	hdr->size = bswap_16(hdr->size);
}

struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
{
	return machine__findnew_thread(&session->machines.host, -1, pid);
}

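/*
 * Register the idle task ("swapper", pid 0/tid 0) on the host machine so
 * that kernel samples occurring in the idle loop resolve to a thread.
 */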
int perf_session__register_idle_thread(struct perf_session *session)
{
	struct thread *thread;
	int err = 0;

	thread = machine__findnew_thread(&session->machines.host, 0, 0);
	if (thread == NULL || thread__set_comm(thread, "swapper", 0)) {
		pr_err("problem inserting idle task.\n");
		err = -1;
	}

	if (thread == NULL || thread__set_namespaces(thread, 0, NULL)) {
		pr_err("problem inserting idle task.\n");
		err = -1;
	}

	/* machine__findnew_thread() got the thread, so put it */
	thread__put(thread);
	return err;
}

static void
perf_session__warn_order(const struct perf_session *session)
{
	const struct ordered_events *oe = &session->ordered_events;
	struct evsel *evsel;
	bool should_warn = true;

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->core.attr.write_backward)
			should_warn = false;
	}

	if (!should_warn)
		return;
	if (oe->nr_unordered_events != 0)
		ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events);
}

static void perf_session__warn_about_errors(const struct perf_session *session)
{
	const struct events_stats *stats = &session->evlist->stats;

	if (session->tool->lost == perf_event__process_lost &&
	    stats->nr_events[PERF_RECORD_LOST] != 0) {
		ui__warning("Processed %d events and lost %d chunks!\n\n"
			    "Check IO/CPU overload!\n\n",
			    stats->nr_events[0],
			    stats->nr_events[PERF_RECORD_LOST]);
	}

	if (session->tool->lost_samples == perf_event__process_lost_samples) {
		double drop_rate;

		drop_rate = (double)stats->total_lost_samples /
			    (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples);
		if (drop_rate > 0.05) {
			ui__warning("Processed %" PRIu64 " samples and lost %3.2f%%!\n\n",
				    stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples,
				    drop_rate * 100.0);
		}
	}

	if (session->tool->aux == perf_event__process_aux &&
	    stats->total_aux_lost != 0) {
		ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n",
			    stats->total_aux_lost,
			    stats->nr_events[PERF_RECORD_AUX]);
	}

	if (session->tool->aux == perf_event__process_aux &&
	    stats->total_aux_partial != 0) {
		bool vmm_exclusive = false;

		(void)sysfs__read_bool("module/kvm_intel/parameters/vmm_exclusive",
				       &vmm_exclusive);

		ui__warning("AUX data had gaps in it %" PRIu64 " times out of %u!\n\n"
			    "Are you running a KVM guest in the background?%s\n\n",
			    stats->total_aux_partial,
			    stats->nr_events[PERF_RECORD_AUX],
			    vmm_exclusive ?
			    "\nReloading kvm_intel module with vmm_exclusive=0\n"
			    "will reduce the gaps to only guest's timeslices." :
			    "");
	}

	if (stats->nr_unknown_events != 0) {
		ui__warning("Found %u unknown events!\n\n"
			    "Is this an older tool processing a perf.data "
			    "file generated by a more recent tool?\n\n"
			    "If that is not the case, consider "
			    "reporting to linux-kernel@vger.kernel.org.\n\n",
			    stats->nr_unknown_events);
	}

	if (stats->nr_unknown_id != 0) {
		ui__warning("%u samples with id not present in the header\n",
			    stats->nr_unknown_id);
	}

	if (stats->nr_invalid_chains != 0) {
		ui__warning("Found invalid callchains!\n\n"
			    "%u out of %u events were discarded for this reason.\n\n"
			    "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
			    stats->nr_invalid_chains,
			    stats->nr_events[PERF_RECORD_SAMPLE]);
	}

	if (stats->nr_unprocessable_samples != 0) {
		ui__warning("%u unprocessable samples recorded.\n"
			    "Do you have a KVM guest running and not using 'perf kvm'?\n",
			    stats->nr_unprocessable_samples);
	}

	perf_session__warn_order(session);

	events_stats__auxtrace_error_warn(stats);

	if (stats->nr_proc_map_timeout != 0) {
		ui__warning("%d map information files for pre-existing threads were\n"
			    "not processed, if there are samples for addresses they\n"
			    "will not be resolved, you may find out which are these\n"
			    "threads by running with -v and redirecting the output\n"
			    "to a file.\n"
			    "The time limit to process proc map is too short?\n"
			    "Increase it by --proc-map-timeout\n",
			    stats->nr_proc_map_timeout);
	}
}

static int perf_session__flush_thread_stack(struct thread *thread,
					    void *p __maybe_unused)
{
	return thread_stack__flush(thread);
}

static int perf_session__flush_thread_stacks(struct perf_session *session)
{
	return machines__for_each_thread(&session->machines,
					 perf_session__flush_thread_stack,
					 NULL);
}

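/*
 * Set asynchronously (typically from a signal handler in the calling tool)
 * to make the processing loops below bail out early via session_done().
 */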
1892volatile int session_done;
1893
1894static int __perf_session__process_decomp_events(struct perf_session *session);
1895
static int __perf_session__process_pipe_events(struct perf_session *session)
{
	struct ordered_events *oe = &session->ordered_events;
	struct perf_tool *tool = session->tool;
	int fd = perf_data__fd(session->data);
	union perf_event *event;
	uint32_t size, cur_size = 0;
	void *buf = NULL;
	s64 skip = 0;
	u64 head;
	ssize_t err;
	void *p;

	perf_tool__fill_defaults(tool);

	head = 0;
	cur_size = sizeof(union perf_event);

	buf = malloc(cur_size);
	if (!buf)
		return -errno;
	ordered_events__set_copy_on_queue(oe, true);
more:
	event = buf;
	err = readn(fd, event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	size = event->header.size;
	if (size < sizeof(struct perf_event_header)) {
		pr_err("bad event header size\n");
		goto out_err;
	}

	if (size > cur_size) {
		void *new = realloc(buf, size);
		if (!new) {
			pr_err("failed to allocate memory to read event\n");
			goto out_err;
		}
		buf = new;
		cur_size = size;
		event = buf;
	}
	p = event;
	p += sizeof(struct perf_event_header);

	if (size - sizeof(struct perf_event_header)) {
		err = readn(fd, p, size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if ((skip = perf_session__process_event(session, event, head)) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       head, event->header.size, event->header.type);
		err = -EINVAL;
		goto out_err;
	}

	head += size;

	if (skip > 0)
		head += skip;

	err = __perf_session__process_decomp_events(session);
	if (err)
		goto out_err;

	if (!session_done())
		goto more;
done:
	/* do the final flush for ordered samples */
	err = ordered_events__flush(oe, OE_FLUSH__FINAL);
	if (err)
		goto out_err;
	err = auxtrace__flush_events(session, tool);
	if (err)
		goto out_err;
	err = perf_session__flush_thread_stacks(session);
out_err:
	free(buf);
	if (!tool->no_warn)
		perf_session__warn_about_errors(session);
	ordered_events__free(&session->ordered_events);
	auxtrace__free_events(session);
	return err;
}

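/*
 * Return the event at 'head' if it lies entirely within the mapped
 * buffer, NULL if even its header would cross the end of the mapping
 * (the caller must remap or decompress more data first), or 'error'
 * if the size in the header says the event runs past the mapping.
 */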
static union perf_event *
prefetch_event(char *buf, u64 head, size_t mmap_size,
	       bool needs_swap, union perf_event *error)
{
	union perf_event *event;

	/*
	 * Ensure we have enough space remaining to read
	 * the size of the event in the file, if not go to
	 * the next one.
	 */
	if (head + sizeof(event->header) > mmap_size)
		return NULL;

	event = (union perf_event *)(buf + head);
	if (needs_swap)
		perf_event_header__bswap(&event->header);

	if (head + event->header.size <= mmap_size)
		return event;

	/* We're not fetching the event so swap back again */
	if (needs_swap)
		perf_event_header__bswap(&event->header);

	pr_debug("%s: head=%#" PRIx64 " event->header.size=%#x, mmap_size=%#zx:"
		 " fuzzed or compressed perf.data?\n", __func__, head,
		 event->header.size, mmap_size);

	return error;
}

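/*
 * For file data a truncated event means the perf.data is fuzzed or
 * compressed, so it is reported as ERR_PTR(-EINVAL); for decompressed
 * buffers it just means more data has to be decompressed first, so NULL
 * is returned and the caller retries once more data is available.
 */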
static union perf_event *
fetch_mmaped_event(u64 head, size_t mmap_size, char *buf, bool needs_swap)
{
	return prefetch_event(buf, head, mmap_size, needs_swap, ERR_PTR(-EINVAL));
}

static union perf_event *
fetch_decomp_event(u64 head, size_t mmap_size, char *buf, bool needs_swap)
{
	return prefetch_event(buf, head, mmap_size, needs_swap, NULL);
}

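/*
 * Walk the most recent decompression buffer from where the last pass
 * stopped, feeding each complete event through the normal processing
 * path and advancing decomp->head past it.
 */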
static int __perf_session__process_decomp_events(struct perf_session *session)
{
	s64 skip;
	u64 size, file_pos = 0;
	struct decomp *decomp = session->decomp_last;

	if (!decomp)
		return 0;

	while (decomp->head < decomp->size && !session_done()) {
		union perf_event *event = fetch_decomp_event(decomp->head, decomp->size, decomp->data,
							     session->header.needs_swap);

		if (!event)
			break;

		size = event->header.size;

		if (size < sizeof(struct perf_event_header) ||
		    (skip = perf_session__process_event(session, event, file_pos)) < 0) {
			pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
			       decomp->file_pos + decomp->head, event->header.size, event->header.type);
			return -EINVAL;
		}

		if (skip)
			size += skip;

		decomp->head += size;
	}

	return 0;
}

/*
 * On 64bit we can mmap the data file in one go. No need for tiny mmap
 * slices. On 32bit we use 32MB.
 */
#if BITS_PER_LONG == 64
#define MMAP_SIZE ULLONG_MAX
#define NUM_MMAPS 1
#else
#define MMAP_SIZE (32 * 1024 * 1024ULL)
#define NUM_MMAPS 128
#endif

struct reader;

typedef s64 (*reader_cb_t)(struct perf_session *session,
			   union perf_event *event,
			   u64 file_offset);

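/*
 * A reader binds a file descriptor and data window to a per-event
 * callback; reader__process_events() below drives that callback over
 * every event found in the window.
 */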
struct reader {
	int		fd;
	u64		data_size;
	u64		data_offset;
	reader_cb_t	process;
};

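/*
 * Map the data file one MMAP_SIZE window at a time and walk the events
 * in each window; when an event straddles the end of the window, unmap
 * and remap starting at the page that contains it.
 */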
static int
reader__process_events(struct reader *rd, struct perf_session *session,
		       struct ui_progress *prog)
{
	u64 data_size = rd->data_size;
	u64 head, page_offset, file_offset, file_pos, size;
	int err = 0, mmap_prot, mmap_flags, map_idx = 0;
	size_t mmap_size;
	char *buf, *mmaps[NUM_MMAPS];
	union perf_event *event;
	s64 skip;

	page_offset = page_size * (rd->data_offset / page_size);
	file_offset = page_offset;
	head = rd->data_offset - page_offset;

	ui_progress__init_size(prog, data_size, "Processing events...");

	data_size += rd->data_offset;

	mmap_size = MMAP_SIZE;
	if (mmap_size > data_size) {
		mmap_size = data_size;
		session->one_mmap = true;
	}

	memset(mmaps, 0, sizeof(mmaps));

	mmap_prot = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (session->header.needs_swap) {
		mmap_prot |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, rd->fd,
		   file_offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out;
	}
	mmaps[map_idx] = buf;
	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
	file_pos = file_offset + head;
	if (session->one_mmap) {
		session->one_mmap_addr = buf;
		session->one_mmap_offset = file_offset;
	}

more:
	event = fetch_mmaped_event(head, mmap_size, buf, session->header.needs_swap);
	if (IS_ERR(event))
		return PTR_ERR(event);

	if (!event) {
		if (mmaps[map_idx]) {
			munmap(mmaps[map_idx], mmap_size);
			mmaps[map_idx] = NULL;
		}

		page_offset = page_size * (head / page_size);
		file_offset += page_offset;
		head -= page_offset;
		goto remap;
	}

	size = event->header.size;

	skip = -EINVAL;

	if (size < sizeof(struct perf_event_header) ||
	    (skip = rd->process(session, event, file_pos)) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d [%s]\n",
		       file_offset + head, event->header.size,
		       event->header.type, strerror(-skip));
		err = skip;
		goto out;
	}

	if (skip)
		size += skip;

	head += size;
	file_pos += size;

	err = __perf_session__process_decomp_events(session);
	if (err)
		goto out;

	ui_progress__update(prog, size);

	if (session_done())
		goto out;

	if (file_pos < data_size)
		goto more;

out:
	return err;
}

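/* Default reader callback: hand each event to the generic dispatcher. */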
static s64 process_simple(struct perf_session *session,
			  union perf_event *event,
			  u64 file_offset)
{
	return perf_session__process_event(session, event, file_offset);
}

static int __perf_session__process_events(struct perf_session *session)
{
	struct reader rd = {
		.fd		= perf_data__fd(session->data),
		.data_size	= session->header.data_size,
		.data_offset	= session->header.data_offset,
		.process	= process_simple,
	};
	struct ordered_events *oe = &session->ordered_events;
	struct perf_tool *tool = session->tool;
	struct ui_progress prog;
	int err;

	perf_tool__fill_defaults(tool);

	if (rd.data_size == 0)
		return -1;

	ui_progress__init_size(&prog, rd.data_size, "Processing events...");

	err = reader__process_events(&rd, session, &prog);
	if (err)
		goto out_err;

	err = ordered_events__flush(oe, OE_FLUSH__FINAL);
	if (err)
		goto out_err;
	err = auxtrace__flush_events(session, tool);
	if (err)
		goto out_err;
	err = perf_session__flush_thread_stacks(session);
out_err:
	ui_progress__finish();
	if (!tool->no_warn)
		perf_session__warn_about_errors(session);
	/*
	 * We may be switching perf.data output; make ordered_events
	 * reusable.
	 */
	ordered_events__reinit(&session->ordered_events);
	auxtrace__free_events(session);
	session->one_mmap = false;
	return err;
}

int perf_session__process_events(struct perf_session *session)
{
	if (perf_session__register_idle_thread(session) < 0)
		return -ENOMEM;

	if (perf_data__is_pipe(session->data))
		return __perf_session__process_pipe_events(session);

	return __perf_session__process_events(session);
}

bool perf_session__has_traces(struct perf_session *session, const char *msg)
{
	struct evsel *evsel;

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT)
			return true;
	}

	pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
	return false;
}

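/*
 * Record the kallsyms relocation reference symbol (e.g. "_text") and
 * its address on the kernel map; anything from the first ']' onward in
 * the given name is dropped.
 */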
int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name, u64 addr)
{
	char *bracket;
	struct ref_reloc_sym *ref;
	struct kmap *kmap;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	kmap = map__kmap(map);
	if (kmap)
		kmap->ref_reloc_sym = ref;

	return 0;
}

size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
{
	return machines__fprintf_dsos(&session->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
					  bool (skip)(struct dso *dso, int parm), int parm)
{
	return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
}

size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
{
	size_t ret;
	const char *msg = "";

	if (perf_header__has_feat(&session->header, HEADER_AUXTRACE))
		msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)";

	ret = fprintf(fp, "\nAggregated stats:%s\n", msg);

	ret += events_stats__fprintf(&session->evlist->stats, fp);
	return ret;
}

size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
{
	/*
	 * FIXME: Here we have to actually print all the machines in this
	 * session, not just the host...
	 */
	return machine__fprintf(&session->machines.host, fp);
}

struct evsel *perf_session__find_first_evtype(struct perf_session *session,
					      unsigned int type)
{
	struct evsel *pos;

	evlist__for_each_entry(session->evlist, pos) {
		if (pos->core.attr.type == type)
			return pos;
	}
	return NULL;
}

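/*
 * Build a bitmap of the CPUs named in 'cpu_list'. Every event type
 * present in the file must have sampled PERF_SAMPLE_CPU, otherwise
 * filtering by CPU is meaningless.
 */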
int perf_session__cpu_bitmap(struct perf_session *session,
			     const char *cpu_list, unsigned long *cpu_bitmap)
{
	int i, err = -1;
	struct perf_cpu_map *map;
	int nr_cpus = min(session->header.env.nr_cpus_online, MAX_NR_CPUS);

	for (i = 0; i < PERF_TYPE_MAX; ++i) {
		struct evsel *evsel;

		evsel = perf_session__find_first_evtype(session, i);
		if (!evsel)
			continue;

		if (!(evsel->core.attr.sample_type & PERF_SAMPLE_CPU)) {
			pr_err("File does not contain CPU events. "
			       "Remove -C option to proceed.\n");
			return -1;
		}
	}

	map = perf_cpu_map__new(cpu_list);
	if (map == NULL) {
		pr_err("Invalid cpu_list\n");
		return -1;
	}

	for (i = 0; i < map->nr; i++) {
		int cpu = map->map[i];

		if (cpu >= nr_cpus) {
			pr_err("Requested CPU %d too large. "
			       "Consider raising MAX_NR_CPUS\n", cpu);
			goto out_delete_map;
		}

		set_bit(cpu, cpu_bitmap);
	}

	err = 0;

out_delete_map:
	perf_cpu_map__put(map);
	return err;
}

void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
				bool full)
{
	if (session == NULL || fp == NULL)
		return;

	fprintf(fp, "# ========\n");
	perf_header__fprintf_info(session, fp, full);
	fprintf(fp, "# ========\n#\n");
}

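/*
 * An id index event carries (id, idx, cpu, tid) tuples; record each
 * entry's idx/cpu/tid on the matching perf_sample_id so later lookups
 * by sample id can recover them.
 */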
int perf_event__process_id_index(struct perf_session *session,
				 union perf_event *event)
{
	struct evlist *evlist = session->evlist;
	struct perf_record_id_index *ie = &event->id_index;
	size_t i, nr, max_nr;

	max_nr = (ie->header.size - sizeof(struct perf_record_id_index)) /
		 sizeof(struct id_index_entry);
	nr = ie->nr;
	if (nr > max_nr)
		return -EINVAL;

	if (dump_trace)
		fprintf(stdout, " nr: %zu\n", nr);

	for (i = 0; i < nr; i++) {
		struct id_index_entry *e = &ie->entries[i];
		struct perf_sample_id *sid;

		if (dump_trace) {
			fprintf(stdout, " ... id: %"PRI_lu64, e->id);
			fprintf(stdout, "  idx: %"PRI_lu64, e->idx);
			fprintf(stdout, "  cpu: %"PRI_ld64, e->cpu);
			fprintf(stdout, "  tid: %"PRI_ld64"\n", e->tid);
		}

		sid = perf_evlist__id2sid(evlist, e->id);
		if (!sid)
			return -ENOENT;
		sid->idx = e->idx;
		sid->cpu = e->cpu;
		sid->tid = e->tid;
	}
	return 0;
}