#include <errno.h>
#include <inttypes.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <api/fs/fs.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <perf/cpumap.h>

#include "map_symbol.h"
#include "branch.h"
#include "debug.h"
#include "evlist.h"
#include "evsel.h"
#include "memswap.h"
#include "map.h"
#include "symbol.h"
#include "session.h"
#include "tool.h"
#include "perf_regs.h"
#include "asm/bug.h"
#include "auxtrace.h"
#include "thread.h"
#include "thread-stack.h"
#include "sample-raw.h"
#include "stat.h"
#include "ui/progress.h"
#include "../perf.h"
#include "arch/common.h"
#include <internal/lib.h>
#ifdef HAVE_ZSTD_SUPPORT
static int perf_session__process_compressed_event(struct perf_session *session,
						  union perf_event *event, u64 file_offset)
{
	void *src;
	size_t decomp_size, src_size;
	u64 decomp_last_rem = 0;
	size_t mmap_len, decomp_len = session->header.env.comp_mmap_len;
	struct decomp *decomp, *decomp_last = session->decomp_last;

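	/*
	 * If the previous decompressed buffer ended in a partial event,
	 * carry the remainder over so it can be completed by the bytes
	 * that are about to be decompressed into the new buffer.
	 */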
	if (decomp_last) {
		decomp_last_rem = decomp_last->size - decomp_last->head;
		decomp_len += decomp_last_rem;
	}

	mmap_len = sizeof(struct decomp) + decomp_len;
	decomp = mmap(NULL, mmap_len, PROT_READ|PROT_WRITE,
		      MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
	if (decomp == MAP_FAILED) {
		pr_err("Couldn't allocate memory for decompression\n");
		return -1;
	}

	decomp->file_pos = file_offset;
	decomp->mmap_len = mmap_len;
	decomp->head = 0;

	if (decomp_last_rem) {
		memcpy(decomp->data, &(decomp_last->data[decomp_last->head]), decomp_last_rem);
		decomp->size = decomp_last_rem;
	}

	src = (void *)event + sizeof(struct perf_record_compressed);
	src_size = event->pack.header.size - sizeof(struct perf_record_compressed);

	decomp_size = zstd_decompress_stream(&(session->zstd_data), src, src_size,
				&(decomp->data[decomp_last_rem]), decomp_len - decomp_last_rem);
	if (!decomp_size) {
		munmap(decomp, mmap_len);
		pr_err("Couldn't decompress data\n");
		return -1;
	}

	decomp->size += decomp_size;

	if (session->decomp == NULL) {
		session->decomp = decomp;
		session->decomp_last = decomp;
	} else {
		session->decomp_last->next = decomp;
		session->decomp_last = decomp;
	}
	pr_debug("decomp (B): %zd to %zd\n", src_size, decomp_size);

	return 0;
}
#else /* !HAVE_ZSTD_SUPPORT */
#define perf_session__process_compressed_event perf_session__process_compressed_event_stub
#endif

static int perf_session__deliver_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_tool *tool,
				       u64 file_offset);

static int perf_session__open(struct perf_session *session)
{
	struct perf_data *data = session->data;

	if (perf_session__read_header(session) < 0) {
		pr_err("incompatible file format (rerun with -v to learn more)\n");
		return -1;
	}

	if (perf_data__is_pipe(data))
		return 0;

	if (perf_header__has_feat(&session->header, HEADER_STAT))
		return 0;

	if (!perf_evlist__valid_sample_type(session->evlist)) {
		pr_err("non matching sample_type\n");
		return -1;
	}

	if (!perf_evlist__valid_sample_id_all(session->evlist)) {
		pr_err("non matching sample_id_all\n");
		return -1;
	}

	if (!perf_evlist__valid_read_format(session->evlist)) {
		pr_err("non matching read_format\n");
		return -1;
	}

	return 0;
}

void perf_session__set_id_hdr_size(struct perf_session *session)
{
	u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);

	machines__set_id_hdr_size(&session->machines, id_hdr_size);
}

int perf_session__create_kernel_maps(struct perf_session *session)
{
	int ret = machine__create_kernel_maps(&session->machines.host);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&session->machines);
	return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *session)
{
	machines__destroy_kernel_maps(&session->machines);
}

static bool perf_session__has_comm_exec(struct perf_session *session)
{
	struct evsel *evsel;

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->core.attr.comm_exec)
			return true;
	}

	return false;
}

static void perf_session__set_comm_exec(struct perf_session *session)
{
	bool comm_exec = perf_session__has_comm_exec(session);

	machines__set_comm_exec(&session->machines, comm_exec);
}

static int ordered_events__deliver_event(struct ordered_events *oe,
					 struct ordered_event *event)
{
	struct perf_session *session = container_of(oe, struct perf_session,
						    ordered_events);

	return perf_session__deliver_event(session, event->event,
					   session->tool, event->file_offset);
}
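
/*
 * Typical lifecycle, as a sketch (error handling elided; assumes the
 * struct perf_data layout used by this tree's util/data.h):
 *
 *	struct perf_data data = {
 *		.path = "perf.data",
 *		.mode = PERF_DATA_MODE_READ,
 *	};
 *	struct perf_session *session = perf_session__new(&data, false, &tool);
 *
 *	if (IS_ERR(session))
 *		return PTR_ERR(session);
 *	...process events...
 *	perf_session__delete(session);
 */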
struct perf_session *perf_session__new(struct perf_data *data,
				       bool repipe, struct perf_tool *tool)
{
	int ret = -ENOMEM;
	struct perf_session *session = zalloc(sizeof(*session));

	if (!session)
		goto out;

	session->repipe = repipe;
	session->tool = tool;
	INIT_LIST_HEAD(&session->auxtrace_index);
	machines__init(&session->machines);
	ordered_events__init(&session->ordered_events,
			     ordered_events__deliver_event, NULL);

	perf_env__init(&session->header.env);
	if (data) {
		ret = perf_data__open(data);
		if (ret < 0)
			goto out_delete;

		session->data = data;

		if (perf_data__is_read(data)) {
			ret = perf_session__open(session);
			if (ret < 0)
				goto out_delete;

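			/*
			 * Set session attributes that are present in perf.data
			 * but not in pipe mode.
			 */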
			if (!data->is_pipe) {
				perf_session__set_id_hdr_size(session);
				perf_session__set_comm_exec(session);
			}

			perf_evlist__init_trace_event_sample_raw(session->evlist);

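			/* Open the directory data. */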
			if (data->is_dir) {
				ret = perf_data__open_dir(data);
				if (ret)
					goto out_delete;
			}

			if (!symbol_conf.kallsyms_name &&
			    !symbol_conf.vmlinux_name)
				symbol_conf.kallsyms_name = perf_data__kallsyms_name(data);
		}
	} else {
		session->machines.host.env = &perf_env;
	}

	session->machines.host.single_address_space =
		perf_env__single_address_space(session->machines.host.env);

	if (!data || perf_data__is_write(data)) {
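		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in perf_event__process_mmap().
		 */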
		if (perf_session__create_kernel_maps(session) < 0)
			pr_warning("Cannot read kernel map\n");
	}

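	/*
	 * In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is
	 * processed, so perf_evlist__sample_id_all is not meaningful here.
	 */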
	if ((!data || !data->is_pipe) && tool && tool->ordering_requires_timestamps &&
	    tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		tool->ordered_events = false;
	}

	return session;

 out_delete:
	perf_session__delete(session);
 out:
	return ERR_PTR(ret);
}

static void perf_session__delete_threads(struct perf_session *session)
{
	machine__delete_threads(&session->machines.host);
}

static void perf_session__release_decomp_events(struct perf_session *session)
{
	struct decomp *next, *decomp;
	size_t mmap_len;

	next = session->decomp;
	do {
		decomp = next;
		if (decomp == NULL)
			break;
		next = decomp->next;
		mmap_len = decomp->mmap_len;
		munmap(decomp, mmap_len);
	} while (1);
}

void perf_session__delete(struct perf_session *session)
{
	if (session == NULL)
		return;
	auxtrace__free(session);
	auxtrace_index__free(&session->auxtrace_index);
	perf_session__destroy_kernel_maps(session);
	perf_session__delete_threads(session);
	perf_session__release_decomp_events(session);
	perf_env__exit(&session->header.env);
	machines__exit(&session->machines);
	if (session->data)
		perf_data__close(session->data);
	free(session);
}

static int process_event_synth_tracing_data_stub(struct perf_session *session
						 __maybe_unused,
						 union perf_event *event
						 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
					 union perf_event *event __maybe_unused,
					 struct evlist **pevlist
					 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_event_update_stub(struct perf_tool *tool __maybe_unused,
						 union perf_event *event __maybe_unused,
						 struct evlist **pevlist
						 __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_event_update(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
				     union perf_event *event __maybe_unused,
				     struct perf_sample *sample __maybe_unused,
				     struct evsel *evsel __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_stub(struct perf_tool *tool __maybe_unused,
			      union perf_event *event __maybe_unused,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
				       union perf_event *event __maybe_unused,
				       struct ordered_events *oe __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event,
				  struct ordered_events *oe);

static int skipn(int fd, off_t n)
{
	char buf[4096];
	ssize_t ret;

	while (n > 0) {
		ret = read(fd, buf, min(n, (off_t)sizeof(buf)));
		if (ret <= 0)
			return ret;
		n -= ret;
	}

	return 0;
}

static s64 process_event_auxtrace_stub(struct perf_session *session,
				       union perf_event *event)
{
	dump_printf(": unhandled!\n");
	if (perf_data__is_pipe(session->data))
		skipn(perf_data__fd(session->data), event->auxtrace.size);
	return event->auxtrace.size;
}

static int process_event_op2_stub(struct perf_session *session __maybe_unused,
				  union perf_event *event __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static
int process_event_thread_map_stub(struct perf_session *session __maybe_unused,
				  union perf_event *event __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_thread_map(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static
int process_event_cpu_map_stub(struct perf_session *session __maybe_unused,
			       union perf_event *event __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_cpu_map(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static
int process_event_stat_config_stub(struct perf_session *session __maybe_unused,
				   union perf_event *event __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_stat_config(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int process_stat_stub(struct perf_session *perf_session __maybe_unused,
			     union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_stat(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int process_stat_round_stub(struct perf_session *perf_session __maybe_unused,
				   union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_stat_round(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int perf_session__process_compressed_event_stub(struct perf_session *session __maybe_unused,
						       union perf_event *event __maybe_unused,
						       u64 file_offset __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

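/*
 * Fill in any handlers the tool did not set, so that the dispatch code can
 * call them unconditionally: unclaimed kernel events get counting or stub
 * handlers, synthesized user events get the matching "unhandled" stubs.
 */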
void perf_tool__fill_defaults(struct perf_tool *tool)
{
	if (tool->sample == NULL)
		tool->sample = process_event_sample_stub;
	if (tool->mmap == NULL)
		tool->mmap = process_event_stub;
	if (tool->mmap2 == NULL)
		tool->mmap2 = process_event_stub;
	if (tool->comm == NULL)
		tool->comm = process_event_stub;
	if (tool->namespaces == NULL)
		tool->namespaces = process_event_stub;
	if (tool->fork == NULL)
		tool->fork = process_event_stub;
	if (tool->exit == NULL)
		tool->exit = process_event_stub;
	if (tool->lost == NULL)
		tool->lost = perf_event__process_lost;
	if (tool->lost_samples == NULL)
		tool->lost_samples = perf_event__process_lost_samples;
	if (tool->aux == NULL)
		tool->aux = perf_event__process_aux;
	if (tool->itrace_start == NULL)
		tool->itrace_start = perf_event__process_itrace_start;
	if (tool->context_switch == NULL)
		tool->context_switch = perf_event__process_switch;
	if (tool->ksymbol == NULL)
		tool->ksymbol = perf_event__process_ksymbol;
	if (tool->bpf == NULL)
		tool->bpf = perf_event__process_bpf;
	if (tool->read == NULL)
		tool->read = process_event_sample_stub;
	if (tool->throttle == NULL)
		tool->throttle = process_event_stub;
	if (tool->unthrottle == NULL)
		tool->unthrottle = process_event_stub;
	if (tool->attr == NULL)
		tool->attr = process_event_synth_attr_stub;
	if (tool->event_update == NULL)
		tool->event_update = process_event_synth_event_update_stub;
	if (tool->tracing_data == NULL)
		tool->tracing_data = process_event_synth_tracing_data_stub;
	if (tool->build_id == NULL)
		tool->build_id = process_event_op2_stub;
	if (tool->finished_round == NULL) {
		if (tool->ordered_events)
			tool->finished_round = process_finished_round;
		else
			tool->finished_round = process_finished_round_stub;
	}
	if (tool->id_index == NULL)
		tool->id_index = process_event_op2_stub;
	if (tool->auxtrace_info == NULL)
		tool->auxtrace_info = process_event_op2_stub;
	if (tool->auxtrace == NULL)
		tool->auxtrace = process_event_auxtrace_stub;
	if (tool->auxtrace_error == NULL)
		tool->auxtrace_error = process_event_op2_stub;
	if (tool->thread_map == NULL)
		tool->thread_map = process_event_thread_map_stub;
	if (tool->cpu_map == NULL)
		tool->cpu_map = process_event_cpu_map_stub;
	if (tool->stat_config == NULL)
		tool->stat_config = process_event_stat_config_stub;
	if (tool->stat == NULL)
		tool->stat = process_stat_stub;
	if (tool->stat_round == NULL)
		tool->stat_round = process_stat_round_stub;
	if (tool->time_conv == NULL)
		tool->time_conv = process_event_op2_stub;
	if (tool->feature == NULL)
		tool->feature = process_event_op2_stub;
	if (tool->compressed == NULL)
		tool->compressed = perf_session__process_compressed_event;
}

static void swap_sample_id_all(union perf_event *event, void *data)
{
	void *end = (void *) event + event->header.size;
	int size = end - data;

	BUG_ON(size % sizeof(u64));
	mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
				   bool sample_id_all __maybe_unused)
{
	struct perf_event_header *hdr = &event->header;

	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
	event->comm.pid = bswap_32(event->comm.pid);
	event->comm.tid = bswap_32(event->comm.tid);

	if (sample_id_all) {
		void *data = &event->comm.comm;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap_swap(union perf_event *event,
				  bool sample_id_all)
{
	event->mmap.pid = bswap_32(event->mmap.pid);
	event->mmap.tid = bswap_32(event->mmap.tid);
	event->mmap.start = bswap_64(event->mmap.start);
	event->mmap.len = bswap_64(event->mmap.len);
	event->mmap.pgoff = bswap_64(event->mmap.pgoff);

	if (sample_id_all) {
		void *data = &event->mmap.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap2_swap(union perf_event *event,
				   bool sample_id_all)
{
	event->mmap2.pid = bswap_32(event->mmap2.pid);
	event->mmap2.tid = bswap_32(event->mmap2.tid);
	event->mmap2.start = bswap_64(event->mmap2.start);
	event->mmap2.len = bswap_64(event->mmap2.len);
	event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
	event->mmap2.maj = bswap_32(event->mmap2.maj);
	event->mmap2.min = bswap_32(event->mmap2.min);
	event->mmap2.ino = bswap_64(event->mmap2.ino);

	if (sample_id_all) {
		void *data = &event->mmap2.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
	event->fork.pid = bswap_32(event->fork.pid);
	event->fork.tid = bswap_32(event->fork.tid);
	event->fork.ppid = bswap_32(event->fork.ppid);
	event->fork.ptid = bswap_32(event->fork.ptid);
	event->fork.time = bswap_64(event->fork.time);

	if (sample_id_all)
		swap_sample_id_all(event, &event->fork + 1);
}

static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
	event->read.pid = bswap_32(event->read.pid);
	event->read.tid = bswap_32(event->read.tid);
	event->read.value = bswap_64(event->read.value);
	event->read.time_enabled = bswap_64(event->read.time_enabled);
	event->read.time_running = bswap_64(event->read.time_running);
	event->read.id = bswap_64(event->read.id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->read + 1);
}

static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
{
	event->aux.aux_offset = bswap_64(event->aux.aux_offset);
	event->aux.aux_size = bswap_64(event->aux.aux_size);
	event->aux.flags = bswap_64(event->aux.flags);

	if (sample_id_all)
		swap_sample_id_all(event, &event->aux + 1);
}

static void perf_event__itrace_start_swap(union perf_event *event,
					  bool sample_id_all)
{
	event->itrace_start.pid = bswap_32(event->itrace_start.pid);
	event->itrace_start.tid = bswap_32(event->itrace_start.tid);

	if (sample_id_all)
		swap_sample_id_all(event, &event->itrace_start + 1);
}

static void perf_event__switch_swap(union perf_event *event, bool sample_id_all)
{
	if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
		event->context_switch.next_prev_pid =
				bswap_32(event->context_switch.next_prev_pid);
		event->context_switch.next_prev_tid =
				bswap_32(event->context_switch.next_prev_tid);
	}

	if (sample_id_all)
		swap_sample_id_all(event, &event->context_switch + 1);
}

static void perf_event__throttle_swap(union perf_event *event,
				      bool sample_id_all)
{
	event->throttle.time = bswap_64(event->throttle.time);
	event->throttle.id = bswap_64(event->throttle.id);
	event->throttle.stream_id = bswap_64(event->throttle.stream_id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->throttle + 1);
}

static void perf_event__namespaces_swap(union perf_event *event,
					bool sample_id_all)
{
	u64 i;

	event->namespaces.pid = bswap_32(event->namespaces.pid);
	event->namespaces.tid = bswap_32(event->namespaces.tid);
	event->namespaces.nr_namespaces = bswap_64(event->namespaces.nr_namespaces);

	for (i = 0; i < event->namespaces.nr_namespaces; i++) {
		struct perf_ns_link_info *ns = &event->namespaces.link_info[i];

		ns->dev = bswap_64(ns->dev);
		ns->ino = bswap_64(ns->ino);
	}

	if (sample_id_all)
		swap_sample_id_all(event, &event->namespaces.link_info[i]);
}

static u8 revbyte(u8 b)
{
	int rev = (b >> 4) | ((b & 0xf) << 4);
	rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
	rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
	return (u8) rev;
}

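/*
 * The attr flags bitfield cannot simply be byte-swapped: its layout is
 * reversed between little- and big-endian ABIs. After the containing u64
 * has been byte-swapped, reversing the bit order inside each byte (see
 * revbyte() above) restores the individual flag values.
 */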
static void swap_bitfield(u8 *p, unsigned len)
{
	unsigned i;

	for (i = 0; i < len; i++) {
		*p = revbyte(*p);
		p++;
	}
}

void perf_event__attr_swap(struct perf_event_attr *attr)
{
	attr->type = bswap_32(attr->type);
	attr->size = bswap_32(attr->size);

#define bswap_safe(f, n)					\
	(attr->size > (offsetof(struct perf_event_attr, f) +	\
		       sizeof(attr->f) * (n)))
#define bswap_field(f, sz)			\
do {						\
	if (bswap_safe(f, 0))			\
		attr->f = bswap_##sz(attr->f);	\
} while (0)
#define bswap_field_16(f) bswap_field(f, 16)
#define bswap_field_32(f) bswap_field(f, 32)
#define bswap_field_64(f) bswap_field(f, 64)

	bswap_field_64(config);
	bswap_field_64(sample_period);
	bswap_field_64(sample_type);
	bswap_field_64(read_format);
	bswap_field_32(wakeup_events);
	bswap_field_32(bp_type);
	bswap_field_64(bp_addr);
	bswap_field_64(bp_len);
	bswap_field_64(branch_sample_type);
	bswap_field_64(sample_regs_user);
	bswap_field_32(sample_stack_user);
	bswap_field_32(aux_watermark);
	bswap_field_16(sample_max_stack);
	bswap_field_32(aux_sample_size);

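	/*
	 * The flag bitfields follow read_format and cannot be swapped with
	 * bswap_field(), so reverse the bit order inside each byte of the
	 * whole u64 that comes after read_format, guarding on attr->size
	 * as above.
	 */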
	if (bswap_safe(read_format, 1))
		swap_bitfield((u8 *) (&attr->read_format + 1),
			      sizeof(u64));
#undef bswap_field_64
#undef bswap_field_32
#undef bswap_field
#undef bswap_safe
}

static void perf_event__hdr_attr_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	size_t size;

	perf_event__attr_swap(&event->attr.attr);

	size = event->header.size;
	size -= (void *)&event->attr.id - (void *)event;
	mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_update_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->event_update.type = bswap_64(event->event_update.type);
	event->event_update.id = bswap_64(event->event_update.id);
}

static void perf_event__event_type_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->event_type.event_type.event_id =
		bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->tracing_data.size = bswap_32(event->tracing_data.size);
}

static void perf_event__auxtrace_info_swap(union perf_event *event,
					   bool sample_id_all __maybe_unused)
{
	size_t size;

	event->auxtrace_info.type = bswap_32(event->auxtrace_info.type);

	size = event->header.size;
	size -= (void *)&event->auxtrace_info.priv - (void *)event;
	mem_bswap_64(event->auxtrace_info.priv, size);
}

static void perf_event__auxtrace_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	event->auxtrace.size = bswap_64(event->auxtrace.size);
	event->auxtrace.offset = bswap_64(event->auxtrace.offset);
	event->auxtrace.reference = bswap_64(event->auxtrace.reference);
	event->auxtrace.idx = bswap_32(event->auxtrace.idx);
	event->auxtrace.tid = bswap_32(event->auxtrace.tid);
	event->auxtrace.cpu = bswap_32(event->auxtrace.cpu);
}

static void perf_event__auxtrace_error_swap(union perf_event *event,
					    bool sample_id_all __maybe_unused)
{
	event->auxtrace_error.type = bswap_32(event->auxtrace_error.type);
	event->auxtrace_error.code = bswap_32(event->auxtrace_error.code);
	event->auxtrace_error.cpu = bswap_32(event->auxtrace_error.cpu);
	event->auxtrace_error.pid = bswap_32(event->auxtrace_error.pid);
	event->auxtrace_error.tid = bswap_32(event->auxtrace_error.tid);
	event->auxtrace_error.fmt = bswap_32(event->auxtrace_error.fmt);
	event->auxtrace_error.ip = bswap_64(event->auxtrace_error.ip);
	if (event->auxtrace_error.fmt)
		event->auxtrace_error.time = bswap_64(event->auxtrace_error.time);
}

static void perf_event__thread_map_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	unsigned i;

	event->thread_map.nr = bswap_64(event->thread_map.nr);

	for (i = 0; i < event->thread_map.nr; i++)
		event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid);
}

static void perf_event__cpu_map_swap(union perf_event *event,
				     bool sample_id_all __maybe_unused)
{
	struct perf_record_cpu_map_data *data = &event->cpu_map.data;
	struct cpu_map_entries *cpus;
	struct perf_record_record_cpu_map *mask;
	unsigned i;

	data->type = bswap_64(data->type);

	switch (data->type) {
	case PERF_CPU_MAP__CPUS:
		cpus = (struct cpu_map_entries *)data->data;

		cpus->nr = bswap_16(cpus->nr);

		for (i = 0; i < cpus->nr; i++)
			cpus->cpu[i] = bswap_16(cpus->cpu[i]);
		break;
	case PERF_CPU_MAP__MASK:
		mask = (struct perf_record_record_cpu_map *)data->data;

		mask->nr = bswap_16(mask->nr);
		mask->long_size = bswap_16(mask->long_size);

		switch (mask->long_size) {
		case 4: mem_bswap_32(&mask->mask, mask->nr); break;
		case 8: mem_bswap_64(&mask->mask, mask->nr); break;
		default:
			pr_err("cpu_map swap: unsupported long size\n");
		}
		break;
	default:
		break;
	}
}

static void perf_event__stat_config_swap(union perf_event *event,
					 bool sample_id_all __maybe_unused)
{
	u64 size;

	size = event->stat_config.nr * sizeof(event->stat_config.data[0]);
	size += 1; /* the extra byte makes mem_bswap_64() also swap the nr field itself */
	mem_bswap_64(&event->stat_config.nr, size);
}

static void perf_event__stat_swap(union perf_event *event,
				  bool sample_id_all __maybe_unused)
{
	event->stat.id = bswap_64(event->stat.id);
	event->stat.thread = bswap_32(event->stat.thread);
	event->stat.cpu = bswap_32(event->stat.cpu);
	event->stat.val = bswap_64(event->stat.val);
	event->stat.ena = bswap_64(event->stat.ena);
	event->stat.run = bswap_64(event->stat.run);
}

static void perf_event__stat_round_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->stat_round.type = bswap_64(event->stat_round.type);
	event->stat_round.time = bswap_64(event->stat_round.time);
}

typedef void (*perf_event__swap_op)(union perf_event *event,
				    bool sample_id_all);

static perf_event__swap_op perf_event__swap_ops[] = {
	[PERF_RECORD_MMAP] = perf_event__mmap_swap,
	[PERF_RECORD_MMAP2] = perf_event__mmap2_swap,
	[PERF_RECORD_COMM] = perf_event__comm_swap,
	[PERF_RECORD_FORK] = perf_event__task_swap,
	[PERF_RECORD_EXIT] = perf_event__task_swap,
	[PERF_RECORD_LOST] = perf_event__all64_swap,
	[PERF_RECORD_READ] = perf_event__read_swap,
	[PERF_RECORD_THROTTLE] = perf_event__throttle_swap,
	[PERF_RECORD_UNTHROTTLE] = perf_event__throttle_swap,
	[PERF_RECORD_SAMPLE] = perf_event__all64_swap,
	[PERF_RECORD_AUX] = perf_event__aux_swap,
	[PERF_RECORD_ITRACE_START] = perf_event__itrace_start_swap,
	[PERF_RECORD_LOST_SAMPLES] = perf_event__all64_swap,
	[PERF_RECORD_SWITCH] = perf_event__switch_swap,
	[PERF_RECORD_SWITCH_CPU_WIDE] = perf_event__switch_swap,
	[PERF_RECORD_NAMESPACES] = perf_event__namespaces_swap,
	[PERF_RECORD_HEADER_ATTR] = perf_event__hdr_attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID] = NULL,
	[PERF_RECORD_ID_INDEX] = perf_event__all64_swap,
	[PERF_RECORD_AUXTRACE_INFO] = perf_event__auxtrace_info_swap,
	[PERF_RECORD_AUXTRACE] = perf_event__auxtrace_swap,
	[PERF_RECORD_AUXTRACE_ERROR] = perf_event__auxtrace_error_swap,
	[PERF_RECORD_THREAD_MAP] = perf_event__thread_map_swap,
	[PERF_RECORD_CPU_MAP] = perf_event__cpu_map_swap,
	[PERF_RECORD_STAT_CONFIG] = perf_event__stat_config_swap,
	[PERF_RECORD_STAT] = perf_event__stat_swap,
	[PERF_RECORD_STAT_ROUND] = perf_event__stat_round_swap,
	[PERF_RECORD_EVENT_UPDATE] = perf_event__event_update_swap,
	[PERF_RECORD_TIME_CONV] = perf_event__all64_swap,
	[PERF_RECORD_HEADER_MAX] = NULL,
};

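/*
 * When perf record finishes a pass over all of its mmap buffers, it emits
 * this pseudo event (PERF_RECORD_FINISHED_ROUND). Let t be the maximum
 * timestamp seen in pass n. Assuming timestamps are monotonic within each
 * buffer, every event older than t is guaranteed to have been read by the
 * end of pass n + 1, so once pass n + 2 starts, all queued events with
 * timestamps below t can safely be delivered, in order, to the tools.
 * That is what ordered_events__flush(oe, OE_FLUSH__ROUND) implements.
 */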
static int process_finished_round(struct perf_tool *tool __maybe_unused,
				  union perf_event *event __maybe_unused,
				  struct ordered_events *oe)
{
	if (dump_trace)
		fprintf(stdout, "\n");
	return ordered_events__flush(oe, OE_FLUSH__ROUND);
}

int perf_session__queue_event(struct perf_session *s, union perf_event *event,
			      u64 timestamp, u64 file_offset)
{
	return ordered_events__queue(&s->ordered_events, event, timestamp, file_offset);
}

static void callchain__lbr_callstack_printf(struct perf_sample *sample)
{
	struct ip_callchain *callchain = sample->callchain;
	struct branch_stack *lbr_stack = sample->branch_stack;
	u64 kernel_callchain_nr = callchain->nr;
	unsigned int i;

	for (i = 0; i < kernel_callchain_nr; i++) {
		if (callchain->ips[i] == PERF_CONTEXT_USER)
			break;
	}

	if ((i != kernel_callchain_nr) && lbr_stack->nr) {
		u64 total_nr;
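		/*
		 * The LBR callstack captures only the user call chain; i here
		 * is the kernel call chain depth up to the PERF_CONTEXT_USER
		 * marker. LBR entries are pairs: the caller is in "from", the
		 * callee in "to". For a stack A->B->C->D the LBR records
		 * "C->D", "B->C", "A->B", so the first "to" plus every "from"
		 * entry reconstructs the whole user stack.
		 */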
		total_nr = i + 1 + lbr_stack->nr + 1;
		kernel_callchain_nr = i + 1;

		printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr);

		for (i = 0; i < kernel_callchain_nr; i++)
			printf("..... %2d: %016" PRIx64 "\n",
			       i, callchain->ips[i]);

		printf("..... %2d: %016" PRIx64 "\n",
		       (int)(kernel_callchain_nr), lbr_stack->entries[0].to);
		for (i = 0; i < lbr_stack->nr; i++)
			printf("..... %2d: %016" PRIx64 "\n",
			       (int)(i + kernel_callchain_nr + 1), lbr_stack->entries[i].from);
	}
}

static void callchain__printf(struct evsel *evsel,
			      struct perf_sample *sample)
{
	unsigned int i;
	struct ip_callchain *callchain = sample->callchain;

	if (perf_evsel__has_branch_callstack(evsel))
		callchain__lbr_callstack_printf(sample);

	printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);

	for (i = 0; i < callchain->nr; i++)
		printf("..... %2d: %016" PRIx64 "\n",
		       i, callchain->ips[i]);
}

static void branch_stack__printf(struct perf_sample *sample, bool callstack)
{
	uint64_t i;

	printf("%s: nr:%" PRIu64 "\n",
	       !callstack ? "... branch stack" : "... branch callstack",
	       sample->branch_stack->nr);

	for (i = 0; i < sample->branch_stack->nr; i++) {
		struct branch_entry *e = &sample->branch_stack->entries[i];

		if (!callstack) {
			printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x\n",
			       i, e->from, e->to,
			       (unsigned short)e->flags.cycles,
			       e->flags.mispred ? "M" : " ",
			       e->flags.predicted ? "P" : " ",
			       e->flags.abort ? "A" : " ",
			       e->flags.in_tx ? "T" : " ",
			       (unsigned)e->flags.reserved);
		} else {
			printf("..... %2"PRIu64": %016" PRIx64 "\n",
			       i, i > 0 ? e->from : e->to);
		}
	}
}

static void regs_dump__printf(u64 mask, u64 *regs)
{
	unsigned rid, i = 0;

	for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
		u64 val = regs[i++];

		printf(".... %-5s 0x%" PRIx64 "\n",
		       perf_reg_name(rid), val);
	}
}

static const char *regs_abi[] = {
	[PERF_SAMPLE_REGS_ABI_NONE] = "none",
	[PERF_SAMPLE_REGS_ABI_32] = "32-bit",
	[PERF_SAMPLE_REGS_ABI_64] = "64-bit",
};

static inline const char *regs_dump_abi(struct regs_dump *d)
{
	if (d->abi > PERF_SAMPLE_REGS_ABI_64)
		return "unknown";

	return regs_abi[d->abi];
}

static void regs__printf(const char *type, struct regs_dump *regs)
{
	u64 mask = regs->mask;

	printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n",
	       type,
	       mask,
	       regs_dump_abi(regs));

	regs_dump__printf(mask, regs->regs);
}

static void regs_user__printf(struct perf_sample *sample)
{
	struct regs_dump *user_regs = &sample->user_regs;

	if (user_regs->regs)
		regs__printf("user", user_regs);
}

static void regs_intr__printf(struct perf_sample *sample)
{
	struct regs_dump *intr_regs = &sample->intr_regs;

	if (intr_regs->regs)
		regs__printf("intr", intr_regs);
}

static void stack_user__printf(struct stack_dump *dump)
{
	printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
	       dump->size, dump->offset);
}

static void perf_evlist__print_tstamp(struct evlist *evlist,
				      union perf_event *event,
				      struct perf_sample *sample)
{
	u64 sample_type = __perf_evlist__combined_sample_type(evlist);

	if (event->header.type != PERF_RECORD_SAMPLE &&
	    !perf_evlist__sample_id_all(evlist)) {
		fputs("-1 -1 ", stdout);
		return;
	}

	if ((sample_type & PERF_SAMPLE_CPU))
		printf("%u ", sample->cpu);

	if (sample_type & PERF_SAMPLE_TIME)
		printf("%" PRIu64 " ", sample->time);
}

static void sample_read__printf(struct perf_sample *sample, u64 read_format)
{
	printf("... sample_read:\n");

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("...... time enabled %016" PRIx64 "\n",
		       sample->read.time_enabled);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("...... time running %016" PRIx64 "\n",
		       sample->read.time_running);

	if (read_format & PERF_FORMAT_GROUP) {
		u64 i;

		printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);

		for (i = 0; i < sample->read.group.nr; i++) {
			struct sample_read_value *value;

			value = &sample->read.group.values[i];
			printf("..... id %016" PRIx64
			       ", value %016" PRIx64 "\n",
			       value->id, value->value);
		}
	} else
		printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
		       sample->read.one.id, sample->read.one.value);
}

static void dump_event(struct evlist *evlist, union perf_event *event,
		       u64 file_offset, struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("\n%#" PRIx64 " [%#x]: event: %d\n",
	       file_offset, event->header.size, event->header.type);

	trace_event(event);
	if (event->header.type == PERF_RECORD_SAMPLE && evlist->trace_event_sample_raw)
		evlist->trace_event_sample_raw(evlist, event, sample);

	if (sample)
		perf_evlist__print_tstamp(evlist, event, sample);

	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
	       event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct evsel *evsel, union perf_event *event,
			struct perf_sample *sample)
{
	u64 sample_type;

	if (!dump_trace)
		return;

	printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
	       event->header.misc, sample->pid, sample->tid, sample->ip,
	       sample->period, sample->addr);

	sample_type = evsel->core.attr.sample_type;

	if (evsel__has_callchain(evsel))
		callchain__printf(evsel, sample);

	if (sample_type & PERF_SAMPLE_BRANCH_STACK)
		branch_stack__printf(sample, perf_evsel__has_branch_callstack(evsel));

	if (sample_type & PERF_SAMPLE_REGS_USER)
		regs_user__printf(sample);

	if (sample_type & PERF_SAMPLE_REGS_INTR)
		regs_intr__printf(sample);

	if (sample_type & PERF_SAMPLE_STACK_USER)
		stack_user__printf(&sample->user_stack);

	if (sample_type & PERF_SAMPLE_WEIGHT)
		printf("... weight: %" PRIu64 "\n", sample->weight);

	if (sample_type & PERF_SAMPLE_DATA_SRC)
		printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);

	if (sample_type & PERF_SAMPLE_PHYS_ADDR)
		printf(" .. phys_addr: 0x%"PRIx64"\n", sample->phys_addr);

	if (sample_type & PERF_SAMPLE_TRANSACTION)
		printf("... transaction: %" PRIx64 "\n", sample->transaction);

	if (sample_type & PERF_SAMPLE_READ)
		sample_read__printf(sample, evsel->core.attr.read_format);
}

static void dump_read(struct evsel *evsel, union perf_event *event)
{
	struct perf_record_read *read_event = &event->read;
	u64 read_format;

	if (!dump_trace)
		return;

	printf(": %d %d %s %" PRI_lu64 "\n", event->read.pid, event->read.tid,
	       perf_evsel__name(evsel),
	       event->read.value);

	if (!evsel)
		return;

	read_format = evsel->core.attr.read_format;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("... time enabled : %" PRI_lu64 "\n", read_event->time_enabled);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("... time running : %" PRI_lu64 "\n", read_event->time_running);

	if (read_format & PERF_FORMAT_ID)
		printf("... id : %" PRI_lu64 "\n", read_event->id);
}

static struct machine *machines__find_for_cpumode(struct machines *machines,
						  union perf_event *event,
						  struct perf_sample *sample)
{
	struct machine *machine;

	if (perf_guest &&
	    ((sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
	     (sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) {
		u32 pid;

		if (event->header.type == PERF_RECORD_MMAP
		    || event->header.type == PERF_RECORD_MMAP2)
			pid = event->mmap.pid;
		else
			pid = sample->pid;

		machine = machines__find(machines, pid);
		if (!machine)
			machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
		return machine;
	}

	return &machines->host;
}

static int deliver_sample_value(struct evlist *evlist,
				struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct sample_read_value *v,
				struct machine *machine)
{
	struct perf_sample_id *sid = perf_evlist__id2sid(evlist, v->id);
	struct evsel *evsel;

	if (sid) {
		sample->id = v->id;
		sample->period = v->value - sid->period;
		sid->period = v->value;
	}

	if (!sid || sid->evsel == NULL) {
		++evlist->stats.nr_unknown_id;
		return 0;
	}

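	/*
	 * There's no reason to deliver a sample with zero period, bail out.
	 */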
	if (!sample->period)
		return 0;

	evsel = container_of(sid->evsel, struct evsel, core);
	return tool->sample(tool, event, sample, evsel, machine);
}

static int deliver_sample_group(struct evlist *evlist,
				struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct machine *machine)
{
	int ret = -EINVAL;
	u64 i;

	for (i = 0; i < sample->read.group.nr; i++) {
		ret = deliver_sample_value(evlist, tool, event, sample,
					   &sample->read.group.values[i],
					   machine);
		if (ret)
			break;
	}

	return ret;
}

static int perf_evlist__deliver_sample(struct evlist *evlist,
				       struct perf_tool *tool,
				       union perf_event *event,
				       struct perf_sample *sample,
				       struct evsel *evsel,
				       struct machine *machine)
{
	/* We know evsel != NULL. */
	u64 sample_type = evsel->core.attr.sample_type;
	u64 read_format = evsel->core.attr.read_format;

	/* Standard sample delivery. */
	if (!(sample_type & PERF_SAMPLE_READ))
		return tool->sample(tool, event, sample, evsel, machine);

	/* For PERF_SAMPLE_READ we have either single or group mode. */
	if (read_format & PERF_FORMAT_GROUP)
		return deliver_sample_group(evlist, tool, event, sample,
					    machine);
	else
		return deliver_sample_value(evlist, tool, event, sample,
					    &sample->read.one, machine);
}

static int machines__deliver_event(struct machines *machines,
				   struct evlist *evlist,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct perf_tool *tool, u64 file_offset)
{
	struct evsel *evsel;
	struct machine *machine;

	dump_event(evlist, event, file_offset, sample);

	evsel = perf_evlist__id2evsel(evlist, sample->id);

	machine = machines__find_for_cpumode(machines, event, sample);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		if (evsel == NULL) {
			++evlist->stats.nr_unknown_id;
			return 0;
		}
		dump_sample(evsel, event, sample);
		if (machine == NULL) {
			++evlist->stats.nr_unprocessable_samples;
			return 0;
		}
		return perf_evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
	case PERF_RECORD_MMAP:
		return tool->mmap(tool, event, sample, machine);
	case PERF_RECORD_MMAP2:
		if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT)
			++evlist->stats.nr_proc_map_timeout;
		return tool->mmap2(tool, event, sample, machine);
	case PERF_RECORD_COMM:
		return tool->comm(tool, event, sample, machine);
	case PERF_RECORD_NAMESPACES:
		return tool->namespaces(tool, event, sample, machine);
	case PERF_RECORD_FORK:
		return tool->fork(tool, event, sample, machine);
	case PERF_RECORD_EXIT:
		return tool->exit(tool, event, sample, machine);
	case PERF_RECORD_LOST:
		if (tool->lost == perf_event__process_lost)
			evlist->stats.total_lost += event->lost.lost;
		return tool->lost(tool, event, sample, machine);
	case PERF_RECORD_LOST_SAMPLES:
		if (tool->lost_samples == perf_event__process_lost_samples)
			evlist->stats.total_lost_samples += event->lost_samples.lost;
		return tool->lost_samples(tool, event, sample, machine);
	case PERF_RECORD_READ:
		dump_read(evsel, event);
		return tool->read(tool, event, sample, evsel, machine);
	case PERF_RECORD_THROTTLE:
		return tool->throttle(tool, event, sample, machine);
	case PERF_RECORD_UNTHROTTLE:
		return tool->unthrottle(tool, event, sample, machine);
	case PERF_RECORD_AUX:
		if (tool->aux == perf_event__process_aux) {
			if (event->aux.flags & PERF_AUX_FLAG_TRUNCATED)
				evlist->stats.total_aux_lost += 1;
			if (event->aux.flags & PERF_AUX_FLAG_PARTIAL)
				evlist->stats.total_aux_partial += 1;
		}
		return tool->aux(tool, event, sample, machine);
	case PERF_RECORD_ITRACE_START:
		return tool->itrace_start(tool, event, sample, machine);
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		return tool->context_switch(tool, event, sample, machine);
	case PERF_RECORD_KSYMBOL:
		return tool->ksymbol(tool, event, sample, machine);
	case PERF_RECORD_BPF_EVENT:
		return tool->bpf(tool, event, sample, machine);
	default:
		++evlist->stats.nr_unknown_events;
		return -1;
	}
}

static int perf_session__deliver_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_tool *tool,
				       u64 file_offset)
{
	struct perf_sample sample;
	int ret;

	ret = perf_evlist__parse_sample(session->evlist, event, &sample);
	if (ret) {
		pr_err("Can't parse sample, err = %d\n", ret);
		return ret;
	}

	ret = auxtrace__process_event(session, event, &sample, tool);
	if (ret < 0)
		return ret;
	if (ret > 0)
		return 0;

	ret = machines__deliver_event(&session->machines, session->evlist,
				      event, &sample, tool, file_offset);

	if (dump_trace && sample.aux_sample.size)
		auxtrace__dump_auxtrace_sample(session, &sample);

	return ret;
}

static s64 perf_session__process_user_event(struct perf_session *session,
					    union perf_event *event,
					    u64 file_offset)
{
	struct ordered_events *oe = &session->ordered_events;
	struct perf_tool *tool = session->tool;
	struct perf_sample sample = { .time = 0, };
	int fd = perf_data__fd(session->data);
	int err;

	if (event->header.type != PERF_RECORD_COMPRESSED ||
	    tool->compressed == perf_session__process_compressed_event_stub)
		dump_event(session->evlist, event, file_offset, &sample);

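	/* These events are processed right away */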
	switch (event->header.type) {
	case PERF_RECORD_HEADER_ATTR:
		err = tool->attr(tool, event, &session->evlist);
		if (err == 0) {
			perf_session__set_id_hdr_size(session);
			perf_session__set_comm_exec(session);
		}
		return err;
	case PERF_RECORD_EVENT_UPDATE:
		return tool->event_update(tool, event, &session->evlist);
	case PERF_RECORD_HEADER_EVENT_TYPE:
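		/*
		 * Deprecated, but we need to handle it for the sake of old
		 * data files created in pipe mode.
		 */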
		return 0;
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* Setup for reading amidst mmap'ed data. */
		lseek(fd, file_offset, SEEK_SET);
		return tool->tracing_data(session, event);
	case PERF_RECORD_HEADER_BUILD_ID:
		return tool->build_id(session, event);
	case PERF_RECORD_FINISHED_ROUND:
		return tool->finished_round(tool, event, oe);
	case PERF_RECORD_ID_INDEX:
		return tool->id_index(session, event);
	case PERF_RECORD_AUXTRACE_INFO:
		return tool->auxtrace_info(session, event);
	case PERF_RECORD_AUXTRACE:
		/* Setup for reading amidst mmap'ed data. */
		lseek(fd, file_offset + event->header.size, SEEK_SET);
		return tool->auxtrace(session, event);
	case PERF_RECORD_AUXTRACE_ERROR:
		perf_session__auxtrace_error_inc(session, event);
		return tool->auxtrace_error(session, event);
	case PERF_RECORD_THREAD_MAP:
		return tool->thread_map(session, event);
	case PERF_RECORD_CPU_MAP:
		return tool->cpu_map(session, event);
	case PERF_RECORD_STAT_CONFIG:
		return tool->stat_config(session, event);
	case PERF_RECORD_STAT:
		return tool->stat(session, event);
	case PERF_RECORD_STAT_ROUND:
		return tool->stat_round(session, event);
	case PERF_RECORD_TIME_CONV:
		session->time_conv = event->time_conv;
		return tool->time_conv(session, event);
	case PERF_RECORD_HEADER_FEATURE:
		return tool->feature(session, event);
	case PERF_RECORD_COMPRESSED:
		err = tool->compressed(session, event, file_offset);
		if (err)
			dump_event(session->evlist, event, file_offset, &sample);
		return err;
	default:
		return -EINVAL;
	}
}

int perf_session__deliver_synth_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample)
{
	struct evlist *evlist = session->evlist;
	struct perf_tool *tool = session->tool;

	events_stats__inc(&evlist->stats, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, 0);

	return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0);
}

static void event_swap(union perf_event *event, bool sample_id_all)
{
	perf_event__swap_op swap;

	swap = perf_event__swap_ops[event->header.type];
	if (swap)
		swap(event, sample_id_all);
}

int perf_session__peek_event(struct perf_session *session, off_t file_offset,
			     void *buf, size_t buf_sz,
			     union perf_event **event_ptr,
			     struct perf_sample *sample)
{
	union perf_event *event;
	size_t hdr_sz, rest;
	int fd;

	if (session->one_mmap && !session->header.needs_swap) {
		event = file_offset - session->one_mmap_offset +
			session->one_mmap_addr;
		goto out_parse_sample;
	}

	if (perf_data__is_pipe(session->data))
		return -1;

	fd = perf_data__fd(session->data);
	hdr_sz = sizeof(struct perf_event_header);

	if (buf_sz < hdr_sz)
		return -1;

	if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
	    readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)
		return -1;

	event = (union perf_event *)buf;

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (event->header.size < hdr_sz || event->header.size > buf_sz)
		return -1;

	rest = event->header.size - hdr_sz;

	if (readn(fd, buf, rest) != (ssize_t)rest)
		return -1;

	if (session->header.needs_swap)
		event_swap(event, perf_evlist__sample_id_all(session->evlist));

out_parse_sample:

	if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
	    perf_evlist__parse_sample(session->evlist, event, sample))
		return -1;

	*event_ptr = event;

	return 0;
}

int perf_session__peek_events(struct perf_session *session, u64 offset,
			      u64 size, peek_events_cb_t cb, void *data)
{
	u64 max_offset = offset + size;
	char buf[PERF_SAMPLE_MAX_SIZE];
	union perf_event *event;
	int err;

	do {
		err = perf_session__peek_event(session, offset, buf,
					       PERF_SAMPLE_MAX_SIZE, &event,
					       NULL);
		if (err)
			return err;

		err = cb(session, event, offset, data);
		if (err)
			return err;

		offset += event->header.size;
		if (event->header.type == PERF_RECORD_AUXTRACE)
			offset += event->auxtrace.size;

	} while (offset < max_offset);

	return err;
}

static s64 perf_session__process_event(struct perf_session *session,
				       union perf_event *event, u64 file_offset)
{
	struct evlist *evlist = session->evlist;
	struct perf_tool *tool = session->tool;
	int ret;

	if (session->header.needs_swap)
		event_swap(event, perf_evlist__sample_id_all(evlist));

	if (event->header.type >= PERF_RECORD_HEADER_MAX)
		return -EINVAL;

	events_stats__inc(&evlist->stats, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, file_offset);

	if (tool->ordered_events) {
		u64 timestamp = -1ULL;

		ret = perf_evlist__parse_sample_timestamp(evlist, event, &timestamp);
		if (ret && ret != -1)
			return ret;

		ret = perf_session__queue_event(session, event, timestamp, file_offset);
		if (ret != -ETIME)
			return ret;
	}

	return perf_session__deliver_event(session, event, tool, file_offset);
}

void perf_event_header__bswap(struct perf_event_header *hdr)
{
	hdr->type = bswap_32(hdr->type);
	hdr->misc = bswap_16(hdr->misc);
	hdr->size = bswap_16(hdr->size);
}

struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
{
	return machine__findnew_thread(&session->machines.host, -1, pid);
}

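/*
 * The idle task (pid 0, tid 0) never gets a COMM event of its own, so
 * register it by hand as "swapper" before samples start referencing it.
 */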
int perf_session__register_idle_thread(struct perf_session *session)
{
	struct thread *thread;
	int err = 0;

	thread = machine__findnew_thread(&session->machines.host, 0, 0);
	if (thread == NULL || thread__set_comm(thread, "swapper", 0)) {
		pr_err("problem inserting idle task.\n");
		err = -1;
	}

	if (thread == NULL || thread__set_namespaces(thread, 0, NULL)) {
		pr_err("problem inserting idle task.\n");
		err = -1;
	}

	/* machine__findnew_thread() got the thread, so put it */
	thread__put(thread);
	return err;
}

static void
perf_session__warn_order(const struct perf_session *session)
{
	const struct ordered_events *oe = &session->ordered_events;
	struct evsel *evsel;
	bool should_warn = true;

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->core.attr.write_backward)
			should_warn = false;
	}

	if (!should_warn)
		return;

	if (oe->nr_unordered_events != 0)
		ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events);
}

static void perf_session__warn_about_errors(const struct perf_session *session)
{
	const struct events_stats *stats = &session->evlist->stats;

	if (session->tool->lost == perf_event__process_lost &&
	    stats->nr_events[PERF_RECORD_LOST] != 0) {
		ui__warning("Processed %d events and lost %d chunks!\n\n"
			    "Check IO/CPU overload!\n\n",
			    stats->nr_events[0],
			    stats->nr_events[PERF_RECORD_LOST]);
	}

	if (session->tool->lost_samples == perf_event__process_lost_samples) {
		double drop_rate;

		drop_rate = (double)stats->total_lost_samples /
			    (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples);
		if (drop_rate > 0.05) {
			ui__warning("Processed %" PRIu64 " samples and lost %3.2f%%!\n\n",
				    stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples,
				    drop_rate * 100.0);
		}
	}

	if (session->tool->aux == perf_event__process_aux &&
	    stats->total_aux_lost != 0) {
		ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n",
			    stats->total_aux_lost,
			    stats->nr_events[PERF_RECORD_AUX]);
	}

	if (session->tool->aux == perf_event__process_aux &&
	    stats->total_aux_partial != 0) {
		bool vmm_exclusive = false;

		(void)sysfs__read_bool("module/kvm_intel/parameters/vmm_exclusive",
				       &vmm_exclusive);

		ui__warning("AUX data had gaps in it %" PRIu64 " times out of %u!\n\n"
			    "Are you running a KVM guest in the background?%s\n\n",
			    stats->total_aux_partial,
			    stats->nr_events[PERF_RECORD_AUX],
			    vmm_exclusive ?
			    "\nReloading kvm_intel module with vmm_exclusive=0\n"
			    "will reduce the gaps to only guest's timeslices." :
			    "");
	}

	if (stats->nr_unknown_events != 0) {
		ui__warning("Found %u unknown events!\n\n"
			    "Is this an older tool processing a perf.data "
			    "file generated by a more recent tool?\n\n"
			    "If that is not the case, consider "
			    "reporting to linux-kernel@vger.kernel.org.\n\n",
			    stats->nr_unknown_events);
	}

	if (stats->nr_unknown_id != 0) {
		ui__warning("%u samples with id not present in the header\n",
			    stats->nr_unknown_id);
	}

	if (stats->nr_invalid_chains != 0) {
		ui__warning("Found invalid callchains!\n\n"
			    "%u out of %u events were discarded for this reason.\n\n"
			    "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
			    stats->nr_invalid_chains,
			    stats->nr_events[PERF_RECORD_SAMPLE]);
	}

	if (stats->nr_unprocessable_samples != 0) {
		ui__warning("%u unprocessable samples recorded.\n"
			    "Do you have a KVM guest running and not using 'perf kvm'?\n",
			    stats->nr_unprocessable_samples);
	}

	perf_session__warn_order(session);

	events_stats__auxtrace_error_warn(stats);

	if (stats->nr_proc_map_timeout != 0) {
		ui__warning("%d map information files for pre-existing threads were\n"
			    "not processed, if there are samples for addresses they\n"
			    "will not be resolved, you may find out which are these\n"
			    "threads by running with -v and redirecting the output\n"
			    "to a file.\n"
			    "The time limit to process proc map is too short?\n"
			    "Increase it by --proc-map-timeout\n",
			    stats->nr_proc_map_timeout);
	}
}

static int perf_session__flush_thread_stack(struct thread *thread,
					    void *p __maybe_unused)
{
	return thread_stack__flush(thread);
}

static int perf_session__flush_thread_stacks(struct perf_session *session)
{
	return machines__for_each_thread(&session->machines,
					 perf_session__flush_thread_stack,
					 NULL);
}

volatile int session_done;

static int __perf_session__process_decomp_events(struct perf_session *session);

static int __perf_session__process_pipe_events(struct perf_session *session)
{
	struct ordered_events *oe = &session->ordered_events;
	struct perf_tool *tool = session->tool;
	int fd = perf_data__fd(session->data);
	union perf_event *event;
	uint32_t size, cur_size = 0;
	void *buf = NULL;
	s64 skip = 0;
	u64 head;
	ssize_t err;
	void *p;

	perf_tool__fill_defaults(tool);

	head = 0;
	cur_size = sizeof(union perf_event);

	buf = malloc(cur_size);
	if (!buf)
		return -errno;
	ordered_events__set_copy_on_queue(oe, true);
more:
	event = buf;
	err = readn(fd, event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	size = event->header.size;
	if (size < sizeof(struct perf_event_header)) {
		pr_err("bad event header size\n");
		goto out_err;
	}

	if (size > cur_size) {
		void *new = realloc(buf, size);

		if (!new) {
			pr_err("failed to allocate memory to read event\n");
			goto out_err;
		}
		buf = new;
		cur_size = size;
		event = buf;
	}
	p = event;
	p += sizeof(struct perf_event_header);

	if (size - sizeof(struct perf_event_header)) {
		err = readn(fd, p, size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if ((skip = perf_session__process_event(session, event, head)) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       head, event->header.size, event->header.type);
		err = -EINVAL;
		goto out_err;
	}

	head += size;

	if (skip > 0)
		head += skip;

	err = __perf_session__process_decomp_events(session);
	if (err)
		goto out_err;

	if (!session_done())
		goto more;
done:
	/* do the final flush for ordered samples */
	err = ordered_events__flush(oe, OE_FLUSH__FINAL);
	if (err)
		goto out_err;
	err = auxtrace__flush_events(session, tool);
	if (err)
		goto out_err;
	err = perf_session__flush_thread_stacks(session);
out_err:
	free(buf);
	if (!tool->no_warn)
		perf_session__warn_about_errors(session);
	ordered_events__free(&session->ordered_events);
	auxtrace__free_events(session);
	return err;
}

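/*
 * Return the event at 'head' if it fits completely inside the buffer.
 * Return NULL when there is not even room for the event header, which
 * for the mmap based reader means "remap the window further into the
 * file". Return 'error' when the header is readable but its declared
 * size runs past the end of the buffer: the file reader passes
 * ERR_PTR(-EINVAL) (fuzzed, truncated or still-compressed data is
 * fatal there), while the decompression path passes NULL so a partial
 * trailing event is simply retried once the next PERF_RECORD_COMPRESSED
 * chunk has been decompressed behind it.
 */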
static union perf_event *
prefetch_event(char *buf, u64 head, size_t mmap_size,
	       bool needs_swap, union perf_event *error)
{
	union perf_event *event;

	/*
	 * Ensure we have enough space remaining to read
	 * the size of the event in the headers.
	 */
	if (head + sizeof(event->header) > mmap_size)
		return NULL;

	event = (union perf_event *)(buf + head);
	if (needs_swap)
		perf_event_header__bswap(&event->header);

	if (head + event->header.size <= mmap_size)
		return event;

	/* We're not fetching the event so swap back again */
	if (needs_swap)
		perf_event_header__bswap(&event->header);

	pr_debug("%s: head=%#" PRIx64 " event->header.size=%#x, mmap_size=%#zx:"
		 " fuzzed or compressed perf.data?\n",
		 __func__, head, event->header.size, mmap_size);

	return error;
}

static union perf_event *
fetch_mmaped_event(u64 head, size_t mmap_size, char *buf, bool needs_swap)
{
	return prefetch_event(buf, head, mmap_size, needs_swap, ERR_PTR(-EINVAL));
}

static union perf_event *
fetch_decomp_event(u64 head, size_t mmap_size, char *buf, bool needs_swap)
{
	return prefetch_event(buf, head, mmap_size, needs_swap, NULL);
}

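/*
 * Deliver the events decompressed so far into session->decomp_last
 * through the regular processing path. A partial event at the tail of
 * the buffer is left in place: perf_session__process_compressed_event()
 * copies the remainder to the front of the next decompression buffer
 * before appending newly decompressed data behind it.
 */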
static int __perf_session__process_decomp_events(struct perf_session *session)
{
	s64 skip;
	u64 size, file_pos = 0;
	struct decomp *decomp = session->decomp_last;

	if (!decomp)
		return 0;

	while (decomp->head < decomp->size && !session_done()) {
		union perf_event *event = fetch_decomp_event(decomp->head, decomp->size,
							     decomp->data,
							     session->header.needs_swap);

		if (!event)
			break;

		size = event->header.size;

		if (size < sizeof(struct perf_event_header) ||
		    (skip = perf_session__process_event(session, event, file_pos)) < 0) {
			pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
			       decomp->file_pos + decomp->head, event->header.size,
			       event->header.type);
			return -EINVAL;
		}

		if (skip)
			size += skip;

		decomp->head += size;
	}

	return 0;
}

/*
 * On 64bit the whole perf.data file can be mmap()ed in one go; on 32bit
 * the address space is too small for that, so a ring of 128 32MB
 * mappings is used instead.
 */
#if BITS_PER_LONG == 64
#define MMAP_SIZE ULLONG_MAX
#define NUM_MMAPS 1
#else
#define MMAP_SIZE (32 * 1024 * 1024ULL)
#define NUM_MMAPS 128
#endif

struct reader;

typedef s64 (*reader_cb_t)(struct perf_session *session,
			   union perf_event *event,
			   u64 file_offset);

struct reader {
	int		 fd;
	u64		 data_size;
	u64		 data_offset;
	reader_cb_t	 process;
};

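/*
 * Core of the file based reader: mmap a window of the perf.data file,
 * hand every complete event to rd->process(), and slide the window
 * forward (page aligned to the current head) whenever the next event
 * header would cross the end of the mapping. Mappings live in a small
 * ring of NUM_MMAPS slots and are only unmapped once their slot comes
 * up for reuse, so recently queued events may still reference data in
 * an earlier mapping.
 */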
static int
reader__process_events(struct reader *rd, struct perf_session *session,
		       struct ui_progress *prog)
{
	u64 data_size = rd->data_size;
	u64 head, page_offset, file_offset, file_pos, size;
	int err = 0, mmap_prot, mmap_flags, map_idx = 0;
	size_t mmap_size;
	char *buf, *mmaps[NUM_MMAPS];
	union perf_event *event;
	s64 skip;

	page_offset = page_size * (rd->data_offset / page_size);
	file_offset = page_offset;
	head = rd->data_offset - page_offset;

	ui_progress__init_size(prog, data_size, "Processing events...");

	data_size += rd->data_offset;

	mmap_size = MMAP_SIZE;
	if (mmap_size > data_size) {
		mmap_size = data_size;
		session->one_mmap = true;
	}

	memset(mmaps, 0, sizeof(mmaps));

	mmap_prot = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (session->header.needs_swap) {
		mmap_prot |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, rd->fd,
		   file_offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out;
	}
	mmaps[map_idx] = buf;
	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
	file_pos = file_offset + head;
	if (session->one_mmap) {
		session->one_mmap_addr = buf;
		session->one_mmap_offset = file_offset;
	}

more:
	event = fetch_mmaped_event(head, mmap_size, buf, session->header.needs_swap);
	if (IS_ERR(event))
		return PTR_ERR(event);

	if (!event) {
		if (mmaps[map_idx]) {
			munmap(mmaps[map_idx], mmap_size);
			mmaps[map_idx] = NULL;
		}

		page_offset = page_size * (head / page_size);
		file_offset += page_offset;
		head -= page_offset;
		goto remap;
	}

	size = event->header.size;

	skip = -EINVAL;

	if (size < sizeof(struct perf_event_header) ||
	    (skip = rd->process(session, event, file_pos)) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d [%s]\n",
		       file_offset + head, event->header.size,
		       event->header.type, strerror(-skip));
		err = skip;
		goto out;
	}

	if (skip)
		size += skip;

	head += size;
	file_pos += size;

	err = __perf_session__process_decomp_events(session);
	if (err)
		goto out;

	ui_progress__update(prog, size);

	if (session_done())
		goto out;

	if (file_pos < data_size)
		goto more;

out:
	return err;
}

static s64 process_simple(struct perf_session *session,
			  union perf_event *event,
			  u64 file_offset)
{
	return perf_session__process_event(session, event, file_offset);
}

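/*
 * Non-pipe processing: run the reader over the data section described
 * by the file header, then flush everything queued along the way:
 * ordered events, auxtrace data and per-thread call stacks.
 */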
static int __perf_session__process_events(struct perf_session *session)
{
	struct reader rd = {
		.fd		= perf_data__fd(session->data),
		.data_size	= session->header.data_size,
		.data_offset	= session->header.data_offset,
		.process	= process_simple,
	};
	struct ordered_events *oe = &session->ordered_events;
	struct perf_tool *tool = session->tool;
	struct ui_progress prog;
	int err;

	perf_tool__fill_defaults(tool);

	if (rd.data_size == 0)
		return -1;

	ui_progress__init_size(&prog, rd.data_size, "Processing events...");

	err = reader__process_events(&rd, session, &prog);
	if (err)
		goto out_err;
	/* do the final flush for ordered samples */
	err = ordered_events__flush(oe, OE_FLUSH__FINAL);
	if (err)
		goto out_err;
	err = auxtrace__flush_events(session, tool);
	if (err)
		goto out_err;
	err = perf_session__flush_thread_stacks(session);
out_err:
	ui_progress__finish();
	if (!tool->no_warn)
		perf_session__warn_about_errors(session);

	/*
	 * We may be switching perf.data output; make the
	 * ordered_events queue reusable.
	 */
	ordered_events__reinit(&session->ordered_events);
	auxtrace__free_events(session);
	session->one_mmap = false;
	return err;
}

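/*
 * Top-level entry point: register the idle thread (pid 0) so samples
 * taken in idle context have a thread to hang off, then pick the pipe
 * or the mmap based reader depending on the input backing this session.
 */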
int perf_session__process_events(struct perf_session *session)
{
	if (perf_session__register_idle_thread(session) < 0)
		return -ENOMEM;

	if (perf_data__is_pipe(session->data))
		return __perf_session__process_pipe_events(session);

	return __perf_session__process_events(session);
}

bool perf_session__has_traces(struct perf_session *session, const char *msg)
{
	struct evsel *evsel;

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT)
			return true;
	}

	pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
	return false;
}

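/*
 * Remember the symbol used as the kallsyms relocation reference
 * (e.g. "_text") together with its address on the kernel map, so that
 * addresses from a relocated kernel can be adjusted later. A ']' in the
 * name and anything after it are stripped.
 */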
int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name, u64 addr)
{
	char *bracket;
	struct ref_reloc_sym *ref;
	struct kmap *kmap;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	kmap = map__kmap(map);
	if (kmap)
		kmap->ref_reloc_sym = ref;

	return 0;
}

size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
{
	return machines__fprintf_dsos(&session->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
					  bool (skip)(struct dso *dso, int parm), int parm)
{
	return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
}

size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
{
	size_t ret;
	const char *msg = "";

	if (perf_header__has_feat(&session->header, HEADER_AUXTRACE))
		msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)";

	ret = fprintf(fp, "\nAggregated stats:%s\n", msg);

	ret += events_stats__fprintf(&session->evlist->stats, fp);
	return ret;
}

size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
{
	/*
	 * FIXME: Here we have to actually print all the machines in this
	 * session, not just the host one.
	 */
	return machine__fprintf(&session->machines.host, fp);
}

struct evsel *perf_session__find_first_evtype(struct perf_session *session,
					      unsigned int type)
{
	struct evsel *pos;

	evlist__for_each_entry(session->evlist, pos) {
		if (pos->core.attr.type == type)
			return pos;
	}
	return NULL;
}

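/*
 * Fill 'cpu_bitmap' from a user supplied CPU list string (e.g. "0-3,8"),
 * after checking that every event type present in the file actually
 * recorded PERF_SAMPLE_CPU, since filtering samples by CPU is impossible
 * otherwise.
 *
 * A minimal usage sketch; callers typically size the bitmap against
 * MAX_NR_CPUS:
 *
 *	unsigned long cpu_bitmap[BITS_TO_LONGS(MAX_NR_CPUS)] = { 0 };
 *
 *	if (perf_session__cpu_bitmap(session, "0-3", cpu_bitmap) < 0)
 *		return -1;
 *
 * Afterwards, test_bit(sample->cpu, cpu_bitmap) tells whether a sample's
 * CPU was requested.
 */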
int perf_session__cpu_bitmap(struct perf_session *session,
			     const char *cpu_list, unsigned long *cpu_bitmap)
{
	int i, err = -1;
	struct perf_cpu_map *map;
	int nr_cpus = min(session->header.env.nr_cpus_online, MAX_NR_CPUS);

	for (i = 0; i < PERF_TYPE_MAX; ++i) {
		struct evsel *evsel;

		evsel = perf_session__find_first_evtype(session, i);
		if (!evsel)
			continue;

		if (!(evsel->core.attr.sample_type & PERF_SAMPLE_CPU)) {
			pr_err("File does not contain CPU events. "
			       "Remove -C option to proceed.\n");
			return -1;
		}
	}

	map = perf_cpu_map__new(cpu_list);
	if (map == NULL) {
		pr_err("Invalid cpu_list\n");
		return -1;
	}

	for (i = 0; i < map->nr; i++) {
		int cpu = map->map[i];

		if (cpu >= nr_cpus) {
			pr_err("Requested CPU %d too large. "
			       "Consider raising MAX_NR_CPUS\n", cpu);
			goto out_delete_map;
		}

		set_bit(cpu, cpu_bitmap);
	}

	err = 0;

out_delete_map:
	perf_cpu_map__put(map);
	return err;
}

void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
				bool full)
{
	if (session == NULL || fp == NULL)
		return;

	fprintf(fp, "# ========\n");
	perf_header__fprintf_info(session, fp, full);
	fprintf(fp, "# ========\n#\n");
}

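/*
 * A PERF_RECORD_ID_INDEX event maps each sample id to the mmap index,
 * cpu and tid it was recorded with. Validate 'nr' against what actually
 * fits in the event, then copy each entry into the matching
 * perf_sample_id so later samples can be attributed directly.
 */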
int perf_event__process_id_index(struct perf_session *session,
				 union perf_event *event)
{
	struct evlist *evlist = session->evlist;
	struct perf_record_id_index *ie = &event->id_index;
	size_t i, nr, max_nr;

	max_nr = (ie->header.size - sizeof(struct perf_record_id_index)) /
		 sizeof(struct id_index_entry);
	nr = ie->nr;
	if (nr > max_nr)
		return -EINVAL;

	if (dump_trace)
		fprintf(stdout, " nr: %zu\n", nr);

	for (i = 0; i < nr; i++) {
		struct id_index_entry *e = &ie->entries[i];
		struct perf_sample_id *sid;

		if (dump_trace) {
			fprintf(stdout, " ... id: %"PRI_lu64, e->id);
			fprintf(stdout, " idx: %"PRI_lu64, e->idx);
			fprintf(stdout, " cpu: %"PRI_ld64, e->cpu);
			fprintf(stdout, " tid: %"PRI_ld64"\n", e->tid);
		}

		sid = perf_evlist__id2sid(evlist, e->id);
		if (!sid)
			return -ENOENT;
		sid->idx = e->idx;
		sid->cpu = e->cpu;
		sid->tid = e->tid;
	}
	return 0;
}