// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <api/fs/fs.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <perf/cpumap.h>

#include "map_symbol.h"
#include "branch.h"
#include "debug.h"
#include "evlist.h"
#include "evsel.h"
#include "memswap.h"
#include "map.h"
#include "symbol.h"
#include "session.h"
#include "tool.h"
#include "perf_regs.h"
#include "asm/bug.h"
#include "auxtrace.h"
#include "thread.h"
#include "thread-stack.h"
#include "sample-raw.h"
#include "stat.h"
#include "ui/progress.h"
#include "../perf.h"
#include "arch/common.h"
#include <internal/lib.h>
#ifdef HAVE_ZSTD_SUPPORT
static int perf_session__process_compressed_event(struct perf_session *session,
						  union perf_event *event, u64 file_offset)
{
	void *src;
	size_t decomp_size, src_size;
	u64 decomp_last_rem = 0;
	size_t mmap_len, decomp_len = session->header.env.comp_mmap_len;
	struct decomp *decomp, *decomp_last = session->decomp_last;

	/* Carry over any bytes left unconsumed in the previous buffer. */
	if (decomp_last) {
		decomp_last_rem = decomp_last->size - decomp_last->head;
		decomp_len += decomp_last_rem;
	}

	mmap_len = sizeof(struct decomp) + decomp_len;
	decomp = mmap(NULL, mmap_len, PROT_READ|PROT_WRITE,
		      MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
	if (decomp == MAP_FAILED) {
		pr_err("Couldn't allocate memory for decompression\n");
		return -1;
	}

	decomp->file_pos = file_offset;
	decomp->mmap_len = mmap_len;
	decomp->head = 0;

	if (decomp_last_rem) {
		memcpy(decomp->data, &(decomp_last->data[decomp_last->head]), decomp_last_rem);
		decomp->size = decomp_last_rem;
	}

	src = (void *)event + sizeof(struct perf_record_compressed);
	src_size = event->pack.header.size - sizeof(struct perf_record_compressed);

	decomp_size = zstd_decompress_stream(&(session->zstd_data), src, src_size,
				&(decomp->data[decomp_last_rem]), decomp_len - decomp_last_rem);
	if (!decomp_size) {
		munmap(decomp, mmap_len);
		pr_err("Couldn't decompress data\n");
		return -1;
	}

	decomp->size += decomp_size;

	/* Link the new buffer onto the session's chain of decompressed buffers. */
	if (session->decomp == NULL) {
		session->decomp = decomp;
		session->decomp_last = decomp;
	} else {
		session->decomp_last->next = decomp;
		session->decomp_last = decomp;
	}

	pr_debug("decomp (B): %zu to %zu\n", src_size, decomp_size);

	return 0;
}
#else /* !HAVE_ZSTD_SUPPORT */
#define perf_session__process_compressed_event perf_session__process_compressed_event_stub
#endif

static int perf_session__deliver_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_tool *tool,
				       u64 file_offset);

static int perf_session__open(struct perf_session *session)
{
	struct perf_data *data = session->data;

	if (perf_session__read_header(session) < 0) {
		pr_err("incompatible file format (rerun with -v to learn more)\n");
		return -1;
	}

	if (perf_data__is_pipe(data))
		return 0;

	if (perf_header__has_feat(&session->header, HEADER_STAT))
		return 0;

	if (!perf_evlist__valid_sample_type(session->evlist)) {
		pr_err("non matching sample_type\n");
		return -1;
	}

	if (!perf_evlist__valid_sample_id_all(session->evlist)) {
		pr_err("non matching sample_id_all\n");
		return -1;
	}

	if (!perf_evlist__valid_read_format(session->evlist)) {
		pr_err("non matching read_format\n");
		return -1;
	}

	return 0;
}

void perf_session__set_id_hdr_size(struct perf_session *session)
{
	u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);

	machines__set_id_hdr_size(&session->machines, id_hdr_size);
}

int perf_session__create_kernel_maps(struct perf_session *session)
{
	int ret = machine__create_kernel_maps(&session->machines.host);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&session->machines);
	return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *session)
{
	machines__destroy_kernel_maps(&session->machines);
}

static bool perf_session__has_comm_exec(struct perf_session *session)
{
	struct evsel *evsel;

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->core.attr.comm_exec)
			return true;
	}

	return false;
}

static void perf_session__set_comm_exec(struct perf_session *session)
{
	bool comm_exec = perf_session__has_comm_exec(session);

	machines__set_comm_exec(&session->machines, comm_exec);
}

static int ordered_events__deliver_event(struct ordered_events *oe,
					 struct ordered_event *event)
{
	struct perf_session *session = container_of(oe, struct perf_session,
						    ordered_events);

	return perf_session__deliver_event(session, event->event,
					   session->tool, event->file_offset);
}

struct perf_session *perf_session__new(struct perf_data *data,
				       bool repipe, struct perf_tool *tool)
{
	int ret = -ENOMEM;
	struct perf_session *session = zalloc(sizeof(*session));

	if (!session)
		goto out;

	session->repipe = repipe;
	session->tool = tool;
	INIT_LIST_HEAD(&session->auxtrace_index);
	machines__init(&session->machines);
	ordered_events__init(&session->ordered_events,
			     ordered_events__deliver_event, NULL);

	perf_env__init(&session->header.env);
	if (data) {
		ret = perf_data__open(data);
		if (ret < 0)
			goto out_delete;

		session->data = data;

		if (perf_data__is_read(data)) {
			ret = perf_session__open(session);
			if (ret < 0)
				goto out_delete;

			/*
			 * Set session attributes that are present in perf.data
			 * but not in pipe-mode.
			 */
			if (!data->is_pipe) {
				perf_session__set_id_hdr_size(session);
				perf_session__set_comm_exec(session);
			}

			perf_evlist__init_trace_event_sample_raw(session->evlist);

			/* Open the directory data. */
			if (data->is_dir) {
				ret = perf_data__open_dir(data);
				if (ret)
					goto out_delete;
			}

			if (!symbol_conf.kallsyms_name &&
			    !symbol_conf.vmlinux_name)
				symbol_conf.kallsyms_name = perf_data__kallsyms_name(data);
		}
	} else {
		session->machines.host.env = &perf_env;
	}

	session->machines.host.single_address_space =
		perf_env__single_address_space(session->machines.host.env);

	if (!data || perf_data__is_write(data)) {
		/*
		 * In O_RDONLY mode this will be performed in perf_session__new
		 * after reading the evlist inside perf_session__open.
		 */
		if (perf_session__create_kernel_maps(session) < 0)
			pr_warning("Cannot read kernel map\n");
	}

	/*
	 * In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is
	 * processed, so perf_evlist__sample_id_all is not meaningful here.
	 */
	if ((!data || !data->is_pipe) && tool && tool->ordering_requires_timestamps &&
	    tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		tool->ordered_events = false;
	}

	return session;

 out_delete:
	perf_session__delete(session);
 out:
	return ERR_PTR(ret);
}

static void perf_session__delete_threads(struct perf_session *session)
{
	machine__delete_threads(&session->machines.host);
}

static void perf_session__release_decomp_events(struct perf_session *session)
{
	struct decomp *next, *decomp;
	size_t mmap_len;

	next = session->decomp;
	do {
		decomp = next;
		if (decomp == NULL)
			break;
		next = decomp->next;
		mmap_len = decomp->mmap_len;
		munmap(decomp, mmap_len);
	} while (1);
}

void perf_session__delete(struct perf_session *session)
{
	if (session == NULL)
		return;
	auxtrace__free(session);
	auxtrace_index__free(&session->auxtrace_index);
	perf_session__destroy_kernel_maps(session);
	perf_session__delete_threads(session);
	perf_session__release_decomp_events(session);
	perf_env__exit(&session->header.env);
	machines__exit(&session->machines);
	if (session->data)
		perf_data__close(session->data);
	free(session);
}

static int process_event_synth_tracing_data_stub(struct perf_session *session
						 __maybe_unused,
						 union perf_event *event
						 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
					 union perf_event *event __maybe_unused,
					 struct evlist **pevlist
					 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_event_update_stub(struct perf_tool *tool __maybe_unused,
						 union perf_event *event __maybe_unused,
						 struct evlist **pevlist
						 __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_event_update(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
				     union perf_event *event __maybe_unused,
				     struct perf_sample *sample __maybe_unused,
				     struct evsel *evsel __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_stub(struct perf_tool *tool __maybe_unused,
			      union perf_event *event __maybe_unused,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
				       union perf_event *event __maybe_unused,
				       struct ordered_events *oe __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event,
				  struct ordered_events *oe);

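/*
 * Read and discard n bytes from fd. Pipe input cannot seek, so this is how
 * payloads that the tool does not handle are skipped in pipe mode.
 */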
static int skipn(int fd, off_t n)
{
	char buf[4096];
	ssize_t ret;

	while (n > 0) {
		ret = read(fd, buf, min(n, (off_t)sizeof(buf)));
		if (ret <= 0)
			return ret;
		n -= ret;
	}

	return 0;
}

static s64 process_event_auxtrace_stub(struct perf_session *session,
				       union perf_event *event)
{
	dump_printf(": unhandled!\n");
	if (perf_data__is_pipe(session->data))
		skipn(perf_data__fd(session->data), event->auxtrace.size);
	return event->auxtrace.size;
}

static int process_event_op2_stub(struct perf_session *session __maybe_unused,
				  union perf_event *event __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static
int process_event_thread_map_stub(struct perf_session *session __maybe_unused,
				  union perf_event *event __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_thread_map(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static
int process_event_cpu_map_stub(struct perf_session *session __maybe_unused,
			       union perf_event *event __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_cpu_map(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static
int process_event_stat_config_stub(struct perf_session *session __maybe_unused,
				   union perf_event *event __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_stat_config(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int process_stat_stub(struct perf_session *perf_session __maybe_unused,
			     union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_stat(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int process_stat_round_stub(struct perf_session *perf_session __maybe_unused,
				   union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_stat_round(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int perf_session__process_compressed_event_stub(struct perf_session *session __maybe_unused,
						       union perf_event *event __maybe_unused,
						       u64 file_offset __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

void perf_tool__fill_defaults(struct perf_tool *tool)
{
	if (tool->sample == NULL)
		tool->sample = process_event_sample_stub;
	if (tool->mmap == NULL)
		tool->mmap = process_event_stub;
	if (tool->mmap2 == NULL)
		tool->mmap2 = process_event_stub;
	if (tool->comm == NULL)
		tool->comm = process_event_stub;
	if (tool->namespaces == NULL)
		tool->namespaces = process_event_stub;
	if (tool->cgroup == NULL)
		tool->cgroup = process_event_stub;
	if (tool->fork == NULL)
		tool->fork = process_event_stub;
	if (tool->exit == NULL)
		tool->exit = process_event_stub;
	if (tool->lost == NULL)
		tool->lost = perf_event__process_lost;
	if (tool->lost_samples == NULL)
		tool->lost_samples = perf_event__process_lost_samples;
	if (tool->aux == NULL)
		tool->aux = perf_event__process_aux;
	if (tool->itrace_start == NULL)
		tool->itrace_start = perf_event__process_itrace_start;
	if (tool->context_switch == NULL)
		tool->context_switch = perf_event__process_switch;
	if (tool->ksymbol == NULL)
		tool->ksymbol = perf_event__process_ksymbol;
	if (tool->bpf == NULL)
		tool->bpf = perf_event__process_bpf;
	if (tool->read == NULL)
		tool->read = process_event_sample_stub;
	if (tool->throttle == NULL)
		tool->throttle = process_event_stub;
	if (tool->unthrottle == NULL)
		tool->unthrottle = process_event_stub;
	if (tool->attr == NULL)
		tool->attr = process_event_synth_attr_stub;
	if (tool->event_update == NULL)
		tool->event_update = process_event_synth_event_update_stub;
	if (tool->tracing_data == NULL)
		tool->tracing_data = process_event_synth_tracing_data_stub;
	if (tool->build_id == NULL)
		tool->build_id = process_event_op2_stub;
	if (tool->finished_round == NULL) {
		if (tool->ordered_events)
			tool->finished_round = process_finished_round;
		else
			tool->finished_round = process_finished_round_stub;
	}
	if (tool->id_index == NULL)
		tool->id_index = process_event_op2_stub;
	if (tool->auxtrace_info == NULL)
		tool->auxtrace_info = process_event_op2_stub;
	if (tool->auxtrace == NULL)
		tool->auxtrace = process_event_auxtrace_stub;
	if (tool->auxtrace_error == NULL)
		tool->auxtrace_error = process_event_op2_stub;
	if (tool->thread_map == NULL)
		tool->thread_map = process_event_thread_map_stub;
	if (tool->cpu_map == NULL)
		tool->cpu_map = process_event_cpu_map_stub;
	if (tool->stat_config == NULL)
		tool->stat_config = process_event_stat_config_stub;
	if (tool->stat == NULL)
		tool->stat = process_stat_stub;
	if (tool->stat_round == NULL)
		tool->stat_round = process_stat_round_stub;
	if (tool->time_conv == NULL)
		tool->time_conv = process_event_op2_stub;
	if (tool->feature == NULL)
		tool->feature = process_event_op2_stub;
	if (tool->compressed == NULL)
		tool->compressed = perf_session__process_compressed_event;
}

static void swap_sample_id_all(union perf_event *event, void *data)
{
	void *end = (void *) event + event->header.size;
	int size = end - data;

	BUG_ON(size % sizeof(u64));
	mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
				   bool sample_id_all __maybe_unused)
{
	struct perf_event_header *hdr = &event->header;
	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
	event->comm.pid = bswap_32(event->comm.pid);
	event->comm.tid = bswap_32(event->comm.tid);

	if (sample_id_all) {
		void *data = &event->comm.comm;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap_swap(union perf_event *event,
				  bool sample_id_all)
{
	event->mmap.pid = bswap_32(event->mmap.pid);
	event->mmap.tid = bswap_32(event->mmap.tid);
	event->mmap.start = bswap_64(event->mmap.start);
	event->mmap.len = bswap_64(event->mmap.len);
	event->mmap.pgoff = bswap_64(event->mmap.pgoff);

	if (sample_id_all) {
		void *data = &event->mmap.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap2_swap(union perf_event *event,
				   bool sample_id_all)
{
	event->mmap2.pid = bswap_32(event->mmap2.pid);
	event->mmap2.tid = bswap_32(event->mmap2.tid);
	event->mmap2.start = bswap_64(event->mmap2.start);
	event->mmap2.len = bswap_64(event->mmap2.len);
	event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
	event->mmap2.maj = bswap_32(event->mmap2.maj);
	event->mmap2.min = bswap_32(event->mmap2.min);
	event->mmap2.ino = bswap_64(event->mmap2.ino);

	if (sample_id_all) {
		void *data = &event->mmap2.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
	event->fork.pid = bswap_32(event->fork.pid);
	event->fork.tid = bswap_32(event->fork.tid);
	event->fork.ppid = bswap_32(event->fork.ppid);
	event->fork.ptid = bswap_32(event->fork.ptid);
	event->fork.time = bswap_64(event->fork.time);

	if (sample_id_all)
		swap_sample_id_all(event, &event->fork + 1);
}

static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
	event->read.pid = bswap_32(event->read.pid);
	event->read.tid = bswap_32(event->read.tid);
	event->read.value = bswap_64(event->read.value);
	event->read.time_enabled = bswap_64(event->read.time_enabled);
	event->read.time_running = bswap_64(event->read.time_running);
	event->read.id = bswap_64(event->read.id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->read + 1);
}

static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
{
	event->aux.aux_offset = bswap_64(event->aux.aux_offset);
	event->aux.aux_size = bswap_64(event->aux.aux_size);
	event->aux.flags = bswap_64(event->aux.flags);

	if (sample_id_all)
		swap_sample_id_all(event, &event->aux + 1);
}

static void perf_event__itrace_start_swap(union perf_event *event,
					  bool sample_id_all)
{
	event->itrace_start.pid = bswap_32(event->itrace_start.pid);
	event->itrace_start.tid = bswap_32(event->itrace_start.tid);

	if (sample_id_all)
		swap_sample_id_all(event, &event->itrace_start + 1);
}

static void perf_event__switch_swap(union perf_event *event, bool sample_id_all)
{
	if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
		event->context_switch.next_prev_pid =
				bswap_32(event->context_switch.next_prev_pid);
		event->context_switch.next_prev_tid =
				bswap_32(event->context_switch.next_prev_tid);
	}

	if (sample_id_all)
		swap_sample_id_all(event, &event->context_switch + 1);
}

static void perf_event__throttle_swap(union perf_event *event,
				      bool sample_id_all)
{
	event->throttle.time = bswap_64(event->throttle.time);
	event->throttle.id = bswap_64(event->throttle.id);
	event->throttle.stream_id = bswap_64(event->throttle.stream_id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->throttle + 1);
}

static void perf_event__namespaces_swap(union perf_event *event,
					bool sample_id_all)
{
	u64 i;

	event->namespaces.pid = bswap_32(event->namespaces.pid);
	event->namespaces.tid = bswap_32(event->namespaces.tid);
	event->namespaces.nr_namespaces = bswap_64(event->namespaces.nr_namespaces);

	for (i = 0; i < event->namespaces.nr_namespaces; i++) {
		struct perf_ns_link_info *ns = &event->namespaces.link_info[i];

		ns->dev = bswap_64(ns->dev);
		ns->ino = bswap_64(ns->ino);
	}

	if (sample_id_all)
		swap_sample_id_all(event, &event->namespaces.link_info[i]);
}

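/*
 * Reverse the bit order within a byte: swap the nibbles, then the bit pairs
 * within each nibble, then the adjacent bits.
 */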
static u8 revbyte(u8 b)
{
	int rev = (b >> 4) | ((b & 0xf) << 4);
	rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
	rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
	return (u8) rev;
}

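/*
 * XXX this is a hack to carry flag bitfields through the endian village:
 * the in-memory layout of bitfields flips between big- and little-endian
 * ABIs, so reversing the bit order of each byte puts the flags back where
 * the parsing code expects them.
 */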
static void swap_bitfield(u8 *p, unsigned len)
{
	unsigned i;

	for (i = 0; i < len; i++) {
		*p = revbyte(*p);
		p++;
	}
}

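/* exported for swapping attributes in file header */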
void perf_event__attr_swap(struct perf_event_attr *attr)
{
	attr->type = bswap_32(attr->type);
	attr->size = bswap_32(attr->size);

#define bswap_safe(f, n) 					\
	(attr->size > (offsetof(struct perf_event_attr, f) + 	\
		       sizeof(attr->f) * (n)))
#define bswap_field(f, sz) 			\
do { 						\
	if (bswap_safe(f, 0))			\
		attr->f = bswap_##sz(attr->f);	\
} while(0)
#define bswap_field_16(f) bswap_field(f, 16)
#define bswap_field_32(f) bswap_field(f, 32)
#define bswap_field_64(f) bswap_field(f, 64)

	bswap_field_64(config);
	bswap_field_64(sample_period);
	bswap_field_64(sample_type);
	bswap_field_64(read_format);
	bswap_field_32(wakeup_events);
	bswap_field_32(bp_type);
	bswap_field_64(bp_addr);
	bswap_field_64(bp_len);
	bswap_field_64(branch_sample_type);
	bswap_field_64(sample_regs_user);
	bswap_field_32(sample_stack_user);
	bswap_field_32(aux_watermark);
	bswap_field_16(sample_max_stack);
	bswap_field_32(aux_sample_size);

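	/*
	 * After read_format the struct is all bitfields, which cannot be
	 * swapped field by field, so carry them through swap_bitfield()
	 * instead.
	 */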
	if (bswap_safe(read_format, 1))
		swap_bitfield((u8 *) (&attr->read_format + 1),
			      sizeof(u64));
#undef bswap_field_64
#undef bswap_field_32
#undef bswap_field
#undef bswap_safe
}

static void perf_event__hdr_attr_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	size_t size;

	perf_event__attr_swap(&event->attr.attr);

	size = event->header.size;
	size -= (void *)&event->attr.id - (void *)event;
	mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_update_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->event_update.type = bswap_64(event->event_update.type);
	event->event_update.id = bswap_64(event->event_update.id);
}

static void perf_event__event_type_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->event_type.event_type.event_id =
		bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->tracing_data.size = bswap_32(event->tracing_data.size);
}

static void perf_event__auxtrace_info_swap(union perf_event *event,
					   bool sample_id_all __maybe_unused)
{
	size_t size;

	event->auxtrace_info.type = bswap_32(event->auxtrace_info.type);

	size = event->header.size;
	size -= (void *)&event->auxtrace_info.priv - (void *)event;
	mem_bswap_64(event->auxtrace_info.priv, size);
}

static void perf_event__auxtrace_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	event->auxtrace.size = bswap_64(event->auxtrace.size);
	event->auxtrace.offset = bswap_64(event->auxtrace.offset);
	event->auxtrace.reference = bswap_64(event->auxtrace.reference);
	event->auxtrace.idx = bswap_32(event->auxtrace.idx);
	event->auxtrace.tid = bswap_32(event->auxtrace.tid);
	event->auxtrace.cpu = bswap_32(event->auxtrace.cpu);
}

static void perf_event__auxtrace_error_swap(union perf_event *event,
					    bool sample_id_all __maybe_unused)
{
	event->auxtrace_error.type = bswap_32(event->auxtrace_error.type);
	event->auxtrace_error.code = bswap_32(event->auxtrace_error.code);
	event->auxtrace_error.cpu = bswap_32(event->auxtrace_error.cpu);
	event->auxtrace_error.pid = bswap_32(event->auxtrace_error.pid);
	event->auxtrace_error.tid = bswap_32(event->auxtrace_error.tid);
	event->auxtrace_error.fmt = bswap_32(event->auxtrace_error.fmt);
	event->auxtrace_error.ip = bswap_64(event->auxtrace_error.ip);
	if (event->auxtrace_error.fmt)
		event->auxtrace_error.time = bswap_64(event->auxtrace_error.time);
}

static void perf_event__thread_map_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	unsigned i;

	event->thread_map.nr = bswap_64(event->thread_map.nr);

	for (i = 0; i < event->thread_map.nr; i++)
		event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid);
}

static void perf_event__cpu_map_swap(union perf_event *event,
				     bool sample_id_all __maybe_unused)
{
	struct perf_record_cpu_map_data *data = &event->cpu_map.data;
	struct cpu_map_entries *cpus;
	struct perf_record_record_cpu_map *mask;
	unsigned i;

	data->type = bswap_64(data->type);

	switch (data->type) {
	case PERF_CPU_MAP__CPUS:
		cpus = (struct cpu_map_entries *)data->data;

		cpus->nr = bswap_16(cpus->nr);

		for (i = 0; i < cpus->nr; i++)
			cpus->cpu[i] = bswap_16(cpus->cpu[i]);
		break;
	case PERF_CPU_MAP__MASK:
		mask = (struct perf_record_record_cpu_map *)data->data;

		mask->nr = bswap_16(mask->nr);
		mask->long_size = bswap_16(mask->long_size);

		switch (mask->long_size) {
		case 4: mem_bswap_32(&mask->mask, mask->nr); break;
		case 8: mem_bswap_64(&mask->mask, mask->nr); break;
		default:
			pr_err("cpu_map swap: unsupported long size\n");
		}
	default:
		break;
	}
}

static void perf_event__stat_config_swap(union perf_event *event,
					 bool sample_id_all __maybe_unused)
{
	u64 size;

	size = event->stat_config.nr * sizeof(event->stat_config.data[0]);
	size += 1; /* nr item itself */
	mem_bswap_64(&event->stat_config.nr, size);
}

static void perf_event__stat_swap(union perf_event *event,
				  bool sample_id_all __maybe_unused)
{
	event->stat.id = bswap_64(event->stat.id);
	event->stat.thread = bswap_32(event->stat.thread);
	event->stat.cpu = bswap_32(event->stat.cpu);
	event->stat.val = bswap_64(event->stat.val);
	event->stat.ena = bswap_64(event->stat.ena);
	event->stat.run = bswap_64(event->stat.run);
}

static void perf_event__stat_round_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->stat_round.type = bswap_64(event->stat_round.type);
	event->stat_round.time = bswap_64(event->stat_round.time);
}

typedef void (*perf_event__swap_op)(union perf_event *event,
				    bool sample_id_all);

static perf_event__swap_op perf_event__swap_ops[] = {
	[PERF_RECORD_MMAP] = perf_event__mmap_swap,
	[PERF_RECORD_MMAP2] = perf_event__mmap2_swap,
	[PERF_RECORD_COMM] = perf_event__comm_swap,
	[PERF_RECORD_FORK] = perf_event__task_swap,
	[PERF_RECORD_EXIT] = perf_event__task_swap,
	[PERF_RECORD_LOST] = perf_event__all64_swap,
	[PERF_RECORD_READ] = perf_event__read_swap,
	[PERF_RECORD_THROTTLE] = perf_event__throttle_swap,
	[PERF_RECORD_UNTHROTTLE] = perf_event__throttle_swap,
	[PERF_RECORD_SAMPLE] = perf_event__all64_swap,
	[PERF_RECORD_AUX] = perf_event__aux_swap,
	[PERF_RECORD_ITRACE_START] = perf_event__itrace_start_swap,
	[PERF_RECORD_LOST_SAMPLES] = perf_event__all64_swap,
	[PERF_RECORD_SWITCH] = perf_event__switch_swap,
	[PERF_RECORD_SWITCH_CPU_WIDE] = perf_event__switch_swap,
	[PERF_RECORD_NAMESPACES] = perf_event__namespaces_swap,
	[PERF_RECORD_HEADER_ATTR] = perf_event__hdr_attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID] = NULL,
	[PERF_RECORD_ID_INDEX] = perf_event__all64_swap,
	[PERF_RECORD_AUXTRACE_INFO] = perf_event__auxtrace_info_swap,
	[PERF_RECORD_AUXTRACE] = perf_event__auxtrace_swap,
	[PERF_RECORD_AUXTRACE_ERROR] = perf_event__auxtrace_error_swap,
	[PERF_RECORD_THREAD_MAP] = perf_event__thread_map_swap,
	[PERF_RECORD_CPU_MAP] = perf_event__cpu_map_swap,
	[PERF_RECORD_STAT_CONFIG] = perf_event__stat_config_swap,
	[PERF_RECORD_STAT] = perf_event__stat_swap,
	[PERF_RECORD_STAT_ROUND] = perf_event__stat_round_swap,
	[PERF_RECORD_EVENT_UPDATE] = perf_event__event_update_swap,
	[PERF_RECORD_TIME_CONV] = perf_event__all64_swap,
	[PERF_RECORD_HEADER_MAX] = NULL,
};

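/*
 * When perf record finishes a pass on every buffer, it records this pseudo
 * event. We record the max timestamp t found in the pass n. Assuming these
 * timestamps are monotonic across cpus, we know that if a buffer still has
 * events with timestamps below t, they will be all available and then read
 * in the pass n + 1. Hence when we start to read the pass n + 2, we can
 * safely flush every event with a timestamp below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush every event below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush every event below timestamp 7
 *      etc...
 */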
static int process_finished_round(struct perf_tool *tool __maybe_unused,
				  union perf_event *event __maybe_unused,
				  struct ordered_events *oe)
{
	if (dump_trace)
		fprintf(stdout, "\n");
	return ordered_events__flush(oe, OE_FLUSH__ROUND);
}

int perf_session__queue_event(struct perf_session *s, union perf_event *event,
			      u64 timestamp, u64 file_offset)
{
	return ordered_events__queue(&s->ordered_events, event, timestamp, file_offset);
}

static void callchain__lbr_callstack_printf(struct perf_sample *sample)
{
	struct ip_callchain *callchain = sample->callchain;
	struct branch_stack *lbr_stack = sample->branch_stack;
	struct branch_entry *entries = perf_sample__branch_entries(sample);
	u64 kernel_callchain_nr = callchain->nr;
	unsigned int i;

	for (i = 0; i < kernel_callchain_nr; i++) {
		if (callchain->ips[i] == PERF_CONTEXT_USER)
			break;
	}

	if ((i != kernel_callchain_nr) && lbr_stack->nr) {
		u64 total_nr;
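		/*
		 * The LBR callstack can only capture the user call chain;
		 * i is the kernel call chain length, 1 is PERF_CONTEXT_USER.
		 *
		 * The user call chain is stored in LBR registers, which come
		 * in pairs: the caller sits in the "from" register and the
		 * callee in the "to" register. For a call stack
		 * "A"->"B"->"C"->"D" the LBR records "C"->"D", "B"->"C",
		 * "A"->"B", so only the first "to" entry and all "from"
		 * entries are needed to reconstruct the whole stack.
		 */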
		total_nr = i + 1 + lbr_stack->nr + 1;
		kernel_callchain_nr = i + 1;

		printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr);

		for (i = 0; i < kernel_callchain_nr; i++)
			printf("..... %2d: %016" PRIx64 "\n",
			       i, callchain->ips[i]);

		printf("..... %2d: %016" PRIx64 "\n",
		       (int)(kernel_callchain_nr), entries[0].to);
		for (i = 0; i < lbr_stack->nr; i++)
			printf("..... %2d: %016" PRIx64 "\n",
			       (int)(i + kernel_callchain_nr + 1), entries[i].from);
	}
}

static void callchain__printf(struct evsel *evsel,
			      struct perf_sample *sample)
{
	unsigned int i;
	struct ip_callchain *callchain = sample->callchain;

	if (evsel__has_branch_callstack(evsel))
		callchain__lbr_callstack_printf(sample);

	printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);

	for (i = 0; i < callchain->nr; i++)
		printf("..... %2d: %016" PRIx64 "\n",
		       i, callchain->ips[i]);
}

static void branch_stack__printf(struct perf_sample *sample, bool callstack)
{
	struct branch_entry *entries = perf_sample__branch_entries(sample);
	uint64_t i;

	printf("%s: nr:%" PRIu64 "\n",
	       !callstack ? "... branch stack" : "... branch callstack",
	       sample->branch_stack->nr);

	for (i = 0; i < sample->branch_stack->nr; i++) {
		struct branch_entry *e = &entries[i];

		if (!callstack) {
			printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x\n",
			       i, e->from, e->to,
			       (unsigned short)e->flags.cycles,
			       e->flags.mispred ? "M" : " ",
			       e->flags.predicted ? "P" : " ",
			       e->flags.abort ? "A" : " ",
			       e->flags.in_tx ? "T" : " ",
			       (unsigned)e->flags.reserved);
		} else {
			printf("..... %2"PRIu64": %016" PRIx64 "\n",
			       i, i > 0 ? e->from : e->to);
		}
	}
}

static void regs_dump__printf(u64 mask, u64 *regs)
{
	unsigned rid, i = 0;

	for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
		u64 val = regs[i++];

		printf(".... %-5s 0x%016" PRIx64 "\n",
		       perf_reg_name(rid), val);
	}
}

static const char *regs_abi[] = {
	[PERF_SAMPLE_REGS_ABI_NONE] = "none",
	[PERF_SAMPLE_REGS_ABI_32] = "32-bit",
	[PERF_SAMPLE_REGS_ABI_64] = "64-bit",
};

static inline const char *regs_dump_abi(struct regs_dump *d)
{
	if (d->abi > PERF_SAMPLE_REGS_ABI_64)
		return "unknown";

	return regs_abi[d->abi];
}

static void regs__printf(const char *type, struct regs_dump *regs)
{
	u64 mask = regs->mask;

	printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n",
	       type,
	       mask,
	       regs_dump_abi(regs));

	regs_dump__printf(mask, regs->regs);
}

static void regs_user__printf(struct perf_sample *sample)
{
	struct regs_dump *user_regs = &sample->user_regs;

	if (user_regs->regs)
		regs__printf("user", user_regs);
}

static void regs_intr__printf(struct perf_sample *sample)
{
	struct regs_dump *intr_regs = &sample->intr_regs;

	if (intr_regs->regs)
		regs__printf("intr", intr_regs);
}

static void stack_user__printf(struct stack_dump *dump)
{
	printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
	       dump->size, dump->offset);
}

static void perf_evlist__print_tstamp(struct evlist *evlist,
				      union perf_event *event,
				      struct perf_sample *sample)
{
	u64 sample_type = __perf_evlist__combined_sample_type(evlist);

	if (event->header.type != PERF_RECORD_SAMPLE &&
	    !perf_evlist__sample_id_all(evlist)) {
		fputs("-1 -1 ", stdout);
		return;
	}

	if ((sample_type & PERF_SAMPLE_CPU))
		printf("%u ", sample->cpu);

	if (sample_type & PERF_SAMPLE_TIME)
		printf("%" PRIu64 " ", sample->time);
}

static void sample_read__printf(struct perf_sample *sample, u64 read_format)
{
	printf("... sample_read:\n");

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("...... time enabled %016" PRIx64 "\n",
		       sample->read.time_enabled);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("...... time running %016" PRIx64 "\n",
		       sample->read.time_running);

	if (read_format & PERF_FORMAT_GROUP) {
		u64 i;

		printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);

		for (i = 0; i < sample->read.group.nr; i++) {
			struct sample_read_value *value;

			value = &sample->read.group.values[i];
			printf("..... id %016" PRIx64
			       ", value %016" PRIx64 "\n",
			       value->id, value->value);
		}
	} else
		printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
		       sample->read.one.id, sample->read.one.value);
}

static void dump_event(struct evlist *evlist, union perf_event *event,
		       u64 file_offset, struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("\n%#" PRIx64 " [%#x]: event: %d\n",
	       file_offset, event->header.size, event->header.type);

	trace_event(event);
	if (event->header.type == PERF_RECORD_SAMPLE && evlist->trace_event_sample_raw)
		evlist->trace_event_sample_raw(evlist, event, sample);

	if (sample)
		perf_evlist__print_tstamp(evlist, event, sample);

	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
	       event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct evsel *evsel, union perf_event *event,
			struct perf_sample *sample)
{
	u64 sample_type;

	if (!dump_trace)
		return;

	printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
	       event->header.misc, sample->pid, sample->tid, sample->ip,
	       sample->period, sample->addr);

	sample_type = evsel->core.attr.sample_type;

	if (evsel__has_callchain(evsel))
		callchain__printf(evsel, sample);

	if (evsel__has_br_stack(evsel))
		branch_stack__printf(sample, evsel__has_branch_callstack(evsel));

	if (sample_type & PERF_SAMPLE_REGS_USER)
		regs_user__printf(sample);

	if (sample_type & PERF_SAMPLE_REGS_INTR)
		regs_intr__printf(sample);

	if (sample_type & PERF_SAMPLE_STACK_USER)
		stack_user__printf(&sample->user_stack);

	if (sample_type & PERF_SAMPLE_WEIGHT)
		printf("... weight: %" PRIu64 "\n", sample->weight);

	if (sample_type & PERF_SAMPLE_DATA_SRC)
		printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);

	if (sample_type & PERF_SAMPLE_PHYS_ADDR)
		printf(" .. phys_addr: 0x%"PRIx64"\n", sample->phys_addr);

	if (sample_type & PERF_SAMPLE_TRANSACTION)
		printf("... transaction: %" PRIx64 "\n", sample->transaction);

	if (sample_type & PERF_SAMPLE_READ)
		sample_read__printf(sample, evsel->core.attr.read_format);
}

static void dump_read(struct evsel *evsel, union perf_event *event)
{
	struct perf_record_read *read_event = &event->read;
	u64 read_format;

	if (!dump_trace)
		return;

	printf(": %d %d %s %" PRI_lu64 "\n", event->read.pid, event->read.tid,
	       evsel__name(evsel), event->read.value);

	if (!evsel)
		return;

	read_format = evsel->core.attr.read_format;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("... time enabled : %" PRI_lu64 "\n", read_event->time_enabled);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("... time running : %" PRI_lu64 "\n", read_event->time_running);

	if (read_format & PERF_FORMAT_ID)
		printf("... id : %" PRI_lu64 "\n", read_event->id);
}

static struct machine *machines__find_for_cpumode(struct machines *machines,
						  union perf_event *event,
						  struct perf_sample *sample)
{
	struct machine *machine;

	if (perf_guest &&
	    ((sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
	     (sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) {
		u32 pid;

		if (event->header.type == PERF_RECORD_MMAP
		    || event->header.type == PERF_RECORD_MMAP2)
			pid = event->mmap.pid;
		else
			pid = sample->pid;

		machine = machines__find(machines, pid);
		if (!machine)
			machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
		return machine;
	}

	return &machines->host;
}

static int deliver_sample_value(struct evlist *evlist,
				struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct sample_read_value *v,
				struct machine *machine)
{
	struct perf_sample_id *sid = perf_evlist__id2sid(evlist, v->id);
	struct evsel *evsel;

	if (sid) {
		sample->id = v->id;
		sample->period = v->value - sid->period;
		sid->period = v->value;
	}

	if (!sid || sid->evsel == NULL) {
		++evlist->stats.nr_unknown_id;
		return 0;
	}
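	/*
	 * There's no reason to deliver sample
	 * for zero period, bail out.
	 */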
	if (!sample->period)
		return 0;

	evsel = container_of(sid->evsel, struct evsel, core);
	return tool->sample(tool, event, sample, evsel, machine);
}

static int deliver_sample_group(struct evlist *evlist,
				struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct machine *machine)
{
	int ret = -EINVAL;
	u64 i;

	for (i = 0; i < sample->read.group.nr; i++) {
		ret = deliver_sample_value(evlist, tool, event, sample,
					   &sample->read.group.values[i],
					   machine);
		if (ret)
			break;
	}

	return ret;
}

static int perf_evlist__deliver_sample(struct evlist *evlist,
				       struct perf_tool *tool,
				       union perf_event *event,
				       struct perf_sample *sample,
				       struct evsel *evsel,
				       struct machine *machine)
{
	/* We know evsel != NULL. */
	u64 sample_type = evsel->core.attr.sample_type;
	u64 read_format = evsel->core.attr.read_format;

	/* Standard sample delivery. */
	if (!(sample_type & PERF_SAMPLE_READ))
		return tool->sample(tool, event, sample, evsel, machine);

	/* For PERF_SAMPLE_READ we have either single or group mode. */
	if (read_format & PERF_FORMAT_GROUP)
		return deliver_sample_group(evlist, tool, event, sample,
					    machine);
	else
		return deliver_sample_value(evlist, tool, event, sample,
					    &sample->read.one, machine);
}

static int machines__deliver_event(struct machines *machines,
				   struct evlist *evlist,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct perf_tool *tool, u64 file_offset)
{
	struct evsel *evsel;
	struct machine *machine;

	dump_event(evlist, event, file_offset, sample);

	evsel = perf_evlist__id2evsel(evlist, sample->id);

	machine = machines__find_for_cpumode(machines, event, sample);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		if (evsel == NULL) {
			++evlist->stats.nr_unknown_id;
			return 0;
		}
		dump_sample(evsel, event, sample);
		if (machine == NULL) {
			++evlist->stats.nr_unprocessable_samples;
			return 0;
		}
		return perf_evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
	case PERF_RECORD_MMAP:
		return tool->mmap(tool, event, sample, machine);
	case PERF_RECORD_MMAP2:
		if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT)
			++evlist->stats.nr_proc_map_timeout;
		return tool->mmap2(tool, event, sample, machine);
	case PERF_RECORD_COMM:
		return tool->comm(tool, event, sample, machine);
	case PERF_RECORD_NAMESPACES:
		return tool->namespaces(tool, event, sample, machine);
	case PERF_RECORD_CGROUP:
		return tool->cgroup(tool, event, sample, machine);
	case PERF_RECORD_FORK:
		return tool->fork(tool, event, sample, machine);
	case PERF_RECORD_EXIT:
		return tool->exit(tool, event, sample, machine);
	case PERF_RECORD_LOST:
		if (tool->lost == perf_event__process_lost)
			evlist->stats.total_lost += event->lost.lost;
		return tool->lost(tool, event, sample, machine);
	case PERF_RECORD_LOST_SAMPLES:
		if (tool->lost_samples == perf_event__process_lost_samples)
			evlist->stats.total_lost_samples += event->lost_samples.lost;
		return tool->lost_samples(tool, event, sample, machine);
	case PERF_RECORD_READ:
		dump_read(evsel, event);
		return tool->read(tool, event, sample, evsel, machine);
	case PERF_RECORD_THROTTLE:
		return tool->throttle(tool, event, sample, machine);
	case PERF_RECORD_UNTHROTTLE:
		return tool->unthrottle(tool, event, sample, machine);
	case PERF_RECORD_AUX:
		if (tool->aux == perf_event__process_aux) {
			if (event->aux.flags & PERF_AUX_FLAG_TRUNCATED)
				evlist->stats.total_aux_lost += 1;
			if (event->aux.flags & PERF_AUX_FLAG_PARTIAL)
				evlist->stats.total_aux_partial += 1;
		}
		return tool->aux(tool, event, sample, machine);
	case PERF_RECORD_ITRACE_START:
		return tool->itrace_start(tool, event, sample, machine);
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		return tool->context_switch(tool, event, sample, machine);
	case PERF_RECORD_KSYMBOL:
		return tool->ksymbol(tool, event, sample, machine);
	case PERF_RECORD_BPF_EVENT:
		return tool->bpf(tool, event, sample, machine);
	default:
		++evlist->stats.nr_unknown_events;
		return -1;
	}
}

static int perf_session__deliver_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_tool *tool,
				       u64 file_offset)
{
	struct perf_sample sample;
	int ret;

	ret = perf_evlist__parse_sample(session->evlist, event, &sample);
	if (ret) {
		pr_err("Can't parse sample, err = %d\n", ret);
		return ret;
	}

	ret = auxtrace__process_event(session, event, &sample, tool);
	if (ret < 0)
		return ret;
	if (ret > 0)
		return 0;

	ret = machines__deliver_event(&session->machines, session->evlist,
				      event, &sample, tool, file_offset);

	if (dump_trace && sample.aux_sample.size)
		auxtrace__dump_auxtrace_sample(session, &sample);

	return ret;
}

static s64 perf_session__process_user_event(struct perf_session *session,
					    union perf_event *event,
					    u64 file_offset)
{
	struct ordered_events *oe = &session->ordered_events;
	struct perf_tool *tool = session->tool;
	struct perf_sample sample = { .time = 0, };
	int fd = perf_data__fd(session->data);
	int err;

	if (event->header.type != PERF_RECORD_COMPRESSED ||
	    tool->compressed == perf_session__process_compressed_event_stub)
		dump_event(session->evlist, event, file_offset, &sample);

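	/* These events are processed right away */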
	switch (event->header.type) {
	case PERF_RECORD_HEADER_ATTR:
		err = tool->attr(tool, event, &session->evlist);
		if (err == 0) {
			perf_session__set_id_hdr_size(session);
			perf_session__set_comm_exec(session);
		}
		return err;
	case PERF_RECORD_EVENT_UPDATE:
		return tool->event_update(tool, event, &session->evlist);
	case PERF_RECORD_HEADER_EVENT_TYPE:
		/*
		 * Deprecated, but we need to handle it for the sake of old
		 * data files created in pipe mode.
		 */
		return 0;
	case PERF_RECORD_HEADER_TRACING_DATA:
		/*
		 * Setup for reading amidst mmap, but only when we
		 * are in 'file' mode. The 'pipe' fd is in proper
		 * place already.
		 */
		if (!perf_data__is_pipe(session->data))
			lseek(fd, file_offset, SEEK_SET);
		return tool->tracing_data(session, event);
	case PERF_RECORD_HEADER_BUILD_ID:
		return tool->build_id(session, event);
	case PERF_RECORD_FINISHED_ROUND:
		return tool->finished_round(tool, event, oe);
	case PERF_RECORD_ID_INDEX:
		return tool->id_index(session, event);
	case PERF_RECORD_AUXTRACE_INFO:
		return tool->auxtrace_info(session, event);
	case PERF_RECORD_AUXTRACE:
		/* setup for reading amidst mmap */
		lseek(fd, file_offset + event->header.size, SEEK_SET);
		return tool->auxtrace(session, event);
	case PERF_RECORD_AUXTRACE_ERROR:
		perf_session__auxtrace_error_inc(session, event);
		return tool->auxtrace_error(session, event);
	case PERF_RECORD_THREAD_MAP:
		return tool->thread_map(session, event);
	case PERF_RECORD_CPU_MAP:
		return tool->cpu_map(session, event);
	case PERF_RECORD_STAT_CONFIG:
		return tool->stat_config(session, event);
	case PERF_RECORD_STAT:
		return tool->stat(session, event);
	case PERF_RECORD_STAT_ROUND:
		return tool->stat_round(session, event);
	case PERF_RECORD_TIME_CONV:
		session->time_conv = event->time_conv;
		return tool->time_conv(session, event);
	case PERF_RECORD_HEADER_FEATURE:
		return tool->feature(session, event);
	case PERF_RECORD_COMPRESSED:
		err = tool->compressed(session, event, file_offset);
		if (err)
			dump_event(session->evlist, event, file_offset, &sample);
		return err;
	default:
		return -EINVAL;
	}
}

int perf_session__deliver_synth_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample)
{
	struct evlist *evlist = session->evlist;
	struct perf_tool *tool = session->tool;

	events_stats__inc(&evlist->stats, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, 0);

	return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0);
}

static void event_swap(union perf_event *event, bool sample_id_all)
{
	perf_event__swap_op swap;

	swap = perf_event__swap_ops[event->header.type];
	if (swap)
		swap(event, sample_id_all);
}

int perf_session__peek_event(struct perf_session *session, off_t file_offset,
			     void *buf, size_t buf_sz,
			     union perf_event **event_ptr,
			     struct perf_sample *sample)
{
	union perf_event *event;
	size_t hdr_sz, rest;
	int fd;

	if (session->one_mmap && !session->header.needs_swap) {
		event = file_offset - session->one_mmap_offset +
			session->one_mmap_addr;
		goto out_parse_sample;
	}

	if (perf_data__is_pipe(session->data))
		return -1;

	fd = perf_data__fd(session->data);
	hdr_sz = sizeof(struct perf_event_header);

	if (buf_sz < hdr_sz)
		return -1;

	if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
	    readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)
		return -1;

	event = (union perf_event *)buf;

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (event->header.size < hdr_sz || event->header.size > buf_sz)
		return -1;

	rest = event->header.size - hdr_sz;

	if (readn(fd, buf, rest) != (ssize_t)rest)
		return -1;

	if (session->header.needs_swap)
		event_swap(event, perf_evlist__sample_id_all(session->evlist));

out_parse_sample:

	if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
	    perf_evlist__parse_sample(session->evlist, event, sample))
		return -1;

	*event_ptr = event;

	return 0;
}

int perf_session__peek_events(struct perf_session *session, u64 offset,
			      u64 size, peek_events_cb_t cb, void *data)
{
	u64 max_offset = offset + size;
	char buf[PERF_SAMPLE_MAX_SIZE];
	union perf_event *event;
	int err;

	do {
		err = perf_session__peek_event(session, offset, buf,
					       PERF_SAMPLE_MAX_SIZE, &event,
					       NULL);
		if (err)
			return err;

		err = cb(session, event, offset, data);
		if (err)
			return err;

		offset += event->header.size;
		if (event->header.type == PERF_RECORD_AUXTRACE)
			offset += event->auxtrace.size;

	} while (offset < max_offset);

	return err;
}

static s64 perf_session__process_event(struct perf_session *session,
				       union perf_event *event, u64 file_offset)
{
	struct evlist *evlist = session->evlist;
	struct perf_tool *tool = session->tool;
	int ret;

	if (session->header.needs_swap)
		event_swap(event, perf_evlist__sample_id_all(evlist));

	if (event->header.type >= PERF_RECORD_HEADER_MAX)
		return -EINVAL;

	events_stats__inc(&evlist->stats, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, file_offset);

	if (tool->ordered_events) {
		u64 timestamp = -1ULL;

		ret = perf_evlist__parse_sample_timestamp(evlist, event, &timestamp);
		if (ret && ret != -1)
			return ret;

		ret = perf_session__queue_event(session, event, timestamp, file_offset);
		if (ret != -ETIME)
			return ret;
	}

	return perf_session__deliver_event(session, event, tool, file_offset);
}

void perf_event_header__bswap(struct perf_event_header *hdr)
{
	hdr->type = bswap_32(hdr->type);
	hdr->misc = bswap_16(hdr->misc);
	hdr->size = bswap_16(hdr->size);
}

struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
{
	return machine__findnew_thread(&session->machines.host, -1, pid);
}

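/*
 * The idle task ("swapper") has pid == tid == 0, so register a thread for it
 * up front; samples taken while a CPU is idle then resolve to this thread.
 */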
1751int perf_session__register_idle_thread(struct perf_session *session)
1752{
1753 struct thread *thread;
1754 int err = 0;
1755
1756 thread = machine__findnew_thread(&session->machines.host, 0, 0);
1757 if (thread == NULL || thread__set_comm(thread, "swapper", 0)) {
1758 pr_err("problem inserting idle task.\n");
1759 err = -1;
1760 }
1761
1762 if (thread == NULL || thread__set_namespaces(thread, 0, NULL)) {
1763 pr_err("problem inserting idle task.\n");
1764 err = -1;
1765 }
1766
1767
1768 thread__put(thread);
1769 return err;
1770}
1771
1772static void
1773perf_session__warn_order(const struct perf_session *session)
1774{
1775 const struct ordered_events *oe = &session->ordered_events;
1776 struct evsel *evsel;
1777 bool should_warn = true;
1778
1779 evlist__for_each_entry(session->evlist, evsel) {
1780 if (evsel->core.attr.write_backward)
1781 should_warn = false;
1782 }
1783
1784 if (!should_warn)
1785 return;
1786 if (oe->nr_unordered_events != 0)
1787 ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events);
1788}
1789
1790static void perf_session__warn_about_errors(const struct perf_session *session)
1791{
1792 const struct events_stats *stats = &session->evlist->stats;
1793
1794 if (session->tool->lost == perf_event__process_lost &&
1795 stats->nr_events[PERF_RECORD_LOST] != 0) {
1796 ui__warning("Processed %d events and lost %d chunks!\n\n"
1797 "Check IO/CPU overload!\n\n",
1798 stats->nr_events[0],
1799 stats->nr_events[PERF_RECORD_LOST]);
1800 }
1801
1802 if (session->tool->lost_samples == perf_event__process_lost_samples) {
1803 double drop_rate;
1804
1805 drop_rate = (double)stats->total_lost_samples /
1806 (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples);
1807 if (drop_rate > 0.05) {
1808 ui__warning("Processed %" PRIu64 " samples and lost %3.2f%%!\n\n",
1809 stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples,
1810 drop_rate * 100.0);
1811 }
1812 }
1813
1814 if (session->tool->aux == perf_event__process_aux &&
1815 stats->total_aux_lost != 0) {
1816 ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n",
1817 stats->total_aux_lost,
1818 stats->nr_events[PERF_RECORD_AUX]);
1819 }
1820
1821 if (session->tool->aux == perf_event__process_aux &&
1822 stats->total_aux_partial != 0) {
1823 bool vmm_exclusive = false;
1824
1825 (void)sysfs__read_bool("module/kvm_intel/parameters/vmm_exclusive",
1826 &vmm_exclusive);
1827
1828 ui__warning("AUX data had gaps in it %" PRIu64 " times out of %u!\n\n"
1829 "Are you running a KVM guest in the background?%s\n\n",
1830 stats->total_aux_partial,
1831 stats->nr_events[PERF_RECORD_AUX],
1832 vmm_exclusive ?
1833 "\nReloading kvm_intel module with vmm_exclusive=0\n"
1834 "will reduce the gaps to only guest's timeslices." :
1835 "");
1836 }
1837
1838 if (stats->nr_unknown_events != 0) {
1839 ui__warning("Found %u unknown events!\n\n"
1840 "Is this an older tool processing a perf.data "
1841 "file generated by a more recent tool?\n\n"
1842 "If that is not the case, consider "
1843 "reporting to linux-kernel@vger.kernel.org.\n\n",
1844 stats->nr_unknown_events);
1845 }
1846
1847 if (stats->nr_unknown_id != 0) {
1848 ui__warning("%u samples with id not present in the header\n",
1849 stats->nr_unknown_id);
1850 }
1851
1852 if (stats->nr_invalid_chains != 0) {
1853 ui__warning("Found invalid callchains!\n\n"
1854 "%u out of %u events were discarded for this reason.\n\n"
1855 "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
1856 stats->nr_invalid_chains,
1857 stats->nr_events[PERF_RECORD_SAMPLE]);
1858 }
1859
1860 if (stats->nr_unprocessable_samples != 0) {
1861 ui__warning("%u unprocessable samples recorded.\n"
1862 "Do you have a KVM guest running and not using 'perf kvm'?\n",
1863 stats->nr_unprocessable_samples);
1864 }
1865
1866 perf_session__warn_order(session);
1867
1868 events_stats__auxtrace_error_warn(stats);
1869
1870 if (stats->nr_proc_map_timeout != 0) {
1871 ui__warning("%d map information files for pre-existing threads were\n"
1872 "not processed, if there are samples for addresses they\n"
1873 "will not be resolved, you may find out which are these\n"
1874 "threads by running with -v and redirecting the output\n"
1875 "to a file.\n"
1876 "The time limit to process proc map is too short?\n"
1877 "Increase it by --proc-map-timeout\n",
1878 stats->nr_proc_map_timeout);
1879 }
}

static int perf_session__flush_thread_stack(struct thread *thread,
					    void *p __maybe_unused)
{
	return thread_stack__flush(thread);
}

static int perf_session__flush_thread_stacks(struct perf_session *session)
{
	return machines__for_each_thread(&session->machines,
					 perf_session__flush_thread_stack,
					 NULL);
}

volatile int session_done;

static int __perf_session__process_decomp_events(struct perf_session *session);

static int __perf_session__process_pipe_events(struct perf_session *session)
{
	struct ordered_events *oe = &session->ordered_events;
	struct perf_tool *tool = session->tool;
	int fd = perf_data__fd(session->data);
	union perf_event *event;
	uint32_t size, cur_size = 0;
	void *buf = NULL;
	s64 skip = 0;
	u64 head;
	ssize_t err;
	void *p;

	perf_tool__fill_defaults(tool);

	head = 0;
	cur_size = sizeof(union perf_event);

	buf = malloc(cur_size);
	if (!buf)
		return -errno;
	ordered_events__set_copy_on_queue(oe, true);
more:
	event = buf;
	err = readn(fd, event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	size = event->header.size;
	if (size < sizeof(struct perf_event_header)) {
		pr_err("bad event header size\n");
		goto out_err;
	}

	if (size > cur_size) {
		void *new = realloc(buf, size);
		if (!new) {
			pr_err("failed to allocate memory to read event\n");
			goto out_err;
		}
		buf = new;
		cur_size = size;
		event = buf;
	}
	p = event;
	p += sizeof(struct perf_event_header);

	if (size - sizeof(struct perf_event_header)) {
		err = readn(fd, p, size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if ((skip = perf_session__process_event(session, event, head)) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       head, event->header.size, event->header.type);
		err = -EINVAL;
		goto out_err;
	}

	head += size;

	if (skip > 0)
		head += skip;

	err = __perf_session__process_decomp_events(session);
	if (err)
		goto out_err;

	if (!session_done())
		goto more;
done:
	/* do the final flush for ordered samples */
	err = ordered_events__flush(oe, OE_FLUSH__FINAL);
	if (err)
		goto out_err;
	err = auxtrace__flush_events(session, tool);
	if (err)
		goto out_err;
	err = perf_session__flush_thread_stacks(session);
out_err:
	free(buf);
	if (!tool->no_warn)
		perf_session__warn_about_errors(session);
	ordered_events__free(&session->ordered_events);
	auxtrace__free_events(session);
	return err;
}
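
/*
 * Note on the stream format consumed above: a pipe carries a sequence of
 * self-describing records, each starting with a struct perf_event_header
 * whose ->size field covers the header plus its payload. That is what
 * lets the loop grow its buffer to ->size and step from one record to
 * the next without any separate index or framing.
 */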

static union perf_event *
prefetch_event(char *buf, u64 head, size_t mmap_size,
	       bool needs_swap, union perf_event *error)
{
	union perf_event *event;

	/*
	 * Ensure we have enough space remaining to read
	 * the size of the event in the headers.
	 */
	if (head + sizeof(event->header) > mmap_size)
		return NULL;

	event = (union perf_event *)(buf + head);
	if (needs_swap)
		perf_event_header__bswap(&event->header);

	if (head + event->header.size <= mmap_size)
		return event;

	/* We're not fetching the event so swap back again */
	if (needs_swap)
		perf_event_header__bswap(&event->header);

	pr_debug("%s: head=%#" PRIx64 " event->header.size=%#x, mmap_size=%#zx:"
		 " fuzzed or compressed perf.data?\n", __func__, head,
		 event->header.size, mmap_size);

	return error;
}

static union perf_event *
fetch_mmaped_event(u64 head, size_t mmap_size, char *buf, bool needs_swap)
{
	return prefetch_event(buf, head, mmap_size, needs_swap, ERR_PTR(-EINVAL));
}

static union perf_event *
fetch_decomp_event(u64 head, size_t mmap_size, char *buf, bool needs_swap)
{
	return prefetch_event(buf, head, mmap_size, needs_swap, NULL);
}
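
/*
 * The two wrappers above differ only in how a truncated event is
 * reported: fetch_mmaped_event() returns ERR_PTR(-EINVAL) so the file
 * reader fails hard on a malformed perf.data, while fetch_decomp_event()
 * returns NULL so the decompression loop simply stops at the end of the
 * buffer and the remainder is carried over into the next chunk.
 */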

static int __perf_session__process_decomp_events(struct perf_session *session)
{
	s64 skip;
	u64 size, file_pos = 0;
	struct decomp *decomp = session->decomp_last;

	if (!decomp)
		return 0;

	while (decomp->head < decomp->size && !session_done()) {
		union perf_event *event = fetch_decomp_event(decomp->head, decomp->size,
							     decomp->data,
							     session->header.needs_swap);

		if (!event)
			break;

		size = event->header.size;

		if (size < sizeof(struct perf_event_header) ||
		    (skip = perf_session__process_event(session, event, file_pos)) < 0) {
			pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
			       decomp->file_pos + decomp->head, event->header.size,
			       event->header.type);
			return -EINVAL;
		}

		if (skip)
			size += skip;

		decomp->head += size;
	}

	return 0;
}
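
/*
 * Note: only the most recently queued buffer (session->decomp_last) is
 * drained here; each PERF_RECORD_COMPRESSED queues a new buffer, and the
 * event-processing loops call this function again after every event, so
 * no buffer is left behind.
 */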

/*
 * On 64bit we can mmap the data file in one go. No need for tiny mmap
 * multiple times. On 32bit we use 32MB.
 */
#if BITS_PER_LONG == 64
#define MMAP_SIZE ULLONG_MAX
#define NUM_MMAPS 1
#else
#define MMAP_SIZE (32 * 1024 * 1024ULL)
#define NUM_MMAPS 128
#endif

struct reader;

typedef s64 (*reader_cb_t)(struct perf_session *session,
			   union perf_event *event,
			   u64 file_offset);

struct reader {
	int	    fd;
	u64	    data_size;
	u64	    data_offset;
	reader_cb_t process;
};
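
/*
 * 'struct reader' decouples the windowed-mmap walk in
 * reader__process_events() from what is done with each event:
 * __perf_session__process_events() plugs in process_simple() as the
 * callback, but any reader_cb_t would do.
 */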

static int
reader__process_events(struct reader *rd, struct perf_session *session,
		       struct ui_progress *prog)
{
	u64 data_size = rd->data_size;
	u64 head, page_offset, file_offset, file_pos, size;
	int err = 0, mmap_prot, mmap_flags, map_idx = 0;
	size_t mmap_size;
	char *buf, *mmaps[NUM_MMAPS];
	union perf_event *event;
	s64 skip;

	page_offset = page_size * (rd->data_offset / page_size);
	file_offset = page_offset;
	head = rd->data_offset - page_offset;

	ui_progress__init_size(prog, data_size, "Processing events...");

	data_size += rd->data_offset;

	mmap_size = MMAP_SIZE;
	if (mmap_size > data_size) {
		mmap_size = data_size;
		session->one_mmap = true;
	}

	memset(mmaps, 0, sizeof(mmaps));

	mmap_prot  = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (session->header.needs_swap) {
		mmap_prot  |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, rd->fd,
		   file_offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out;
	}
	mmaps[map_idx] = buf;
	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
	file_pos = file_offset + head;
	if (session->one_mmap) {
		session->one_mmap_addr = buf;
		session->one_mmap_offset = file_offset;
	}

more:
	event = fetch_mmaped_event(head, mmap_size, buf, session->header.needs_swap);
	if (IS_ERR(event))
		return PTR_ERR(event);

	if (!event) {
		/*
		 * The next event spans past the end of the current mmap
		 * window: drop this window and remap starting at the page
		 * containing 'head' so the whole event becomes visible.
		 */
		if (mmaps[map_idx]) {
			munmap(mmaps[map_idx], mmap_size);
			mmaps[map_idx] = NULL;
		}

		page_offset = page_size * (head / page_size);
		file_offset += page_offset;
		head -= page_offset;
		goto remap;
	}

	size = event->header.size;

	skip = -EINVAL;

	if (size < sizeof(struct perf_event_header) ||
	    (skip = rd->process(session, event, file_pos)) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d [%s]\n",
		       file_offset + head, event->header.size,
		       event->header.type, strerror(-skip));
		err = skip;
		goto out;
	}

	if (skip)
		size += skip;

	head += size;
	file_pos += size;

	err = __perf_session__process_decomp_events(session);
	if (err)
		goto out;

	ui_progress__update(prog, size);

	if (session_done())
		goto out;

	if (file_pos < data_size)
		goto more;

out:
	return err;
}

static s64 process_simple(struct perf_session *session,
			  union perf_event *event,
			  u64 file_offset)
{
	return perf_session__process_event(session, event, file_offset);
}

static int __perf_session__process_events(struct perf_session *session)
{
	struct reader rd = {
		.fd		= perf_data__fd(session->data),
		.data_size	= session->header.data_size,
		.data_offset	= session->header.data_offset,
		.process	= process_simple,
	};
	struct ordered_events *oe = &session->ordered_events;
	struct perf_tool *tool = session->tool;
	struct ui_progress prog;
	int err;

	perf_tool__fill_defaults(tool);

	if (rd.data_size == 0)
		return -1;

	ui_progress__init_size(&prog, rd.data_size, "Processing events...");

	err = reader__process_events(&rd, session, &prog);
	if (err)
		goto out_err;

	err = ordered_events__flush(oe, OE_FLUSH__FINAL);
	if (err)
		goto out_err;
	err = auxtrace__flush_events(session, tool);
	if (err)
		goto out_err;
	err = perf_session__flush_thread_stacks(session);
out_err:
	ui_progress__finish();
	if (!tool->no_warn)
		perf_session__warn_about_errors(session);

	/*
	 * We may be switching perf.data output; make ordered_events
	 * reusable.
	 */
	ordered_events__reinit(&session->ordered_events);
	auxtrace__free_events(session);
	session->one_mmap = false;
	return err;
}

int perf_session__process_events(struct perf_session *session)
{
	if (perf_session__register_idle_thread(session) < 0)
		return -ENOMEM;

	if (perf_data__is_pipe(session->data))
		return __perf_session__process_pipe_events(session);

	return __perf_session__process_events(session);
}
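
/*
 * Minimal usage sketch (hypothetical, not part of the original file): a
 * typical consumer wires its callbacks into a struct perf_tool, opens a
 * perf.data file in read mode and lets the session dispatch every event.
 * The "perf.data" path and the helper name are illustrative only.
 */
static int __maybe_unused process_events_example(struct perf_tool *tool)
{
	struct perf_data data = {
		.path = "perf.data",
		.mode = PERF_DATA_MODE_READ,
	};
	struct perf_session *session;
	int err;

	/* Second argument is 'repipe'; perf_session__new() returns ERR_PTR() on failure. */
	session = perf_session__new(&data, false, tool);
	if (IS_ERR(session))
		return PTR_ERR(session);

	err = perf_session__process_events(session);

	perf_session__delete(session);
	return err;
}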

bool perf_session__has_traces(struct perf_session *session, const char *msg)
{
	struct evsel *evsel;

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT)
			return true;
	}

	pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
	return false;
}

int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name, u64 addr)
{
	char *bracket;
	struct ref_reloc_sym *ref;
	struct kmap *kmap;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	kmap = map__kmap(map);
	if (kmap)
		kmap->ref_reloc_sym = ref;

	return 0;
}
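
/*
 * Hypothetical usage sketch (helper name is illustrative, not from the
 * original source): perf anchors kallsyms-based relocation on a
 * well-known symbol such as "_text"; anything from the first ']' onward
 * in the name is chopped off by the function above before the symbol is
 * stored on the kernel map.
 */
static int __maybe_unused ref_reloc_sym_example(struct map *kernel_map)
{
	/* Address 0: the real address is filled in once kallsyms is parsed. */
	return map__set_kallsyms_ref_reloc_sym(kernel_map, "_text", 0);
}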

size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
{
	return machines__fprintf_dsos(&session->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
					  bool (skip)(struct dso *dso, int parm), int parm)
{
	return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
}

size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
{
	size_t ret;
	const char *msg = "";

	if (perf_header__has_feat(&session->header, HEADER_AUXTRACE))
		msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)";

	ret = fprintf(fp, "\nAggregated stats:%s\n", msg);

	ret += events_stats__fprintf(&session->evlist->stats, fp);
	return ret;
}

size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
{
	/*
	 * FIXME: Here we have to actually print all the machines in this
	 * session, not just the host...
	 */
	return machine__fprintf(&session->machines.host, fp);
}

struct evsel *perf_session__find_first_evtype(struct perf_session *session,
					      unsigned int type)
{
	struct evsel *pos;

	evlist__for_each_entry(session->evlist, pos) {
		if (pos->core.attr.type == type)
			return pos;
	}
	return NULL;
}

int perf_session__cpu_bitmap(struct perf_session *session,
			     const char *cpu_list, unsigned long *cpu_bitmap)
{
	int i, err = -1;
	struct perf_cpu_map *map;
	int nr_cpus = min(session->header.env.nr_cpus_online, MAX_NR_CPUS);

	for (i = 0; i < PERF_TYPE_MAX; ++i) {
		struct evsel *evsel;

		evsel = perf_session__find_first_evtype(session, i);
		if (!evsel)
			continue;

		if (!(evsel->core.attr.sample_type & PERF_SAMPLE_CPU)) {
			pr_err("File does not contain CPU events. "
			       "Remove -C option to proceed.\n");
			return -1;
		}
	}

	map = perf_cpu_map__new(cpu_list);
	if (map == NULL) {
		pr_err("Invalid cpu_list\n");
		return -1;
	}

	for (i = 0; i < map->nr; i++) {
		int cpu = map->map[i];

		if (cpu >= nr_cpus) {
			pr_err("Requested CPU %d too large. "
			       "Consider raising MAX_NR_CPUS\n", cpu);
			goto out_delete_map;
		}

		set_bit(cpu, cpu_bitmap);
	}

	err = 0;

out_delete_map:
	perf_cpu_map__put(map);
	return err;
}
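
/*
 * Hypothetical usage sketch (helper name and CPU list are illustrative):
 * build a bitmap restricting processing to CPUs 0-3. The BITS_TO_LONGS()
 * sizing assumes the tools/include bitops helpers are in scope, as they
 * already are for set_bit() above.
 */
static int __maybe_unused cpu_bitmap_example(struct perf_session *session)
{
	/* One bit per possible CPU, all cleared initially. */
	unsigned long cpu_bitmap[BITS_TO_LONGS(MAX_NR_CPUS)] = { 0 };

	/* On success, exactly the bits for CPUs 0-3 are set. */
	return perf_session__cpu_bitmap(session, "0-3", cpu_bitmap);
}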

void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
				bool full)
{
	if (session == NULL || fp == NULL)
		return;

	fprintf(fp, "# ========\n");
	perf_header__fprintf_info(session, fp, full);
	fprintf(fp, "# ========\n#\n");
}

int perf_event__process_id_index(struct perf_session *session,
				 union perf_event *event)
{
	struct evlist *evlist = session->evlist;
	struct perf_record_id_index *ie = &event->id_index;
	size_t i, nr, max_nr;

	max_nr = (ie->header.size - sizeof(struct perf_record_id_index)) /
		 sizeof(struct id_index_entry);
	nr = ie->nr;
	if (nr > max_nr)
		return -EINVAL;

	if (dump_trace)
		fprintf(stdout, " nr: %zu\n", nr);

	for (i = 0; i < nr; i++) {
		struct id_index_entry *e = &ie->entries[i];
		struct perf_sample_id *sid;

		if (dump_trace) {
			fprintf(stdout, " ... id: %"PRI_lu64, e->id);
			fprintf(stdout, " idx: %"PRI_lu64, e->idx);
			fprintf(stdout, " cpu: %"PRI_ld64, e->cpu);
			fprintf(stdout, " tid: %"PRI_ld64"\n", e->tid);
		}

		sid = perf_evlist__id2sid(evlist, e->id);
		if (!sid)
			return -ENOENT;
		sid->idx = e->idx;
		sid->cpu = e->cpu;
		sid->tid = e->tid;
	}
	return 0;
}
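
/*
 * For reference: a PERF_RECORD_ID_INDEX record carries ie->nr entries of
 * struct id_index_entry { id, idx, cpu, tid }, which is what lets a tool
 * map a sample id back to the mmap index, CPU and thread it came from.
 */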