#include <errno.h>
#include <inttypes.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <api/fs/fs.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <perf/cpumap.h>

#include "map_symbol.h"
#include "branch.h"
#include "debug.h"
#include "evlist.h"
#include "evsel.h"
#include "memswap.h"
#include "map.h"
#include "symbol.h"
#include "session.h"
#include "tool.h"
#include "perf_regs.h"
#include "asm/bug.h"
#include "auxtrace.h"
#include "thread.h"
#include "thread-stack.h"
#include "sample-raw.h"
#include "stat.h"
#include "tsc.h"
#include "ui/progress.h"
#include "../perf.h"
#include "arch/common.h"
#include "units.h"
#include <internal/lib.h>
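
/*
 * PERF_RECORD_COMPRESSED payloads are decompressed into anonymous mmap'ed
 * buffers kept on a list (session->decomp .. session->decomp_last) so the
 * decompressed records can be walked afterwards.  A partial record left at
 * the tail of the previous buffer is copied to the front of the new one
 * before the next chunk is decompressed behind it.
 */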
#ifdef HAVE_ZSTD_SUPPORT
static int perf_session__process_compressed_event(struct perf_session *session,
						  union perf_event *event, u64 file_offset)
{
	void *src;
	size_t decomp_size, src_size;
	u64 decomp_last_rem = 0;
	size_t mmap_len, decomp_len = session->header.env.comp_mmap_len;
	struct decomp *decomp, *decomp_last = session->decomp_last;

	if (decomp_last) {
		decomp_last_rem = decomp_last->size - decomp_last->head;
		decomp_len += decomp_last_rem;
	}

	mmap_len = sizeof(struct decomp) + decomp_len;
	decomp = mmap(NULL, mmap_len, PROT_READ|PROT_WRITE,
		      MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
	if (decomp == MAP_FAILED) {
		pr_err("Couldn't allocate memory for decompression\n");
		return -1;
	}

	decomp->file_pos = file_offset;
	decomp->mmap_len = mmap_len;
	decomp->head = 0;

	if (decomp_last_rem) {
		memcpy(decomp->data, &(decomp_last->data[decomp_last->head]), decomp_last_rem);
		decomp->size = decomp_last_rem;
	}

	src = (void *)event + sizeof(struct perf_record_compressed);
	src_size = event->pack.header.size - sizeof(struct perf_record_compressed);

	decomp_size = zstd_decompress_stream(&(session->zstd_data), src, src_size,
				&(decomp->data[decomp_last_rem]), decomp_len - decomp_last_rem);
	if (!decomp_size) {
		munmap(decomp, mmap_len);
		pr_err("Couldn't decompress data\n");
		return -1;
	}

	decomp->size += decomp_size;

	if (session->decomp == NULL) {
		session->decomp = decomp;
		session->decomp_last = decomp;
	} else {
		session->decomp_last->next = decomp;
		session->decomp_last = decomp;
	}

	pr_debug("decomp (B): %zd to %zd\n", src_size, decomp_size);

	return 0;
}
#else /* !HAVE_ZSTD_SUPPORT */
#define perf_session__process_compressed_event perf_session__process_compressed_event_stub
#endif

static int perf_session__deliver_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_tool *tool,
				       u64 file_offset);

static int perf_session__open(struct perf_session *session, int repipe_fd)
{
	struct perf_data *data = session->data;

	if (perf_session__read_header(session, repipe_fd) < 0) {
		pr_err("incompatible file format (rerun with -v to learn more)\n");
		return -1;
	}

	if (perf_data__is_pipe(data))
		return 0;

	if (perf_header__has_feat(&session->header, HEADER_STAT))
		return 0;

	if (!evlist__valid_sample_type(session->evlist)) {
		pr_err("non matching sample_type\n");
		return -1;
	}

	if (!evlist__valid_sample_id_all(session->evlist)) {
		pr_err("non matching sample_id_all\n");
		return -1;
	}

	if (!evlist__valid_read_format(session->evlist)) {
		pr_err("non matching read_format\n");
		return -1;
	}

	return 0;
}

void perf_session__set_id_hdr_size(struct perf_session *session)
{
	u16 id_hdr_size = evlist__id_hdr_size(session->evlist);

	machines__set_id_hdr_size(&session->machines, id_hdr_size);
}

int perf_session__create_kernel_maps(struct perf_session *session)
{
	int ret = machine__create_kernel_maps(&session->machines.host);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&session->machines);
	return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *session)
{
	machines__destroy_kernel_maps(&session->machines);
}

static bool perf_session__has_comm_exec(struct perf_session *session)
{
	struct evsel *evsel;

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->core.attr.comm_exec)
			return true;
	}

	return false;
}

static void perf_session__set_comm_exec(struct perf_session *session)
{
	bool comm_exec = perf_session__has_comm_exec(session);

	machines__set_comm_exec(&session->machines, comm_exec);
}

static int ordered_events__deliver_event(struct ordered_events *oe,
					 struct ordered_event *event)
{
	struct perf_session *session = container_of(oe, struct perf_session,
						    ordered_events);

	return perf_session__deliver_event(session, event->event,
					   session->tool, event->file_offset);
}
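
/*
 * Allocate a session and, when the perf_data is opened for reading, parse
 * its header so that evlist, the id header size and the comm_exec flag
 * reflect the file being read.  On failure an ERR_PTR() encoded errno is
 * returned, so callers must check the result with IS_ERR().
 */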
struct perf_session *__perf_session__new(struct perf_data *data,
					 bool repipe, int repipe_fd,
					 struct perf_tool *tool)
{
	int ret = -ENOMEM;
	struct perf_session *session = zalloc(sizeof(*session));

	if (!session)
		goto out;

	session->repipe = repipe;
	session->tool = tool;
	INIT_LIST_HEAD(&session->auxtrace_index);
	machines__init(&session->machines);
	ordered_events__init(&session->ordered_events,
			     ordered_events__deliver_event, NULL);

	perf_env__init(&session->header.env);
	if (data) {
		ret = perf_data__open(data);
		if (ret < 0)
			goto out_delete;

		session->data = data;

		if (perf_data__is_read(data)) {
			ret = perf_session__open(session, repipe_fd);
			if (ret < 0)
				goto out_delete;

			/*
			 * Set session attributes that are present in perf.data
			 * but not in pipe-mode.
			 */
			if (!data->is_pipe) {
				perf_session__set_id_hdr_size(session);
				perf_session__set_comm_exec(session);
			}

			evlist__init_trace_event_sample_raw(session->evlist);

			/* Open the directory data. */
			if (data->is_dir) {
				ret = perf_data__open_dir(data);
				if (ret)
					goto out_delete;
			}

			if (!symbol_conf.kallsyms_name &&
			    !symbol_conf.vmlinux_name)
				symbol_conf.kallsyms_name = perf_data__kallsyms_name(data);
		}
	} else {
		session->machines.host.env = &perf_env;
	}

	session->machines.host.single_address_space =
		perf_env__single_address_space(session->machines.host.env);

	if (!data || perf_data__is_write(data)) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in perf_event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(session) < 0)
			pr_warning("Cannot read kernel map\n");
	}

	/*
	 * In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is
	 * processed, so evlist__sample_id_all is not meaningful here.
	 */
	if ((!data || !data->is_pipe) && tool && tool->ordering_requires_timestamps &&
	    tool->ordered_events && !evlist__sample_id_all(session->evlist)) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		tool->ordered_events = false;
	}

	return session;

 out_delete:
	perf_session__delete(session);
 out:
	return ERR_PTR(ret);
}

static void perf_session__delete_threads(struct perf_session *session)
{
	machine__delete_threads(&session->machines.host);
}

static void perf_session__release_decomp_events(struct perf_session *session)
{
	struct decomp *next, *decomp;
	size_t mmap_len;

	next = session->decomp;
	do {
		decomp = next;
		if (decomp == NULL)
			break;
		next = decomp->next;
		mmap_len = decomp->mmap_len;
		munmap(decomp, mmap_len);
	} while (1);
}

void perf_session__delete(struct perf_session *session)
{
	if (session == NULL)
		return;
	auxtrace__free(session);
	auxtrace_index__free(&session->auxtrace_index);
	perf_session__destroy_kernel_maps(session);
	perf_session__delete_threads(session);
	perf_session__release_decomp_events(session);
	perf_env__exit(&session->header.env);
	machines__exit(&session->machines);
	if (session->data) {
		if (perf_data__is_read(session->data))
			evlist__delete(session->evlist);
		perf_data__close(session->data);
	}
	trace_event__cleanup(&session->tevent);
	free(session);
}
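
/*
 * Stub handlers installed by perf_tool__fill_defaults() for the callbacks a
 * tool does not provide: they only log the record as unhandled and succeed.
 */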
static int process_event_synth_tracing_data_stub(struct perf_session *session
						 __maybe_unused,
						 union perf_event *event
						 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
					 union perf_event *event __maybe_unused,
					 struct evlist **pevlist
					 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_event_update_stub(struct perf_tool *tool __maybe_unused,
						 union perf_event *event __maybe_unused,
						 struct evlist **pevlist
						 __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_event_update(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
				     union perf_event *event __maybe_unused,
				     struct perf_sample *sample __maybe_unused,
				     struct evsel *evsel __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_stub(struct perf_tool *tool __maybe_unused,
			      union perf_event *event __maybe_unused,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
				       union perf_event *event __maybe_unused,
				       struct ordered_events *oe __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event,
				  struct ordered_events *oe);

/* Read and discard n bytes, for pipe input that cannot seek. */
static int skipn(int fd, off_t n)
{
	char buf[4096];
	ssize_t ret;

	while (n > 0) {
		ret = read(fd, buf, min(n, (off_t)sizeof(buf)));
		if (ret <= 0)
			return ret;
		n -= ret;
	}

	return 0;
}

static s64 process_event_auxtrace_stub(struct perf_session *session,
				       union perf_event *event)
{
	dump_printf(": unhandled!\n");
	if (perf_data__is_pipe(session->data))
		skipn(perf_data__fd(session->data), event->auxtrace.size);
	return event->auxtrace.size;
}

static int process_event_op2_stub(struct perf_session *session __maybe_unused,
				  union perf_event *event __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static
int process_event_thread_map_stub(struct perf_session *session __maybe_unused,
				  union perf_event *event __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_thread_map(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static
int process_event_cpu_map_stub(struct perf_session *session __maybe_unused,
			       union perf_event *event __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_cpu_map(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static
int process_event_stat_config_stub(struct perf_session *session __maybe_unused,
				   union perf_event *event __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_stat_config(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int process_stat_stub(struct perf_session *perf_session __maybe_unused,
			     union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_stat(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int process_stat_round_stub(struct perf_session *perf_session __maybe_unused,
				   union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_stat_round(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_time_conv_stub(struct perf_session *perf_session __maybe_unused,
					union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_time_conv(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int perf_session__process_compressed_event_stub(struct perf_session *session __maybe_unused,
						       union perf_event *event __maybe_unused,
						       u64 file_offset __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

void perf_tool__fill_defaults(struct perf_tool *tool)
{
	if (tool->sample == NULL)
		tool->sample = process_event_sample_stub;
	if (tool->mmap == NULL)
		tool->mmap = process_event_stub;
	if (tool->mmap2 == NULL)
		tool->mmap2 = process_event_stub;
	if (tool->comm == NULL)
		tool->comm = process_event_stub;
	if (tool->namespaces == NULL)
		tool->namespaces = process_event_stub;
	if (tool->cgroup == NULL)
		tool->cgroup = process_event_stub;
	if (tool->fork == NULL)
		tool->fork = process_event_stub;
	if (tool->exit == NULL)
		tool->exit = process_event_stub;
	if (tool->lost == NULL)
		tool->lost = perf_event__process_lost;
	if (tool->lost_samples == NULL)
		tool->lost_samples = perf_event__process_lost_samples;
	if (tool->aux == NULL)
		tool->aux = perf_event__process_aux;
	if (tool->itrace_start == NULL)
		tool->itrace_start = perf_event__process_itrace_start;
	if (tool->context_switch == NULL)
		tool->context_switch = perf_event__process_switch;
	if (tool->ksymbol == NULL)
		tool->ksymbol = perf_event__process_ksymbol;
	if (tool->bpf == NULL)
		tool->bpf = perf_event__process_bpf;
	if (tool->text_poke == NULL)
		tool->text_poke = perf_event__process_text_poke;
	if (tool->read == NULL)
		tool->read = process_event_sample_stub;
	if (tool->throttle == NULL)
		tool->throttle = process_event_stub;
	if (tool->unthrottle == NULL)
		tool->unthrottle = process_event_stub;
	if (tool->attr == NULL)
		tool->attr = process_event_synth_attr_stub;
	if (tool->event_update == NULL)
		tool->event_update = process_event_synth_event_update_stub;
	if (tool->tracing_data == NULL)
		tool->tracing_data = process_event_synth_tracing_data_stub;
	if (tool->build_id == NULL)
		tool->build_id = process_event_op2_stub;
	if (tool->finished_round == NULL) {
		if (tool->ordered_events)
			tool->finished_round = process_finished_round;
		else
			tool->finished_round = process_finished_round_stub;
	}
	if (tool->id_index == NULL)
		tool->id_index = process_event_op2_stub;
	if (tool->auxtrace_info == NULL)
		tool->auxtrace_info = process_event_op2_stub;
	if (tool->auxtrace == NULL)
		tool->auxtrace = process_event_auxtrace_stub;
	if (tool->auxtrace_error == NULL)
		tool->auxtrace_error = process_event_op2_stub;
	if (tool->thread_map == NULL)
		tool->thread_map = process_event_thread_map_stub;
	if (tool->cpu_map == NULL)
		tool->cpu_map = process_event_cpu_map_stub;
	if (tool->stat_config == NULL)
		tool->stat_config = process_event_stat_config_stub;
	if (tool->stat == NULL)
		tool->stat = process_stat_stub;
	if (tool->stat_round == NULL)
		tool->stat_round = process_stat_round_stub;
	if (tool->time_conv == NULL)
		tool->time_conv = process_event_time_conv_stub;
	if (tool->feature == NULL)
		tool->feature = process_event_op2_stub;
	if (tool->compressed == NULL)
		tool->compressed = perf_session__process_compressed_event;
}
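
/*
 * Byte-swap helpers for reading perf.data files recorded on a host of the
 * opposite endianness.  The sample_id_all trailer that can follow
 * non-sample records is a sequence of u64s, so once its start is known it
 * can be swapped wholesale.
 */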
static void swap_sample_id_all(union perf_event *event, void *data)
{
	void *end = (void *) event + event->header.size;
	int size = end - data;

	BUG_ON(size % sizeof(u64));
	mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
				   bool sample_id_all __maybe_unused)
{
	struct perf_event_header *hdr = &event->header;

	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
	event->comm.pid = bswap_32(event->comm.pid);
	event->comm.tid = bswap_32(event->comm.tid);

	if (sample_id_all) {
		void *data = &event->comm.comm;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap_swap(union perf_event *event,
				  bool sample_id_all)
{
	event->mmap.pid = bswap_32(event->mmap.pid);
	event->mmap.tid = bswap_32(event->mmap.tid);
	event->mmap.start = bswap_64(event->mmap.start);
	event->mmap.len = bswap_64(event->mmap.len);
	event->mmap.pgoff = bswap_64(event->mmap.pgoff);

	if (sample_id_all) {
		void *data = &event->mmap.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap2_swap(union perf_event *event,
				   bool sample_id_all)
{
	event->mmap2.pid = bswap_32(event->mmap2.pid);
	event->mmap2.tid = bswap_32(event->mmap2.tid);
	event->mmap2.start = bswap_64(event->mmap2.start);
	event->mmap2.len = bswap_64(event->mmap2.len);
	event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);

	if (!(event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID)) {
		event->mmap2.maj = bswap_32(event->mmap2.maj);
		event->mmap2.min = bswap_32(event->mmap2.min);
		event->mmap2.ino = bswap_64(event->mmap2.ino);
		event->mmap2.ino_generation = bswap_64(event->mmap2.ino_generation);
	}

	if (sample_id_all) {
		void *data = &event->mmap2.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
	event->fork.pid = bswap_32(event->fork.pid);
	event->fork.tid = bswap_32(event->fork.tid);
	event->fork.ppid = bswap_32(event->fork.ppid);
	event->fork.ptid = bswap_32(event->fork.ptid);
	event->fork.time = bswap_64(event->fork.time);

	if (sample_id_all)
		swap_sample_id_all(event, &event->fork + 1);
}

static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
	event->read.pid = bswap_32(event->read.pid);
	event->read.tid = bswap_32(event->read.tid);
	event->read.value = bswap_64(event->read.value);
	event->read.time_enabled = bswap_64(event->read.time_enabled);
	event->read.time_running = bswap_64(event->read.time_running);
	event->read.id = bswap_64(event->read.id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->read + 1);
}

static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
{
	event->aux.aux_offset = bswap_64(event->aux.aux_offset);
	event->aux.aux_size = bswap_64(event->aux.aux_size);
	event->aux.flags = bswap_64(event->aux.flags);

	if (sample_id_all)
		swap_sample_id_all(event, &event->aux + 1);
}

static void perf_event__itrace_start_swap(union perf_event *event,
					  bool sample_id_all)
{
	event->itrace_start.pid = bswap_32(event->itrace_start.pid);
	event->itrace_start.tid = bswap_32(event->itrace_start.tid);

	if (sample_id_all)
		swap_sample_id_all(event, &event->itrace_start + 1);
}

static void perf_event__switch_swap(union perf_event *event, bool sample_id_all)
{
	if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
		event->context_switch.next_prev_pid =
				bswap_32(event->context_switch.next_prev_pid);
		event->context_switch.next_prev_tid =
				bswap_32(event->context_switch.next_prev_tid);
	}

	if (sample_id_all)
		swap_sample_id_all(event, &event->context_switch + 1);
}

static void perf_event__text_poke_swap(union perf_event *event, bool sample_id_all)
{
	event->text_poke.addr = bswap_64(event->text_poke.addr);
	event->text_poke.old_len = bswap_16(event->text_poke.old_len);
	event->text_poke.new_len = bswap_16(event->text_poke.new_len);

	if (sample_id_all) {
		size_t len = sizeof(event->text_poke.old_len) +
			     sizeof(event->text_poke.new_len) +
			     event->text_poke.old_len +
			     event->text_poke.new_len;
		void *data = &event->text_poke.old_len;

		data += PERF_ALIGN(len, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__throttle_swap(union perf_event *event,
				      bool sample_id_all)
{
	event->throttle.time = bswap_64(event->throttle.time);
	event->throttle.id = bswap_64(event->throttle.id);
	event->throttle.stream_id = bswap_64(event->throttle.stream_id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->throttle + 1);
}

static void perf_event__namespaces_swap(union perf_event *event,
					bool sample_id_all)
{
	u64 i;

	event->namespaces.pid = bswap_32(event->namespaces.pid);
	event->namespaces.tid = bswap_32(event->namespaces.tid);
	event->namespaces.nr_namespaces = bswap_64(event->namespaces.nr_namespaces);

	for (i = 0; i < event->namespaces.nr_namespaces; i++) {
		struct perf_ns_link_info *ns = &event->namespaces.link_info[i];

		ns->dev = bswap_64(ns->dev);
		ns->ino = bswap_64(ns->ino);
	}

	if (sample_id_all)
		swap_sample_id_all(event, &event->namespaces.link_info[i]);
}

static void perf_event__cgroup_swap(union perf_event *event, bool sample_id_all)
{
	event->cgroup.id = bswap_64(event->cgroup.id);

	if (sample_id_all) {
		void *data = &event->cgroup.path;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static u8 revbyte(u8 b)
{
	int rev = (b >> 4) | ((b & 0xf) << 4);
	rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
	rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
	return (u8) rev;
}

/*
 * Bitfield bits are allocated from opposite ends of the unit on little-
 * and big-endian hosts, so the perf_event_attr flags bitfield cannot be
 * fixed up with a plain byte swap.  Reversing the bit order of each byte,
 * without swapping the bytes themselves, moves every flag bit to the
 * position the host compiler expects.
 */
static void swap_bitfield(u8 *p, unsigned len)
{
	unsigned i;

	for (i = 0; i < len; i++) {
		*p = revbyte(*p);
		p++;
	}
}

void perf_event__attr_swap(struct perf_event_attr *attr)
{
	attr->type = bswap_32(attr->type);
	attr->size = bswap_32(attr->size);

#define bswap_safe(f, n) 					\
	(attr->size > (offsetof(struct perf_event_attr, f) + 	\
		       sizeof(attr->f) * (n)))
#define bswap_field(f, sz) 			\
do { 						\
	if (bswap_safe(f, 0))			\
		attr->f = bswap_##sz(attr->f);	\
} while(0)
#define bswap_field_16(f) bswap_field(f, 16)
#define bswap_field_32(f) bswap_field(f, 32)
#define bswap_field_64(f) bswap_field(f, 64)

	bswap_field_64(config);
	bswap_field_64(sample_period);
	bswap_field_64(sample_type);
	bswap_field_64(read_format);
	bswap_field_32(wakeup_events);
	bswap_field_32(bp_type);
	bswap_field_64(bp_addr);
	bswap_field_64(bp_len);
	bswap_field_64(branch_sample_type);
	bswap_field_64(sample_regs_user);
	bswap_field_32(sample_stack_user);
	bswap_field_32(aux_watermark);
	bswap_field_16(sample_max_stack);
	bswap_field_32(aux_sample_size);

	/*
	 * After read_format are bitfields. Check read_format because
	 * we are unable to use offsetof on bitfield.
	 */
	if (bswap_safe(read_format, 1))
		swap_bitfield((u8 *) (&attr->read_format + 1),
			      sizeof(u64));
#undef bswap_field_64
#undef bswap_field_32
#undef bswap_field
#undef bswap_safe
}

static void perf_event__hdr_attr_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	size_t size;

	perf_event__attr_swap(&event->attr.attr);

	size = event->header.size;
	size -= (void *)&event->attr.id - (void *)event;
	mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_update_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->event_update.type = bswap_64(event->event_update.type);
	event->event_update.id = bswap_64(event->event_update.id);
}

static void perf_event__event_type_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->event_type.event_type.event_id =
		bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->tracing_data.size = bswap_32(event->tracing_data.size);
}

static void perf_event__auxtrace_info_swap(union perf_event *event,
					   bool sample_id_all __maybe_unused)
{
	size_t size;

	event->auxtrace_info.type = bswap_32(event->auxtrace_info.type);

	size = event->header.size;
	size -= (void *)&event->auxtrace_info.priv - (void *)event;
	mem_bswap_64(event->auxtrace_info.priv, size);
}

static void perf_event__auxtrace_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	event->auxtrace.size = bswap_64(event->auxtrace.size);
	event->auxtrace.offset = bswap_64(event->auxtrace.offset);
	event->auxtrace.reference = bswap_64(event->auxtrace.reference);
	event->auxtrace.idx = bswap_32(event->auxtrace.idx);
	event->auxtrace.tid = bswap_32(event->auxtrace.tid);
	event->auxtrace.cpu = bswap_32(event->auxtrace.cpu);
}

static void perf_event__auxtrace_error_swap(union perf_event *event,
					    bool sample_id_all __maybe_unused)
{
	event->auxtrace_error.type = bswap_32(event->auxtrace_error.type);
	event->auxtrace_error.code = bswap_32(event->auxtrace_error.code);
	event->auxtrace_error.cpu = bswap_32(event->auxtrace_error.cpu);
	event->auxtrace_error.pid = bswap_32(event->auxtrace_error.pid);
	event->auxtrace_error.tid = bswap_32(event->auxtrace_error.tid);
	event->auxtrace_error.fmt = bswap_32(event->auxtrace_error.fmt);
	event->auxtrace_error.ip = bswap_64(event->auxtrace_error.ip);
	if (event->auxtrace_error.fmt)
		event->auxtrace_error.time = bswap_64(event->auxtrace_error.time);
}

static void perf_event__thread_map_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	unsigned i;

	event->thread_map.nr = bswap_64(event->thread_map.nr);

	for (i = 0; i < event->thread_map.nr; i++)
		event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid);
}

static void perf_event__cpu_map_swap(union perf_event *event,
				     bool sample_id_all __maybe_unused)
{
	struct perf_record_cpu_map_data *data = &event->cpu_map.data;
	struct cpu_map_entries *cpus;
	struct perf_record_record_cpu_map *mask;
	unsigned i;

	data->type = bswap_16(data->type);

	switch (data->type) {
	case PERF_CPU_MAP__CPUS:
		cpus = (struct cpu_map_entries *)data->data;

		cpus->nr = bswap_16(cpus->nr);

		for (i = 0; i < cpus->nr; i++)
			cpus->cpu[i] = bswap_16(cpus->cpu[i]);
		break;
	case PERF_CPU_MAP__MASK:
		mask = (struct perf_record_record_cpu_map *)data->data;

		mask->nr = bswap_16(mask->nr);
		mask->long_size = bswap_16(mask->long_size);

		switch (mask->long_size) {
		case 4: mem_bswap_32(&mask->mask, mask->nr); break;
		case 8: mem_bswap_64(&mask->mask, mask->nr); break;
		default:
			pr_err("cpu_map swap: unsupported long size\n");
		}
	default:
		break;
	}
}

static void perf_event__stat_config_swap(union perf_event *event,
					 bool sample_id_all __maybe_unused)
{
	u64 size;

	size = bswap_64(event->stat_config.nr) * sizeof(event->stat_config.data[0]);
	size += 1; /* nr item itself */
	mem_bswap_64(&event->stat_config.nr, size);
}

static void perf_event__stat_swap(union perf_event *event,
				  bool sample_id_all __maybe_unused)
{
	event->stat.id = bswap_64(event->stat.id);
	event->stat.thread = bswap_32(event->stat.thread);
	event->stat.cpu = bswap_32(event->stat.cpu);
	event->stat.val = bswap_64(event->stat.val);
	event->stat.ena = bswap_64(event->stat.ena);
	event->stat.run = bswap_64(event->stat.run);
}

static void perf_event__stat_round_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->stat_round.type = bswap_64(event->stat_round.type);
	event->stat_round.time = bswap_64(event->stat_round.time);
}

static void perf_event__time_conv_swap(union perf_event *event,
				       bool sample_id_all __maybe_unused)
{
	event->time_conv.time_shift = bswap_64(event->time_conv.time_shift);
	event->time_conv.time_mult = bswap_64(event->time_conv.time_mult);
	event->time_conv.time_zero = bswap_64(event->time_conv.time_zero);

	if (event_contains(event->time_conv, time_cycles)) {
		event->time_conv.time_cycles = bswap_64(event->time_conv.time_cycles);
		event->time_conv.time_mask = bswap_64(event->time_conv.time_mask);
	}
}

typedef void (*perf_event__swap_op)(union perf_event *event,
				    bool sample_id_all);

static perf_event__swap_op perf_event__swap_ops[] = {
	[PERF_RECORD_MMAP] = perf_event__mmap_swap,
	[PERF_RECORD_MMAP2] = perf_event__mmap2_swap,
	[PERF_RECORD_COMM] = perf_event__comm_swap,
	[PERF_RECORD_FORK] = perf_event__task_swap,
	[PERF_RECORD_EXIT] = perf_event__task_swap,
	[PERF_RECORD_LOST] = perf_event__all64_swap,
	[PERF_RECORD_READ] = perf_event__read_swap,
	[PERF_RECORD_THROTTLE] = perf_event__throttle_swap,
	[PERF_RECORD_UNTHROTTLE] = perf_event__throttle_swap,
	[PERF_RECORD_SAMPLE] = perf_event__all64_swap,
	[PERF_RECORD_AUX] = perf_event__aux_swap,
	[PERF_RECORD_ITRACE_START] = perf_event__itrace_start_swap,
	[PERF_RECORD_LOST_SAMPLES] = perf_event__all64_swap,
	[PERF_RECORD_SWITCH] = perf_event__switch_swap,
	[PERF_RECORD_SWITCH_CPU_WIDE] = perf_event__switch_swap,
	[PERF_RECORD_NAMESPACES] = perf_event__namespaces_swap,
	[PERF_RECORD_CGROUP] = perf_event__cgroup_swap,
	[PERF_RECORD_TEXT_POKE] = perf_event__text_poke_swap,
	[PERF_RECORD_HEADER_ATTR] = perf_event__hdr_attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID] = NULL,
	[PERF_RECORD_ID_INDEX] = perf_event__all64_swap,
	[PERF_RECORD_AUXTRACE_INFO] = perf_event__auxtrace_info_swap,
	[PERF_RECORD_AUXTRACE] = perf_event__auxtrace_swap,
	[PERF_RECORD_AUXTRACE_ERROR] = perf_event__auxtrace_error_swap,
	[PERF_RECORD_THREAD_MAP] = perf_event__thread_map_swap,
	[PERF_RECORD_CPU_MAP] = perf_event__cpu_map_swap,
	[PERF_RECORD_STAT_CONFIG] = perf_event__stat_config_swap,
	[PERF_RECORD_STAT] = perf_event__stat_swap,
	[PERF_RECORD_STAT_ROUND] = perf_event__stat_round_swap,
	[PERF_RECORD_EVENT_UPDATE] = perf_event__event_update_swap,
	[PERF_RECORD_TIME_CONV] = perf_event__time_conv_swap,
	[PERF_RECORD_HEADER_MAX] = NULL,
};

/*
 * When perf record finishes a pass on every buffer, it records this pseudo
 * event.
 * We record the max timestamp t found in the pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will be all
 * available and then read in the pass n + 1.
 * Hence when we start to read the pass n + 2, we can safely flush every
 * event with a timestamp below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush every event below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush every event below timestamp 5
 *
 *    ============ PASS n + 3 ==============
 *      Flush every event below timestamp 7
 *      etc...
 */
static int process_finished_round(struct perf_tool *tool __maybe_unused,
				  union perf_event *event __maybe_unused,
				  struct ordered_events *oe)
{
	if (dump_trace)
		fprintf(stdout, "\n");
	return ordered_events__flush(oe, OE_FLUSH__ROUND);
}

int perf_session__queue_event(struct perf_session *s, union perf_event *event,
			      u64 timestamp, u64 file_offset)
{
	return ordered_events__queue(&s->ordered_events, event, timestamp, file_offset);
}

static void callchain__lbr_callstack_printf(struct perf_sample *sample)
{
	struct ip_callchain *callchain = sample->callchain;
	struct branch_stack *lbr_stack = sample->branch_stack;
	struct branch_entry *entries = perf_sample__branch_entries(sample);
	u64 kernel_callchain_nr = callchain->nr;
	unsigned int i;

	for (i = 0; i < kernel_callchain_nr; i++) {
		if (callchain->ips[i] == PERF_CONTEXT_USER)
			break;
	}

	if ((i != kernel_callchain_nr) && lbr_stack->nr) {
		u64 total_nr;
		/*
		 * The LBR callstack covers only the user part of the call
		 * chain; the kernel part (everything up to and including
		 * PERF_CONTEXT_USER) comes from the regular callchain.
		 * LBR entries are pairs: the caller is in "from" and the
		 * callee in "to", so for a stack A->B->C->D the LBRs hold
		 * "C"->"D", "B"->"C", "A"->"B", and the first "to" plus
		 * all "from" entries reconstruct the user stack.
		 */
		total_nr = i + 1 + lbr_stack->nr + 1;
		kernel_callchain_nr = i + 1;

		printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr);

		for (i = 0; i < kernel_callchain_nr; i++)
			printf("..... %2d: %016" PRIx64 "\n",
			       i, callchain->ips[i]);

		printf("..... %2d: %016" PRIx64 "\n",
		       (int)(kernel_callchain_nr), entries[0].to);
		for (i = 0; i < lbr_stack->nr; i++)
			printf("..... %2d: %016" PRIx64 "\n",
			       (int)(i + kernel_callchain_nr + 1), entries[i].from);
	}
}

static void callchain__printf(struct evsel *evsel,
			      struct perf_sample *sample)
{
	unsigned int i;
	struct ip_callchain *callchain = sample->callchain;

	if (evsel__has_branch_callstack(evsel))
		callchain__lbr_callstack_printf(sample);

	printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);

	for (i = 0; i < callchain->nr; i++)
		printf("..... %2d: %016" PRIx64 "\n",
		       i, callchain->ips[i]);
}

static void branch_stack__printf(struct perf_sample *sample, bool callstack)
{
	struct branch_entry *entries = perf_sample__branch_entries(sample);
	uint64_t i;

	printf("%s: nr:%" PRIu64 "\n",
	       !callstack ? "... branch stack" : "... branch callstack",
	       sample->branch_stack->nr);

	for (i = 0; i < sample->branch_stack->nr; i++) {
		struct branch_entry *e = &entries[i];

		if (!callstack) {
			printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x\n",
			       i, e->from, e->to,
			       (unsigned short)e->flags.cycles,
			       e->flags.mispred ? "M" : " ",
			       e->flags.predicted ? "P" : " ",
			       e->flags.abort ? "A" : " ",
			       e->flags.in_tx ? "T" : " ",
			       (unsigned)e->flags.reserved);
		} else {
			printf("..... %2"PRIu64": %016" PRIx64 "\n",
			       i, i > 0 ? e->from : e->to);
		}
	}
}

static void regs_dump__printf(u64 mask, u64 *regs)
{
	unsigned rid, i = 0;

	for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
		u64 val = regs[i++];

		printf(".... %-5s 0x%016" PRIx64 "\n",
		       perf_reg_name(rid), val);
	}
}

static const char *regs_abi[] = {
	[PERF_SAMPLE_REGS_ABI_NONE] = "none",
	[PERF_SAMPLE_REGS_ABI_32] = "32-bit",
	[PERF_SAMPLE_REGS_ABI_64] = "64-bit",
};

static inline const char *regs_dump_abi(struct regs_dump *d)
{
	if (d->abi > PERF_SAMPLE_REGS_ABI_64)
		return "unknown";

	return regs_abi[d->abi];
}

static void regs__printf(const char *type, struct regs_dump *regs)
{
	u64 mask = regs->mask;

	printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n",
	       type,
	       mask,
	       regs_dump_abi(regs));

	regs_dump__printf(mask, regs->regs);
}

static void regs_user__printf(struct perf_sample *sample)
{
	struct regs_dump *user_regs = &sample->user_regs;

	if (user_regs->regs)
		regs__printf("user", user_regs);
}

static void regs_intr__printf(struct perf_sample *sample)
{
	struct regs_dump *intr_regs = &sample->intr_regs;

	if (intr_regs->regs)
		regs__printf("intr", intr_regs);
}

static void stack_user__printf(struct stack_dump *dump)
{
	printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
	       dump->size, dump->offset);
}

static void evlist__print_tstamp(struct evlist *evlist, union perf_event *event, struct perf_sample *sample)
{
	u64 sample_type = __evlist__combined_sample_type(evlist);

	if (event->header.type != PERF_RECORD_SAMPLE &&
	    !evlist__sample_id_all(evlist)) {
		fputs("-1 -1 ", stdout);
		return;
	}

	if ((sample_type & PERF_SAMPLE_CPU))
		printf("%u ", sample->cpu);

	if (sample_type & PERF_SAMPLE_TIME)
		printf("%" PRIu64 " ", sample->time);
}

static void sample_read__printf(struct perf_sample *sample, u64 read_format)
{
	printf("... sample_read:\n");

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("...... time enabled %016" PRIx64 "\n",
		       sample->read.time_enabled);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("...... time running %016" PRIx64 "\n",
		       sample->read.time_running);

	if (read_format & PERF_FORMAT_GROUP) {
		u64 i;

		printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);

		for (i = 0; i < sample->read.group.nr; i++) {
			struct sample_read_value *value;

			value = &sample->read.group.values[i];
			printf("..... id %016" PRIx64
			       ", value %016" PRIx64 "\n",
			       value->id, value->value);
		}
	} else
		printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
		       sample->read.one.id, sample->read.one.value);
}

static void dump_event(struct evlist *evlist, union perf_event *event,
		       u64 file_offset, struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("\n%#" PRIx64 " [%#x]: event: %d\n",
	       file_offset, event->header.size, event->header.type);

	trace_event(event);
	if (event->header.type == PERF_RECORD_SAMPLE && evlist->trace_event_sample_raw)
		evlist->trace_event_sample_raw(evlist, event, sample);

	if (sample)
		evlist__print_tstamp(evlist, event, sample);

	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
	       event->header.size, perf_event__name(event->header.type));
}

char *get_page_size_name(u64 size, char *str)
{
	if (!size || !unit_number__scnprintf(str, PAGE_SIZE_NAME_LEN, size))
		snprintf(str, PAGE_SIZE_NAME_LEN, "%s", "N/A");

	return str;
}

static void dump_sample(struct evsel *evsel, union perf_event *event,
			struct perf_sample *sample)
{
	u64 sample_type;
	char str[PAGE_SIZE_NAME_LEN];

	if (!dump_trace)
		return;

	printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
	       event->header.misc, sample->pid, sample->tid, sample->ip,
	       sample->period, sample->addr);

	sample_type = evsel->core.attr.sample_type;

	if (evsel__has_callchain(evsel))
		callchain__printf(evsel, sample);

	if (evsel__has_br_stack(evsel))
		branch_stack__printf(sample, evsel__has_branch_callstack(evsel));

	if (sample_type & PERF_SAMPLE_REGS_USER)
		regs_user__printf(sample);

	if (sample_type & PERF_SAMPLE_REGS_INTR)
		regs_intr__printf(sample);

	if (sample_type & PERF_SAMPLE_STACK_USER)
		stack_user__printf(&sample->user_stack);

	if (sample_type & PERF_SAMPLE_WEIGHT_TYPE) {
		printf("... weight: %" PRIu64 "", sample->weight);
		if (sample_type & PERF_SAMPLE_WEIGHT_STRUCT) {
			printf(",0x%"PRIx16"", sample->ins_lat);
			printf(",0x%"PRIx16"", sample->p_stage_cyc);
		}
		printf("\n");
	}

	if (sample_type & PERF_SAMPLE_DATA_SRC)
		printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);

	if (sample_type & PERF_SAMPLE_PHYS_ADDR)
		printf(" .. phys_addr: 0x%"PRIx64"\n", sample->phys_addr);

	if (sample_type & PERF_SAMPLE_DATA_PAGE_SIZE)
		printf(" .. data page size: %s\n", get_page_size_name(sample->data_page_size, str));

	if (sample_type & PERF_SAMPLE_CODE_PAGE_SIZE)
		printf(" .. code page size: %s\n", get_page_size_name(sample->code_page_size, str));

	if (sample_type & PERF_SAMPLE_TRANSACTION)
		printf("... transaction: %" PRIx64 "\n", sample->transaction);

	if (sample_type & PERF_SAMPLE_READ)
		sample_read__printf(sample, evsel->core.attr.read_format);
}

static void dump_read(struct evsel *evsel, union perf_event *event)
{
	struct perf_record_read *read_event = &event->read;
	u64 read_format;

	if (!dump_trace)
		return;

	printf(": %d %d %s %" PRI_lu64 "\n", event->read.pid, event->read.tid,
	       evsel__name(evsel), event->read.value);

	if (!evsel)
		return;

	read_format = evsel->core.attr.read_format;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("... time enabled : %" PRI_lu64 "\n", read_event->time_enabled);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("... time running : %" PRI_lu64 "\n", read_event->time_running);

	if (read_format & PERF_FORMAT_ID)
		printf("... id           : %" PRI_lu64 "\n", read_event->id);
}

static struct machine *machines__find_for_cpumode(struct machines *machines,
						  union perf_event *event,
						  struct perf_sample *sample)
{
	if (perf_guest &&
	    ((sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
	     (sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) {
		u32 pid;

		if (event->header.type == PERF_RECORD_MMAP
		    || event->header.type == PERF_RECORD_MMAP2)
			pid = event->mmap.pid;
		else
			pid = sample->pid;

		return machines__find_guest(machines, pid);
	}

	return &machines->host;
}
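
/*
 * With PERF_SAMPLE_READ the event carries raw counter values rather than a
 * period: compute the period as the delta against the value seen at the
 * previous sample for this id, then deliver the sample to the tool.
 */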
static int deliver_sample_value(struct evlist *evlist,
				struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct sample_read_value *v,
				struct machine *machine)
{
	struct perf_sample_id *sid = evlist__id2sid(evlist, v->id);
	struct evsel *evsel;

	if (sid) {
		sample->id = v->id;
		sample->period = v->value - sid->period;
		sid->period = v->value;
	}

	if (!sid || sid->evsel == NULL) {
		++evlist->stats.nr_unknown_id;
		return 0;
	}

	/*
	 * There's no reason to deliver sample
	 * with zero period, bail out.
	 */
	if (!sample->period)
		return 0;

	evsel = container_of(sid->evsel, struct evsel, core);
	return tool->sample(tool, event, sample, evsel, machine);
}

static int deliver_sample_group(struct evlist *evlist,
				struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct machine *machine)
{
	int ret = -EINVAL;
	u64 i;

	for (i = 0; i < sample->read.group.nr; i++) {
		ret = deliver_sample_value(evlist, tool, event, sample,
					   &sample->read.group.values[i],
					   machine);
		if (ret)
			break;
	}

	return ret;
}

static int evlist__deliver_sample(struct evlist *evlist, struct perf_tool *tool,
				  union perf_event *event, struct perf_sample *sample,
				  struct evsel *evsel, struct machine *machine)
{
	/* We know evsel != NULL. */
	u64 sample_type = evsel->core.attr.sample_type;
	u64 read_format = evsel->core.attr.read_format;

	/* Standard sample delivery. */
	if (!(sample_type & PERF_SAMPLE_READ))
		return tool->sample(tool, event, sample, evsel, machine);

	/* For PERF_SAMPLE_READ we have either single or group mode. */
	if (read_format & PERF_FORMAT_GROUP)
		return deliver_sample_group(evlist, tool, event, sample,
					    machine);
	else
		return deliver_sample_value(evlist, tool, event, sample,
					    &sample->read.one, machine);
}

static int machines__deliver_event(struct machines *machines,
				   struct evlist *evlist,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct perf_tool *tool, u64 file_offset)
{
	struct evsel *evsel;
	struct machine *machine;

	dump_event(evlist, event, file_offset, sample);

	evsel = evlist__id2evsel(evlist, sample->id);

	machine = machines__find_for_cpumode(machines, event, sample);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		if (evsel == NULL) {
			++evlist->stats.nr_unknown_id;
			return 0;
		}
		dump_sample(evsel, event, sample);
		if (machine == NULL) {
			++evlist->stats.nr_unprocessable_samples;
			return 0;
		}
		return evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
	case PERF_RECORD_MMAP:
		return tool->mmap(tool, event, sample, machine);
	case PERF_RECORD_MMAP2:
		if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT)
			++evlist->stats.nr_proc_map_timeout;
		return tool->mmap2(tool, event, sample, machine);
	case PERF_RECORD_COMM:
		return tool->comm(tool, event, sample, machine);
	case PERF_RECORD_NAMESPACES:
		return tool->namespaces(tool, event, sample, machine);
	case PERF_RECORD_CGROUP:
		return tool->cgroup(tool, event, sample, machine);
	case PERF_RECORD_FORK:
		return tool->fork(tool, event, sample, machine);
	case PERF_RECORD_EXIT:
		return tool->exit(tool, event, sample, machine);
	case PERF_RECORD_LOST:
		if (tool->lost == perf_event__process_lost)
			evlist->stats.total_lost += event->lost.lost;
		return tool->lost(tool, event, sample, machine);
	case PERF_RECORD_LOST_SAMPLES:
		if (tool->lost_samples == perf_event__process_lost_samples)
			evlist->stats.total_lost_samples += event->lost_samples.lost;
		return tool->lost_samples(tool, event, sample, machine);
	case PERF_RECORD_READ:
		dump_read(evsel, event);
		return tool->read(tool, event, sample, evsel, machine);
	case PERF_RECORD_THROTTLE:
		return tool->throttle(tool, event, sample, machine);
	case PERF_RECORD_UNTHROTTLE:
		return tool->unthrottle(tool, event, sample, machine);
	case PERF_RECORD_AUX:
		if (tool->aux == perf_event__process_aux) {
			if (event->aux.flags & PERF_AUX_FLAG_TRUNCATED)
				evlist->stats.total_aux_lost += 1;
			if (event->aux.flags & PERF_AUX_FLAG_PARTIAL)
				evlist->stats.total_aux_partial += 1;
			if (event->aux.flags & PERF_AUX_FLAG_COLLISION)
				evlist->stats.total_aux_collision += 1;
		}
		return tool->aux(tool, event, sample, machine);
	case PERF_RECORD_ITRACE_START:
		return tool->itrace_start(tool, event, sample, machine);
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		return tool->context_switch(tool, event, sample, machine);
	case PERF_RECORD_KSYMBOL:
		return tool->ksymbol(tool, event, sample, machine);
	case PERF_RECORD_BPF_EVENT:
		return tool->bpf(tool, event, sample, machine);
	case PERF_RECORD_TEXT_POKE:
		return tool->text_poke(tool, event, sample, machine);
	default:
		++evlist->stats.nr_unknown_events;
		return -1;
	}
}
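
/*
 * Parse the sample, then give the AUX trace decoder the first chance to
 * consume the event: a positive return from auxtrace__process_event()
 * means it was consumed there and must not be delivered again.
 */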
static int perf_session__deliver_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_tool *tool,
				       u64 file_offset)
{
	struct perf_sample sample;
	int ret = evlist__parse_sample(session->evlist, event, &sample);

	if (ret) {
		pr_err("Can't parse sample, err = %d\n", ret);
		return ret;
	}

	ret = auxtrace__process_event(session, event, &sample, tool);
	if (ret < 0)
		return ret;
	if (ret > 0)
		return 0;

	ret = machines__deliver_event(&session->machines, session->evlist,
				      event, &sample, tool, file_offset);

	if (dump_trace && sample.aux_sample.size)
		auxtrace__dump_auxtrace_sample(session, &sample);

	return ret;
}

static s64 perf_session__process_user_event(struct perf_session *session,
					    union perf_event *event,
					    u64 file_offset)
{
	struct ordered_events *oe = &session->ordered_events;
	struct perf_tool *tool = session->tool;
	struct perf_sample sample = { .time = 0, };
	int fd = perf_data__fd(session->data);
	int err;

	if (event->header.type != PERF_RECORD_COMPRESSED ||
	    tool->compressed == perf_session__process_compressed_event_stub)
		dump_event(session->evlist, event, file_offset, &sample);

	/* These events are processed right away. */
	switch (event->header.type) {
	case PERF_RECORD_HEADER_ATTR:
		err = tool->attr(tool, event, &session->evlist);
		if (err == 0) {
			perf_session__set_id_hdr_size(session);
			perf_session__set_comm_exec(session);
		}
		return err;
	case PERF_RECORD_EVENT_UPDATE:
		return tool->event_update(tool, event, &session->evlist);
	case PERF_RECORD_HEADER_EVENT_TYPE:
		/*
		 * Deprecated, but we need to handle it for the sake
		 * of old data files created in pipe mode.
		 */
		return 0;
	case PERF_RECORD_HEADER_TRACING_DATA:
		/*
		 * Setup for reading amidst mmap, but only when we
		 * are in 'file' mode. The 'pipe' fd is in proper
		 * place already.
		 */
		if (!perf_data__is_pipe(session->data))
			lseek(fd, file_offset, SEEK_SET);
		return tool->tracing_data(session, event);
	case PERF_RECORD_HEADER_BUILD_ID:
		return tool->build_id(session, event);
	case PERF_RECORD_FINISHED_ROUND:
		return tool->finished_round(tool, event, oe);
	case PERF_RECORD_ID_INDEX:
		return tool->id_index(session, event);
	case PERF_RECORD_AUXTRACE_INFO:
		return tool->auxtrace_info(session, event);
	case PERF_RECORD_AUXTRACE:
		/* Setup for reading amidst mmap. */
		lseek(fd, file_offset + event->header.size, SEEK_SET);
		return tool->auxtrace(session, event);
	case PERF_RECORD_AUXTRACE_ERROR:
		perf_session__auxtrace_error_inc(session, event);
		return tool->auxtrace_error(session, event);
	case PERF_RECORD_THREAD_MAP:
		return tool->thread_map(session, event);
	case PERF_RECORD_CPU_MAP:
		return tool->cpu_map(session, event);
	case PERF_RECORD_STAT_CONFIG:
		return tool->stat_config(session, event);
	case PERF_RECORD_STAT:
		return tool->stat(session, event);
	case PERF_RECORD_STAT_ROUND:
		return tool->stat_round(session, event);
	case PERF_RECORD_TIME_CONV:
		session->time_conv = event->time_conv;
		return tool->time_conv(session, event);
	case PERF_RECORD_HEADER_FEATURE:
		return tool->feature(session, event);
	case PERF_RECORD_COMPRESSED:
		err = tool->compressed(session, event, file_offset);
		if (err)
			dump_event(session->evlist, event, file_offset, &sample);
		return err;
	default:
		return -EINVAL;
	}
}
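
/*
 * Deliver a synthesized event directly, bypassing the ordered-events queue;
 * file offset 0 is used because the event does not come from the perf.data
 * stream.
 */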
int perf_session__deliver_synth_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample)
{
	struct evlist *evlist = session->evlist;
	struct perf_tool *tool = session->tool;

	events_stats__inc(&evlist->stats, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, 0);

	return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0);
}

static void event_swap(union perf_event *event, bool sample_id_all)
{
	perf_event__swap_op swap;

	swap = perf_event__swap_ops[event->header.type];
	if (swap)
		swap(event, sample_id_all);
}

int perf_session__peek_event(struct perf_session *session, off_t file_offset,
			     void *buf, size_t buf_sz,
			     union perf_event **event_ptr,
			     struct perf_sample *sample)
{
	union perf_event *event;
	size_t hdr_sz, rest;
	int fd;

	if (session->one_mmap && !session->header.needs_swap) {
		event = file_offset - session->one_mmap_offset +
			session->one_mmap_addr;
		goto out_parse_sample;
	}

	if (perf_data__is_pipe(session->data))
		return -1;

	fd = perf_data__fd(session->data);
	hdr_sz = sizeof(struct perf_event_header);

	if (buf_sz < hdr_sz)
		return -1;

	if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
	    readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)
		return -1;

	event = (union perf_event *)buf;

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (event->header.size < hdr_sz || event->header.size > buf_sz)
		return -1;

	buf += hdr_sz;
	rest = event->header.size - hdr_sz;

	if (readn(fd, buf, rest) != (ssize_t)rest)
		return -1;

	if (session->header.needs_swap)
		event_swap(event, evlist__sample_id_all(session->evlist));

out_parse_sample:

	if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
	    evlist__parse_sample(session->evlist, event, sample))
		return -1;

	*event_ptr = event;

	return 0;
}

int perf_session__peek_events(struct perf_session *session, u64 offset,
			      u64 size, peek_events_cb_t cb, void *data)
{
	u64 max_offset = offset + size;
	char buf[PERF_SAMPLE_MAX_SIZE];
	union perf_event *event;
	int err;

	do {
		err = perf_session__peek_event(session, offset, buf,
					       PERF_SAMPLE_MAX_SIZE, &event,
					       NULL);
		if (err)
			return err;

		err = cb(session, event, offset, data);
		if (err)
			return err;

		offset += event->header.size;
		if (event->header.type == PERF_RECORD_AUXTRACE)
			offset += event->auxtrace.size;

	} while (offset < max_offset);

	return err;
}
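
/*
 * Main per-record entry point for file based processing: swap the record if
 * needed, account it, route user (synthesized) types separately, and queue
 * by timestamp for tools that want ordered delivery.  -ETIME from queueing
 * means the timestamp is below the last flush, so the event is delivered
 * directly instead.
 */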
static s64 perf_session__process_event(struct perf_session *session,
				       union perf_event *event, u64 file_offset)
{
	struct evlist *evlist = session->evlist;
	struct perf_tool *tool = session->tool;
	int ret;

	if (session->header.needs_swap)
		event_swap(event, evlist__sample_id_all(evlist));

	if (event->header.type >= PERF_RECORD_HEADER_MAX)
		return -EINVAL;

	events_stats__inc(&evlist->stats, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, file_offset);

	if (tool->ordered_events) {
		u64 timestamp = -1ULL;

		ret = evlist__parse_sample_timestamp(evlist, event, &timestamp);
		if (ret && ret != -1)
			return ret;

		ret = perf_session__queue_event(session, event, timestamp, file_offset);
		if (ret != -ETIME)
			return ret;
	}

	return perf_session__deliver_event(session, event, tool, file_offset);
}

void perf_event_header__bswap(struct perf_event_header *hdr)
{
	hdr->type = bswap_32(hdr->type);
	hdr->misc = bswap_16(hdr->misc);
	hdr->size = bswap_16(hdr->size);
}

struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
{
	return machine__findnew_thread(&session->machines.host, -1, pid);
}

int perf_session__register_idle_thread(struct perf_session *session)
{
	struct thread *thread = machine__idle_thread(&session->machines.host);

	/* machine__idle_thread() got the thread, so put it. */
	thread__put(thread);
	return thread ? 0 : -1;
}
1834
1835static void
1836perf_session__warn_order(const struct perf_session *session)
1837{
1838 const struct ordered_events *oe = &session->ordered_events;
1839 struct evsel *evsel;
1840 bool should_warn = true;
1841
1842 evlist__for_each_entry(session->evlist, evsel) {
1843 if (evsel->core.attr.write_backward)
1844 should_warn = false;
1845 }
1846
1847 if (!should_warn)
1848 return;
1849 if (oe->nr_unordered_events != 0)
1850 ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events);
1851}

static void perf_session__warn_about_errors(const struct perf_session *session)
{
	const struct events_stats *stats = &session->evlist->stats;

	if (session->tool->lost == perf_event__process_lost &&
	    stats->nr_events[PERF_RECORD_LOST] != 0) {
		ui__warning("Processed %d events and lost %d chunks!\n\n"
			    "Check IO/CPU overload!\n\n",
			    stats->nr_events[0],
			    stats->nr_events[PERF_RECORD_LOST]);
	}

	if (session->tool->lost_samples == perf_event__process_lost_samples) {
		double drop_rate;

		drop_rate = (double)stats->total_lost_samples /
			    (double)(stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples);
		if (drop_rate > 0.05) {
			ui__warning("Processed %" PRIu64 " samples and lost %3.2f%%!\n\n",
				    stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples,
				    drop_rate * 100.0);
		}
	}

	if (session->tool->aux == perf_event__process_aux &&
	    stats->total_aux_lost != 0) {
		ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n",
			    stats->total_aux_lost,
			    stats->nr_events[PERF_RECORD_AUX]);
	}

	if (session->tool->aux == perf_event__process_aux &&
	    stats->total_aux_partial != 0) {
		bool vmm_exclusive = false;

		(void)sysfs__read_bool("module/kvm_intel/parameters/vmm_exclusive",
				       &vmm_exclusive);

		ui__warning("AUX data had gaps in it %" PRIu64 " times out of %u!\n\n"
			    "Are you running a KVM guest in the background?%s\n\n",
			    stats->total_aux_partial,
			    stats->nr_events[PERF_RECORD_AUX],
			    vmm_exclusive ?
			    "\nReloading kvm_intel module with vmm_exclusive=0\n"
			    "will reduce the gaps to only guest's timeslices." :
			    "");
	}

	if (session->tool->aux == perf_event__process_aux &&
	    stats->total_aux_collision != 0) {
		ui__warning("AUX data detected collision %" PRIu64 " times out of %u!\n\n",
			    stats->total_aux_collision,
			    stats->nr_events[PERF_RECORD_AUX]);
	}

	if (stats->nr_unknown_events != 0) {
		ui__warning("Found %u unknown events!\n\n"
			    "Is this an older tool processing a perf.data "
			    "file generated by a more recent tool?\n\n"
			    "If that is not the case, consider "
			    "reporting to linux-kernel@vger.kernel.org.\n\n",
			    stats->nr_unknown_events);
	}

	if (stats->nr_unknown_id != 0) {
		ui__warning("%u samples with id not present in the header\n",
			    stats->nr_unknown_id);
	}

	if (stats->nr_invalid_chains != 0) {
		ui__warning("Found invalid callchains!\n\n"
			    "%u out of %u events were discarded for this reason.\n\n"
			    "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
			    stats->nr_invalid_chains,
			    stats->nr_events[PERF_RECORD_SAMPLE]);
	}

	if (stats->nr_unprocessable_samples != 0) {
		ui__warning("%u unprocessable samples recorded.\n"
			    "Do you have a KVM guest running and not using 'perf kvm'?\n",
			    stats->nr_unprocessable_samples);
	}

	perf_session__warn_order(session);

	events_stats__auxtrace_error_warn(stats);

	if (stats->nr_proc_map_timeout != 0) {
		ui__warning("%d map information files for pre-existing threads were\n"
			    "not processed; if there are samples for those addresses,\n"
			    "they will not be resolved. You can find out which threads\n"
			    "these are by running with -v and redirecting the output\n"
			    "to a file.\n"
			    "Was the time limit to process the proc maps too short?\n"
			    "Increase it with --proc-map-timeout.\n",
			    stats->nr_proc_map_timeout);
	}
}

static int perf_session__flush_thread_stack(struct thread *thread,
					    void *p __maybe_unused)
{
	return thread_stack__flush(thread);
}

static int perf_session__flush_thread_stacks(struct perf_session *session)
{
	return machines__for_each_thread(&session->machines,
					 perf_session__flush_thread_stack,
					 NULL);
}

volatile int session_done;

static int __perf_session__process_decomp_events(struct perf_session *session);

static int __perf_session__process_pipe_events(struct perf_session *session)
{
	struct ordered_events *oe = &session->ordered_events;
	struct perf_tool *tool = session->tool;
	union perf_event *event;
	uint32_t size, cur_size = 0;
	void *buf = NULL;
	s64 skip = 0;
	u64 head;
	ssize_t err;
	void *p;

	perf_tool__fill_defaults(tool);

	head = 0;
	cur_size = sizeof(union perf_event);

	buf = malloc(cur_size);
	if (!buf)
		return -errno;
	ordered_events__set_copy_on_queue(oe, true);
more:
	event = buf;
	err = perf_data__read(session->data, event,
			      sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	size = event->header.size;
	if (size < sizeof(struct perf_event_header)) {
		pr_err("bad event header size\n");
		goto out_err;
	}

	if (size > cur_size) {
		void *new = realloc(buf, size);
		if (!new) {
			pr_err("failed to allocate memory to read event\n");
			goto out_err;
		}
		buf = new;
		cur_size = size;
		event = buf;
	}
	p = event;
	p += sizeof(struct perf_event_header);

	if (size > sizeof(struct perf_event_header)) {
		err = perf_data__read(session->data, p,
				      size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if ((skip = perf_session__process_event(session, event, head)) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       head, event->header.size, event->header.type);
		err = -EINVAL;
		goto out_err;
	}

	head += size;

	if (skip > 0)
		head += skip;

	err = __perf_session__process_decomp_events(session);
	if (err)
		goto out_err;

	if (!session_done())
		goto more;
done:
	/* do the final flush for ordered samples */
	err = ordered_events__flush(oe, OE_FLUSH__FINAL);
	if (err)
		goto out_err;
	err = auxtrace__flush_events(session, tool);
	if (err)
		goto out_err;
	err = perf_session__flush_thread_stacks(session);
out_err:
	free(buf);
	if (!tool->no_warn)
		perf_session__warn_about_errors(session);
	ordered_events__free(&session->ordered_events);
	auxtrace__free_events(session);
	return err;
}

static union perf_event *
prefetch_event(char *buf, u64 head, size_t mmap_size,
	       bool needs_swap, union perf_event *error)
{
	union perf_event *event;

	/*
	 * Ensure we have enough space remaining to read
	 * the size of the event in the headers.
	 */
	if (head + sizeof(event->header) > mmap_size)
		return NULL;

	event = (union perf_event *)(buf + head);
	if (needs_swap)
		perf_event_header__bswap(&event->header);

	if (head + event->header.size <= mmap_size)
		return event;

	/* We're not fetching the event so swap back again */
	if (needs_swap)
		perf_event_header__bswap(&event->header);

	pr_debug("%s: head=%#" PRIx64 " event->header.size=%#x, mmap_size=%#zx:"
		 " fuzzed or compressed perf.data?\n",
		 __func__, head, event->header.size, mmap_size);

	return error;
}

static union perf_event *
fetch_mmaped_event(u64 head, size_t mmap_size, char *buf, bool needs_swap)
{
	return prefetch_event(buf, head, mmap_size, needs_swap, ERR_PTR(-EINVAL));
}

static union perf_event *
fetch_decomp_event(u64 head, size_t mmap_size, char *buf, bool needs_swap)
{
	return prefetch_event(buf, head, mmap_size, needs_swap, NULL);
}
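
/*
 * Note on the two wrappers above: both return NULL when the event header
 * would cross the end of the buffer (so the caller can remap a new window),
 * but they differ when an event claims a size that cannot fit at all:
 * fetch_mmaped_event() returns ERR_PTR(-EINVAL) so the mmap reader aborts on
 * a likely corrupt file, while fetch_decomp_event() returns NULL so
 * processing of a decompressed buffer simply stops at the truncated tail.
 */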

static int __perf_session__process_decomp_events(struct perf_session *session)
{
	s64 skip;
	u64 size;
	struct decomp *decomp = session->decomp_last;

	if (!decomp)
		return 0;

	while (decomp->head < decomp->size && !session_done()) {
		union perf_event *event = fetch_decomp_event(decomp->head, decomp->size, decomp->data,
							     session->header.needs_swap);

		if (!event)
			break;

		size = event->header.size;

		if (size < sizeof(struct perf_event_header) ||
		    (skip = perf_session__process_event(session, event, decomp->file_pos)) < 0) {
			pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
			       decomp->file_pos + decomp->head, event->header.size, event->header.type);
			return -EINVAL;
		}

		if (skip)
			size += skip;

		decomp->head += size;
	}

	return 0;
}
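
/*
 * Both the pipe reader and the mmap reader call the function above after
 * every delivered event: a PERF_RECORD_COMPRESSED event appends decompressed
 * bytes to session->decomp_last, and this drains them as ordinary events
 * before the next record from the file is touched.
 */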

/*
 * On 64bit we can mmap the data file in one go. No need for tiny mmap
 * multiple times. On 32bit we map 32M windows and cycle through up to
 * NUM_MMAPS of them.
 */
#if BITS_PER_LONG == 64
#define MMAP_SIZE ULLONG_MAX
#define NUM_MMAPS 1
#else
#define MMAP_SIZE (32 * 1024 * 1024ULL)
#define NUM_MMAPS 128
#endif

struct reader;

typedef s64 (*reader_cb_t)(struct perf_session *session,
			   union perf_event *event,
			   u64 file_offset);

struct reader {
	int		 fd;
	u64		 data_size;
	u64		 data_offset;
	reader_cb_t	 process;
	bool		 in_place_update;
};
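
/*
 * A minimal sketch of how a reader is set up (it mirrors
 * __perf_session__process_events() below; the field values are the only
 * configuration a caller provides):
 *
 *	struct reader rd = {
 *		.fd		 = perf_data__fd(session->data),
 *		.data_size	 = session->header.data_size,
 *		.data_offset	 = session->header.data_offset,
 *		.process	 = process_simple,
 *		.in_place_update = false,
 *	};
 *	err = reader__process_events(&rd, session, &prog);
 */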

static int
reader__process_events(struct reader *rd, struct perf_session *session,
		       struct ui_progress *prog)
{
	u64 data_size = rd->data_size;
	u64 head, page_offset, file_offset, file_pos, size;
	int err = 0, mmap_prot, mmap_flags, map_idx = 0;
	size_t mmap_size;
	char *buf, *mmaps[NUM_MMAPS];
	union perf_event *event;
	s64 skip;

	page_offset = page_size * (rd->data_offset / page_size);
	file_offset = page_offset;
	head = rd->data_offset - page_offset;

	ui_progress__init_size(prog, data_size, "Processing events...");

	data_size += rd->data_offset;

	mmap_size = MMAP_SIZE;
	if (mmap_size > data_size) {
		mmap_size = data_size;
		session->one_mmap = true;
	}

	memset(mmaps, 0, sizeof(mmaps));

	mmap_prot  = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (rd->in_place_update) {
		mmap_prot |= PROT_WRITE;
	} else if (session->header.needs_swap) {
		mmap_prot |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, rd->fd,
		   file_offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out;
	}
	mmaps[map_idx] = buf;
	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
	file_pos = file_offset + head;
	if (session->one_mmap) {
		session->one_mmap_addr = buf;
		session->one_mmap_offset = file_offset;
	}

more:
	event = fetch_mmaped_event(head, mmap_size, buf, session->header.needs_swap);
	if (IS_ERR(event))
		return PTR_ERR(event);

	if (!event) {
		/* The event straddles the window: unmap and remap further into the file. */
		if (mmaps[map_idx]) {
			munmap(mmaps[map_idx], mmap_size);
			mmaps[map_idx] = NULL;
		}

		page_offset = page_size * (head / page_size);
		file_offset += page_offset;
		head -= page_offset;
		goto remap;
	}

	size = event->header.size;

	skip = -EINVAL;

	if (size < sizeof(struct perf_event_header) ||
	    (skip = rd->process(session, event, file_pos)) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d [%s]\n",
		       file_offset + head, event->header.size,
		       event->header.type, strerror(-skip));
		err = skip;
		goto out;
	}

	if (skip)
		size += skip;

	head += size;
	file_pos += size;

	err = __perf_session__process_decomp_events(session);
	if (err)
		goto out;

	ui_progress__update(prog, size);

	if (session_done())
		goto out;

	if (file_pos < data_size)
		goto more;

out:
	return err;
}

static s64 process_simple(struct perf_session *session,
			  union perf_event *event,
			  u64 file_offset)
{
	return perf_session__process_event(session, event, file_offset);
}

static int __perf_session__process_events(struct perf_session *session)
{
	struct reader rd = {
		.fd		 = perf_data__fd(session->data),
		.data_size	 = session->header.data_size,
		.data_offset	 = session->header.data_offset,
		.process	 = process_simple,
		.in_place_update = session->data->in_place_update,
	};
	struct ordered_events *oe = &session->ordered_events;
	struct perf_tool *tool = session->tool;
	struct ui_progress prog;
	int err;

	perf_tool__fill_defaults(tool);

	if (rd.data_size == 0)
		return -1;

	ui_progress__init_size(&prog, rd.data_size, "Processing events...");

	err = reader__process_events(&rd, session, &prog);
	if (err)
		goto out_err;

	/* do the final flush for ordered samples */
	err = ordered_events__flush(oe, OE_FLUSH__FINAL);
	if (err)
		goto out_err;
	err = auxtrace__flush_events(session, tool);
	if (err)
		goto out_err;
	err = perf_session__flush_thread_stacks(session);
out_err:
	ui_progress__finish();
	if (!tool->no_warn)
		perf_session__warn_about_errors(session);
	/*
	 * We may be switching perf.data output, make ordered_events
	 * reusable.
	 */
	ordered_events__reinit(&session->ordered_events);
	auxtrace__free_events(session);
	session->one_mmap = false;
	return err;
}

int perf_session__process_events(struct perf_session *session)
{
	if (perf_session__register_idle_thread(session) < 0)
		return -ENOMEM;

	if (perf_data__is_pipe(session->data))
		return __perf_session__process_pipe_events(session);

	return __perf_session__process_events(session);
}
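
/*
 * Typical call sequence, as seen in the perf builtins (a sketch; error
 * handling is omitted and the perf_session__new() argument list varies
 * between versions, hence the "..."):
 *
 *	struct perf_session *session = perf_session__new(&data, ..., &tool);
 *
 *	if (!IS_ERR(session)) {
 *		err = perf_session__process_events(session);
 *		perf_session__delete(session);
 *	}
 */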

bool perf_session__has_traces(struct perf_session *session, const char *msg)
{
	struct evsel *evsel;

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT)
			return true;
	}

	pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
	return false;
}

int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name, u64 addr)
{
	char *bracket;
	struct ref_reloc_sym *ref;
	struct kmap *kmap;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	kmap = map__kmap(map);
	if (kmap)
		kmap->ref_reloc_sym = ref;

	return 0;
}

size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
{
	return machines__fprintf_dsos(&session->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
					  bool (skip)(struct dso *dso, int parm), int parm)
{
	return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
}

size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp,
				       bool skip_empty)
{
	size_t ret;
	const char *msg = "";

	if (perf_header__has_feat(&session->header, HEADER_AUXTRACE))
		msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)";

	ret = fprintf(fp, "\nAggregated stats:%s\n", msg);

	ret += events_stats__fprintf(&session->evlist->stats, fp, skip_empty);
	return ret;
}

size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
{
	/*
	 * FIXME: Here we have to actually print all the machines in this
	 * session, not just the host...
	 */
	return machine__fprintf(&session->machines.host, fp);
}

struct evsel *perf_session__find_first_evtype(struct perf_session *session,
					      unsigned int type)
{
	struct evsel *pos;

	evlist__for_each_entry(session->evlist, pos) {
		if (pos->core.attr.type == type)
			return pos;
	}
	return NULL;
}

int perf_session__cpu_bitmap(struct perf_session *session,
			     const char *cpu_list, unsigned long *cpu_bitmap)
{
	int i, err = -1;
	struct perf_cpu_map *map;
	int nr_cpus = min(session->header.env.nr_cpus_avail, MAX_NR_CPUS);

	for (i = 0; i < PERF_TYPE_MAX; ++i) {
		struct evsel *evsel;

		evsel = perf_session__find_first_evtype(session, i);
		if (!evsel)
			continue;

		if (!(evsel->core.attr.sample_type & PERF_SAMPLE_CPU)) {
			pr_err("File does not contain CPU events. "
			       "Remove -C option to proceed.\n");
			return -1;
		}
	}

	map = perf_cpu_map__new(cpu_list);
	if (map == NULL) {
		pr_err("Invalid cpu_list\n");
		return -1;
	}

	for (i = 0; i < map->nr; i++) {
		int cpu = map->map[i];

		if (cpu >= nr_cpus) {
			pr_err("Requested CPU %d too large. "
			       "Consider raising MAX_NR_CPUS\n", cpu);
			goto out_delete_map;
		}

		set_bit(cpu, cpu_bitmap);
	}

	err = 0;

out_delete_map:
	perf_cpu_map__put(map);
	return err;
}
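
/*
 * Example usage (a sketch): restricting processing to CPUs 0-3, as a -C
 * cpu-list option does. The bitmap must be able to hold MAX_NR_CPUS bits:
 *
 *	unsigned long cpu_bitmap[BITS_TO_LONGS(MAX_NR_CPUS)] = { 0 };
 *
 *	if (perf_session__cpu_bitmap(session, "0-3", cpu_bitmap) < 0)
 *		return -1;
 *
 * Afterwards test_bit(cpu, cpu_bitmap) tells whether a sample's CPU is of
 * interest.
 */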

void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
				bool full)
{
	if (session == NULL || fp == NULL)
		return;

	fprintf(fp, "# ========\n");
	perf_header__fprintf_info(session, fp, full);
	fprintf(fp, "# ========\n#\n");
}

int perf_event__process_id_index(struct perf_session *session,
				 union perf_event *event)
{
	struct evlist *evlist = session->evlist;
	struct perf_record_id_index *ie = &event->id_index;
	size_t i, nr, max_nr;

	max_nr = (ie->header.size - sizeof(struct perf_record_id_index)) /
		 sizeof(struct id_index_entry);
	nr = ie->nr;
	if (nr > max_nr)
		return -EINVAL;

	if (dump_trace)
		fprintf(stdout, " nr: %zu\n", nr);

	for (i = 0; i < nr; i++) {
		struct id_index_entry *e = &ie->entries[i];
		struct perf_sample_id *sid;

		if (dump_trace) {
			fprintf(stdout, " ... id: %"PRI_lu64, e->id);
			fprintf(stdout, "  idx: %"PRI_lu64, e->idx);
			fprintf(stdout, "  cpu: %"PRI_ld64, e->cpu);
			fprintf(stdout, "  tid: %"PRI_ld64"\n", e->tid);
		}

		sid = evlist__id2sid(evlist, e->id);
		if (!sid)
			return -ENOENT;
		sid->idx = e->idx;
		sid->cpu = e->cpu;
		sid->tid = e->tid;
	}
	return 0;
}
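
/*
 * The id index exists so that code which only has a sample id, notably the
 * AUX area (e.g. instruction trace) decoders, can recover the mmap buffer
 * index, CPU and tid the id was recorded with, via the perf_sample_id
 * fields filled in above.
 */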