// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <api/fs/fs.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <perf/cpumap.h>

#include "map_symbol.h"
#include "branch.h"
#include "debug.h"
#include "evlist.h"
#include "evsel.h"
#include "memswap.h"
#include "map.h"
#include "symbol.h"
#include "session.h"
#include "tool.h"
#include "perf_regs.h"
#include "asm/bug.h"
#include "auxtrace.h"
#include "thread.h"
#include "thread-stack.h"
#include "sample-raw.h"
#include "stat.h"
#include "ui/progress.h"
#include "../perf.h"
#include "arch/common.h"
#include <internal/lib.h>

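/*
 * PERF_RECORD_COMPRESSED payloads are decompressed into anonymous mmap'ed
 * memory and chained on session->decomp so the decompressed records can be
 * processed later; any remainder of the previous chunk is carried over.
 */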
#ifdef HAVE_ZSTD_SUPPORT
static int perf_session__process_compressed_event(struct perf_session *session,
						  union perf_event *event, u64 file_offset)
{
	void *src;
	size_t decomp_size, src_size;
	u64 decomp_last_rem = 0;
	size_t mmap_len, decomp_len = session->header.env.comp_mmap_len;
	struct decomp *decomp, *decomp_last = session->decomp_last;

	if (decomp_last) {
		decomp_last_rem = decomp_last->size - decomp_last->head;
		decomp_len += decomp_last_rem;
	}

	mmap_len = sizeof(struct decomp) + decomp_len;
	decomp = mmap(NULL, mmap_len, PROT_READ|PROT_WRITE,
		      MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
	if (decomp == MAP_FAILED) {
		pr_err("Couldn't allocate memory for decompression\n");
		return -1;
	}

	decomp->file_pos = file_offset;
	decomp->mmap_len = mmap_len;
	decomp->head = 0;

	if (decomp_last_rem) {
		memcpy(decomp->data, &(decomp_last->data[decomp_last->head]), decomp_last_rem);
		decomp->size = decomp_last_rem;
	}

	src = (void *)event + sizeof(struct perf_record_compressed);
	src_size = event->pack.header.size - sizeof(struct perf_record_compressed);

	decomp_size = zstd_decompress_stream(&(session->zstd_data), src, src_size,
				&(decomp->data[decomp_last_rem]), decomp_len - decomp_last_rem);
	if (!decomp_size) {
		munmap(decomp, mmap_len);
		pr_err("Couldn't decompress data\n");
		return -1;
	}

	decomp->size += decomp_size;

	if (session->decomp == NULL) {
		session->decomp = decomp;
		session->decomp_last = decomp;
	} else {
		session->decomp_last->next = decomp;
		session->decomp_last = decomp;
	}

	pr_debug("decomp (B): %zd to %zd\n", src_size, decomp_size);

	return 0;
}
#else /* !HAVE_ZSTD_SUPPORT */
#define perf_session__process_compressed_event perf_session__process_compressed_event_stub
#endif

static int perf_session__deliver_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_tool *tool,
				       u64 file_offset);

static int perf_session__open(struct perf_session *session)
{
	struct perf_data *data = session->data;

	if (perf_session__read_header(session) < 0) {
		pr_err("incompatible file format (rerun with -v to learn more)\n");
		return -1;
	}

	if (perf_data__is_pipe(data))
		return 0;

	if (perf_header__has_feat(&session->header, HEADER_STAT))
		return 0;

	if (!evlist__valid_sample_type(session->evlist)) {
		pr_err("non matching sample_type\n");
		return -1;
	}

	if (!evlist__valid_sample_id_all(session->evlist)) {
		pr_err("non matching sample_id_all\n");
		return -1;
	}

	if (!perf_evlist__valid_read_format(session->evlist)) {
		pr_err("non matching read_format\n");
		return -1;
	}

	return 0;
}

void perf_session__set_id_hdr_size(struct perf_session *session)
{
	u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);

	machines__set_id_hdr_size(&session->machines, id_hdr_size);
}

int perf_session__create_kernel_maps(struct perf_session *session)
{
	int ret = machine__create_kernel_maps(&session->machines.host);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&session->machines);
	return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *session)
{
	machines__destroy_kernel_maps(&session->machines);
}

static bool perf_session__has_comm_exec(struct perf_session *session)
{
	struct evsel *evsel;

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->core.attr.comm_exec)
			return true;
	}

	return false;
}

static void perf_session__set_comm_exec(struct perf_session *session)
{
	bool comm_exec = perf_session__has_comm_exec(session);

	machines__set_comm_exec(&session->machines, comm_exec);
}

static int ordered_events__deliver_event(struct ordered_events *oe,
					 struct ordered_event *event)
{
	struct perf_session *session = container_of(oe, struct perf_session,
						    ordered_events);

	return perf_session__deliver_event(session, event->event,
					   session->tool, event->file_offset);
}

struct perf_session *perf_session__new(struct perf_data *data,
				       bool repipe, struct perf_tool *tool)
{
	int ret = -ENOMEM;
	struct perf_session *session = zalloc(sizeof(*session));

	if (!session)
		goto out;

	session->repipe = repipe;
	session->tool = tool;
	INIT_LIST_HEAD(&session->auxtrace_index);
	machines__init(&session->machines);
	ordered_events__init(&session->ordered_events,
			     ordered_events__deliver_event, NULL);

	perf_env__init(&session->header.env);
	if (data) {
		ret = perf_data__open(data);
		if (ret < 0)
			goto out_delete;

		session->data = data;

		if (perf_data__is_read(data)) {
			ret = perf_session__open(session);
			if (ret < 0)
				goto out_delete;

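			/*
			 * Set session attributes that are present in perf.data
			 * but not in pipe mode.
			 */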
			if (!data->is_pipe) {
				perf_session__set_id_hdr_size(session);
				perf_session__set_comm_exec(session);
			}

			perf_evlist__init_trace_event_sample_raw(session->evlist);

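			/* Open the directory data. */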
			if (data->is_dir) {
				ret = perf_data__open_dir(data);
				if (ret)
					goto out_delete;
			}

			if (!symbol_conf.kallsyms_name &&
			    !symbol_conf.vmlinux_name)
				symbol_conf.kallsyms_name = perf_data__kallsyms_name(data);
		}
	} else {
		session->machines.host.env = &perf_env;
	}

	session->machines.host.single_address_space =
		perf_env__single_address_space(session->machines.host.env);

	if (!data || perf_data__is_write(data)) {
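		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in perf_event__process_mmap().
		 */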
		if (perf_session__create_kernel_maps(session) < 0)
			pr_warning("Cannot read kernel map\n");
	}

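	/*
	 * In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is
	 * processed, so evlist__sample_id_all is not meaningful here.
	 */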
	if ((!data || !data->is_pipe) && tool && tool->ordering_requires_timestamps &&
	    tool->ordered_events && !evlist__sample_id_all(session->evlist)) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		tool->ordered_events = false;
	}

	return session;

 out_delete:
	perf_session__delete(session);
 out:
	return ERR_PTR(ret);
}

static void perf_session__delete_threads(struct perf_session *session)
{
	machine__delete_threads(&session->machines.host);
}

static void perf_session__release_decomp_events(struct perf_session *session)
{
	struct decomp *next, *decomp;
	size_t mmap_len;

	next = session->decomp;
	do {
		decomp = next;
		if (decomp == NULL)
			break;
		next = decomp->next;
		mmap_len = decomp->mmap_len;
		munmap(decomp, mmap_len);
	} while (1);
}

void perf_session__delete(struct perf_session *session)
{
	if (session == NULL)
		return;
	auxtrace__free(session);
	auxtrace_index__free(&session->auxtrace_index);
	perf_session__destroy_kernel_maps(session);
	perf_session__delete_threads(session);
	perf_session__release_decomp_events(session);
	perf_env__exit(&session->header.env);
	machines__exit(&session->machines);
	if (session->data)
		perf_data__close(session->data);
	free(session);
}

static int process_event_synth_tracing_data_stub(struct perf_session *session
						 __maybe_unused,
						 union perf_event *event
						 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
					 union perf_event *event __maybe_unused,
					 struct evlist **pevlist
					 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_event_update_stub(struct perf_tool *tool __maybe_unused,
						 union perf_event *event __maybe_unused,
						 struct evlist **pevlist
						 __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_event_update(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
				     union perf_event *event __maybe_unused,
				     struct perf_sample *sample __maybe_unused,
				     struct evsel *evsel __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_stub(struct perf_tool *tool __maybe_unused,
			      union perf_event *event __maybe_unused,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
				       union perf_event *event __maybe_unused,
				       struct ordered_events *oe __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event,
				  struct ordered_events *oe);

static int skipn(int fd, off_t n)
{
	char buf[4096];
	ssize_t ret;

	while (n > 0) {
		ret = read(fd, buf, min(n, (off_t)sizeof(buf)));
		if (ret <= 0)
			return ret;
		n -= ret;
	}

	return 0;
}

static s64 process_event_auxtrace_stub(struct perf_session *session,
				       union perf_event *event)
{
	dump_printf(": unhandled!\n");
	if (perf_data__is_pipe(session->data))
		skipn(perf_data__fd(session->data), event->auxtrace.size);
	return event->auxtrace.size;
}

static int process_event_op2_stub(struct perf_session *session __maybe_unused,
				  union perf_event *event __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static
int process_event_thread_map_stub(struct perf_session *session __maybe_unused,
				  union perf_event *event __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_thread_map(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static
int process_event_cpu_map_stub(struct perf_session *session __maybe_unused,
			       union perf_event *event __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_cpu_map(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static
int process_event_stat_config_stub(struct perf_session *session __maybe_unused,
				   union perf_event *event __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_stat_config(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int process_stat_stub(struct perf_session *perf_session __maybe_unused,
			     union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_stat(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int process_stat_round_stub(struct perf_session *perf_session __maybe_unused,
				   union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_stat_round(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int perf_session__process_compressed_event_stub(struct perf_session *session __maybe_unused,
						       union perf_event *event __maybe_unused,
						       u64 file_offset __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

void perf_tool__fill_defaults(struct perf_tool *tool)
{
	if (tool->sample == NULL)
		tool->sample = process_event_sample_stub;
	if (tool->mmap == NULL)
		tool->mmap = process_event_stub;
	if (tool->mmap2 == NULL)
		tool->mmap2 = process_event_stub;
	if (tool->comm == NULL)
		tool->comm = process_event_stub;
	if (tool->namespaces == NULL)
		tool->namespaces = process_event_stub;
	if (tool->cgroup == NULL)
		tool->cgroup = process_event_stub;
	if (tool->fork == NULL)
		tool->fork = process_event_stub;
	if (tool->exit == NULL)
		tool->exit = process_event_stub;
	if (tool->lost == NULL)
		tool->lost = perf_event__process_lost;
	if (tool->lost_samples == NULL)
		tool->lost_samples = perf_event__process_lost_samples;
	if (tool->aux == NULL)
		tool->aux = perf_event__process_aux;
	if (tool->itrace_start == NULL)
		tool->itrace_start = perf_event__process_itrace_start;
	if (tool->context_switch == NULL)
		tool->context_switch = perf_event__process_switch;
	if (tool->ksymbol == NULL)
		tool->ksymbol = perf_event__process_ksymbol;
	if (tool->bpf == NULL)
		tool->bpf = perf_event__process_bpf;
	if (tool->text_poke == NULL)
		tool->text_poke = perf_event__process_text_poke;
	if (tool->read == NULL)
		tool->read = process_event_sample_stub;
	if (tool->throttle == NULL)
		tool->throttle = process_event_stub;
	if (tool->unthrottle == NULL)
		tool->unthrottle = process_event_stub;
	if (tool->attr == NULL)
		tool->attr = process_event_synth_attr_stub;
	if (tool->event_update == NULL)
		tool->event_update = process_event_synth_event_update_stub;
	if (tool->tracing_data == NULL)
		tool->tracing_data = process_event_synth_tracing_data_stub;
	if (tool->build_id == NULL)
		tool->build_id = process_event_op2_stub;
	if (tool->finished_round == NULL) {
		if (tool->ordered_events)
			tool->finished_round = process_finished_round;
		else
			tool->finished_round = process_finished_round_stub;
	}
	if (tool->id_index == NULL)
		tool->id_index = process_event_op2_stub;
	if (tool->auxtrace_info == NULL)
		tool->auxtrace_info = process_event_op2_stub;
	if (tool->auxtrace == NULL)
		tool->auxtrace = process_event_auxtrace_stub;
	if (tool->auxtrace_error == NULL)
		tool->auxtrace_error = process_event_op2_stub;
	if (tool->thread_map == NULL)
		tool->thread_map = process_event_thread_map_stub;
	if (tool->cpu_map == NULL)
		tool->cpu_map = process_event_cpu_map_stub;
	if (tool->stat_config == NULL)
		tool->stat_config = process_event_stat_config_stub;
	if (tool->stat == NULL)
		tool->stat = process_stat_stub;
	if (tool->stat_round == NULL)
		tool->stat_round = process_stat_round_stub;
	if (tool->time_conv == NULL)
		tool->time_conv = process_event_op2_stub;
	if (tool->feature == NULL)
		tool->feature = process_event_op2_stub;
	if (tool->compressed == NULL)
		tool->compressed = perf_session__process_compressed_event;
}

static void swap_sample_id_all(union perf_event *event, void *data)
{
	void *end = (void *) event + event->header.size;
	int size = end - data;

	BUG_ON(size % sizeof(u64));
	mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
				   bool sample_id_all __maybe_unused)
{
	struct perf_event_header *hdr = &event->header;

	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
	event->comm.pid = bswap_32(event->comm.pid);
	event->comm.tid = bswap_32(event->comm.tid);

	if (sample_id_all) {
		void *data = &event->comm.comm;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap_swap(union perf_event *event,
				  bool sample_id_all)
{
	event->mmap.pid = bswap_32(event->mmap.pid);
	event->mmap.tid = bswap_32(event->mmap.tid);
	event->mmap.start = bswap_64(event->mmap.start);
	event->mmap.len = bswap_64(event->mmap.len);
	event->mmap.pgoff = bswap_64(event->mmap.pgoff);

	if (sample_id_all) {
		void *data = &event->mmap.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap2_swap(union perf_event *event,
				   bool sample_id_all)
{
	event->mmap2.pid = bswap_32(event->mmap2.pid);
	event->mmap2.tid = bswap_32(event->mmap2.tid);
	event->mmap2.start = bswap_64(event->mmap2.start);
	event->mmap2.len = bswap_64(event->mmap2.len);
	event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
	event->mmap2.maj = bswap_32(event->mmap2.maj);
	event->mmap2.min = bswap_32(event->mmap2.min);
	event->mmap2.ino = bswap_64(event->mmap2.ino);
	event->mmap2.ino_generation = bswap_64(event->mmap2.ino_generation);

	if (sample_id_all) {
		void *data = &event->mmap2.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
	event->fork.pid = bswap_32(event->fork.pid);
	event->fork.tid = bswap_32(event->fork.tid);
	event->fork.ppid = bswap_32(event->fork.ppid);
	event->fork.ptid = bswap_32(event->fork.ptid);
	event->fork.time = bswap_64(event->fork.time);

	if (sample_id_all)
		swap_sample_id_all(event, &event->fork + 1);
}

static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
	event->read.pid = bswap_32(event->read.pid);
	event->read.tid = bswap_32(event->read.tid);
	event->read.value = bswap_64(event->read.value);
	event->read.time_enabled = bswap_64(event->read.time_enabled);
	event->read.time_running = bswap_64(event->read.time_running);
	event->read.id = bswap_64(event->read.id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->read + 1);
}

static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
{
	event->aux.aux_offset = bswap_64(event->aux.aux_offset);
	event->aux.aux_size = bswap_64(event->aux.aux_size);
	event->aux.flags = bswap_64(event->aux.flags);

	if (sample_id_all)
		swap_sample_id_all(event, &event->aux + 1);
}

static void perf_event__itrace_start_swap(union perf_event *event,
					  bool sample_id_all)
{
	event->itrace_start.pid = bswap_32(event->itrace_start.pid);
	event->itrace_start.tid = bswap_32(event->itrace_start.tid);

	if (sample_id_all)
		swap_sample_id_all(event, &event->itrace_start + 1);
}

static void perf_event__switch_swap(union perf_event *event, bool sample_id_all)
{
	if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
		event->context_switch.next_prev_pid =
				bswap_32(event->context_switch.next_prev_pid);
		event->context_switch.next_prev_tid =
				bswap_32(event->context_switch.next_prev_tid);
	}

	if (sample_id_all)
		swap_sample_id_all(event, &event->context_switch + 1);
}

static void perf_event__text_poke_swap(union perf_event *event, bool sample_id_all)
{
	event->text_poke.addr = bswap_64(event->text_poke.addr);
	event->text_poke.old_len = bswap_16(event->text_poke.old_len);
	event->text_poke.new_len = bswap_16(event->text_poke.new_len);

	if (sample_id_all) {
		size_t len = sizeof(event->text_poke.old_len) +
			     sizeof(event->text_poke.new_len) +
			     event->text_poke.old_len +
			     event->text_poke.new_len;
		void *data = &event->text_poke.old_len;

		data += PERF_ALIGN(len, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__throttle_swap(union perf_event *event,
				      bool sample_id_all)
{
	event->throttle.time = bswap_64(event->throttle.time);
	event->throttle.id = bswap_64(event->throttle.id);
	event->throttle.stream_id = bswap_64(event->throttle.stream_id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->throttle + 1);
}

static void perf_event__namespaces_swap(union perf_event *event,
					bool sample_id_all)
{
	u64 i;

	event->namespaces.pid = bswap_32(event->namespaces.pid);
	event->namespaces.tid = bswap_32(event->namespaces.tid);
	event->namespaces.nr_namespaces = bswap_64(event->namespaces.nr_namespaces);

	for (i = 0; i < event->namespaces.nr_namespaces; i++) {
		struct perf_ns_link_info *ns = &event->namespaces.link_info[i];

		ns->dev = bswap_64(ns->dev);
		ns->ino = bswap_64(ns->ino);
	}

	if (sample_id_all)
		swap_sample_id_all(event, &event->namespaces.link_info[i]);
}

static void perf_event__cgroup_swap(union perf_event *event, bool sample_id_all)
{
	event->cgroup.id = bswap_64(event->cgroup.id);

	if (sample_id_all) {
		void *data = &event->cgroup.path;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static u8 revbyte(u8 b)
{
	int rev = (b >> 4) | ((b & 0xf) << 4);
	rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
	rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
	return (u8) rev;
}

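/*
 * XXX this is hack in attempt to carry flags bitfield
 * through endian village. ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above seems to be byte specific, so we need to reverse each
 * byte of the bitfield. 'Internet' also says this might be implementation
 * specific and we probably need proper fix and carry perf_event_attr
 * bitfield flags in separate data file FEAT_ section. Thought this seems
 * to work for now.
 */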
static void swap_bitfield(u8 *p, unsigned len)
{
	unsigned i;

	for (i = 0; i < len; i++) {
		*p = revbyte(*p);
		p++;
	}
}

void perf_event__attr_swap(struct perf_event_attr *attr)
{
	attr->type = bswap_32(attr->type);
	attr->size = bswap_32(attr->size);

#define bswap_safe(f, n)					\
	(attr->size > (offsetof(struct perf_event_attr, f) +	\
		       sizeof(attr->f) * (n)))
#define bswap_field(f, sz)			\
do {						\
	if (bswap_safe(f, 0))			\
		attr->f = bswap_##sz(attr->f);	\
} while(0)
#define bswap_field_16(f) bswap_field(f, 16)
#define bswap_field_32(f) bswap_field(f, 32)
#define bswap_field_64(f) bswap_field(f, 64)

	bswap_field_64(config);
	bswap_field_64(sample_period);
	bswap_field_64(sample_type);
	bswap_field_64(read_format);
	bswap_field_32(wakeup_events);
	bswap_field_32(bp_type);
	bswap_field_64(bp_addr);
	bswap_field_64(bp_len);
	bswap_field_64(branch_sample_type);
	bswap_field_64(sample_regs_user);
	bswap_field_32(sample_stack_user);
	bswap_field_32(aux_watermark);
	bswap_field_16(sample_max_stack);
	bswap_field_32(aux_sample_size);

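	/*
	 * After read_format are bitfields. Check read_format because
	 * we are unable to use the offset of sample_type as its
	 * location is at the end of bitfields.
	 */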
	if (bswap_safe(read_format, 1))
		swap_bitfield((u8 *) (&attr->read_format + 1),
			      sizeof(u64));
#undef bswap_field_64
#undef bswap_field_32
#undef bswap_field_16
#undef bswap_field
#undef bswap_safe
}

static void perf_event__hdr_attr_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	size_t size;

	perf_event__attr_swap(&event->attr.attr);

	size = event->header.size;
	size -= (void *)&event->attr.id - (void *)event;
	mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_update_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->event_update.type = bswap_64(event->event_update.type);
	event->event_update.id = bswap_64(event->event_update.id);
}

static void perf_event__event_type_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->event_type.event_type.event_id =
		bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->tracing_data.size = bswap_32(event->tracing_data.size);
}

static void perf_event__auxtrace_info_swap(union perf_event *event,
					   bool sample_id_all __maybe_unused)
{
	size_t size;

	event->auxtrace_info.type = bswap_32(event->auxtrace_info.type);

	size = event->header.size;
	size -= (void *)&event->auxtrace_info.priv - (void *)event;
	mem_bswap_64(event->auxtrace_info.priv, size);
}

static void perf_event__auxtrace_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	event->auxtrace.size = bswap_64(event->auxtrace.size);
	event->auxtrace.offset = bswap_64(event->auxtrace.offset);
	event->auxtrace.reference = bswap_64(event->auxtrace.reference);
	event->auxtrace.idx = bswap_32(event->auxtrace.idx);
	event->auxtrace.tid = bswap_32(event->auxtrace.tid);
	event->auxtrace.cpu = bswap_32(event->auxtrace.cpu);
}

static void perf_event__auxtrace_error_swap(union perf_event *event,
					    bool sample_id_all __maybe_unused)
{
	event->auxtrace_error.type = bswap_32(event->auxtrace_error.type);
	event->auxtrace_error.code = bswap_32(event->auxtrace_error.code);
	event->auxtrace_error.cpu = bswap_32(event->auxtrace_error.cpu);
	event->auxtrace_error.pid = bswap_32(event->auxtrace_error.pid);
	event->auxtrace_error.tid = bswap_32(event->auxtrace_error.tid);
	event->auxtrace_error.fmt = bswap_32(event->auxtrace_error.fmt);
	event->auxtrace_error.ip = bswap_64(event->auxtrace_error.ip);
	if (event->auxtrace_error.fmt)
		event->auxtrace_error.time = bswap_64(event->auxtrace_error.time);
}

static void perf_event__thread_map_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	unsigned i;

	event->thread_map.nr = bswap_64(event->thread_map.nr);

	for (i = 0; i < event->thread_map.nr; i++)
		event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid);
}

static void perf_event__cpu_map_swap(union perf_event *event,
				     bool sample_id_all __maybe_unused)
{
	struct perf_record_cpu_map_data *data = &event->cpu_map.data;
	struct cpu_map_entries *cpus;
	struct perf_record_record_cpu_map *mask;
	unsigned i;

	/* The type field is only 16 bits wide, so swap it as such. */
	data->type = bswap_16(data->type);

	switch (data->type) {
	case PERF_CPU_MAP__CPUS:
		cpus = (struct cpu_map_entries *)data->data;

		cpus->nr = bswap_16(cpus->nr);

		for (i = 0; i < cpus->nr; i++)
			cpus->cpu[i] = bswap_16(cpus->cpu[i]);
		break;
	case PERF_CPU_MAP__MASK:
		mask = (struct perf_record_record_cpu_map *)data->data;

		mask->nr = bswap_16(mask->nr);
		mask->long_size = bswap_16(mask->long_size);

		switch (mask->long_size) {
		case 4: mem_bswap_32(&mask->mask, mask->nr); break;
		case 8: mem_bswap_64(&mask->mask, mask->nr); break;
		default:
			pr_err("cpu_map swap: unsupported long size\n");
		}
		break;
	default:
		break;
	}
}

static void perf_event__stat_config_swap(union perf_event *event,
					 bool sample_id_all __maybe_unused)
{
	u64 size;

	size = event->stat_config.nr * sizeof(event->stat_config.data[0]);
	size += 1; /* nr item itself */
	mem_bswap_64(&event->stat_config.nr, size);
}

static void perf_event__stat_swap(union perf_event *event,
				  bool sample_id_all __maybe_unused)
{
	event->stat.id = bswap_64(event->stat.id);
	event->stat.thread = bswap_32(event->stat.thread);
	event->stat.cpu = bswap_32(event->stat.cpu);
	event->stat.val = bswap_64(event->stat.val);
	event->stat.ena = bswap_64(event->stat.ena);
	event->stat.run = bswap_64(event->stat.run);
}

static void perf_event__stat_round_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->stat_round.type = bswap_64(event->stat_round.type);
	event->stat_round.time = bswap_64(event->stat_round.time);
}

typedef void (*perf_event__swap_op)(union perf_event *event,
				    bool sample_id_all);

static perf_event__swap_op perf_event__swap_ops[] = {
	[PERF_RECORD_MMAP]		  = perf_event__mmap_swap,
	[PERF_RECORD_MMAP2]		  = perf_event__mmap2_swap,
	[PERF_RECORD_COMM]		  = perf_event__comm_swap,
	[PERF_RECORD_FORK]		  = perf_event__task_swap,
	[PERF_RECORD_EXIT]		  = perf_event__task_swap,
	[PERF_RECORD_LOST]		  = perf_event__all64_swap,
	[PERF_RECORD_READ]		  = perf_event__read_swap,
	[PERF_RECORD_THROTTLE]		  = perf_event__throttle_swap,
	[PERF_RECORD_UNTHROTTLE]	  = perf_event__throttle_swap,
	[PERF_RECORD_SAMPLE]		  = perf_event__all64_swap,
	[PERF_RECORD_AUX]		  = perf_event__aux_swap,
	[PERF_RECORD_ITRACE_START]	  = perf_event__itrace_start_swap,
	[PERF_RECORD_LOST_SAMPLES]	  = perf_event__all64_swap,
	[PERF_RECORD_SWITCH]		  = perf_event__switch_swap,
	[PERF_RECORD_SWITCH_CPU_WIDE]	  = perf_event__switch_swap,
	[PERF_RECORD_NAMESPACES]	  = perf_event__namespaces_swap,
	[PERF_RECORD_CGROUP]		  = perf_event__cgroup_swap,
	[PERF_RECORD_TEXT_POKE]		  = perf_event__text_poke_swap,
	[PERF_RECORD_HEADER_ATTR]	  = perf_event__hdr_attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE]	  = perf_event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID]	  = NULL,
	[PERF_RECORD_ID_INDEX]		  = perf_event__all64_swap,
	[PERF_RECORD_AUXTRACE_INFO]	  = perf_event__auxtrace_info_swap,
	[PERF_RECORD_AUXTRACE]		  = perf_event__auxtrace_swap,
	[PERF_RECORD_AUXTRACE_ERROR]	  = perf_event__auxtrace_error_swap,
	[PERF_RECORD_THREAD_MAP]	  = perf_event__thread_map_swap,
	[PERF_RECORD_CPU_MAP]		  = perf_event__cpu_map_swap,
	[PERF_RECORD_STAT_CONFIG]	  = perf_event__stat_config_swap,
	[PERF_RECORD_STAT]		  = perf_event__stat_swap,
	[PERF_RECORD_STAT_ROUND]	  = perf_event__stat_round_swap,
	[PERF_RECORD_EVENT_UPDATE]	  = perf_event__event_update_swap,
	[PERF_RECORD_TIME_CONV]		  = perf_event__all64_swap,
	[PERF_RECORD_HEADER_MAX]	  = NULL,
};
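/*
 * When perf record finishes a pass on every buffer, it records this pseudo
 * event.
 * We record the max timestamp t found in pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and then read in pass n + 1.
 * Hence when we start to read pass n + 2, we can safely flush every
 * event with a timestamp below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush every event below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush every event below timestamp 7
 *      etc...
 */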
static int process_finished_round(struct perf_tool *tool __maybe_unused,
				  union perf_event *event __maybe_unused,
				  struct ordered_events *oe)
{
	if (dump_trace)
		fprintf(stdout, "\n");
	return ordered_events__flush(oe, OE_FLUSH__ROUND);
}

int perf_session__queue_event(struct perf_session *s, union perf_event *event,
			      u64 timestamp, u64 file_offset)
{
	return ordered_events__queue(&s->ordered_events, event, timestamp, file_offset);
}

static void callchain__lbr_callstack_printf(struct perf_sample *sample)
{
	struct ip_callchain *callchain = sample->callchain;
	struct branch_stack *lbr_stack = sample->branch_stack;
	struct branch_entry *entries = perf_sample__branch_entries(sample);
	u64 kernel_callchain_nr = callchain->nr;
	unsigned int i;

	for (i = 0; i < kernel_callchain_nr; i++) {
		if (callchain->ips[i] == PERF_CONTEXT_USER)
			break;
	}

	if ((i != kernel_callchain_nr) && lbr_stack->nr) {
		u64 total_nr;

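		/*
		 * LBR callstack can only get user call chain,
		 * i is the kernel call chain number,
		 * 1 is PERF_CONTEXT_USER.
		 *
		 * The user call chain is stored in LBR registers.
		 * LBR are pair registers. The caller is stored
		 * in the "from" register, while the callee is stored
		 * in the "to" register.
		 * For example, there is a call stack
		 * "A"->"B"->"C"->"D".
		 * The LBR registers will be recorded like
		 * "C"->"D", "B"->"C", "A"->"B".
		 * So only the first "to" register and all "from"
		 * registers are needed to construct the whole stack.
		 */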
		total_nr = i + 1 + lbr_stack->nr + 1;
		kernel_callchain_nr = i + 1;

		printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr);

		for (i = 0; i < kernel_callchain_nr; i++)
			printf("..... %2d: %016" PRIx64 "\n",
			       i, callchain->ips[i]);

		printf("..... %2d: %016" PRIx64 "\n",
		       (int)(kernel_callchain_nr), entries[0].to);
		for (i = 0; i < lbr_stack->nr; i++)
			printf("..... %2d: %016" PRIx64 "\n",
			       (int)(i + kernel_callchain_nr + 1), entries[i].from);
	}
}

static void callchain__printf(struct evsel *evsel,
			      struct perf_sample *sample)
{
	unsigned int i;
	struct ip_callchain *callchain = sample->callchain;

	if (evsel__has_branch_callstack(evsel))
		callchain__lbr_callstack_printf(sample);

	printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);

	for (i = 0; i < callchain->nr; i++)
		printf("..... %2d: %016" PRIx64 "\n",
		       i, callchain->ips[i]);
}

static void branch_stack__printf(struct perf_sample *sample, bool callstack)
{
	struct branch_entry *entries = perf_sample__branch_entries(sample);
	uint64_t i;

	printf("%s: nr:%" PRIu64 "\n",
	       !callstack ? "... branch stack" : "... branch callstack",
	       sample->branch_stack->nr);

	for (i = 0; i < sample->branch_stack->nr; i++) {
		struct branch_entry *e = &entries[i];

		if (!callstack) {
			printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x\n",
			       i, e->from, e->to,
			       (unsigned short)e->flags.cycles,
			       e->flags.mispred ? "M" : " ",
			       e->flags.predicted ? "P" : " ",
			       e->flags.abort ? "A" : " ",
			       e->flags.in_tx ? "T" : " ",
			       (unsigned)e->flags.reserved);
		} else {
			printf("..... %2"PRIu64": %016" PRIx64 "\n",
			       i, i > 0 ? e->from : e->to);
		}
	}
}

static void regs_dump__printf(u64 mask, u64 *regs)
{
	unsigned rid, i = 0;

	for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
		u64 val = regs[i++];

		printf(".... %-5s 0x%016" PRIx64 "\n",
		       perf_reg_name(rid), val);
	}
}

static const char *regs_abi[] = {
	[PERF_SAMPLE_REGS_ABI_NONE] = "none",
	[PERF_SAMPLE_REGS_ABI_32] = "32-bit",
	[PERF_SAMPLE_REGS_ABI_64] = "64-bit",
};

static inline const char *regs_dump_abi(struct regs_dump *d)
{
	if (d->abi > PERF_SAMPLE_REGS_ABI_64)
		return "unknown";

	return regs_abi[d->abi];
}

static void regs__printf(const char *type, struct regs_dump *regs)
{
	u64 mask = regs->mask;

	printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n",
	       type,
	       mask,
	       regs_dump_abi(regs));

	regs_dump__printf(mask, regs->regs);
}

static void regs_user__printf(struct perf_sample *sample)
{
	struct regs_dump *user_regs = &sample->user_regs;

	if (user_regs->regs)
		regs__printf("user", user_regs);
}

static void regs_intr__printf(struct perf_sample *sample)
{
	struct regs_dump *intr_regs = &sample->intr_regs;

	if (intr_regs->regs)
		regs__printf("intr", intr_regs);
}

static void stack_user__printf(struct stack_dump *dump)
{
	printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
	       dump->size, dump->offset);
}

static void perf_evlist__print_tstamp(struct evlist *evlist,
				      union perf_event *event,
				      struct perf_sample *sample)
{
	u64 sample_type = __evlist__combined_sample_type(evlist);

	if (event->header.type != PERF_RECORD_SAMPLE &&
	    !evlist__sample_id_all(evlist)) {
		fputs("-1 -1 ", stdout);
		return;
	}

	if ((sample_type & PERF_SAMPLE_CPU))
		printf("%u ", sample->cpu);

	if (sample_type & PERF_SAMPLE_TIME)
		printf("%" PRIu64 " ", sample->time);
}

static void sample_read__printf(struct perf_sample *sample, u64 read_format)
{
	printf("... sample_read:\n");

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("...... time enabled %016" PRIx64 "\n",
		       sample->read.time_enabled);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("...... time running %016" PRIx64 "\n",
		       sample->read.time_running);

	if (read_format & PERF_FORMAT_GROUP) {
		u64 i;

		printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);

		for (i = 0; i < sample->read.group.nr; i++) {
			struct sample_read_value *value;

			value = &sample->read.group.values[i];
			printf("..... id %016" PRIx64
			       ", value %016" PRIx64 "\n",
			       value->id, value->value);
		}
	} else
		printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
		       sample->read.one.id, sample->read.one.value);
}

static void dump_event(struct evlist *evlist, union perf_event *event,
		       u64 file_offset, struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("\n%#" PRIx64 " [%#x]: event: %d\n",
	       file_offset, event->header.size, event->header.type);

	trace_event(event);
	if (event->header.type == PERF_RECORD_SAMPLE && evlist->trace_event_sample_raw)
		evlist->trace_event_sample_raw(evlist, event, sample);

	if (sample)
		perf_evlist__print_tstamp(evlist, event, sample);

	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
	       event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct evsel *evsel, union perf_event *event,
			struct perf_sample *sample)
{
	u64 sample_type;

	if (!dump_trace)
		return;

	printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
	       event->header.misc, sample->pid, sample->tid, sample->ip,
	       sample->period, sample->addr);

	sample_type = evsel->core.attr.sample_type;

	if (evsel__has_callchain(evsel))
		callchain__printf(evsel, sample);

	if (evsel__has_br_stack(evsel))
		branch_stack__printf(sample, evsel__has_branch_callstack(evsel));

	if (sample_type & PERF_SAMPLE_REGS_USER)
		regs_user__printf(sample);

	if (sample_type & PERF_SAMPLE_REGS_INTR)
		regs_intr__printf(sample);

	if (sample_type & PERF_SAMPLE_STACK_USER)
		stack_user__printf(&sample->user_stack);

	if (sample_type & PERF_SAMPLE_WEIGHT)
		printf("... weight: %" PRIu64 "\n", sample->weight);

	if (sample_type & PERF_SAMPLE_DATA_SRC)
		printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);

	if (sample_type & PERF_SAMPLE_PHYS_ADDR)
		printf(" .. phys_addr: 0x%"PRIx64"\n", sample->phys_addr);

	if (sample_type & PERF_SAMPLE_TRANSACTION)
		printf("... transaction: %" PRIx64 "\n", sample->transaction);

	if (sample_type & PERF_SAMPLE_READ)
		sample_read__printf(sample, evsel->core.attr.read_format);
}

static void dump_read(struct evsel *evsel, union perf_event *event)
{
	struct perf_record_read *read_event = &event->read;
	u64 read_format;

	if (!dump_trace)
		return;

	printf(": %d %d %s %" PRI_lu64 "\n", event->read.pid, event->read.tid,
	       evsel__name(evsel), event->read.value);

	if (!evsel)
		return;

	read_format = evsel->core.attr.read_format;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("... time enabled : %" PRI_lu64 "\n", read_event->time_enabled);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("... time running : %" PRI_lu64 "\n", read_event->time_running);

	if (read_format & PERF_FORMAT_ID)
		printf("... id : %" PRI_lu64 "\n", read_event->id);
}

static struct machine *machines__find_for_cpumode(struct machines *machines,
						  union perf_event *event,
						  struct perf_sample *sample)
{
	struct machine *machine;

	if (perf_guest &&
	    ((sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
	     (sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) {
		u32 pid;

		if (event->header.type == PERF_RECORD_MMAP
		    || event->header.type == PERF_RECORD_MMAP2)
			pid = event->mmap.pid;
		else
			pid = sample->pid;

		machine = machines__find(machines, pid);
		if (!machine)
			machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
		return machine;
	}

	return &machines->host;
}

static int deliver_sample_value(struct evlist *evlist,
				struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct sample_read_value *v,
				struct machine *machine)
{
	struct perf_sample_id *sid = perf_evlist__id2sid(evlist, v->id);
	struct evsel *evsel;

	if (sid) {
		sample->id = v->id;
		sample->period = v->value - sid->period;
		sid->period = v->value;
	}

	if (!sid || sid->evsel == NULL) {
		++evlist->stats.nr_unknown_id;
		return 0;
	}

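	/*
	 * There's no reason to deliver a sample
	 * with zero period, so bail out.
	 */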
	if (!sample->period)
		return 0;

	evsel = container_of(sid->evsel, struct evsel, core);
	return tool->sample(tool, event, sample, evsel, machine);
}

static int deliver_sample_group(struct evlist *evlist,
				struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct machine *machine)
{
	int ret = -EINVAL;
	u64 i;

	for (i = 0; i < sample->read.group.nr; i++) {
		ret = deliver_sample_value(evlist, tool, event, sample,
					   &sample->read.group.values[i],
					   machine);
		if (ret)
			break;
	}

	return ret;
}

static int
perf_evlist__deliver_sample(struct evlist *evlist,
			    struct perf_tool *tool,
			    union perf_event *event,
			    struct perf_sample *sample,
			    struct evsel *evsel,
			    struct machine *machine)
{
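	/* We know evsel != NULL. */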
	u64 sample_type = evsel->core.attr.sample_type;
	u64 read_format = evsel->core.attr.read_format;

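	/* Standard sample delivery. */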
	if (!(sample_type & PERF_SAMPLE_READ))
		return tool->sample(tool, event, sample, evsel, machine);

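	/* For PERF_SAMPLE_READ we have either single or group mode. */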
	if (read_format & PERF_FORMAT_GROUP)
		return deliver_sample_group(evlist, tool, event, sample,
					    machine);
	else
		return deliver_sample_value(evlist, tool, event, sample,
					    &sample->read.one, machine);
}

static int machines__deliver_event(struct machines *machines,
				   struct evlist *evlist,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct perf_tool *tool, u64 file_offset)
{
	struct evsel *evsel;
	struct machine *machine;

	dump_event(evlist, event, file_offset, sample);

	evsel = perf_evlist__id2evsel(evlist, sample->id);

	machine = machines__find_for_cpumode(machines, event, sample);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		if (evsel == NULL) {
			++evlist->stats.nr_unknown_id;
			return 0;
		}
		dump_sample(evsel, event, sample);
		if (machine == NULL) {
			++evlist->stats.nr_unprocessable_samples;
			return 0;
		}
		return perf_evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
	case PERF_RECORD_MMAP:
		return tool->mmap(tool, event, sample, machine);
	case PERF_RECORD_MMAP2:
		if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT)
			++evlist->stats.nr_proc_map_timeout;
		return tool->mmap2(tool, event, sample, machine);
	case PERF_RECORD_COMM:
		return tool->comm(tool, event, sample, machine);
	case PERF_RECORD_NAMESPACES:
		return tool->namespaces(tool, event, sample, machine);
	case PERF_RECORD_CGROUP:
		return tool->cgroup(tool, event, sample, machine);
	case PERF_RECORD_FORK:
		return tool->fork(tool, event, sample, machine);
	case PERF_RECORD_EXIT:
		return tool->exit(tool, event, sample, machine);
	case PERF_RECORD_LOST:
		if (tool->lost == perf_event__process_lost)
			evlist->stats.total_lost += event->lost.lost;
		return tool->lost(tool, event, sample, machine);
	case PERF_RECORD_LOST_SAMPLES:
		if (tool->lost_samples == perf_event__process_lost_samples)
			evlist->stats.total_lost_samples += event->lost_samples.lost;
		return tool->lost_samples(tool, event, sample, machine);
	case PERF_RECORD_READ:
		dump_read(evsel, event);
		return tool->read(tool, event, sample, evsel, machine);
	case PERF_RECORD_THROTTLE:
		return tool->throttle(tool, event, sample, machine);
	case PERF_RECORD_UNTHROTTLE:
		return tool->unthrottle(tool, event, sample, machine);
	case PERF_RECORD_AUX:
		if (tool->aux == perf_event__process_aux) {
			if (event->aux.flags & PERF_AUX_FLAG_TRUNCATED)
				evlist->stats.total_aux_lost += 1;
			if (event->aux.flags & PERF_AUX_FLAG_PARTIAL)
				evlist->stats.total_aux_partial += 1;
		}
		return tool->aux(tool, event, sample, machine);
	case PERF_RECORD_ITRACE_START:
		return tool->itrace_start(tool, event, sample, machine);
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		return tool->context_switch(tool, event, sample, machine);
	case PERF_RECORD_KSYMBOL:
		return tool->ksymbol(tool, event, sample, machine);
	case PERF_RECORD_BPF_EVENT:
		return tool->bpf(tool, event, sample, machine);
	case PERF_RECORD_TEXT_POKE:
		return tool->text_poke(tool, event, sample, machine);
	default:
		++evlist->stats.nr_unknown_events;
		return -1;
	}
}

static int perf_session__deliver_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_tool *tool,
				       u64 file_offset)
{
	struct perf_sample sample;
	int ret;

	ret = perf_evlist__parse_sample(session->evlist, event, &sample);
	if (ret) {
		pr_err("Can't parse sample, err = %d\n", ret);
		return ret;
	}

	ret = auxtrace__process_event(session, event, &sample, tool);
	if (ret < 0)
		return ret;
	if (ret > 0)
		return 0;

	ret = machines__deliver_event(&session->machines, session->evlist,
				      event, &sample, tool, file_offset);

	if (dump_trace && sample.aux_sample.size)
		auxtrace__dump_auxtrace_sample(session, &sample);

	return ret;
}

static s64 perf_session__process_user_event(struct perf_session *session,
					    union perf_event *event,
					    u64 file_offset)
{
	struct ordered_events *oe = &session->ordered_events;
	struct perf_tool *tool = session->tool;
	struct perf_sample sample = { .time = 0, };
	int fd = perf_data__fd(session->data);
	int err;

	if (event->header.type != PERF_RECORD_COMPRESSED ||
	    tool->compressed == perf_session__process_compressed_event_stub)
		dump_event(session->evlist, event, file_offset, &sample);

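	/* These events are processed right away */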
	switch (event->header.type) {
	case PERF_RECORD_HEADER_ATTR:
		err = tool->attr(tool, event, &session->evlist);
		if (err == 0) {
			perf_session__set_id_hdr_size(session);
			perf_session__set_comm_exec(session);
		}
		return err;
	case PERF_RECORD_EVENT_UPDATE:
		return tool->event_update(tool, event, &session->evlist);
	case PERF_RECORD_HEADER_EVENT_TYPE:
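		/*
		 * Deprecated, but we need to handle it for the sake
		 * of old data files created in pipe mode.
		 */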
		return 0;
	case PERF_RECORD_HEADER_TRACING_DATA:
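		/*
		 * Setup for reading amidst mmap, but only when we
		 * are in 'file' mode. The 'pipe' fd is in proper
		 * place already.
		 */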
		if (!perf_data__is_pipe(session->data))
			lseek(fd, file_offset, SEEK_SET);
		return tool->tracing_data(session, event);
	case PERF_RECORD_HEADER_BUILD_ID:
		return tool->build_id(session, event);
	case PERF_RECORD_FINISHED_ROUND:
		return tool->finished_round(tool, event, oe);
	case PERF_RECORD_ID_INDEX:
		return tool->id_index(session, event);
	case PERF_RECORD_AUXTRACE_INFO:
		return tool->auxtrace_info(session, event);
	case PERF_RECORD_AUXTRACE:
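		/* setup for reading amidst mmap */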
		lseek(fd, file_offset + event->header.size, SEEK_SET);
		return tool->auxtrace(session, event);
	case PERF_RECORD_AUXTRACE_ERROR:
		perf_session__auxtrace_error_inc(session, event);
		return tool->auxtrace_error(session, event);
	case PERF_RECORD_THREAD_MAP:
		return tool->thread_map(session, event);
	case PERF_RECORD_CPU_MAP:
		return tool->cpu_map(session, event);
	case PERF_RECORD_STAT_CONFIG:
		return tool->stat_config(session, event);
	case PERF_RECORD_STAT:
		return tool->stat(session, event);
	case PERF_RECORD_STAT_ROUND:
		return tool->stat_round(session, event);
	case PERF_RECORD_TIME_CONV:
		session->time_conv = event->time_conv;
		return tool->time_conv(session, event);
	case PERF_RECORD_HEADER_FEATURE:
		return tool->feature(session, event);
	case PERF_RECORD_COMPRESSED:
		err = tool->compressed(session, event, file_offset);
		if (err)
			dump_event(session->evlist, event, file_offset, &sample);
		return err;
	default:
		return -EINVAL;
	}
}

int perf_session__deliver_synth_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample)
{
	struct evlist *evlist = session->evlist;
	struct perf_tool *tool = session->tool;

	events_stats__inc(&evlist->stats, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, 0);

	return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0);
}

static void event_swap(union perf_event *event, bool sample_id_all)
{
	perf_event__swap_op swap;

	swap = perf_event__swap_ops[event->header.type];
	if (swap)
		swap(event, sample_id_all);
}

int perf_session__peek_event(struct perf_session *session, off_t file_offset,
			     void *buf, size_t buf_sz,
			     union perf_event **event_ptr,
			     struct perf_sample *sample)
{
	union perf_event *event;
	size_t hdr_sz, rest;
	int fd;

	if (session->one_mmap && !session->header.needs_swap) {
		event = file_offset - session->one_mmap_offset +
			session->one_mmap_addr;
		goto out_parse_sample;
	}

	if (perf_data__is_pipe(session->data))
		return -1;

	fd = perf_data__fd(session->data);
	hdr_sz = sizeof(struct perf_event_header);

	if (buf_sz < hdr_sz)
		return -1;

	if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
	    readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)
		return -1;

	event = (union perf_event *)buf;

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (event->header.size < hdr_sz || event->header.size > buf_sz)
		return -1;

	/* Read the payload just past the header already read above. */
	buf += hdr_sz;
	rest = event->header.size - hdr_sz;

	if (readn(fd, buf, rest) != (ssize_t)rest)
		return -1;

	if (session->header.needs_swap)
		event_swap(event, evlist__sample_id_all(session->evlist));

out_parse_sample:

	if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
	    perf_evlist__parse_sample(session->evlist, event, sample))
		return -1;

	*event_ptr = event;

	return 0;
}

int perf_session__peek_events(struct perf_session *session, u64 offset,
			      u64 size, peek_events_cb_t cb, void *data)
{
	u64 max_offset = offset + size;
	char buf[PERF_SAMPLE_MAX_SIZE];
	union perf_event *event;
	int err;

	do {
		err = perf_session__peek_event(session, offset, buf,
					       PERF_SAMPLE_MAX_SIZE, &event,
					       NULL);
		if (err)
			return err;

		err = cb(session, event, offset, data);
		if (err)
			return err;

		offset += event->header.size;
		if (event->header.type == PERF_RECORD_AUXTRACE)
			offset += event->auxtrace.size;

	} while (offset < max_offset);

	return err;
}

static s64 perf_session__process_event(struct perf_session *session,
				       union perf_event *event, u64 file_offset)
{
	struct evlist *evlist = session->evlist;
	struct perf_tool *tool = session->tool;
	int ret;

	if (session->header.needs_swap)
		event_swap(event, evlist__sample_id_all(evlist));

	if (event->header.type >= PERF_RECORD_HEADER_MAX)
		return -EINVAL;

	events_stats__inc(&evlist->stats, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, file_offset);

	if (tool->ordered_events) {
		u64 timestamp = -1ULL;

		ret = perf_evlist__parse_sample_timestamp(evlist, event, &timestamp);
		if (ret && ret != -1)
			return ret;

		ret = perf_session__queue_event(session, event, timestamp, file_offset);
		if (ret != -ETIME)
			return ret;
	}

	return perf_session__deliver_event(session, event, tool, file_offset);
}

void perf_event_header__bswap(struct perf_event_header *hdr)
{
	hdr->type = bswap_32(hdr->type);
	hdr->misc = bswap_16(hdr->misc);
	hdr->size = bswap_16(hdr->size);
}

struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
{
	return machine__findnew_thread(&session->machines.host, -1, pid);
}
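/*
 * Register the idle ("swapper") task, which is identified by
 * pid == tid == 0, so samples hitting it can be resolved.
 */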
int perf_session__register_idle_thread(struct perf_session *session)
{
	struct thread *thread;
	int err = 0;

	thread = machine__findnew_thread(&session->machines.host, 0, 0);
	if (thread == NULL || thread__set_comm(thread, "swapper", 0)) {
		pr_err("problem inserting idle task.\n");
		err = -1;
	}

	if (thread == NULL || thread__set_namespaces(thread, 0, NULL)) {
		pr_err("problem inserting idle task.\n");
		err = -1;
	}

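	/* machine__findnew_thread() got the thread, so put it */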
	thread__put(thread);
	return err;
}

static void
perf_session__warn_order(const struct perf_session *session)
{
	const struct ordered_events *oe = &session->ordered_events;
	struct evsel *evsel;
	bool should_warn = true;

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->core.attr.write_backward)
			should_warn = false;
	}

	if (!should_warn)
		return;
	if (oe->nr_unordered_events != 0)
		ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events);
}

static void perf_session__warn_about_errors(const struct perf_session *session)
{
	const struct events_stats *stats = &session->evlist->stats;

	if (session->tool->lost == perf_event__process_lost &&
	    stats->nr_events[PERF_RECORD_LOST] != 0) {
		ui__warning("Processed %d events and lost %d chunks!\n\n"
			    "Check IO/CPU overload!\n\n",
			    stats->nr_events[0],
			    stats->nr_events[PERF_RECORD_LOST]);
	}

	if (session->tool->lost_samples == perf_event__process_lost_samples) {
		double drop_rate;

		drop_rate = (double)stats->total_lost_samples /
			    (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples);
		if (drop_rate > 0.05) {
			ui__warning("Processed %" PRIu64 " samples and lost %3.2f%%!\n\n",
				    stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples,
				    drop_rate * 100.0);
		}
	}

	if (session->tool->aux == perf_event__process_aux &&
	    stats->total_aux_lost != 0) {
		ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n",
			    stats->total_aux_lost,
			    stats->nr_events[PERF_RECORD_AUX]);
	}

	if (session->tool->aux == perf_event__process_aux &&
	    stats->total_aux_partial != 0) {
		bool vmm_exclusive = false;

		(void)sysfs__read_bool("module/kvm_intel/parameters/vmm_exclusive",
				       &vmm_exclusive);

		ui__warning("AUX data had gaps in it %" PRIu64 " times out of %u!\n\n"
			    "Are you running a KVM guest in the background?%s\n\n",
			    stats->total_aux_partial,
			    stats->nr_events[PERF_RECORD_AUX],
			    vmm_exclusive ?
			    "\nReloading kvm_intel module with vmm_exclusive=0\n"
			    "will reduce the gaps to only guest's timeslices." :
			    "");
	}

	if (stats->nr_unknown_events != 0) {
		ui__warning("Found %u unknown events!\n\n"
			    "Is this an older tool processing a perf.data "
			    "file generated by a more recent tool?\n\n"
			    "If that is not the case, consider "
			    "reporting to linux-kernel@vger.kernel.org.\n\n",
			    stats->nr_unknown_events);
	}

	if (stats->nr_unknown_id != 0) {
		ui__warning("%u samples with id not present in the header\n",
			    stats->nr_unknown_id);
	}

	if (stats->nr_invalid_chains != 0) {
		ui__warning("Found invalid callchains!\n\n"
			    "%u out of %u events were discarded for this reason.\n\n"
			    "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
			    stats->nr_invalid_chains,
			    stats->nr_events[PERF_RECORD_SAMPLE]);
	}

	if (stats->nr_unprocessable_samples != 0) {
		ui__warning("%u unprocessable samples recorded.\n"
			    "Do you have a KVM guest running and not using 'perf kvm'?\n",
			    stats->nr_unprocessable_samples);
	}

	perf_session__warn_order(session);

	events_stats__auxtrace_error_warn(stats);

	if (stats->nr_proc_map_timeout != 0) {
		ui__warning("%d map information files for pre-existing threads were\n"
			    "not processed, if there are samples for addresses they\n"
			    "will not be resolved, you may find out which are these\n"
			    "threads by running with -v and redirecting the output\n"
			    "to a file.\n"
			    "The time limit to process proc map is too short?\n"
			    "Increase it by --proc-map-timeout\n",
			    stats->nr_proc_map_timeout);
	}
}
1918
1919static int perf_session__flush_thread_stack(struct thread *thread,
1920 void *p __maybe_unused)
1921{
1922 return thread_stack__flush(thread);
1923}
1924
1925static int perf_session__flush_thread_stacks(struct perf_session *session)
1926{
1927 return machines__for_each_thread(&session->machines,
1928 perf_session__flush_thread_stack,
1929 NULL);
1930}
1931
volatile int session_done;

static int __perf_session__process_decomp_events(struct perf_session *session);

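/*
 * Pipe input can be neither mmapped nor seeked, so events are consumed
 * incrementally: read one event header, grow the buffer if this event is
 * bigger than any seen so far, read the payload, then deliver it.  The
 * loop ends on EOF or once session_done() reports that processing was
 * interrupted (e.g. by a signal handler).
 */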
static int __perf_session__process_pipe_events(struct perf_session *session)
{
	struct ordered_events *oe = &session->ordered_events;
	struct perf_tool *tool = session->tool;
	int fd = perf_data__fd(session->data);
	union perf_event *event;
	uint32_t size, cur_size = 0;
	void *buf = NULL;
	s64 skip = 0;
	u64 head;
	ssize_t err;
	void *p;

	perf_tool__fill_defaults(tool);

	head = 0;
	cur_size = sizeof(union perf_event);

	buf = malloc(cur_size);
	if (!buf)
		return -errno;
	ordered_events__set_copy_on_queue(oe, true);
more:
	event = buf;
	err = readn(fd, event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	size = event->header.size;
	if (size < sizeof(struct perf_event_header)) {
		pr_err("bad event header size\n");
		goto out_err;
	}

	if (size > cur_size) {
		void *new = realloc(buf, size);
		if (!new) {
			pr_err("failed to allocate memory to read event\n");
			goto out_err;
		}
		buf = new;
		cur_size = size;
		event = buf;
	}
	p = event;
	p += sizeof(struct perf_event_header);

	if (size - sizeof(struct perf_event_header)) {
		err = readn(fd, p, size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if ((skip = perf_session__process_event(session, event, head)) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       head, event->header.size, event->header.type);
		err = -EINVAL;
		goto out_err;
	}

	head += size;

	if (skip > 0)
		head += skip;

	err = __perf_session__process_decomp_events(session);
	if (err)
		goto out_err;

	if (!session_done())
		goto more;
done:
	/* do the final flush for ordered samples */
	err = ordered_events__flush(oe, OE_FLUSH__FINAL);
	if (err)
		goto out_err;
	err = auxtrace__flush_events(session, tool);
	if (err)
		goto out_err;
	err = perf_session__flush_thread_stacks(session);
out_err:
	free(buf);
	if (!tool->no_warn)
		perf_session__warn_about_errors(session);
	ordered_events__free(&session->ordered_events);
	auxtrace__free_events(session);
	return err;
}

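/*
 * Peek at the event at @head in @buf without consuming it.  Returns NULL
 * when the remaining bytes cannot even hold an event header (the caller
 * needs more data, e.g. a remapped window), the event itself when it fits
 * completely, or @error when the header declares a size that overflows
 * the buffer, which points at fuzzed or still-compressed data.
 */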
static union perf_event *
prefetch_event(char *buf, u64 head, size_t mmap_size,
	       bool needs_swap, union perf_event *error)
{
	union perf_event *event;

	/*
	 * Ensure we have enough space remaining to read
	 * the size of the event in the headers.
	 */
	if (head + sizeof(event->header) > mmap_size)
		return NULL;

	event = (union perf_event *)(buf + head);
	if (needs_swap)
		perf_event_header__bswap(&event->header);

	if (head + event->header.size <= mmap_size)
		return event;

	/* We're not fetching the event so swap back again */
	if (needs_swap)
		perf_event_header__bswap(&event->header);

	pr_debug("%s: head=%#" PRIx64 " event->header.size=%#x, mmap_size=%#zx:"
		 " fuzzed or compressed perf.data?\n", __func__, head, event->header.size, mmap_size);

	return error;
}

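/*
 * Wrappers differing only in the sentinel returned for an event whose
 * declared size overflows the buffer: that is fatal for mmapped file data
 * (ERR_PTR(-EINVAL) aborts processing), while for a decompressed buffer
 * NULL simply ends the scan of that buffer.
 */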
static union perf_event *
fetch_mmaped_event(u64 head, size_t mmap_size, char *buf, bool needs_swap)
{
	return prefetch_event(buf, head, mmap_size, needs_swap, ERR_PTR(-EINVAL));
}

static union perf_event *
fetch_decomp_event(u64 head, size_t mmap_size, char *buf, bool needs_swap)
{
	return prefetch_event(buf, head, mmap_size, needs_swap, NULL);
}

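/*
 * Deliver the events accumulated in the most recently decompressed buffer
 * (session->decomp_last), advancing decomp->head past each one.  Called
 * after every processed record so that decompressed events are handled in
 * stream order.
 */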
static int __perf_session__process_decomp_events(struct perf_session *session)
{
	s64 skip;
	u64 size, file_pos = 0;
	struct decomp *decomp = session->decomp_last;

	if (!decomp)
		return 0;

	while (decomp->head < decomp->size && !session_done()) {
		union perf_event *event = fetch_decomp_event(decomp->head, decomp->size, decomp->data,
							     session->header.needs_swap);

		if (!event)
			break;

		size = event->header.size;

		if (size < sizeof(struct perf_event_header) ||
		    (skip = perf_session__process_event(session, event, file_pos)) < 0) {
			pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
				decomp->file_pos + decomp->head, event->header.size, event->header.type);
			return -EINVAL;
		}

		if (skip)
			size += skip;

		decomp->head += size;
	}

	return 0;
}

/*
 * On 64bit we can mmap the data file in one go. On 32bit we
 * cannot due to a limited address space.
 */
#if BITS_PER_LONG == 64
#define MMAP_SIZE ULLONG_MAX
#define NUM_MMAPS 1
#else
#define MMAP_SIZE (32 * 1024 * 1024ULL)
#define NUM_MMAPS 128
#endif

struct reader;

typedef s64 (*reader_cb_t)(struct perf_session *session,
			   union perf_event *event,
			   u64 file_offset);

struct reader {
	int		 fd;
	u64		 data_size;
	u64		 data_offset;
	reader_cb_t	 process;
};

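/*
 * Walk the on-disk data section through a sliding window of mmaps: map a
 * page-aligned chunk, hand each complete event to rd->process(), and when
 * an event would cross the end of the current window, map a new window
 * starting at the page containing it.  Up to NUM_MMAPS windows are kept
 * alive, recycled ring-buffer style, since queued events may still point
 * into older mappings; on 64-bit a single mapping covers the whole file.
 */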
static int
reader__process_events(struct reader *rd, struct perf_session *session,
		       struct ui_progress *prog)
{
	u64 data_size = rd->data_size;
	u64 head, page_offset, file_offset, file_pos, size;
	int err = 0, mmap_prot, mmap_flags, map_idx = 0;
	size_t	mmap_size;
	char *buf, *mmaps[NUM_MMAPS];
	union perf_event *event;
	s64 skip;

	page_offset = page_size * (rd->data_offset / page_size);
	file_offset = page_offset;
	head = rd->data_offset - page_offset;

	ui_progress__init_size(prog, data_size, "Processing events...");

	data_size += rd->data_offset;

	mmap_size = MMAP_SIZE;
	if (mmap_size > data_size) {
		mmap_size = data_size;
		session->one_mmap = true;
	}

	memset(mmaps, 0, sizeof(mmaps));

	mmap_prot  = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (session->header.needs_swap) {
		mmap_prot  |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, rd->fd,
		   file_offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out;
	}
	mmaps[map_idx] = buf;
	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
	file_pos = file_offset + head;
	if (session->one_mmap) {
		session->one_mmap_addr = buf;
		session->one_mmap_offset = file_offset;
	}

more:
	event = fetch_mmaped_event(head, mmap_size, buf, session->header.needs_swap);
	if (IS_ERR(event))
		return PTR_ERR(event);

	if (!event) {
		if (mmaps[map_idx]) {
			munmap(mmaps[map_idx], mmap_size);
			mmaps[map_idx] = NULL;
		}

		page_offset = page_size * (head / page_size);
		file_offset += page_offset;
		head -= page_offset;
		goto remap;
	}

	size = event->header.size;

	skip = -EINVAL;

	if (size < sizeof(struct perf_event_header) ||
	    (skip = rd->process(session, event, file_pos)) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d [%s]\n",
		       file_offset + head, event->header.size,
		       event->header.type, strerror(-skip));
		err = skip;
		goto out;
	}

	if (skip)
		size += skip;

	head += size;
	file_pos += size;

	err = __perf_session__process_decomp_events(session);
	if (err)
		goto out;

	ui_progress__update(prog, size);

	if (session_done())
		goto out;

	if (file_pos < data_size)
		goto more;

out:
	return err;
}

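/*
 * Default reader callback: no per-reader bookkeeping, every event goes
 * straight to the ordinary event dispatcher.
 */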
static s64 process_simple(struct perf_session *session,
			  union perf_event *event,
			  u64 file_offset)
{
	return perf_session__process_event(session, event, file_offset);
}

static int __perf_session__process_events(struct perf_session *session)
{
	struct reader rd = {
		.fd		= perf_data__fd(session->data),
		.data_size	= session->header.data_size,
		.data_offset	= session->header.data_offset,
		.process	= process_simple,
	};
	struct ordered_events *oe = &session->ordered_events;
	struct perf_tool *tool = session->tool;
	struct ui_progress prog;
	int err;

	perf_tool__fill_defaults(tool);

	if (rd.data_size == 0)
		return -1;

	ui_progress__init_size(&prog, rd.data_size, "Processing events...");

	err = reader__process_events(&rd, session, &prog);
	if (err)
		goto out_err;

	/* do the final flush for ordered samples */
	err = ordered_events__flush(oe, OE_FLUSH__FINAL);
	if (err)
		goto out_err;
	err = auxtrace__flush_events(session, tool);
	if (err)
		goto out_err;
	err = perf_session__flush_thread_stacks(session);
out_err:
	ui_progress__finish();
	if (!tool->no_warn)
		perf_session__warn_about_errors(session);
	/*
	 * We may be switching perf.data output; reinit ordered_events
	 * for the new output data.
	 */
	ordered_events__reinit(&session->ordered_events);
	auxtrace__free_events(session);
	session->one_mmap = false;
	return err;
}

int perf_session__process_events(struct perf_session *session)
{
	if (perf_session__register_idle_thread(session) < 0)
		return -ENOMEM;

	if (perf_data__is_pipe(session->data))
		return __perf_session__process_pipe_events(session);

	return __perf_session__process_events(session);
}

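/*
 * Commands that only work on tracepoint data (e.g. 'perf sched',
 * 'perf timechart') use this to bail out early, with a hint naming the
 * record command that would have produced usable input.
 */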
bool perf_session__has_traces(struct perf_session *session, const char *msg)
{
	struct evsel *evsel;

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT)
			return true;
	}

	pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
	return false;
}

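/*
 * Remember the kallsyms symbol (typically "_text") and address used as a
 * relocation reference for the kernel map, so that symbol addresses can
 * later be adjusted if the recorded kernel was relocated.  Anything from
 * the first ']' onward is stripped from the copied name.
 */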
int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name, u64 addr)
{
	char *bracket;
	struct ref_reloc_sym *ref;
	struct kmap *kmap;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	kmap = map__kmap(map);
	if (kmap)
		kmap->ref_reloc_sym = ref;

	return 0;
}

size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
{
	return machines__fprintf_dsos(&session->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
					  bool (skip)(struct dso *dso, int parm), int parm)
{
	return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
}

size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
{
	size_t ret;
	const char *msg = "";

	if (perf_header__has_feat(&session->header, HEADER_AUXTRACE))
		msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)";

	ret = fprintf(fp, "\nAggregated stats:%s\n", msg);

	ret += events_stats__fprintf(&session->evlist->stats, fp);
	return ret;
}


size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
{
	/*
	 * FIXME: Here we have to actually print all the machines in this
	 * session, not just the host...
	 */
	return machine__fprintf(&session->machines.host, fp);
}

struct evsel *perf_session__find_first_evtype(struct perf_session *session,
					      unsigned int type)
{
	struct evsel *pos;

	evlist__for_each_entry(session->evlist, pos) {
		if (pos->core.attr.type == type)
			return pos;
	}
	return NULL;
}

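/*
 * Parse @cpu_list (e.g. "0,2-4") into @cpu_bitmap, after checking that
 * every event type present in the file sampled PERF_SAMPLE_CPU, since
 * filtering by CPU is meaningless otherwise.  A minimal caller sketch
 * (hypothetical, not from this file):
 *
 *	DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
 *
 *	bitmap_zero(cpu_bitmap, MAX_NR_CPUS);
 *	if (perf_session__cpu_bitmap(session, "0-3", cpu_bitmap) < 0)
 *		return -1;
 */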
int perf_session__cpu_bitmap(struct perf_session *session,
			     const char *cpu_list, unsigned long *cpu_bitmap)
{
	int i, err = -1;
	struct perf_cpu_map *map;
	int nr_cpus = min(session->header.env.nr_cpus_online, MAX_NR_CPUS);

	for (i = 0; i < PERF_TYPE_MAX; ++i) {
		struct evsel *evsel;

		evsel = perf_session__find_first_evtype(session, i);
		if (!evsel)
			continue;

		if (!(evsel->core.attr.sample_type & PERF_SAMPLE_CPU)) {
			pr_err("File does not contain CPU events. "
			       "Remove -C option to proceed.\n");
			return -1;
		}
	}

	map = perf_cpu_map__new(cpu_list);
	if (map == NULL) {
		pr_err("Invalid cpu_list\n");
		return -1;
	}

	for (i = 0; i < map->nr; i++) {
		int cpu = map->map[i];

		if (cpu >= nr_cpus) {
			pr_err("Requested CPU %d too large. "
			       "Consider raising MAX_NR_CPUS\n", cpu);
			goto out_delete_map;
		}

		set_bit(cpu, cpu_bitmap);
	}

	err = 0;

out_delete_map:
	perf_cpu_map__put(map);
	return err;
}

void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
				bool full)
{
	if (session == NULL || fp == NULL)
		return;

	fprintf(fp, "# ========\n");
	perf_header__fprintf_info(session, fp, full);
	fprintf(fp, "# ========\n#\n");
}

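/*
 * PERF_RECORD_ID_INDEX maps each sample id to its evlist index, CPU and
 * tid; AUX area decoders rely on it to attribute decoded data to the
 * right context.  ie->nr is validated against how many id_index_entry
 * structs the declared header size can actually hold before the entries
 * are walked.
 */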
int perf_event__process_id_index(struct perf_session *session,
				 union perf_event *event)
{
	struct evlist *evlist = session->evlist;
	struct perf_record_id_index *ie = &event->id_index;
	size_t i, nr, max_nr;

	max_nr = (ie->header.size - sizeof(struct perf_record_id_index)) /
		 sizeof(struct id_index_entry);
	nr = ie->nr;
	if (nr > max_nr)
		return -EINVAL;

	if (dump_trace)
		fprintf(stdout, " nr: %zu\n", nr);

	for (i = 0; i < nr; i++) {
		struct id_index_entry *e = &ie->entries[i];
		struct perf_sample_id *sid;

		if (dump_trace) {
			fprintf(stdout, " ... id: %"PRI_lu64, e->id);
			fprintf(stdout, "  idx: %"PRI_lu64, e->idx);
			fprintf(stdout, "  cpu: %"PRI_ld64, e->cpu);
			fprintf(stdout, "  tid: %"PRI_ld64"\n", e->tid);
		}

		sid = perf_evlist__id2sid(evlist, e->id);
		if (!sid)
			return -ENOENT;
		sid->idx = e->idx;
		sid->cpu = e->cpu;
		sid->tid = e->tid;
	}
	return 0;
}
