#include <linux/kernel.h>
#include <traceevent/event-parse.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "evlist.h"
#include "evsel.h"
#include "session.h"
#include "tool.h"
#include "sort.h"
#include "util.h"
#include "cpumap.h"
#include "perf_regs.h"
#include "asm/bug.h"
#include "auxtrace.h"
#include "thread-stack.h"
#include "stat.h"

static int perf_session__deliver_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_sample *sample,
				       struct perf_tool *tool,
				       u64 file_offset);

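/*
 * Validate a freshly opened perf.data file: read the header and, for
 * non-pipe data, check that all evsels agree on sample_type,
 * sample_id_all and read_format before any events are processed.
 */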
static int perf_session__open(struct perf_session *session)
{
	struct perf_data_file *file = session->file;

	if (perf_session__read_header(session) < 0) {
		pr_err("incompatible file format (rerun with -v to learn more)\n");
		return -1;
	}

	if (perf_data_file__is_pipe(file))
		return 0;

	if (perf_header__has_feat(&session->header, HEADER_STAT))
		return 0;

	if (!perf_evlist__valid_sample_type(session->evlist)) {
		pr_err("non matching sample_type\n");
		return -1;
	}

	if (!perf_evlist__valid_sample_id_all(session->evlist)) {
		pr_err("non matching sample_id_all\n");
		return -1;
	}

	if (!perf_evlist__valid_read_format(session->evlist)) {
		pr_err("non matching read_format\n");
		return -1;
	}

	return 0;
}

void perf_session__set_id_hdr_size(struct perf_session *session)
{
	u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);

	machines__set_id_hdr_size(&session->machines, id_hdr_size);
}

int perf_session__create_kernel_maps(struct perf_session *session)
{
	int ret = machine__create_kernel_maps(&session->machines.host);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&session->machines);
	return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *session)
{
	machines__destroy_kernel_maps(&session->machines);
}

static bool perf_session__has_comm_exec(struct perf_session *session)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->attr.comm_exec)
			return true;
	}

	return false;
}

static void perf_session__set_comm_exec(struct perf_session *session)
{
	bool comm_exec = perf_session__has_comm_exec(session);

	machines__set_comm_exec(&session->machines, comm_exec);
}

static int ordered_events__deliver_event(struct ordered_events *oe,
					 struct ordered_event *event)
{
	struct perf_sample sample;
	struct perf_session *session = container_of(oe, struct perf_session,
						    ordered_events);
	int ret = perf_evlist__parse_sample(session->evlist, event->event, &sample);

	if (ret) {
		pr_err("Can't parse sample, err = %d\n", ret);
		return ret;
	}

	return perf_session__deliver_event(session, event->event, &sample,
					   session->tool, event->file_offset);
}

struct perf_session *perf_session__new(struct perf_data_file *file,
				       bool repipe, struct perf_tool *tool)
{
	struct perf_session *session = zalloc(sizeof(*session));

	if (!session)
		goto out;

	session->repipe = repipe;
	session->tool = tool;
	INIT_LIST_HEAD(&session->auxtrace_index);
	machines__init(&session->machines);
	ordered_events__init(&session->ordered_events, ordered_events__deliver_event);

	if (file) {
		if (perf_data_file__open(file))
			goto out_delete;

		session->file = file;

		if (perf_data_file__is_read(file)) {
			if (perf_session__open(session) < 0)
				goto out_close;

			perf_session__set_id_hdr_size(session);
			perf_session__set_comm_exec(session);
		}
	} else {
		session->machines.host.env = &perf_env;
	}

	if (!file || perf_data_file__is_write(file)) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in perf_event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(session) < 0)
			pr_warning("Cannot read kernel map\n");
	}

	if (tool && tool->ordering_requires_timestamps &&
	    tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		tool->ordered_events = false;
	}

	return session;

 out_close:
	perf_data_file__close(file);
 out_delete:
	perf_session__delete(session);
 out:
	return NULL;
}

static void perf_session__delete_threads(struct perf_session *session)
{
	machine__delete_threads(&session->machines.host);
}

void perf_session__delete(struct perf_session *session)
{
	if (session == NULL)
		return;
	auxtrace__free(session);
	auxtrace_index__free(&session->auxtrace_index);
	perf_session__destroy_kernel_maps(session);
	perf_session__delete_threads(session);
	perf_env__exit(&session->header.env);
	machines__exit(&session->machines);
	if (session->file)
		perf_data_file__close(session->file);
	free(session);
}

static int process_event_synth_tracing_data_stub(struct perf_tool *tool
						 __maybe_unused,
						 union perf_event *event
						 __maybe_unused,
						 struct perf_session *session
						 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
					 union perf_event *event __maybe_unused,
					 struct perf_evlist **pevlist
					 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_event_update_stub(struct perf_tool *tool __maybe_unused,
						 union perf_event *event __maybe_unused,
						 struct perf_evlist **pevlist
						 __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_event_update(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
				     union perf_event *event __maybe_unused,
				     struct perf_sample *sample __maybe_unused,
				     struct perf_evsel *evsel __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_stub(struct perf_tool *tool __maybe_unused,
			      union perf_event *event __maybe_unused,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
				       union perf_event *event __maybe_unused,
				       struct ordered_events *oe __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event,
				  struct ordered_events *oe);

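/*
 * Skip n bytes from a pipe-style fd that cannot seek, reading into a
 * scratch buffer and discarding the data.
 */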
static int skipn(int fd, off_t n)
{
	char buf[4096];
	ssize_t ret;

	while (n > 0) {
		ret = read(fd, buf, min(n, (off_t)sizeof(buf)));
		if (ret <= 0)
			return ret;
		n -= ret;
	}

	return 0;
}

static s64 process_event_auxtrace_stub(struct perf_tool *tool __maybe_unused,
				       union perf_event *event,
				       struct perf_session *session)
{
	dump_printf(": unhandled!\n");
	if (perf_data_file__is_pipe(session->file))
		skipn(perf_data_file__fd(session->file), event->auxtrace.size);
	return event->auxtrace.size;
}

static int process_event_op2_stub(struct perf_tool *tool __maybe_unused,
				  union perf_event *event __maybe_unused,
				  struct perf_session *session __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static
int process_event_thread_map_stub(struct perf_tool *tool __maybe_unused,
				  union perf_event *event __maybe_unused,
				  struct perf_session *session __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_thread_map(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static
int process_event_cpu_map_stub(struct perf_tool *tool __maybe_unused,
			       union perf_event *event __maybe_unused,
			       struct perf_session *session __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_cpu_map(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static
int process_event_stat_config_stub(struct perf_tool *tool __maybe_unused,
				   union perf_event *event __maybe_unused,
				   struct perf_session *session __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_stat_config(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int process_stat_stub(struct perf_tool *tool __maybe_unused,
			     union perf_event *event __maybe_unused,
			     struct perf_session *perf_session
			     __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_stat(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int process_stat_round_stub(struct perf_tool *tool __maybe_unused,
				   union perf_event *event __maybe_unused,
				   struct perf_session *perf_session
				   __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_stat_round(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

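/*
 * Fill every callback the tool left NULL with a default or stub handler,
 * so event delivery never has to check for a missing callback.
 */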
void perf_tool__fill_defaults(struct perf_tool *tool)
{
	if (tool->sample == NULL)
		tool->sample = process_event_sample_stub;
	if (tool->mmap == NULL)
		tool->mmap = process_event_stub;
	if (tool->mmap2 == NULL)
		tool->mmap2 = process_event_stub;
	if (tool->comm == NULL)
		tool->comm = process_event_stub;
	if (tool->fork == NULL)
		tool->fork = process_event_stub;
	if (tool->exit == NULL)
		tool->exit = process_event_stub;
	if (tool->lost == NULL)
		tool->lost = perf_event__process_lost;
	if (tool->lost_samples == NULL)
		tool->lost_samples = perf_event__process_lost_samples;
	if (tool->aux == NULL)
		tool->aux = perf_event__process_aux;
	if (tool->itrace_start == NULL)
		tool->itrace_start = perf_event__process_itrace_start;
	if (tool->context_switch == NULL)
		tool->context_switch = perf_event__process_switch;
	if (tool->read == NULL)
		tool->read = process_event_sample_stub;
	if (tool->throttle == NULL)
		tool->throttle = process_event_stub;
	if (tool->unthrottle == NULL)
		tool->unthrottle = process_event_stub;
	if (tool->attr == NULL)
		tool->attr = process_event_synth_attr_stub;
	if (tool->event_update == NULL)
		tool->event_update = process_event_synth_event_update_stub;
	if (tool->tracing_data == NULL)
		tool->tracing_data = process_event_synth_tracing_data_stub;
	if (tool->build_id == NULL)
		tool->build_id = process_event_op2_stub;
	if (tool->finished_round == NULL) {
		if (tool->ordered_events)
			tool->finished_round = process_finished_round;
		else
			tool->finished_round = process_finished_round_stub;
	}
	if (tool->id_index == NULL)
		tool->id_index = process_event_op2_stub;
	if (tool->auxtrace_info == NULL)
		tool->auxtrace_info = process_event_op2_stub;
	if (tool->auxtrace == NULL)
		tool->auxtrace = process_event_auxtrace_stub;
	if (tool->auxtrace_error == NULL)
		tool->auxtrace_error = process_event_op2_stub;
	if (tool->thread_map == NULL)
		tool->thread_map = process_event_thread_map_stub;
	if (tool->cpu_map == NULL)
		tool->cpu_map = process_event_cpu_map_stub;
	if (tool->stat_config == NULL)
		tool->stat_config = process_event_stat_config_stub;
	if (tool->stat == NULL)
		tool->stat = process_stat_stub;
	if (tool->stat_round == NULL)
		tool->stat_round = process_stat_round_stub;
	if (tool->time_conv == NULL)
		tool->time_conv = process_event_op2_stub;
}

static void swap_sample_id_all(union perf_event *event, void *data)
{
	void *end = (void *) event + event->header.size;
	int size = end - data;

	BUG_ON(size % sizeof(u64));
	mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
				   bool sample_id_all __maybe_unused)
{
	struct perf_event_header *hdr = &event->header;
	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
	event->comm.pid = bswap_32(event->comm.pid);
	event->comm.tid = bswap_32(event->comm.tid);

	if (sample_id_all) {
		void *data = &event->comm.comm;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap_swap(union perf_event *event,
				  bool sample_id_all)
{
	event->mmap.pid = bswap_32(event->mmap.pid);
	event->mmap.tid = bswap_32(event->mmap.tid);
	event->mmap.start = bswap_64(event->mmap.start);
	event->mmap.len = bswap_64(event->mmap.len);
	event->mmap.pgoff = bswap_64(event->mmap.pgoff);

	if (sample_id_all) {
		void *data = &event->mmap.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap2_swap(union perf_event *event,
				   bool sample_id_all)
{
	event->mmap2.pid = bswap_32(event->mmap2.pid);
	event->mmap2.tid = bswap_32(event->mmap2.tid);
	event->mmap2.start = bswap_64(event->mmap2.start);
	event->mmap2.len = bswap_64(event->mmap2.len);
	event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
	event->mmap2.maj = bswap_32(event->mmap2.maj);
	event->mmap2.min = bswap_32(event->mmap2.min);
	event->mmap2.ino = bswap_64(event->mmap2.ino);

	if (sample_id_all) {
		void *data = &event->mmap2.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
	event->fork.pid = bswap_32(event->fork.pid);
	event->fork.tid = bswap_32(event->fork.tid);
	event->fork.ppid = bswap_32(event->fork.ppid);
	event->fork.ptid = bswap_32(event->fork.ptid);
	event->fork.time = bswap_64(event->fork.time);

	if (sample_id_all)
		swap_sample_id_all(event, &event->fork + 1);
}

static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
	event->read.pid = bswap_32(event->read.pid);
	event->read.tid = bswap_32(event->read.tid);
	event->read.value = bswap_64(event->read.value);
	event->read.time_enabled = bswap_64(event->read.time_enabled);
	event->read.time_running = bswap_64(event->read.time_running);
	event->read.id = bswap_64(event->read.id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->read + 1);
}

static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
{
	event->aux.aux_offset = bswap_64(event->aux.aux_offset);
	event->aux.aux_size = bswap_64(event->aux.aux_size);
	event->aux.flags = bswap_64(event->aux.flags);

	if (sample_id_all)
		swap_sample_id_all(event, &event->aux + 1);
}

static void perf_event__itrace_start_swap(union perf_event *event,
					  bool sample_id_all)
{
	event->itrace_start.pid = bswap_32(event->itrace_start.pid);
	event->itrace_start.tid = bswap_32(event->itrace_start.tid);

	if (sample_id_all)
		swap_sample_id_all(event, &event->itrace_start + 1);
}

static void perf_event__switch_swap(union perf_event *event, bool sample_id_all)
{
	if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
		event->context_switch.next_prev_pid =
				bswap_32(event->context_switch.next_prev_pid);
		event->context_switch.next_prev_tid =
				bswap_32(event->context_switch.next_prev_tid);
	}

	if (sample_id_all)
		swap_sample_id_all(event, &event->context_switch + 1);
}

static void perf_event__throttle_swap(union perf_event *event,
				      bool sample_id_all)
{
	event->throttle.time = bswap_64(event->throttle.time);
	event->throttle.id = bswap_64(event->throttle.id);
	event->throttle.stream_id = bswap_64(event->throttle.stream_id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->throttle + 1);
}

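/*
 * Reverse the bit order within a byte. Used below to carry flag
 * bitfields across an endianness change, where a plain 64-bit byte
 * swap is not sufficient because the bitfield layout itself is
 * endian-dependent.
 */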
static u8 revbyte(u8 b)
{
	int rev = (b >> 4) | ((b & 0xf) << 4);
	rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
	rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
	return (u8) rev;
}

static void swap_bitfield(u8 *p, unsigned len)
{
	unsigned i;

	for (i = 0; i < len; i++) {
		*p = revbyte(*p);
		p++;
	}
}

void perf_event__attr_swap(struct perf_event_attr *attr)
{
	attr->type = bswap_32(attr->type);
	attr->size = bswap_32(attr->size);

#define bswap_safe(f, n) \
	(attr->size > (offsetof(struct perf_event_attr, f) + \
		       sizeof(attr->f) * (n)))
#define bswap_field(f, sz) \
do { \
	if (bswap_safe(f, 0)) \
		attr->f = bswap_##sz(attr->f); \
} while (0)
#define bswap_field_16(f) bswap_field(f, 16)
#define bswap_field_32(f) bswap_field(f, 32)
#define bswap_field_64(f) bswap_field(f, 64)

	bswap_field_64(config);
	bswap_field_64(sample_period);
	bswap_field_64(sample_type);
	bswap_field_64(read_format);
	bswap_field_32(wakeup_events);
	bswap_field_32(bp_type);
	bswap_field_64(bp_addr);
	bswap_field_64(bp_len);
	bswap_field_64(branch_sample_type);
	bswap_field_64(sample_regs_user);
	bswap_field_32(sample_stack_user);
	bswap_field_32(aux_watermark);
	bswap_field_16(sample_max_stack);

	/*
	 * After read_format are bitfields. Check read_format because
	 * we are unable to use offsetof on bitfield.
	 */
	if (bswap_safe(read_format, 1))
		swap_bitfield((u8 *) (&attr->read_format + 1),
			      sizeof(u64));
#undef bswap_field_64
#undef bswap_field_32
#undef bswap_field
#undef bswap_safe
}

static void perf_event__hdr_attr_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	size_t size;

	perf_event__attr_swap(&event->attr.attr);

	size = event->header.size;
	size -= (void *)&event->attr.id - (void *)event;
	mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_update_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->event_update.type = bswap_64(event->event_update.type);
	event->event_update.id = bswap_64(event->event_update.id);
}

static void perf_event__event_type_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->event_type.event_type.event_id =
		bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->tracing_data.size = bswap_32(event->tracing_data.size);
}

static void perf_event__auxtrace_info_swap(union perf_event *event,
					   bool sample_id_all __maybe_unused)
{
	size_t size;

	event->auxtrace_info.type = bswap_32(event->auxtrace_info.type);

	size = event->header.size;
	size -= (void *)&event->auxtrace_info.priv - (void *)event;
	mem_bswap_64(event->auxtrace_info.priv, size);
}

static void perf_event__auxtrace_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	event->auxtrace.size = bswap_64(event->auxtrace.size);
	event->auxtrace.offset = bswap_64(event->auxtrace.offset);
	event->auxtrace.reference = bswap_64(event->auxtrace.reference);
	event->auxtrace.idx = bswap_32(event->auxtrace.idx);
	event->auxtrace.tid = bswap_32(event->auxtrace.tid);
	event->auxtrace.cpu = bswap_32(event->auxtrace.cpu);
}

static void perf_event__auxtrace_error_swap(union perf_event *event,
					    bool sample_id_all __maybe_unused)
{
	event->auxtrace_error.type = bswap_32(event->auxtrace_error.type);
	event->auxtrace_error.code = bswap_32(event->auxtrace_error.code);
	event->auxtrace_error.cpu = bswap_32(event->auxtrace_error.cpu);
	event->auxtrace_error.pid = bswap_32(event->auxtrace_error.pid);
	event->auxtrace_error.tid = bswap_32(event->auxtrace_error.tid);
	event->auxtrace_error.ip = bswap_64(event->auxtrace_error.ip);
}

static void perf_event__thread_map_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	unsigned i;

	event->thread_map.nr = bswap_64(event->thread_map.nr);

	for (i = 0; i < event->thread_map.nr; i++)
		event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid);
}

static void perf_event__cpu_map_swap(union perf_event *event,
				     bool sample_id_all __maybe_unused)
{
	struct cpu_map_data *data = &event->cpu_map.data;
	struct cpu_map_entries *cpus;
	struct cpu_map_mask *mask;
	unsigned i;

	data->type = bswap_64(data->type);

	switch (data->type) {
	case PERF_CPU_MAP__CPUS:
		cpus = (struct cpu_map_entries *)data->data;

		cpus->nr = bswap_16(cpus->nr);

		for (i = 0; i < cpus->nr; i++)
			cpus->cpu[i] = bswap_16(cpus->cpu[i]);
		break;
	case PERF_CPU_MAP__MASK:
		mask = (struct cpu_map_mask *) data->data;

		mask->nr = bswap_16(mask->nr);
		mask->long_size = bswap_16(mask->long_size);

		switch (mask->long_size) {
		case 4: mem_bswap_32(&mask->mask, mask->nr); break;
		case 8: mem_bswap_64(&mask->mask, mask->nr); break;
		default:
			pr_err("cpu_map swap: unsupported long size\n");
		}
		/* fall through */
	default:
		break;
	}
}

static void perf_event__stat_config_swap(union perf_event *event,
					 bool sample_id_all __maybe_unused)
{
	u64 size;

	size = event->stat_config.nr * sizeof(event->stat_config.data[0]);
	size += 1; /* nr item itself */
	mem_bswap_64(&event->stat_config.nr, size);
}

static void perf_event__stat_swap(union perf_event *event,
				  bool sample_id_all __maybe_unused)
{
	event->stat.id = bswap_64(event->stat.id);
	event->stat.thread = bswap_32(event->stat.thread);
	event->stat.cpu = bswap_32(event->stat.cpu);
	event->stat.val = bswap_64(event->stat.val);
	event->stat.ena = bswap_64(event->stat.ena);
	event->stat.run = bswap_64(event->stat.run);
}

static void perf_event__stat_round_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->stat_round.type = bswap_64(event->stat_round.type);
	event->stat_round.time = bswap_64(event->stat_round.time);
}

typedef void (*perf_event__swap_op)(union perf_event *event,
				    bool sample_id_all);

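/*
 * Byte-swap handlers indexed by record type, used when the data file
 * was recorded on a machine of the opposite endianness.
 */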
static perf_event__swap_op perf_event__swap_ops[] = {
	[PERF_RECORD_MMAP] = perf_event__mmap_swap,
	[PERF_RECORD_MMAP2] = perf_event__mmap2_swap,
	[PERF_RECORD_COMM] = perf_event__comm_swap,
	[PERF_RECORD_FORK] = perf_event__task_swap,
	[PERF_RECORD_EXIT] = perf_event__task_swap,
	[PERF_RECORD_LOST] = perf_event__all64_swap,
	[PERF_RECORD_READ] = perf_event__read_swap,
	[PERF_RECORD_THROTTLE] = perf_event__throttle_swap,
	[PERF_RECORD_UNTHROTTLE] = perf_event__throttle_swap,
	[PERF_RECORD_SAMPLE] = perf_event__all64_swap,
	[PERF_RECORD_AUX] = perf_event__aux_swap,
	[PERF_RECORD_ITRACE_START] = perf_event__itrace_start_swap,
	[PERF_RECORD_LOST_SAMPLES] = perf_event__all64_swap,
	[PERF_RECORD_SWITCH] = perf_event__switch_swap,
	[PERF_RECORD_SWITCH_CPU_WIDE] = perf_event__switch_swap,
	[PERF_RECORD_HEADER_ATTR] = perf_event__hdr_attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID] = NULL,
	[PERF_RECORD_ID_INDEX] = perf_event__all64_swap,
	[PERF_RECORD_AUXTRACE_INFO] = perf_event__auxtrace_info_swap,
	[PERF_RECORD_AUXTRACE] = perf_event__auxtrace_swap,
	[PERF_RECORD_AUXTRACE_ERROR] = perf_event__auxtrace_error_swap,
	[PERF_RECORD_THREAD_MAP] = perf_event__thread_map_swap,
	[PERF_RECORD_CPU_MAP] = perf_event__cpu_map_swap,
	[PERF_RECORD_STAT_CONFIG] = perf_event__stat_config_swap,
	[PERF_RECORD_STAT] = perf_event__stat_swap,
	[PERF_RECORD_STAT_ROUND] = perf_event__stat_round_swap,
	[PERF_RECORD_EVENT_UPDATE] = perf_event__event_update_swap,
	[PERF_RECORD_TIME_CONV] = perf_event__all64_swap,
	[PERF_RECORD_HEADER_MAX] = NULL,
};

/*
 * When perf record finishes a pass over every buffer, it records this
 * pseudo event. We record the max timestamp t found in pass n. Assuming
 * these timestamps are monotonic across cpus, we know that if a buffer
 * still has events with timestamps below t, they will all be available
 * and read in pass n + 1. Hence when we start reading pass n + 2, we can
 * safely flush all events with timestamps below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush every event below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush every event below timestamp 7
 *      etc...
 */
static int process_finished_round(struct perf_tool *tool __maybe_unused,
				  union perf_event *event __maybe_unused,
				  struct ordered_events *oe)
{
	if (dump_trace)
		fprintf(stdout, "\n");
	return ordered_events__flush(oe, OE_FLUSH__ROUND);
}

int perf_session__queue_event(struct perf_session *s, union perf_event *event,
			      struct perf_sample *sample, u64 file_offset)
{
	return ordered_events__queue(&s->ordered_events, event, sample, file_offset);
}

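/*
 * Print the LBR-based call stack of a sample. LBR entries are branch
 * pairs, so the user call chain is reconstructed from the first "to"
 * address plus every "from" address.
 */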
static void callchain__lbr_callstack_printf(struct perf_sample *sample)
{
	struct ip_callchain *callchain = sample->callchain;
	struct branch_stack *lbr_stack = sample->branch_stack;
	u64 kernel_callchain_nr = callchain->nr;
	unsigned int i;

	for (i = 0; i < kernel_callchain_nr; i++) {
		if (callchain->ips[i] == PERF_CONTEXT_USER)
			break;
	}

	if ((i != kernel_callchain_nr) && lbr_stack->nr) {
		u64 total_nr;
		/*
		 * The LBR callstack can only capture the user call chain,
		 * i.e. kernel_callchain_nr == i + 1 here.
		 * LBR entries are branch pairs: the caller is stored in the
		 * "from" register and the callee in the "to" register.
		 * For a call stack "A"->"B"->"C"->"D", the LBR records
		 * "C"->"D", "B"->"C", "A"->"B".
		 * So only the first "to" address and all "from" addresses
		 * are needed to reconstruct the whole user stack.
		 */
		total_nr = i + 1 + lbr_stack->nr + 1;
		kernel_callchain_nr = i + 1;

		printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr);

		for (i = 0; i < kernel_callchain_nr; i++)
			printf("..... %2d: %016" PRIx64 "\n",
			       i, callchain->ips[i]);

		printf("..... %2d: %016" PRIx64 "\n",
		       (int)(kernel_callchain_nr), lbr_stack->entries[0].to);
		for (i = 0; i < lbr_stack->nr; i++)
			printf("..... %2d: %016" PRIx64 "\n",
			       (int)(i + kernel_callchain_nr + 1), lbr_stack->entries[i].from);
	}
}

static void callchain__printf(struct perf_evsel *evsel,
			      struct perf_sample *sample)
{
	unsigned int i;
	struct ip_callchain *callchain = sample->callchain;

	if (perf_evsel__has_branch_callstack(evsel))
		callchain__lbr_callstack_printf(sample);

	printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);

	for (i = 0; i < callchain->nr; i++)
		printf("..... %2d: %016" PRIx64 "\n",
		       i, callchain->ips[i]);
}

static void branch_stack__printf(struct perf_sample *sample)
{
	uint64_t i;

	printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr);

	for (i = 0; i < sample->branch_stack->nr; i++) {
		struct branch_entry *e = &sample->branch_stack->entries[i];

		printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x\n",
		       i, e->from, e->to,
		       e->flags.cycles,
		       e->flags.mispred ? "M" : " ",
		       e->flags.predicted ? "P" : " ",
		       e->flags.abort ? "A" : " ",
		       e->flags.in_tx ? "T" : " ",
		       (unsigned)e->flags.reserved);
	}
}

static void regs_dump__printf(u64 mask, u64 *regs)
{
	unsigned rid, i = 0;

	for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
		u64 val = regs[i++];

		printf(".... %-5s 0x%" PRIx64 "\n",
		       perf_reg_name(rid), val);
	}
}

static const char *regs_abi[] = {
	[PERF_SAMPLE_REGS_ABI_NONE] = "none",
	[PERF_SAMPLE_REGS_ABI_32] = "32-bit",
	[PERF_SAMPLE_REGS_ABI_64] = "64-bit",
};

static inline const char *regs_dump_abi(struct regs_dump *d)
{
	if (d->abi > PERF_SAMPLE_REGS_ABI_64)
		return "unknown";

	return regs_abi[d->abi];
}

static void regs__printf(const char *type, struct regs_dump *regs)
{
	u64 mask = regs->mask;

	printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n",
	       type,
	       mask,
	       regs_dump_abi(regs));

	regs_dump__printf(mask, regs->regs);
}

static void regs_user__printf(struct perf_sample *sample)
{
	struct regs_dump *user_regs = &sample->user_regs;

	if (user_regs->regs)
		regs__printf("user", user_regs);
}

static void regs_intr__printf(struct perf_sample *sample)
{
	struct regs_dump *intr_regs = &sample->intr_regs;

	if (intr_regs->regs)
		regs__printf("intr", intr_regs);
}

static void stack_user__printf(struct stack_dump *dump)
{
	printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
	       dump->size, dump->offset);
}

static void perf_evlist__print_tstamp(struct perf_evlist *evlist,
				      union perf_event *event,
				      struct perf_sample *sample)
{
	u64 sample_type = __perf_evlist__combined_sample_type(evlist);

	if (event->header.type != PERF_RECORD_SAMPLE &&
	    !perf_evlist__sample_id_all(evlist)) {
		fputs("-1 -1 ", stdout);
		return;
	}

	if ((sample_type & PERF_SAMPLE_CPU))
		printf("%u ", sample->cpu);

	if (sample_type & PERF_SAMPLE_TIME)
		printf("%" PRIu64 " ", sample->time);
}

static void sample_read__printf(struct perf_sample *sample, u64 read_format)
{
	printf("... sample_read:\n");

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("...... time enabled %016" PRIx64 "\n",
		       sample->read.time_enabled);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("...... time running %016" PRIx64 "\n",
		       sample->read.time_running);

	if (read_format & PERF_FORMAT_GROUP) {
		u64 i;

		printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);

		for (i = 0; i < sample->read.group.nr; i++) {
			struct sample_read_value *value;

			value = &sample->read.group.values[i];
			printf("..... id %016" PRIx64
			       ", value %016" PRIx64 "\n",
			       value->id, value->value);
		}
	} else
		printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
		       sample->read.one.id, sample->read.one.value);
}

static void dump_event(struct perf_evlist *evlist, union perf_event *event,
		       u64 file_offset, struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("\n%#" PRIx64 " [%#x]: event: %d\n",
	       file_offset, event->header.size, event->header.type);

	trace_event(event);

	if (sample)
		perf_evlist__print_tstamp(evlist, event, sample);

	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
	       event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
			struct perf_sample *sample)
{
	u64 sample_type;

	if (!dump_trace)
		return;

	printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
	       event->header.misc, sample->pid, sample->tid, sample->ip,
	       sample->period, sample->addr);

	sample_type = evsel->attr.sample_type;

	if (sample_type & PERF_SAMPLE_CALLCHAIN)
		callchain__printf(evsel, sample);

	if ((sample_type & PERF_SAMPLE_BRANCH_STACK) && !perf_evsel__has_branch_callstack(evsel))
		branch_stack__printf(sample);

	if (sample_type & PERF_SAMPLE_REGS_USER)
		regs_user__printf(sample);

	if (sample_type & PERF_SAMPLE_REGS_INTR)
		regs_intr__printf(sample);

	if (sample_type & PERF_SAMPLE_STACK_USER)
		stack_user__printf(&sample->user_stack);

	if (sample_type & PERF_SAMPLE_WEIGHT)
		printf("... weight: %" PRIu64 "\n", sample->weight);

	if (sample_type & PERF_SAMPLE_DATA_SRC)
		printf("... data_src: 0x%"PRIx64"\n", sample->data_src);

	if (sample_type & PERF_SAMPLE_TRANSACTION)
		printf("... transaction: %" PRIx64 "\n", sample->transaction);

	if (sample_type & PERF_SAMPLE_READ)
		sample_read__printf(sample, evsel->attr.read_format);
}

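/*
 * Pick the machine a sample belongs to: a guest machine keyed by pid for
 * guest kernel/user samples (when guest profiling is enabled), otherwise
 * the host machine.
 */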
static struct machine *machines__find_for_cpumode(struct machines *machines,
						  union perf_event *event,
						  struct perf_sample *sample)
{
	struct machine *machine;

	if (perf_guest &&
	    ((sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
	     (sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) {
		u32 pid;

		if (event->header.type == PERF_RECORD_MMAP
		    || event->header.type == PERF_RECORD_MMAP2)
			pid = event->mmap.pid;
		else
			pid = sample->pid;

		machine = machines__find(machines, pid);
		if (!machine)
			machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
		return machine;
	}

	return &machines->host;
}

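/*
 * Deliver one value of a PERF_SAMPLE_READ sample: map the id back to its
 * evsel and turn the raw running count into a per-sample period by
 * subtracting the previously seen count.
 */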
static int deliver_sample_value(struct perf_evlist *evlist,
				struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct sample_read_value *v,
				struct machine *machine)
{
	struct perf_sample_id *sid = perf_evlist__id2sid(evlist, v->id);

	if (sid) {
		sample->id = v->id;
		sample->period = v->value - sid->period;
		sid->period = v->value;
	}

	if (!sid || sid->evsel == NULL) {
		++evlist->stats.nr_unknown_id;
		return 0;
	}

	return tool->sample(tool, event, sample, sid->evsel, machine);
}

static int deliver_sample_group(struct perf_evlist *evlist,
				struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct machine *machine)
{
	int ret = -EINVAL;
	u64 i;

	for (i = 0; i < sample->read.group.nr; i++) {
		ret = deliver_sample_value(evlist, tool, event, sample,
					   &sample->read.group.values[i],
					   machine);
		if (ret)
			break;
	}

	return ret;
}

static int
perf_evlist__deliver_sample(struct perf_evlist *evlist,
			    struct perf_tool *tool,
			    union perf_event *event,
			    struct perf_sample *sample,
			    struct perf_evsel *evsel,
			    struct machine *machine)
{
	/* We know evsel != NULL. */
	u64 sample_type = evsel->attr.sample_type;
	u64 read_format = evsel->attr.read_format;

	/* Standard sample delivery. */
	if (!(sample_type & PERF_SAMPLE_READ))
		return tool->sample(tool, event, sample, evsel, machine);

	/* For PERF_SAMPLE_READ we have either single or group mode. */
	if (read_format & PERF_FORMAT_GROUP)
		return deliver_sample_group(evlist, tool, event, sample,
					    machine);
	else
		return deliver_sample_value(evlist, tool, event, sample,
					    &sample->read.one, machine);
}

static int machines__deliver_event(struct machines *machines,
				   struct perf_evlist *evlist,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct perf_tool *tool, u64 file_offset)
{
	struct perf_evsel *evsel;
	struct machine *machine;

	dump_event(evlist, event, file_offset, sample);

	evsel = perf_evlist__id2evsel(evlist, sample->id);

	machine = machines__find_for_cpumode(machines, event, sample);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		if (evsel == NULL) {
			++evlist->stats.nr_unknown_id;
			return 0;
		}
		dump_sample(evsel, event, sample);
		if (machine == NULL) {
			++evlist->stats.nr_unprocessable_samples;
			return 0;
		}
		return perf_evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
	case PERF_RECORD_MMAP:
		return tool->mmap(tool, event, sample, machine);
	case PERF_RECORD_MMAP2:
		if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT)
			++evlist->stats.nr_proc_map_timeout;
		return tool->mmap2(tool, event, sample, machine);
	case PERF_RECORD_COMM:
		return tool->comm(tool, event, sample, machine);
	case PERF_RECORD_FORK:
		return tool->fork(tool, event, sample, machine);
	case PERF_RECORD_EXIT:
		return tool->exit(tool, event, sample, machine);
	case PERF_RECORD_LOST:
		if (tool->lost == perf_event__process_lost)
			evlist->stats.total_lost += event->lost.lost;
		return tool->lost(tool, event, sample, machine);
	case PERF_RECORD_LOST_SAMPLES:
		if (tool->lost_samples == perf_event__process_lost_samples)
			evlist->stats.total_lost_samples += event->lost_samples.lost;
		return tool->lost_samples(tool, event, sample, machine);
	case PERF_RECORD_READ:
		return tool->read(tool, event, sample, evsel, machine);
	case PERF_RECORD_THROTTLE:
		return tool->throttle(tool, event, sample, machine);
	case PERF_RECORD_UNTHROTTLE:
		return tool->unthrottle(tool, event, sample, machine);
	case PERF_RECORD_AUX:
		if (tool->aux == perf_event__process_aux &&
		    (event->aux.flags & PERF_AUX_FLAG_TRUNCATED))
			evlist->stats.total_aux_lost += 1;
		return tool->aux(tool, event, sample, machine);
	case PERF_RECORD_ITRACE_START:
		return tool->itrace_start(tool, event, sample, machine);
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		return tool->context_switch(tool, event, sample, machine);
	default:
		++evlist->stats.nr_unknown_events;
		return -1;
	}
}

static int perf_session__deliver_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_sample *sample,
				       struct perf_tool *tool,
				       u64 file_offset)
{
	int ret;

	ret = auxtrace__process_event(session, event, sample, tool);
	if (ret < 0)
		return ret;
	if (ret > 0)
		return 0;

	return machines__deliver_event(&session->machines, session->evlist,
				       event, sample, tool, file_offset);
}

static s64 perf_session__process_user_event(struct perf_session *session,
					    union perf_event *event,
					    u64 file_offset)
{
	struct ordered_events *oe = &session->ordered_events;
	struct perf_tool *tool = session->tool;
	int fd = perf_data_file__fd(session->file);
	int err;

	dump_event(session->evlist, event, file_offset, NULL);

	/* These events are processed right away */
	switch (event->header.type) {
	case PERF_RECORD_HEADER_ATTR:
		err = tool->attr(tool, event, &session->evlist);
		if (err == 0) {
			perf_session__set_id_hdr_size(session);
			perf_session__set_comm_exec(session);
		}
		return err;
	case PERF_RECORD_EVENT_UPDATE:
		return tool->event_update(tool, event, &session->evlist);
	case PERF_RECORD_HEADER_EVENT_TYPE:
		/*
		 * Deprecated, but we need to handle it for the sake of
		 * old data files created in pipe mode.
		 */
		return 0;
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* setup for reading amidst mmap */
		lseek(fd, file_offset, SEEK_SET);
		return tool->tracing_data(tool, event, session);
	case PERF_RECORD_HEADER_BUILD_ID:
		return tool->build_id(tool, event, session);
	case PERF_RECORD_FINISHED_ROUND:
		return tool->finished_round(tool, event, oe);
	case PERF_RECORD_ID_INDEX:
		return tool->id_index(tool, event, session);
	case PERF_RECORD_AUXTRACE_INFO:
		return tool->auxtrace_info(tool, event, session);
	case PERF_RECORD_AUXTRACE:
		/* setup for reading amidst mmap */
		lseek(fd, file_offset + event->header.size, SEEK_SET);
		return tool->auxtrace(tool, event, session);
	case PERF_RECORD_AUXTRACE_ERROR:
		perf_session__auxtrace_error_inc(session, event);
		return tool->auxtrace_error(tool, event, session);
	case PERF_RECORD_THREAD_MAP:
		return tool->thread_map(tool, event, session);
	case PERF_RECORD_CPU_MAP:
		return tool->cpu_map(tool, event, session);
	case PERF_RECORD_STAT_CONFIG:
		return tool->stat_config(tool, event, session);
	case PERF_RECORD_STAT:
		return tool->stat(tool, event, session);
	case PERF_RECORD_STAT_ROUND:
		return tool->stat_round(tool, event, session);
	case PERF_RECORD_TIME_CONV:
		session->time_conv = event->time_conv;
		return tool->time_conv(tool, event, session);
	default:
		return -EINVAL;
	}
}

int perf_session__deliver_synth_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample)
{
	struct perf_evlist *evlist = session->evlist;
	struct perf_tool *tool = session->tool;

	events_stats__inc(&evlist->stats, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, 0);

	return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0);
}

static void event_swap(union perf_event *event, bool sample_id_all)
{
	perf_event__swap_op swap;

	swap = perf_event__swap_ops[event->header.type];
	if (swap)
		swap(event, sample_id_all);
}

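/*
 * Read one event at an arbitrary file offset without disturbing the main
 * processing loop, e.g. for AUX trace decoders. Reads via the single mmap
 * when possible, otherwise through the supplied buffer.
 */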
int perf_session__peek_event(struct perf_session *session, off_t file_offset,
			     void *buf, size_t buf_sz,
			     union perf_event **event_ptr,
			     struct perf_sample *sample)
{
	union perf_event *event;
	size_t hdr_sz, rest;
	int fd;

	if (session->one_mmap && !session->header.needs_swap) {
		event = file_offset - session->one_mmap_offset +
			session->one_mmap_addr;
		goto out_parse_sample;
	}

	if (perf_data_file__is_pipe(session->file))
		return -1;

	fd = perf_data_file__fd(session->file);
	hdr_sz = sizeof(struct perf_event_header);

	if (buf_sz < hdr_sz)
		return -1;

	if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
	    readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)
		return -1;

	event = (union perf_event *)buf;

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (event->header.size < hdr_sz || event->header.size > buf_sz)
		return -1;

	/* Read the payload after the header, not over it. */
	buf += hdr_sz;
	rest = event->header.size - hdr_sz;

	if (readn(fd, buf, rest) != (ssize_t)rest)
		return -1;

	if (session->header.needs_swap)
		event_swap(event, perf_evlist__sample_id_all(session->evlist));

out_parse_sample:

	if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
	    perf_evlist__parse_sample(session->evlist, event, sample))
		return -1;

	*event_ptr = event;

	return 0;
}

static s64 perf_session__process_event(struct perf_session *session,
				       union perf_event *event, u64 file_offset)
{
	struct perf_evlist *evlist = session->evlist;
	struct perf_tool *tool = session->tool;
	struct perf_sample sample;
	int ret;

	if (session->header.needs_swap)
		event_swap(event, perf_evlist__sample_id_all(evlist));

	if (event->header.type >= PERF_RECORD_HEADER_MAX)
		return -EINVAL;

	events_stats__inc(&evlist->stats, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, file_offset);

	/*
	 * For all kernel events we get the sample data
	 */
	ret = perf_evlist__parse_sample(evlist, event, &sample);
	if (ret)
		return ret;

	if (tool->ordered_events) {
		ret = perf_session__queue_event(session, event, &sample, file_offset);
		if (ret != -ETIME)
			return ret;
	}

	return perf_session__deliver_event(session, event, &sample, tool,
					   file_offset);
}

void perf_event_header__bswap(struct perf_event_header *hdr)
{
	hdr->type = bswap_32(hdr->type);
	hdr->misc = bswap_16(hdr->misc);
	hdr->size = bswap_16(hdr->size);
}

struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
{
	return machine__findnew_thread(&session->machines.host, -1, pid);
}

int perf_session__register_idle_thread(struct perf_session *session)
{
	struct thread *thread;
	int err = 0;

	thread = machine__findnew_thread(&session->machines.host, 0, 0);
	if (thread == NULL || thread__set_comm(thread, "swapper", 0)) {
		pr_err("problem inserting idle task.\n");
		err = -1;
	}

	/* machine__findnew_thread() got the thread, so put it */
	thread__put(thread);
	return err;
}

static void
perf_session__warn_order(const struct perf_session *session)
{
	const struct ordered_events *oe = &session->ordered_events;
	struct perf_evsel *evsel;
	bool should_warn = true;

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->attr.write_backward)
			should_warn = false;
	}

	if (!should_warn)
		return;
	if (oe->nr_unordered_events != 0)
		ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events);
}

static void perf_session__warn_about_errors(const struct perf_session *session)
{
	const struct events_stats *stats = &session->evlist->stats;

	if (session->tool->lost == perf_event__process_lost &&
	    stats->nr_events[PERF_RECORD_LOST] != 0) {
		ui__warning("Processed %d events and lost %d chunks!\n\n"
			    "Check IO/CPU overload!\n\n",
			    stats->nr_events[0],
			    stats->nr_events[PERF_RECORD_LOST]);
	}

	if (session->tool->lost_samples == perf_event__process_lost_samples) {
		double drop_rate;

		drop_rate = (double)stats->total_lost_samples /
			    (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples);
		if (drop_rate > 0.05) {
			ui__warning("Processed %" PRIu64 " samples and lost %3.2f%% samples!\n\n",
				    stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples,
				    drop_rate * 100.0);
		}
	}

	if (session->tool->aux == perf_event__process_aux &&
	    stats->total_aux_lost != 0) {
		ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n",
			    stats->total_aux_lost,
			    stats->nr_events[PERF_RECORD_AUX]);
	}

	if (stats->nr_unknown_events != 0) {
		ui__warning("Found %u unknown events!\n\n"
			    "Is this an older tool processing a perf.data "
			    "file generated by a more recent tool?\n\n"
			    "If that is not the case, consider "
			    "reporting to linux-kernel@vger.kernel.org.\n\n",
			    stats->nr_unknown_events);
	}

	if (stats->nr_unknown_id != 0) {
		ui__warning("%u samples with id not present in the header\n",
			    stats->nr_unknown_id);
	}

	if (stats->nr_invalid_chains != 0) {
		ui__warning("Found invalid callchains!\n\n"
			    "%u out of %u events were discarded for this reason.\n\n"
			    "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
			    stats->nr_invalid_chains,
			    stats->nr_events[PERF_RECORD_SAMPLE]);
	}

	if (stats->nr_unprocessable_samples != 0) {
		ui__warning("%u unprocessable samples recorded.\n"
			    "Do you have a KVM guest running and not using 'perf kvm'?\n",
			    stats->nr_unprocessable_samples);
	}

	perf_session__warn_order(session);

	events_stats__auxtrace_error_warn(stats);

	if (stats->nr_proc_map_timeout != 0) {
		ui__warning("%d map information files for pre-existing threads were\n"
			    "not processed; if there are samples for those addresses,\n"
			    "they will not be resolved. You may find out which threads\n"
			    "these are by running with -v and redirecting the output\n"
			    "to a file.\n"
			    "Is the time limit to process the proc map too short?\n"
			    "Increase it with --proc-map-timeout.\n",
			    stats->nr_proc_map_timeout);
	}
}

static int perf_session__flush_thread_stack(struct thread *thread,
					    void *p __maybe_unused)
{
	return thread_stack__flush(thread);
}

static int perf_session__flush_thread_stacks(struct perf_session *session)
{
	return machines__for_each_thread(&session->machines,
					 perf_session__flush_thread_stack,
					 NULL);
}

volatile int session_done;

static int __perf_session__process_pipe_events(struct perf_session *session)
{
	struct ordered_events *oe = &session->ordered_events;
	struct perf_tool *tool = session->tool;
	int fd = perf_data_file__fd(session->file);
	union perf_event *event;
	uint32_t size, cur_size = 0;
	void *buf = NULL;
	s64 skip = 0;
	u64 head;
	ssize_t err;
	void *p;

	perf_tool__fill_defaults(tool);

	head = 0;
	cur_size = sizeof(union perf_event);

	buf = malloc(cur_size);
	if (!buf)
		return -errno;
more:
	event = buf;
	err = readn(fd, event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	size = event->header.size;
	if (size < sizeof(struct perf_event_header)) {
		pr_err("bad event header size\n");
		goto out_err;
	}

	if (size > cur_size) {
		void *new = realloc(buf, size);
		if (!new) {
			pr_err("failed to allocate memory to read event\n");
			goto out_err;
		}
		buf = new;
		cur_size = size;
		event = buf;
	}
	p = event;
	p += sizeof(struct perf_event_header);

	if (size - sizeof(struct perf_event_header)) {
		err = readn(fd, p, size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if ((skip = perf_session__process_event(session, event, head)) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       head, event->header.size, event->header.type);
		err = -EINVAL;
		goto out_err;
	}

	head += size;

	if (skip > 0)
		head += skip;

	if (!session_done())
		goto more;
done:
	/* do the final flush for ordered samples */
	err = ordered_events__flush(oe, OE_FLUSH__FINAL);
	if (err)
		goto out_err;
	err = auxtrace__flush_events(session, tool);
	if (err)
		goto out_err;
	err = perf_session__flush_thread_stacks(session);
out_err:
	free(buf);
	perf_session__warn_about_errors(session);
	ordered_events__free(&session->ordered_events);
	auxtrace__free_events(session);
	return err;
}

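/*
 * Return a pointer to the event at offset "head" inside the current mmap
 * window, or NULL if the event straddles the end of the window and a
 * remap is needed.
 */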
static union perf_event *
fetch_mmaped_event(struct perf_session *session,
		   u64 head, size_t mmap_size, char *buf)
{
	union perf_event *event;

	/*
	 * Ensure we have enough space remaining to read
	 * the size of the event in the headers.
	 */
	if (head + sizeof(event->header) > mmap_size)
		return NULL;

	event = (union perf_event *)(buf + head);

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (head + event->header.size > mmap_size) {
		/* We're not fetching the event so swap back again */
		if (session->header.needs_swap)
			perf_event_header__bswap(&event->header);
		return NULL;
	}

	return event;
}

/*
 * On 64bit we can mmap the data file in one go. On 32bit we can only mmap
 * slices. On 32bit we use 32MB.
 */
#if BITS_PER_LONG == 64
#define MMAP_SIZE ULLONG_MAX
#define NUM_MMAPS 1
#else
#define MMAP_SIZE (32 * 1024 * 1024ULL)
#define NUM_MMAPS 128
#endif

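/*
 * Main on-disk processing loop: mmap the data section in (possibly
 * sliding) windows and feed each event to perf_session__process_event(),
 * remapping whenever an event crosses the end of the current window.
 */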
static int __perf_session__process_events(struct perf_session *session,
					  u64 data_offset, u64 data_size,
					  u64 file_size)
{
	struct ordered_events *oe = &session->ordered_events;
	struct perf_tool *tool = session->tool;
	int fd = perf_data_file__fd(session->file);
	u64 head, page_offset, file_offset, file_pos, size;
	int err, mmap_prot, mmap_flags, map_idx = 0;
	size_t mmap_size;
	char *buf, *mmaps[NUM_MMAPS];
	union perf_event *event;
	struct ui_progress prog;
	s64 skip;

	perf_tool__fill_defaults(tool);

	page_offset = page_size * (data_offset / page_size);
	file_offset = page_offset;
	head = data_offset - page_offset;

	if (data_size == 0)
		goto out;

	if (data_offset + data_size < file_size)
		file_size = data_offset + data_size;

	ui_progress__init(&prog, file_size, "Processing events...");

	mmap_size = MMAP_SIZE;
	if (mmap_size > file_size) {
		mmap_size = file_size;
		session->one_mmap = true;
	}

	memset(mmaps, 0, sizeof(mmaps));

	mmap_prot = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (session->header.needs_swap) {
		mmap_prot |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, fd,
		   file_offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}
	mmaps[map_idx] = buf;
	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
	file_pos = file_offset + head;
	if (session->one_mmap) {
		session->one_mmap_addr = buf;
		session->one_mmap_offset = file_offset;
	}

more:
	event = fetch_mmaped_event(session, head, mmap_size, buf);
	if (!event) {
		if (mmaps[map_idx]) {
			munmap(mmaps[map_idx], mmap_size);
			mmaps[map_idx] = NULL;
		}

		page_offset = page_size * (head / page_size);
		file_offset += page_offset;
		head -= page_offset;
		goto remap;
	}

	size = event->header.size;

	if (size < sizeof(struct perf_event_header) ||
	    (skip = perf_session__process_event(session, event, file_pos)) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       file_offset + head, event->header.size,
		       event->header.type);
		err = -EINVAL;
		goto out_err;
	}

	if (skip)
		size += skip;

	head += size;
	file_pos += size;

	ui_progress__update(&prog, size);

	if (session_done())
		goto out;

	if (file_pos < file_size)
		goto more;

out:
	/* do the final flush for ordered samples */
	err = ordered_events__flush(oe, OE_FLUSH__FINAL);
	if (err)
		goto out_err;
	err = auxtrace__flush_events(session, tool);
	if (err)
		goto out_err;
	err = perf_session__flush_thread_stacks(session);
out_err:
	ui_progress__finish();
	perf_session__warn_about_errors(session);
	/*
	 * We may be switching the perf.data output, so make the existing
	 * ordered_events reusable.
	 */
	ordered_events__reinit(&session->ordered_events);
	auxtrace__free_events(session);
	session->one_mmap = false;
	return err;
}

int perf_session__process_events(struct perf_session *session)
{
	u64 size = perf_data_file__size(session->file);
	int err;

	if (perf_session__register_idle_thread(session) < 0)
		return -ENOMEM;

	if (!perf_data_file__is_pipe(session->file))
		err = __perf_session__process_events(session,
						     session->header.data_offset,
						     session->header.data_size, size);
	else
		err = __perf_session__process_pipe_events(session);

	return err;
}

bool perf_session__has_traces(struct perf_session *session, const char *msg)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT)
			return true;
	}

	pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
	return false;
}

int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
				     const char *symbol_name, u64 addr)
{
	char *bracket;
	enum map_type i;
	struct ref_reloc_sym *ref;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct kmap *kmap = map__kmap(maps[i]);

		if (!kmap)
			continue;
		kmap->ref_reloc_sym = ref;
	}

	return 0;
}

size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
{
	return machines__fprintf_dsos(&session->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
					  bool (skip)(struct dso *dso, int parm), int parm)
{
	return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
}

size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
{
	size_t ret;
	const char *msg = "";

	if (perf_header__has_feat(&session->header, HEADER_AUXTRACE))
		msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)";

	ret = fprintf(fp, "\nAggregated stats:%s\n", msg);

	ret += events_stats__fprintf(&session->evlist->stats, fp);
	return ret;
}

size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
{
	/*
	 * FIXME: Here we have to actually print all the machines in this
	 * session, not just the host...
	 */
	return machine__fprintf(&session->machines.host, fp);
}

struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
						   unsigned int type)
{
	struct perf_evsel *pos;

	evlist__for_each_entry(session->evlist, pos) {
		if (pos->attr.type == type)
			return pos;
	}
	return NULL;
}

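/*
 * Build a CPU bitmap from a user-supplied cpu list, after checking that
 * the session actually recorded PERF_SAMPLE_CPU for its event types.
 */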
int perf_session__cpu_bitmap(struct perf_session *session,
			     const char *cpu_list, unsigned long *cpu_bitmap)
{
	int i, err = -1;
	struct cpu_map *map;

	for (i = 0; i < PERF_TYPE_MAX; ++i) {
		struct perf_evsel *evsel;

		evsel = perf_session__find_first_evtype(session, i);
		if (!evsel)
			continue;

		if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
			pr_err("File does not contain CPU events. "
			       "Remove -c option to proceed.\n");
			return -1;
		}
	}

	map = cpu_map__new(cpu_list);
	if (map == NULL) {
		pr_err("Invalid cpu_list\n");
		return -1;
	}

	for (i = 0; i < map->nr; i++) {
		int cpu = map->map[i];

		if (cpu >= MAX_NR_CPUS) {
			pr_err("Requested CPU %d too large. "
			       "Consider raising MAX_NR_CPUS\n", cpu);
			goto out_delete_map;
		}

		set_bit(cpu, cpu_bitmap);
	}

	err = 0;

out_delete_map:
	cpu_map__put(map);
	return err;
}

void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
				bool full)
{
	struct stat st;
	int fd, ret;

	if (session == NULL || fp == NULL)
		return;

	fd = perf_data_file__fd(session->file);

	ret = fstat(fd, &st);
	if (ret == -1)
		return;

	fprintf(fp, "# ========\n");
	fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));
	perf_header__fprintf_info(session, fp, full);
	fprintf(fp, "# ========\n#\n");
}

int __perf_session__set_tracepoints_handlers(struct perf_session *session,
					     const struct perf_evsel_str_handler *assocs,
					     size_t nr_assocs)
{
	struct perf_evsel *evsel;
	size_t i;
	int err;

	for (i = 0; i < nr_assocs; i++) {
		/*
		 * Adding a handler for an event not in this session,
		 * just ignore it.
		 */
		evsel = perf_evlist__find_tracepoint_by_name(session->evlist, assocs[i].name);
		if (evsel == NULL)
			continue;

		err = -EEXIST;
		if (evsel->handler != NULL)
			goto out;
		evsel->handler = assocs[i].handler;
	}

	err = 0;
out:
	return err;
}

int perf_event__process_id_index(struct perf_tool *tool __maybe_unused,
				 union perf_event *event,
				 struct perf_session *session)
{
	struct perf_evlist *evlist = session->evlist;
	struct id_index_event *ie = &event->id_index;
	size_t i, nr, max_nr;

	max_nr = (ie->header.size - sizeof(struct id_index_event)) /
		 sizeof(struct id_index_entry);
	nr = ie->nr;
	if (nr > max_nr)
		return -EINVAL;

	if (dump_trace)
		fprintf(stdout, " nr: %zu\n", nr);

	for (i = 0; i < nr; i++) {
		struct id_index_entry *e = &ie->entries[i];
		struct perf_sample_id *sid;

		if (dump_trace) {
			fprintf(stdout, " ... id: %"PRIu64, e->id);
			fprintf(stdout, "  idx: %"PRIu64, e->idx);
			fprintf(stdout, "  cpu: %"PRId64, e->cpu);
			fprintf(stdout, "  tid: %"PRId64"\n", e->tid);
		}

		sid = perf_evlist__id2sid(evlist, e->id);
		if (!sid)
			return -ENOENT;
		sid->idx = e->idx;
		sid->cpu = e->cpu;
		sid->tid = e->tid;
	}
	return 0;
}

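/*
 * Emit ID index events mapping each sample id to its idx/cpu/tid. At most
 * max_nr entries fit in one event (the header size field is a u16), so
 * the index is emitted in chunks when necessary.
 */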
int perf_event__synthesize_id_index(struct perf_tool *tool,
				    perf_event__handler_t process,
				    struct perf_evlist *evlist,
				    struct machine *machine)
{
	union perf_event *ev;
	struct perf_evsel *evsel;
	size_t nr = 0, i = 0, sz, max_nr, n;
	int err;

	pr_debug2("Synthesizing id index\n");

	max_nr = (UINT16_MAX - sizeof(struct id_index_event)) /
		 sizeof(struct id_index_entry);

	evlist__for_each_entry(evlist, evsel)
		nr += evsel->ids;

	n = nr > max_nr ? max_nr : nr;
	sz = sizeof(struct id_index_event) + n * sizeof(struct id_index_entry);
	ev = zalloc(sz);
	if (!ev)
		return -ENOMEM;

	ev->id_index.header.type = PERF_RECORD_ID_INDEX;
	ev->id_index.header.size = sz;
	ev->id_index.nr = n;

	evlist__for_each_entry(evlist, evsel) {
		u32 j;

		for (j = 0; j < evsel->ids; j++) {
			struct id_index_entry *e;
			struct perf_sample_id *sid;

			if (i >= n) {
				err = process(tool, ev, NULL, machine);
				if (err)
					goto out_err;
				nr -= n;
				i = 0;
			}

			e = &ev->id_index.entries[i++];

			e->id = evsel->id[j];

			sid = perf_evlist__id2sid(evlist, e->id);
			if (!sid) {
				free(ev);
				return -ENOENT;
			}

			e->idx = sid->idx;
			e->cpu = sid->cpu;
			e->tid = sid->tid;
		}
	}

	sz = sizeof(struct id_index_event) + nr * sizeof(struct id_index_entry);
	ev->id_index.header.size = sz;
	ev->id_index.nr = nr;

	err = process(tool, ev, NULL, machine);
out_err:
	free(ev);

	return err;
}