#include <errno.h>
#include <inttypes.h>
#include <linux/kernel.h>
#include <traceevent/event-parse.h>
#include <api/fs/fs.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "evlist.h"
#include "evsel.h"
#include "memswap.h"
#include "session.h"
#include "tool.h"
#include "sort.h"
#include "util.h"
#include "cpumap.h"
#include "perf_regs.h"
#include "asm/bug.h"
#include "auxtrace.h"
#include "thread.h"
#include "thread-stack.h"
#include "stat.h"

static int perf_session__deliver_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_tool *tool,
				       u64 file_offset);

static int perf_session__open(struct perf_session *session)
{
	struct perf_data *data = session->data;

	if (perf_session__read_header(session) < 0) {
		pr_err("incompatible file format (rerun with -v to learn more)\n");
		return -1;
	}

	if (perf_data__is_pipe(data))
		return 0;

	if (perf_header__has_feat(&session->header, HEADER_STAT))
		return 0;

	if (!perf_evlist__valid_sample_type(session->evlist)) {
		pr_err("non matching sample_type\n");
		return -1;
	}

	if (!perf_evlist__valid_sample_id_all(session->evlist)) {
		pr_err("non matching sample_id_all\n");
		return -1;
	}

	if (!perf_evlist__valid_read_format(session->evlist)) {
		pr_err("non matching read_format\n");
		return -1;
	}

	return 0;
}

void perf_session__set_id_hdr_size(struct perf_session *session)
{
	u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);

	machines__set_id_hdr_size(&session->machines, id_hdr_size);
}

int perf_session__create_kernel_maps(struct perf_session *session)
{
	int ret = machine__create_kernel_maps(&session->machines.host);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&session->machines);
	return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *session)
{
	machines__destroy_kernel_maps(&session->machines);
}

static bool perf_session__has_comm_exec(struct perf_session *session)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->attr.comm_exec)
			return true;
	}

	return false;
}

static void perf_session__set_comm_exec(struct perf_session *session)
{
	bool comm_exec = perf_session__has_comm_exec(session);

	machines__set_comm_exec(&session->machines, comm_exec);
}

static int ordered_events__deliver_event(struct ordered_events *oe,
					 struct ordered_event *event)
{
	struct perf_session *session = container_of(oe, struct perf_session,
						    ordered_events);

	return perf_session__deliver_event(session, event->event,
					   session->tool, event->file_offset);
}

struct perf_session *perf_session__new(struct perf_data *data,
				       bool repipe, struct perf_tool *tool)
{
	struct perf_session *session = zalloc(sizeof(*session));

	if (!session)
		goto out;

	session->repipe = repipe;
	session->tool = tool;
	INIT_LIST_HEAD(&session->auxtrace_index);
	machines__init(&session->machines);
	ordered_events__init(&session->ordered_events, ordered_events__deliver_event);

	if (data) {
		if (perf_data__open(data))
			goto out_delete;

		session->data = data;

		if (perf_data__is_read(data)) {
			if (perf_session__open(session) < 0)
				goto out_close;

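			/*
			 * set session attributes that are present in perf.data
			 * file. But not in pipe-mode.
			 */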
			if (!data->is_pipe) {
				perf_session__set_id_hdr_size(session);
				perf_session__set_comm_exec(session);
			}
		}
	} else {
		session->machines.host.env = &perf_env;
	}

	if (!data || perf_data__is_write(data)) {
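		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in perf_event__process_mmap().
		 */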
		if (perf_session__create_kernel_maps(session) < 0)
			pr_warning("Cannot read kernel map\n");
	}

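	/*
	 * In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is
	 * processed, so perf_evlist__sample_id_all is not meaningful here.
	 */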
	if ((!data || !data->is_pipe) && tool && tool->ordering_requires_timestamps &&
	    tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		tool->ordered_events = false;
	}

	return session;

 out_close:
	perf_data__close(data);
 out_delete:
	perf_session__delete(session);
 out:
	return NULL;
}

static void perf_session__delete_threads(struct perf_session *session)
{
	machine__delete_threads(&session->machines.host);
}

void perf_session__delete(struct perf_session *session)
{
	if (session == NULL)
		return;
	auxtrace__free(session);
	auxtrace_index__free(&session->auxtrace_index);
	perf_session__destroy_kernel_maps(session);
	perf_session__delete_threads(session);
	perf_env__exit(&session->header.env);
	machines__exit(&session->machines);
	if (session->data)
		perf_data__close(session->data);
	free(session);
}

static int process_event_synth_tracing_data_stub(struct perf_tool *tool
						 __maybe_unused,
						 union perf_event *event
						 __maybe_unused,
						 struct perf_session *session
						 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
					 union perf_event *event __maybe_unused,
					 struct perf_evlist **pevlist
					 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_event_update_stub(struct perf_tool *tool __maybe_unused,
						 union perf_event *event __maybe_unused,
						 struct perf_evlist **pevlist
						 __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_event_update(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
				     union perf_event *event __maybe_unused,
				     struct perf_sample *sample __maybe_unused,
				     struct perf_evsel *evsel __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_stub(struct perf_tool *tool __maybe_unused,
			      union perf_event *event __maybe_unused,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
				       union perf_event *event __maybe_unused,
				       struct ordered_events *oe __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event,
				  struct ordered_events *oe);

static int skipn(int fd, off_t n)
{
	char buf[4096];
	ssize_t ret;

	while (n > 0) {
		ret = read(fd, buf, min(n, (off_t)sizeof(buf)));
		if (ret <= 0)
			return ret;
		n -= ret;
	}

	return 0;
}

static s64 process_event_auxtrace_stub(struct perf_tool *tool __maybe_unused,
				       union perf_event *event,
				       struct perf_session *session
				       __maybe_unused)
{
	dump_printf(": unhandled!\n");
	if (perf_data__is_pipe(session->data))
		skipn(perf_data__fd(session->data), event->auxtrace.size);
	return event->auxtrace.size;
}

static int process_event_op2_stub(struct perf_tool *tool __maybe_unused,
				  union perf_event *event __maybe_unused,
				  struct perf_session *session __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static
int process_event_thread_map_stub(struct perf_tool *tool __maybe_unused,
				  union perf_event *event __maybe_unused,
				  struct perf_session *session __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_thread_map(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static
int process_event_cpu_map_stub(struct perf_tool *tool __maybe_unused,
			       union perf_event *event __maybe_unused,
			       struct perf_session *session __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_cpu_map(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static
int process_event_stat_config_stub(struct perf_tool *tool __maybe_unused,
				   union perf_event *event __maybe_unused,
				   struct perf_session *session __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_stat_config(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int process_stat_stub(struct perf_tool *tool __maybe_unused,
			     union perf_event *event __maybe_unused,
			     struct perf_session *perf_session
			     __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_stat(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int process_stat_round_stub(struct perf_tool *tool __maybe_unused,
				   union perf_event *event __maybe_unused,
				   struct perf_session *perf_session
				   __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_stat_round(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

void perf_tool__fill_defaults(struct perf_tool *tool)
{
	if (tool->sample == NULL)
		tool->sample = process_event_sample_stub;
	if (tool->mmap == NULL)
		tool->mmap = process_event_stub;
	if (tool->mmap2 == NULL)
		tool->mmap2 = process_event_stub;
	if (tool->comm == NULL)
		tool->comm = process_event_stub;
	if (tool->namespaces == NULL)
		tool->namespaces = process_event_stub;
	if (tool->fork == NULL)
		tool->fork = process_event_stub;
	if (tool->exit == NULL)
		tool->exit = process_event_stub;
	if (tool->lost == NULL)
		tool->lost = perf_event__process_lost;
	if (tool->lost_samples == NULL)
		tool->lost_samples = perf_event__process_lost_samples;
	if (tool->aux == NULL)
		tool->aux = perf_event__process_aux;
	if (tool->itrace_start == NULL)
		tool->itrace_start = perf_event__process_itrace_start;
	if (tool->context_switch == NULL)
		tool->context_switch = perf_event__process_switch;
	if (tool->read == NULL)
		tool->read = process_event_sample_stub;
	if (tool->throttle == NULL)
		tool->throttle = process_event_stub;
	if (tool->unthrottle == NULL)
		tool->unthrottle = process_event_stub;
	if (tool->attr == NULL)
		tool->attr = process_event_synth_attr_stub;
	if (tool->event_update == NULL)
		tool->event_update = process_event_synth_event_update_stub;
	if (tool->tracing_data == NULL)
		tool->tracing_data = process_event_synth_tracing_data_stub;
	if (tool->build_id == NULL)
		tool->build_id = process_event_op2_stub;
	if (tool->finished_round == NULL) {
		if (tool->ordered_events)
			tool->finished_round = process_finished_round;
		else
			tool->finished_round = process_finished_round_stub;
	}
	if (tool->id_index == NULL)
		tool->id_index = process_event_op2_stub;
	if (tool->auxtrace_info == NULL)
		tool->auxtrace_info = process_event_op2_stub;
	if (tool->auxtrace == NULL)
		tool->auxtrace = process_event_auxtrace_stub;
	if (tool->auxtrace_error == NULL)
		tool->auxtrace_error = process_event_op2_stub;
	if (tool->thread_map == NULL)
		tool->thread_map = process_event_thread_map_stub;
	if (tool->cpu_map == NULL)
		tool->cpu_map = process_event_cpu_map_stub;
	if (tool->stat_config == NULL)
		tool->stat_config = process_event_stat_config_stub;
	if (tool->stat == NULL)
		tool->stat = process_stat_stub;
	if (tool->stat_round == NULL)
		tool->stat_round = process_stat_round_stub;
	if (tool->time_conv == NULL)
		tool->time_conv = process_event_op2_stub;
	if (tool->feature == NULL)
		tool->feature = process_event_op2_stub;
}

static void swap_sample_id_all(union perf_event *event, void *data)
{
	void *end = (void *) event + event->header.size;
	int size = end - data;

	BUG_ON(size % sizeof(u64));
	mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
				   bool sample_id_all __maybe_unused)
{
	struct perf_event_header *hdr = &event->header;

	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
	event->comm.pid = bswap_32(event->comm.pid);
	event->comm.tid = bswap_32(event->comm.tid);

	if (sample_id_all) {
		void *data = &event->comm.comm;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap_swap(union perf_event *event,
				  bool sample_id_all)
{
	event->mmap.pid = bswap_32(event->mmap.pid);
	event->mmap.tid = bswap_32(event->mmap.tid);
	event->mmap.start = bswap_64(event->mmap.start);
	event->mmap.len = bswap_64(event->mmap.len);
	event->mmap.pgoff = bswap_64(event->mmap.pgoff);

	if (sample_id_all) {
		void *data = &event->mmap.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap2_swap(union perf_event *event,
				   bool sample_id_all)
{
	event->mmap2.pid = bswap_32(event->mmap2.pid);
	event->mmap2.tid = bswap_32(event->mmap2.tid);
	event->mmap2.start = bswap_64(event->mmap2.start);
	event->mmap2.len = bswap_64(event->mmap2.len);
	event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
	event->mmap2.maj = bswap_32(event->mmap2.maj);
	event->mmap2.min = bswap_32(event->mmap2.min);
	event->mmap2.ino = bswap_64(event->mmap2.ino);

	if (sample_id_all) {
		void *data = &event->mmap2.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
	event->fork.pid = bswap_32(event->fork.pid);
	event->fork.tid = bswap_32(event->fork.tid);
	event->fork.ppid = bswap_32(event->fork.ppid);
	event->fork.ptid = bswap_32(event->fork.ptid);
	event->fork.time = bswap_64(event->fork.time);

	if (sample_id_all)
		swap_sample_id_all(event, &event->fork + 1);
}

static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
	event->read.pid = bswap_32(event->read.pid);
	event->read.tid = bswap_32(event->read.tid);
	event->read.value = bswap_64(event->read.value);
	event->read.time_enabled = bswap_64(event->read.time_enabled);
	event->read.time_running = bswap_64(event->read.time_running);
	event->read.id = bswap_64(event->read.id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->read + 1);
}

static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
{
	event->aux.aux_offset = bswap_64(event->aux.aux_offset);
	event->aux.aux_size = bswap_64(event->aux.aux_size);
	event->aux.flags = bswap_64(event->aux.flags);

	if (sample_id_all)
		swap_sample_id_all(event, &event->aux + 1);
}

static void perf_event__itrace_start_swap(union perf_event *event,
					  bool sample_id_all)
{
	event->itrace_start.pid = bswap_32(event->itrace_start.pid);
	event->itrace_start.tid = bswap_32(event->itrace_start.tid);

	if (sample_id_all)
		swap_sample_id_all(event, &event->itrace_start + 1);
}

static void perf_event__switch_swap(union perf_event *event, bool sample_id_all)
{
	if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
		event->context_switch.next_prev_pid =
				bswap_32(event->context_switch.next_prev_pid);
		event->context_switch.next_prev_tid =
				bswap_32(event->context_switch.next_prev_tid);
	}

	if (sample_id_all)
		swap_sample_id_all(event, &event->context_switch + 1);
}

static void perf_event__throttle_swap(union perf_event *event,
				      bool sample_id_all)
{
	event->throttle.time = bswap_64(event->throttle.time);
	event->throttle.id = bswap_64(event->throttle.id);
	event->throttle.stream_id = bswap_64(event->throttle.stream_id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->throttle + 1);
}

static u8 revbyte(u8 b)
{
	int rev = (b >> 4) | ((b & 0xf) << 4);
	rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
	rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
	return (u8) rev;
}

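/*
 * XXX this is hack in attempt to carry flags bitfield
 * through endian village. ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above seems to be byte specific, so we need to reverse each
 * byte of the bitfield. 'Internet' also says this might be implementation
 * specific and we probably need proper fix and carry perf_event_attr
 * bitfield flags in separate data file FEAT_ section. Though this seems
 * to work for now.
 */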
static void swap_bitfield(u8 *p, unsigned len)
{
	unsigned i;

	for (i = 0; i < len; i++) {
		*p = revbyte(*p);
		p++;
	}
}

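/* exported for swapping attributes in file header */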
void perf_event__attr_swap(struct perf_event_attr *attr)
{
	attr->type = bswap_32(attr->type);
	attr->size = bswap_32(attr->size);

#define bswap_safe(f, n) 					\
	(attr->size > (offsetof(struct perf_event_attr, f) + 	\
		       sizeof(attr->f) * (n)))
#define bswap_field(f, sz) 			\
do { 						\
	if (bswap_safe(f, 0))			\
		attr->f = bswap_##sz(attr->f);	\
} while(0)
#define bswap_field_16(f) bswap_field(f, 16)
#define bswap_field_32(f) bswap_field(f, 32)
#define bswap_field_64(f) bswap_field(f, 64)

	bswap_field_64(config);
	bswap_field_64(sample_period);
	bswap_field_64(sample_type);
	bswap_field_64(read_format);
	bswap_field_32(wakeup_events);
	bswap_field_32(bp_type);
	bswap_field_64(bp_addr);
	bswap_field_64(bp_len);
	bswap_field_64(branch_sample_type);
	bswap_field_64(sample_regs_user);
	bswap_field_32(sample_stack_user);
	bswap_field_32(aux_watermark);
	bswap_field_16(sample_max_stack);

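	/*
	 * After read_format are bitfields. Check read_format because
	 * we are unable to use offsetof on bitfield.
	 */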
	if (bswap_safe(read_format, 1))
		swap_bitfield((u8 *) (&attr->read_format + 1),
			      sizeof(u64));
#undef bswap_field_64
#undef bswap_field_32
#undef bswap_field
#undef bswap_safe
}

static void perf_event__hdr_attr_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	size_t size;

	perf_event__attr_swap(&event->attr.attr);

	size = event->header.size;
	size -= (void *)&event->attr.id - (void *)event;
	mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_update_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->event_update.type = bswap_64(event->event_update.type);
	event->event_update.id = bswap_64(event->event_update.id);
}

static void perf_event__event_type_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->event_type.event_type.event_id =
		bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->tracing_data.size = bswap_32(event->tracing_data.size);
}

static void perf_event__auxtrace_info_swap(union perf_event *event,
					   bool sample_id_all __maybe_unused)
{
	size_t size;

	event->auxtrace_info.type = bswap_32(event->auxtrace_info.type);

	size = event->header.size;
	size -= (void *)&event->auxtrace_info.priv - (void *)event;
	mem_bswap_64(event->auxtrace_info.priv, size);
}

static void perf_event__auxtrace_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	event->auxtrace.size = bswap_64(event->auxtrace.size);
	event->auxtrace.offset = bswap_64(event->auxtrace.offset);
	event->auxtrace.reference = bswap_64(event->auxtrace.reference);
	event->auxtrace.idx = bswap_32(event->auxtrace.idx);
	event->auxtrace.tid = bswap_32(event->auxtrace.tid);
	event->auxtrace.cpu = bswap_32(event->auxtrace.cpu);
}

static void perf_event__auxtrace_error_swap(union perf_event *event,
					    bool sample_id_all __maybe_unused)
{
	event->auxtrace_error.type = bswap_32(event->auxtrace_error.type);
	event->auxtrace_error.code = bswap_32(event->auxtrace_error.code);
	event->auxtrace_error.cpu = bswap_32(event->auxtrace_error.cpu);
	event->auxtrace_error.pid = bswap_32(event->auxtrace_error.pid);
	event->auxtrace_error.tid = bswap_32(event->auxtrace_error.tid);
	event->auxtrace_error.ip = bswap_64(event->auxtrace_error.ip);
}

static void perf_event__thread_map_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	unsigned i;

	event->thread_map.nr = bswap_64(event->thread_map.nr);

	for (i = 0; i < event->thread_map.nr; i++)
		event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid);
}

static void perf_event__cpu_map_swap(union perf_event *event,
				     bool sample_id_all __maybe_unused)
{
	struct cpu_map_data *data = &event->cpu_map.data;
	struct cpu_map_entries *cpus;
	struct cpu_map_mask *mask;
	unsigned i;

	data->type = bswap_64(data->type);

	switch (data->type) {
	case PERF_CPU_MAP__CPUS:
		cpus = (struct cpu_map_entries *)data->data;

		cpus->nr = bswap_16(cpus->nr);

		for (i = 0; i < cpus->nr; i++)
			cpus->cpu[i] = bswap_16(cpus->cpu[i]);
		break;
	case PERF_CPU_MAP__MASK:
		mask = (struct cpu_map_mask *) data->data;

		mask->nr = bswap_16(mask->nr);
		mask->long_size = bswap_16(mask->long_size);

		switch (mask->long_size) {
		case 4: mem_bswap_32(&mask->mask, mask->nr); break;
		case 8: mem_bswap_64(&mask->mask, mask->nr); break;
		default:
			pr_err("cpu_map swap: unsupported long size\n");
		}
	default:
		break;
	}
}

static void perf_event__stat_config_swap(union perf_event *event,
					 bool sample_id_all __maybe_unused)
{
	u64 size;

	size  = event->stat_config.nr * sizeof(event->stat_config.data[0]);
	size += 1; /* nr item itself */
	mem_bswap_64(&event->stat_config.nr, size);
}

static void perf_event__stat_swap(union perf_event *event,
				  bool sample_id_all __maybe_unused)
{
	event->stat.id = bswap_64(event->stat.id);
	event->stat.thread = bswap_32(event->stat.thread);
	event->stat.cpu = bswap_32(event->stat.cpu);
	event->stat.val = bswap_64(event->stat.val);
	event->stat.ena = bswap_64(event->stat.ena);
	event->stat.run = bswap_64(event->stat.run);
}

static void perf_event__stat_round_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->stat_round.type = bswap_64(event->stat_round.type);
	event->stat_round.time = bswap_64(event->stat_round.time);
}

typedef void (*perf_event__swap_op)(union perf_event *event,
				    bool sample_id_all);

static perf_event__swap_op perf_event__swap_ops[] = {
	[PERF_RECORD_MMAP]		  = perf_event__mmap_swap,
	[PERF_RECORD_MMAP2]		  = perf_event__mmap2_swap,
	[PERF_RECORD_COMM]		  = perf_event__comm_swap,
	[PERF_RECORD_FORK]		  = perf_event__task_swap,
	[PERF_RECORD_EXIT]		  = perf_event__task_swap,
	[PERF_RECORD_LOST]		  = perf_event__all64_swap,
	[PERF_RECORD_READ]		  = perf_event__read_swap,
	[PERF_RECORD_THROTTLE]		  = perf_event__throttle_swap,
	[PERF_RECORD_UNTHROTTLE]	  = perf_event__throttle_swap,
	[PERF_RECORD_SAMPLE]		  = perf_event__all64_swap,
	[PERF_RECORD_AUX]		  = perf_event__aux_swap,
	[PERF_RECORD_ITRACE_START]	  = perf_event__itrace_start_swap,
	[PERF_RECORD_LOST_SAMPLES]	  = perf_event__all64_swap,
	[PERF_RECORD_SWITCH]		  = perf_event__switch_swap,
	[PERF_RECORD_SWITCH_CPU_WIDE]	  = perf_event__switch_swap,
	[PERF_RECORD_HEADER_ATTR]	  = perf_event__hdr_attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE]	  = perf_event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID]	  = NULL,
	[PERF_RECORD_ID_INDEX]		  = perf_event__all64_swap,
	[PERF_RECORD_AUXTRACE_INFO]	  = perf_event__auxtrace_info_swap,
	[PERF_RECORD_AUXTRACE]		  = perf_event__auxtrace_swap,
	[PERF_RECORD_AUXTRACE_ERROR]	  = perf_event__auxtrace_error_swap,
	[PERF_RECORD_THREAD_MAP]	  = perf_event__thread_map_swap,
	[PERF_RECORD_CPU_MAP]		  = perf_event__cpu_map_swap,
	[PERF_RECORD_STAT_CONFIG]	  = perf_event__stat_config_swap,
	[PERF_RECORD_STAT]		  = perf_event__stat_swap,
	[PERF_RECORD_STAT_ROUND]	  = perf_event__stat_round_swap,
	[PERF_RECORD_EVENT_UPDATE]	  = perf_event__event_update_swap,
	[PERF_RECORD_TIME_CONV]		  = perf_event__all64_swap,
	[PERF_RECORD_HEADER_MAX]	  = NULL,
};

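/*
 * When perf record finishes a pass on every buffer, it records this pseudo
 * event.
 * We record the max timestamp t found in the pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will be all
 * available and then read in the pass n + 1.
 * Hence when we start to read the pass n + 2, we can safely flush every
 * event with a timestamp below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush every event below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush every event below timestamp 7
 *      etc...
 */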
static int process_finished_round(struct perf_tool *tool __maybe_unused,
				  union perf_event *event __maybe_unused,
				  struct ordered_events *oe)
{
	if (dump_trace)
		fprintf(stdout, "\n");
	return ordered_events__flush(oe, OE_FLUSH__ROUND);
}

int perf_session__queue_event(struct perf_session *s, union perf_event *event,
			      u64 timestamp, u64 file_offset)
{
	return ordered_events__queue(&s->ordered_events, event, timestamp, file_offset);
}

static void callchain__lbr_callstack_printf(struct perf_sample *sample)
{
	struct ip_callchain *callchain = sample->callchain;
	struct branch_stack *lbr_stack = sample->branch_stack;
	u64 kernel_callchain_nr = callchain->nr;
	unsigned int i;

	for (i = 0; i < kernel_callchain_nr; i++) {
		if (callchain->ips[i] == PERF_CONTEXT_USER)
			break;
	}

	if ((i != kernel_callchain_nr) && lbr_stack->nr) {
		u64 total_nr;
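		/*
		 * LBR callstack can only get the user call chain. The
		 * kernel part of the chain, callchain->ips[0..i], ends
		 * with the PERF_CONTEXT_USER marker at index i; the user
		 * part is reconstructed from the LBR stack: entries[0].to
		 * first, then entries[0..nr-1].from, which accounts for
		 * the extra "+ 1" in total_nr below.
		 */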
		total_nr = i + 1 + lbr_stack->nr + 1;
		kernel_callchain_nr = i + 1;

		printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr);

		for (i = 0; i < kernel_callchain_nr; i++)
			printf("..... %2d: %016" PRIx64 "\n",
			       i, callchain->ips[i]);

		printf("..... %2d: %016" PRIx64 "\n",
		       (int)(kernel_callchain_nr), lbr_stack->entries[0].to);
		for (i = 0; i < lbr_stack->nr; i++)
			printf("..... %2d: %016" PRIx64 "\n",
			       (int)(i + kernel_callchain_nr + 1), lbr_stack->entries[i].from);
	}
}

static void callchain__printf(struct perf_evsel *evsel,
			      struct perf_sample *sample)
{
	unsigned int i;
	struct ip_callchain *callchain = sample->callchain;

	if (perf_evsel__has_branch_callstack(evsel))
		callchain__lbr_callstack_printf(sample);

	printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);

	for (i = 0; i < callchain->nr; i++)
		printf("..... %2d: %016" PRIx64 "\n",
		       i, callchain->ips[i]);
}

static void branch_stack__printf(struct perf_sample *sample)
{
	uint64_t i;

	printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr);

	for (i = 0; i < sample->branch_stack->nr; i++) {
		struct branch_entry *e = &sample->branch_stack->entries[i];

		printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x\n",
			i, e->from, e->to,
			(unsigned short)e->flags.cycles,
			e->flags.mispred ? "M" : " ",
			e->flags.predicted ? "P" : " ",
			e->flags.abort ? "A" : " ",
			e->flags.in_tx ? "T" : " ",
			(unsigned)e->flags.reserved);
	}
}

static void regs_dump__printf(u64 mask, u64 *regs)
{
	unsigned rid, i = 0;

	for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
		u64 val = regs[i++];

		printf(".... %-5s 0x%" PRIx64 "\n",
		       perf_reg_name(rid), val);
	}
}

static const char *regs_abi[] = {
	[PERF_SAMPLE_REGS_ABI_NONE] = "none",
	[PERF_SAMPLE_REGS_ABI_32] = "32-bit",
	[PERF_SAMPLE_REGS_ABI_64] = "64-bit",
};

static inline const char *regs_dump_abi(struct regs_dump *d)
{
	if (d->abi > PERF_SAMPLE_REGS_ABI_64)
		return "unknown";

	return regs_abi[d->abi];
}

static void regs__printf(const char *type, struct regs_dump *regs)
{
	u64 mask = regs->mask;

	printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n",
	       type,
	       mask,
	       regs_dump_abi(regs));

	regs_dump__printf(mask, regs->regs);
}

static void regs_user__printf(struct perf_sample *sample)
{
	struct regs_dump *user_regs = &sample->user_regs;

	if (user_regs->regs)
		regs__printf("user", user_regs);
}

static void regs_intr__printf(struct perf_sample *sample)
{
	struct regs_dump *intr_regs = &sample->intr_regs;

	if (intr_regs->regs)
		regs__printf("intr", intr_regs);
}

static void stack_user__printf(struct stack_dump *dump)
{
	printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
	       dump->size, dump->offset);
}

static void perf_evlist__print_tstamp(struct perf_evlist *evlist,
				      union perf_event *event,
				      struct perf_sample *sample)
{
	u64 sample_type = __perf_evlist__combined_sample_type(evlist);

	if (event->header.type != PERF_RECORD_SAMPLE &&
	    !perf_evlist__sample_id_all(evlist)) {
		fputs("-1 -1 ", stdout);
		return;
	}

	if ((sample_type & PERF_SAMPLE_CPU))
		printf("%u ", sample->cpu);

	if (sample_type & PERF_SAMPLE_TIME)
		printf("%" PRIu64 " ", sample->time);
}

static void sample_read__printf(struct perf_sample *sample, u64 read_format)
{
	printf("... sample_read:\n");

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("...... time enabled %016" PRIx64 "\n",
		       sample->read.time_enabled);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("...... time running %016" PRIx64 "\n",
		       sample->read.time_running);

	if (read_format & PERF_FORMAT_GROUP) {
		u64 i;

		printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);

		for (i = 0; i < sample->read.group.nr; i++) {
			struct sample_read_value *value;

			value = &sample->read.group.values[i];
			printf("..... id %016" PRIx64
			       ", value %016" PRIx64 "\n",
			       value->id, value->value);
		}
	} else
		printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
			sample->read.one.id, sample->read.one.value);
}

static void dump_event(struct perf_evlist *evlist, union perf_event *event,
		       u64 file_offset, struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("\n%#" PRIx64 " [%#x]: event: %d\n",
	       file_offset, event->header.size, event->header.type);

	trace_event(event);

	if (sample)
		perf_evlist__print_tstamp(evlist, event, sample);

	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
	       event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
			struct perf_sample *sample)
{
	u64 sample_type;

	if (!dump_trace)
		return;

	printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
	       event->header.misc, sample->pid, sample->tid, sample->ip,
	       sample->period, sample->addr);

	sample_type = evsel->attr.sample_type;

	if (sample_type & PERF_SAMPLE_CALLCHAIN)
		callchain__printf(evsel, sample);

	if ((sample_type & PERF_SAMPLE_BRANCH_STACK) && !perf_evsel__has_branch_callstack(evsel))
		branch_stack__printf(sample);

	if (sample_type & PERF_SAMPLE_REGS_USER)
		regs_user__printf(sample);

	if (sample_type & PERF_SAMPLE_REGS_INTR)
		regs_intr__printf(sample);

	if (sample_type & PERF_SAMPLE_STACK_USER)
		stack_user__printf(&sample->user_stack);

	if (sample_type & PERF_SAMPLE_WEIGHT)
		printf("... weight: %" PRIu64 "\n", sample->weight);

	if (sample_type & PERF_SAMPLE_DATA_SRC)
		printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);

	if (sample_type & PERF_SAMPLE_PHYS_ADDR)
		printf(" .. phys_addr: 0x%"PRIx64"\n", sample->phys_addr);

	if (sample_type & PERF_SAMPLE_TRANSACTION)
		printf("... transaction: %" PRIx64 "\n", sample->transaction);

	if (sample_type & PERF_SAMPLE_READ)
		sample_read__printf(sample, evsel->attr.read_format);
}

static void dump_read(struct perf_evsel *evsel, union perf_event *event)
{
	struct read_event *read_event = &event->read;
	u64 read_format;

	if (!dump_trace)
		return;

	printf(": %d %d %s %" PRIu64 "\n", event->read.pid, event->read.tid,
	       evsel ? perf_evsel__name(evsel) : "FAIL",
	       event->read.value);

	/* evsel may be NULL if the sample id was not found, so bail out */
	if (!evsel)
		return;

	read_format = evsel->attr.read_format;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("... time enabled : %" PRIu64 "\n", read_event->time_enabled);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("... time running : %" PRIu64 "\n", read_event->time_running);

	if (read_format & PERF_FORMAT_ID)
		printf("... id           : %" PRIu64 "\n", read_event->id);
}

static struct machine *machines__find_for_cpumode(struct machines *machines,
						  union perf_event *event,
						  struct perf_sample *sample)
{
	struct machine *machine;

	if (perf_guest &&
	    ((sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
	     (sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) {
		u32 pid;

		if (event->header.type == PERF_RECORD_MMAP
		    || event->header.type == PERF_RECORD_MMAP2)
			pid = event->mmap.pid;
		else
			pid = sample->pid;

		machine = machines__find(machines, pid);
		if (!machine)
			machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
		return machine;
	}

	return &machines->host;
}

static int deliver_sample_value(struct perf_evlist *evlist,
				struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct sample_read_value *v,
				struct machine *machine)
{
	struct perf_sample_id *sid = perf_evlist__id2sid(evlist, v->id);

	if (sid) {
		sample->id = v->id;
		sample->period = v->value - sid->period;
		sid->period = v->value;
	}

	if (!sid || sid->evsel == NULL) {
		++evlist->stats.nr_unknown_id;
		return 0;
	}

	return tool->sample(tool, event, sample, sid->evsel, machine);
}

static int deliver_sample_group(struct perf_evlist *evlist,
				struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct machine *machine)
{
	int ret = -EINVAL;
	u64 i;

	for (i = 0; i < sample->read.group.nr; i++) {
		ret = deliver_sample_value(evlist, tool, event, sample,
					   &sample->read.group.values[i],
					   machine);
		if (ret)
			break;
	}

	return ret;
}

static int
perf_evlist__deliver_sample(struct perf_evlist *evlist,
			    struct perf_tool *tool,
			    union perf_event *event,
			    struct perf_sample *sample,
			    struct perf_evsel *evsel,
			    struct machine *machine)
{
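	/* We know evsel != NULL. */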
	u64 sample_type = evsel->attr.sample_type;
	u64 read_format = evsel->attr.read_format;

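	/* Standard sample delivery. */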
	if (!(sample_type & PERF_SAMPLE_READ))
		return tool->sample(tool, event, sample, evsel, machine);

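	/* For PERF_SAMPLE_READ we have either single or group mode. */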
	if (read_format & PERF_FORMAT_GROUP)
		return deliver_sample_group(evlist, tool, event, sample,
					    machine);
	else
		return deliver_sample_value(evlist, tool, event, sample,
					    &sample->read.one, machine);
}

static int machines__deliver_event(struct machines *machines,
				   struct perf_evlist *evlist,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct perf_tool *tool, u64 file_offset)
{
	struct perf_evsel *evsel;
	struct machine *machine;

	dump_event(evlist, event, file_offset, sample);

	evsel = perf_evlist__id2evsel(evlist, sample->id);

	machine = machines__find_for_cpumode(machines, event, sample);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		if (evsel == NULL) {
			++evlist->stats.nr_unknown_id;
			return 0;
		}
		dump_sample(evsel, event, sample);
		if (machine == NULL) {
			++evlist->stats.nr_unprocessable_samples;
			return 0;
		}
		return perf_evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
	case PERF_RECORD_MMAP:
		return tool->mmap(tool, event, sample, machine);
	case PERF_RECORD_MMAP2:
		if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT)
			++evlist->stats.nr_proc_map_timeout;
		return tool->mmap2(tool, event, sample, machine);
	case PERF_RECORD_COMM:
		return tool->comm(tool, event, sample, machine);
	case PERF_RECORD_NAMESPACES:
		return tool->namespaces(tool, event, sample, machine);
	case PERF_RECORD_FORK:
		return tool->fork(tool, event, sample, machine);
	case PERF_RECORD_EXIT:
		return tool->exit(tool, event, sample, machine);
	case PERF_RECORD_LOST:
		if (tool->lost == perf_event__process_lost)
			evlist->stats.total_lost += event->lost.lost;
		return tool->lost(tool, event, sample, machine);
	case PERF_RECORD_LOST_SAMPLES:
		if (tool->lost_samples == perf_event__process_lost_samples)
			evlist->stats.total_lost_samples += event->lost_samples.lost;
		return tool->lost_samples(tool, event, sample, machine);
	case PERF_RECORD_READ:
		dump_read(evsel, event);
		return tool->read(tool, event, sample, evsel, machine);
	case PERF_RECORD_THROTTLE:
		return tool->throttle(tool, event, sample, machine);
	case PERF_RECORD_UNTHROTTLE:
		return tool->unthrottle(tool, event, sample, machine);
	case PERF_RECORD_AUX:
		if (tool->aux == perf_event__process_aux) {
			if (event->aux.flags & PERF_AUX_FLAG_TRUNCATED)
				evlist->stats.total_aux_lost += 1;
			if (event->aux.flags & PERF_AUX_FLAG_PARTIAL)
				evlist->stats.total_aux_partial += 1;
		}
		return tool->aux(tool, event, sample, machine);
	case PERF_RECORD_ITRACE_START:
		return tool->itrace_start(tool, event, sample, machine);
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		return tool->context_switch(tool, event, sample, machine);
	default:
		++evlist->stats.nr_unknown_events;
		return -1;
	}
}

static int perf_session__deliver_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_tool *tool,
				       u64 file_offset)
{
	struct perf_sample sample;
	int ret;

	ret = perf_evlist__parse_sample(session->evlist, event, &sample);
	if (ret) {
		pr_err("Can't parse sample, err = %d\n", ret);
		return ret;
	}

	ret = auxtrace__process_event(session, event, &sample, tool);
	if (ret < 0)
		return ret;
	if (ret > 0)
		return 0;

	return machines__deliver_event(&session->machines, session->evlist,
				       event, &sample, tool, file_offset);
}

static s64 perf_session__process_user_event(struct perf_session *session,
					    union perf_event *event,
					    u64 file_offset)
{
	struct ordered_events *oe = &session->ordered_events;
	struct perf_tool *tool = session->tool;
	struct perf_sample sample = { .time = 0, };
	int fd = perf_data__fd(session->data);
	int err;

	dump_event(session->evlist, event, file_offset, &sample);

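	/* These events are processed right away */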
	switch (event->header.type) {
	case PERF_RECORD_HEADER_ATTR:
		err = tool->attr(tool, event, &session->evlist);
		if (err == 0) {
			perf_session__set_id_hdr_size(session);
			perf_session__set_comm_exec(session);
		}
		return err;
	case PERF_RECORD_EVENT_UPDATE:
		return tool->event_update(tool, event, &session->evlist);
	case PERF_RECORD_HEADER_EVENT_TYPE:
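		/*
		 * Deprecated, but we need to handle it for the sake
		 * of old data files created in pipe mode.
		 */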
		return 0;
	case PERF_RECORD_HEADER_TRACING_DATA:
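		/* setup for reading amidst mmap */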
		lseek(fd, file_offset, SEEK_SET);
		return tool->tracing_data(tool, event, session);
	case PERF_RECORD_HEADER_BUILD_ID:
		return tool->build_id(tool, event, session);
	case PERF_RECORD_FINISHED_ROUND:
		return tool->finished_round(tool, event, oe);
	case PERF_RECORD_ID_INDEX:
		return tool->id_index(tool, event, session);
	case PERF_RECORD_AUXTRACE_INFO:
		return tool->auxtrace_info(tool, event, session);
	case PERF_RECORD_AUXTRACE:
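		/* setup for reading amidst mmap */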
		lseek(fd, file_offset + event->header.size, SEEK_SET);
		return tool->auxtrace(tool, event, session);
	case PERF_RECORD_AUXTRACE_ERROR:
		perf_session__auxtrace_error_inc(session, event);
		return tool->auxtrace_error(tool, event, session);
	case PERF_RECORD_THREAD_MAP:
		return tool->thread_map(tool, event, session);
	case PERF_RECORD_CPU_MAP:
		return tool->cpu_map(tool, event, session);
	case PERF_RECORD_STAT_CONFIG:
		return tool->stat_config(tool, event, session);
	case PERF_RECORD_STAT:
		return tool->stat(tool, event, session);
	case PERF_RECORD_STAT_ROUND:
		return tool->stat_round(tool, event, session);
	case PERF_RECORD_TIME_CONV:
		session->time_conv = event->time_conv;
		return tool->time_conv(tool, event, session);
	case PERF_RECORD_HEADER_FEATURE:
		return tool->feature(tool, event, session);
	default:
		return -EINVAL;
	}
}

int perf_session__deliver_synth_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample)
{
	struct perf_evlist *evlist = session->evlist;
	struct perf_tool *tool = session->tool;

	events_stats__inc(&evlist->stats, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, 0);

	return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0);
}

static void event_swap(union perf_event *event, bool sample_id_all)
{
	perf_event__swap_op swap;

	swap = perf_event__swap_ops[event->header.type];
	if (swap)
		swap(event, sample_id_all);
}

int perf_session__peek_event(struct perf_session *session, off_t file_offset,
			     void *buf, size_t buf_sz,
			     union perf_event **event_ptr,
			     struct perf_sample *sample)
{
	union perf_event *event;
	size_t hdr_sz, rest;
	int fd;

	if (session->one_mmap && !session->header.needs_swap) {
		event = file_offset - session->one_mmap_offset +
			session->one_mmap_addr;
		goto out_parse_sample;
	}

	if (perf_data__is_pipe(session->data))
		return -1;

	fd = perf_data__fd(session->data);
	hdr_sz = sizeof(struct perf_event_header);

	if (buf_sz < hdr_sz)
		return -1;

	if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
	    readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)
		return -1;

	event = (union perf_event *)buf;

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (event->header.size < hdr_sz || event->header.size > buf_sz)
		return -1;

	/* advance past the header so the payload does not overwrite it */
	buf += hdr_sz;
	rest = event->header.size - hdr_sz;

	if (readn(fd, buf, rest) != (ssize_t)rest)
		return -1;

	if (session->header.needs_swap)
		event_swap(event, perf_evlist__sample_id_all(session->evlist));

out_parse_sample:

	if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
	    perf_evlist__parse_sample(session->evlist, event, sample))
		return -1;

	*event_ptr = event;

	return 0;
}

static s64 perf_session__process_event(struct perf_session *session,
				       union perf_event *event, u64 file_offset)
{
	struct perf_evlist *evlist = session->evlist;
	struct perf_tool *tool = session->tool;
	int ret;

	if (session->header.needs_swap)
		event_swap(event, perf_evlist__sample_id_all(evlist));

	if (event->header.type >= PERF_RECORD_HEADER_MAX)
		return -EINVAL;

	events_stats__inc(&evlist->stats, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, file_offset);

	if (tool->ordered_events) {
		u64 timestamp = -1ULL;

		ret = perf_evlist__parse_sample_timestamp(evlist, event, &timestamp);
		if (ret && ret != -1)
			return ret;

		ret = perf_session__queue_event(session, event, timestamp, file_offset);
		if (ret != -ETIME)
			return ret;
	}

	return perf_session__deliver_event(session, event, tool, file_offset);
}

void perf_event_header__bswap(struct perf_event_header *hdr)
{
	hdr->type = bswap_32(hdr->type);
	hdr->misc = bswap_16(hdr->misc);
	hdr->size = bswap_16(hdr->size);
}

struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
{
	return machine__findnew_thread(&session->machines.host, -1, pid);
}

int perf_session__register_idle_thread(struct perf_session *session)
{
	struct thread *thread;
	int err = 0;

	thread = machine__findnew_thread(&session->machines.host, 0, 0);
	if (thread == NULL || thread__set_comm(thread, "swapper", 0)) {
		pr_err("problem inserting idle task.\n");
		err = -1;
	}

	if (thread == NULL || thread__set_namespaces(thread, 0, NULL)) {
		pr_err("problem inserting idle task.\n");
		err = -1;
	}

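	/* machine__findnew_thread() got the thread, so put it */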
	thread__put(thread);
	return err;
}

static void
perf_session__warn_order(const struct perf_session *session)
{
	const struct ordered_events *oe = &session->ordered_events;
	struct perf_evsel *evsel;
	bool should_warn = true;

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->attr.write_backward)
			should_warn = false;
	}

	if (!should_warn)
		return;
	if (oe->nr_unordered_events != 0)
		ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events);
}

static void perf_session__warn_about_errors(const struct perf_session *session)
{
	const struct events_stats *stats = &session->evlist->stats;

	if (session->tool->lost == perf_event__process_lost &&
	    stats->nr_events[PERF_RECORD_LOST] != 0) {
		ui__warning("Processed %d events and lost %d chunks!\n\n"
			    "Check IO/CPU overload!\n\n",
			    stats->nr_events[0],
			    stats->nr_events[PERF_RECORD_LOST]);
	}

	if (session->tool->lost_samples == perf_event__process_lost_samples) {
		double drop_rate;

		drop_rate = (double)stats->total_lost_samples /
			    (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples);
		if (drop_rate > 0.05) {
			ui__warning("Processed %" PRIu64 " samples and lost %3.2f%% samples!\n\n",
				    stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples,
				    drop_rate * 100.0);
		}
	}

	if (session->tool->aux == perf_event__process_aux &&
	    stats->total_aux_lost != 0) {
		ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n",
			    stats->total_aux_lost,
			    stats->nr_events[PERF_RECORD_AUX]);
	}

	if (session->tool->aux == perf_event__process_aux &&
	    stats->total_aux_partial != 0) {
		bool vmm_exclusive = false;

		(void)sysfs__read_bool("module/kvm_intel/parameters/vmm_exclusive",
				       &vmm_exclusive);

		ui__warning("AUX data had gaps in it %" PRIu64 " times out of %u!\n\n"
			    "Are you running a KVM guest in the background?%s\n\n",
			    stats->total_aux_partial,
			    stats->nr_events[PERF_RECORD_AUX],
			    vmm_exclusive ?
			    "\nReloading kvm_intel module with vmm_exclusive=0\n"
			    "will reduce the gaps to only guest's timeslices." :
			    "");
	}

	if (stats->nr_unknown_events != 0) {
		ui__warning("Found %u unknown events!\n\n"
			    "Is this an older tool processing a perf.data "
			    "file generated by a more recent tool?\n\n"
			    "If that is not the case, consider "
			    "reporting to linux-kernel@vger.kernel.org.\n\n",
			    stats->nr_unknown_events);
	}

	if (stats->nr_unknown_id != 0) {
		ui__warning("%u samples with id not present in the header\n",
			    stats->nr_unknown_id);
	}

	if (stats->nr_invalid_chains != 0) {
		ui__warning("Found invalid callchains!\n\n"
			    "%u out of %u events were discarded for this reason.\n\n"
			    "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
			    stats->nr_invalid_chains,
			    stats->nr_events[PERF_RECORD_SAMPLE]);
	}

	if (stats->nr_unprocessable_samples != 0) {
		ui__warning("%u unprocessable samples recorded.\n"
			    "Do you have a KVM guest running and not using 'perf kvm'?\n",
			    stats->nr_unprocessable_samples);
	}

	perf_session__warn_order(session);

	events_stats__auxtrace_error_warn(stats);

	if (stats->nr_proc_map_timeout != 0) {
		ui__warning("%d map information files for pre-existing threads were\n"
			    "not processed, if there are samples for addresses they\n"
			    "will not be resolved, you may find out which are these\n"
			    "threads by running with -v and redirecting the output\n"
			    "to a file.\n"
			    "The time limit to process proc map is too short?\n"
			    "Increase it by --proc-map-timeout\n",
			    stats->nr_proc_map_timeout);
	}
}

static int perf_session__flush_thread_stack(struct thread *thread,
					    void *p __maybe_unused)
{
	return thread_stack__flush(thread);
}

static int perf_session__flush_thread_stacks(struct perf_session *session)
{
	return machines__for_each_thread(&session->machines,
					 perf_session__flush_thread_stack,
					 NULL);
}

volatile int session_done;

static int __perf_session__process_pipe_events(struct perf_session *session)
{
	struct ordered_events *oe = &session->ordered_events;
	struct perf_tool *tool = session->tool;
	int fd = perf_data__fd(session->data);
	union perf_event *event;
	uint32_t size, cur_size = 0;
	void *buf = NULL;
	s64 skip = 0;
	u64 head;
	ssize_t err;
	void *p;

	perf_tool__fill_defaults(tool);

	head = 0;
	cur_size = sizeof(union perf_event);

	buf = malloc(cur_size);
	if (!buf)
		return -errno;
	ordered_events__set_copy_on_queue(oe, true);
more:
	event = buf;
	err = readn(fd, event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	size = event->header.size;
	if (size < sizeof(struct perf_event_header)) {
		pr_err("bad event header size\n");
		goto out_err;
	}

	if (size > cur_size) {
		void *new = realloc(buf, size);
		if (!new) {
			pr_err("failed to allocate memory to read event\n");
			goto out_err;
		}
		buf = new;
		cur_size = size;
		event = buf;
	}
	p = event;
	p += sizeof(struct perf_event_header);

	if (size - sizeof(struct perf_event_header)) {
		err = readn(fd, p, size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if ((skip = perf_session__process_event(session, event, head)) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       head, event->header.size, event->header.type);
		err = -EINVAL;
		goto out_err;
	}

	head += size;

	if (skip > 0)
		head += skip;

	if (!session_done())
		goto more;
done:
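	/* do the final flush for ordered samples */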
	err = ordered_events__flush(oe, OE_FLUSH__FINAL);
	if (err)
		goto out_err;
	err = auxtrace__flush_events(session, tool);
	if (err)
		goto out_err;
	err = perf_session__flush_thread_stacks(session);
out_err:
	free(buf);
	if (!tool->no_warn)
		perf_session__warn_about_errors(session);
	ordered_events__free(&session->ordered_events);
	auxtrace__free_events(session);
	return err;
}

static union perf_event *
fetch_mmaped_event(struct perf_session *session,
		   u64 head, size_t mmap_size, char *buf)
{
	union perf_event *event;

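	/*
	 * Ensure we have enough space remaining to read
	 * the size of the event in the headers.
	 */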
	if (head + sizeof(event->header) > mmap_size)
		return NULL;

	event = (union perf_event *)(buf + head);

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (head + event->header.size > mmap_size) {
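		/* We're not fetching the event so swap back again */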
		if (session->header.needs_swap)
			perf_event_header__bswap(&event->header);
		return NULL;
	}

	return event;
}

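/*
 * On 64bit we can mmap the data file in one go. No need for tiny mmap
 * slices. On 32bit we use 32MB.
 */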
#if BITS_PER_LONG == 64
#define MMAP_SIZE ULLONG_MAX
#define NUM_MMAPS 1
#else
#define MMAP_SIZE (32 * 1024 * 1024ULL)
#define NUM_MMAPS 128
#endif

static int __perf_session__process_events(struct perf_session *session,
					  u64 data_offset, u64 data_size,
					  u64 file_size)
{
	struct ordered_events *oe = &session->ordered_events;
	struct perf_tool *tool = session->tool;
	int fd = perf_data__fd(session->data);
	u64 head, page_offset, file_offset, file_pos, size;
	int err, mmap_prot, mmap_flags, map_idx = 0;
	size_t mmap_size;
	char *buf, *mmaps[NUM_MMAPS];
	union perf_event *event;
	struct ui_progress prog;
	s64 skip;

	perf_tool__fill_defaults(tool);

	page_offset = page_size * (data_offset / page_size);
	file_offset = page_offset;
	head = data_offset - page_offset;

	if (data_size == 0)
		goto out;

	if (data_offset + data_size < file_size)
		file_size = data_offset + data_size;

	ui_progress__init_size(&prog, file_size, "Processing events...");

	mmap_size = MMAP_SIZE;
	if (mmap_size > file_size) {
		mmap_size = file_size;
		session->one_mmap = true;
	}

	memset(mmaps, 0, sizeof(mmaps));

	mmap_prot = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (session->header.needs_swap) {
		mmap_prot |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, fd,
		   file_offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}
	mmaps[map_idx] = buf;
	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
	file_pos = file_offset + head;
	if (session->one_mmap) {
		session->one_mmap_addr = buf;
		session->one_mmap_offset = file_offset;
	}

more:
	event = fetch_mmaped_event(session, head, mmap_size, buf);
	if (!event) {
		if (mmaps[map_idx]) {
			munmap(mmaps[map_idx], mmap_size);
			mmaps[map_idx] = NULL;
		}

		page_offset = page_size * (head / page_size);
		file_offset += page_offset;
		head -= page_offset;
		goto remap;
	}

	size = event->header.size;

	if (size < sizeof(struct perf_event_header) ||
	    (skip = perf_session__process_event(session, event, file_pos)) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       file_offset + head, event->header.size,
		       event->header.type);
		err = -EINVAL;
		goto out_err;
	}

	if (skip)
		size += skip;

	head += size;
	file_pos += size;

	ui_progress__update(&prog, size);

	if (session_done())
		goto out;

	if (file_pos < file_size)
		goto more;

out:
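	/* do the final flush for ordered samples */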
	err = ordered_events__flush(oe, OE_FLUSH__FINAL);
	if (err)
		goto out_err;
	err = auxtrace__flush_events(session, tool);
	if (err)
		goto out_err;
	err = perf_session__flush_thread_stacks(session);
out_err:
	ui_progress__finish();
	if (!tool->no_warn)
		perf_session__warn_about_errors(session);
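	/*
	 * We may be switching perf.data output, make ordered_events
	 * reusable.
	 */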
	ordered_events__reinit(&session->ordered_events);
	auxtrace__free_events(session);
	session->one_mmap = false;
	return err;
}

int perf_session__process_events(struct perf_session *session)
{
	u64 size = perf_data__size(session->data);
	int err;

	if (perf_session__register_idle_thread(session) < 0)
		return -ENOMEM;

	if (!perf_data__is_pipe(session->data))
		err = __perf_session__process_events(session,
						     session->header.data_offset,
						     session->header.data_size, size);
	else
		err = __perf_session__process_pipe_events(session);

	return err;
}

bool perf_session__has_traces(struct perf_session *session, const char *msg)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT)
			return true;
	}

	pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
	return false;
}

int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
				     const char *symbol_name, u64 addr)
{
	char *bracket;
	int i;
	struct ref_reloc_sym *ref;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct kmap *kmap = map__kmap(maps[i]);

		if (!kmap)
			continue;
		kmap->ref_reloc_sym = ref;
	}

	return 0;
}

size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
{
	return machines__fprintf_dsos(&session->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
					  bool (skip)(struct dso *dso, int parm), int parm)
{
	return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
}

size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
{
	size_t ret;
	const char *msg = "";

	if (perf_header__has_feat(&session->header, HEADER_AUXTRACE))
		msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)";

	ret = fprintf(fp, "\nAggregated stats:%s\n", msg);

	ret += events_stats__fprintf(&session->evlist->stats, fp);
	return ret;
}

size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
{
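	/*
	 * FIXME: Here we have to actually print all the machines in this
	 * session, not just the host...
	 */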
	return machine__fprintf(&session->machines.host, fp);
}

struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
						   unsigned int type)
{
	struct perf_evsel *pos;

	evlist__for_each_entry(session->evlist, pos) {
		if (pos->attr.type == type)
			return pos;
	}
	return NULL;
}

int perf_session__cpu_bitmap(struct perf_session *session,
			     const char *cpu_list, unsigned long *cpu_bitmap)
{
	int i, err = -1;
	struct cpu_map *map;

	for (i = 0; i < PERF_TYPE_MAX; ++i) {
		struct perf_evsel *evsel;

		evsel = perf_session__find_first_evtype(session, i);
		if (!evsel)
			continue;

		if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
			pr_err("File does not contain CPU events. "
			       "Remove -C option to proceed.\n");
			return -1;
		}
	}

	map = cpu_map__new(cpu_list);
	if (map == NULL) {
		pr_err("Invalid cpu_list\n");
		return -1;
	}

	for (i = 0; i < map->nr; i++) {
		int cpu = map->map[i];

		if (cpu >= MAX_NR_CPUS) {
			pr_err("Requested CPU %d too large. "
			       "Consider raising MAX_NR_CPUS\n", cpu);
			goto out_delete_map;
		}

		set_bit(cpu, cpu_bitmap);
	}

	err = 0;

out_delete_map:
	cpu_map__put(map);
	return err;
}

void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
				bool full)
{
	if (session == NULL || fp == NULL)
		return;

	fprintf(fp, "# ========\n");
	perf_header__fprintf_info(session, fp, full);
	fprintf(fp, "# ========\n#\n");
}

int __perf_session__set_tracepoints_handlers(struct perf_session *session,
					     const struct perf_evsel_str_handler *assocs,
					     size_t nr_assocs)
{
	struct perf_evsel *evsel;
	size_t i;
	int err;

	for (i = 0; i < nr_assocs; i++) {
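		/*
		 * Adding a handler for an event not in the session,
		 * just ignore it.
		 */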
		evsel = perf_evlist__find_tracepoint_by_name(session->evlist, assocs[i].name);
		if (evsel == NULL)
			continue;

		err = -EEXIST;
		if (evsel->handler != NULL)
			goto out;
		evsel->handler = assocs[i].handler;
	}

	err = 0;
out:
	return err;
}

int perf_event__process_id_index(struct perf_tool *tool __maybe_unused,
				 union perf_event *event,
				 struct perf_session *session)
{
	struct perf_evlist *evlist = session->evlist;
	struct id_index_event *ie = &event->id_index;
	size_t i, nr, max_nr;

	max_nr = (ie->header.size - sizeof(struct id_index_event)) /
		 sizeof(struct id_index_entry);
	nr = ie->nr;
	if (nr > max_nr)
		return -EINVAL;

	if (dump_trace)
		fprintf(stdout, " nr: %zu\n", nr);

	for (i = 0; i < nr; i++) {
		struct id_index_entry *e = &ie->entries[i];
		struct perf_sample_id *sid;

		if (dump_trace) {
			fprintf(stdout, " ... id: %"PRIu64, e->id);
			fprintf(stdout, "  idx: %"PRIu64, e->idx);
			fprintf(stdout, "  cpu: %"PRId64, e->cpu);
			fprintf(stdout, "  tid: %"PRId64"\n", e->tid);
		}

		sid = perf_evlist__id2sid(evlist, e->id);
		if (!sid)
			return -ENOENT;
		sid->idx = e->idx;
		sid->cpu = e->cpu;
		sid->tid = e->tid;
	}
	return 0;
}

int perf_event__synthesize_id_index(struct perf_tool *tool,
				    perf_event__handler_t process,
				    struct perf_evlist *evlist,
				    struct machine *machine)
{
	union perf_event *ev;
	struct perf_evsel *evsel;
	size_t nr = 0, i = 0, sz, max_nr, n;
	int err;

	pr_debug2("Synthesizing id index\n");

	max_nr = (UINT16_MAX - sizeof(struct id_index_event)) /
		 sizeof(struct id_index_entry);

	evlist__for_each_entry(evlist, evsel)
		nr += evsel->ids;

	n = nr > max_nr ? max_nr : nr;
	sz = sizeof(struct id_index_event) + n * sizeof(struct id_index_entry);
	ev = zalloc(sz);
	if (!ev)
		return -ENOMEM;

	ev->id_index.header.type = PERF_RECORD_ID_INDEX;
	ev->id_index.header.size = sz;
	ev->id_index.nr = n;

	evlist__for_each_entry(evlist, evsel) {
		u32 j;

		for (j = 0; j < evsel->ids; j++) {
			struct id_index_entry *e;
			struct perf_sample_id *sid;

			if (i >= n) {
				err = process(tool, ev, NULL, machine);
				if (err)
					goto out_err;
				nr -= n;
				i = 0;
			}

			e = &ev->id_index.entries[i++];

			e->id = evsel->id[j];

			sid = perf_evlist__id2sid(evlist, e->id);
			if (!sid) {
				free(ev);
				return -ENOENT;
			}

			e->idx = sid->idx;
			e->cpu = sid->cpu;
			e->tid = sid->tid;
		}
	}

	sz = sizeof(struct id_index_event) + nr * sizeof(struct id_index_entry);
	ev->id_index.header.size = sz;
	ev->id_index.nr = nr;

	err = process(tool, ev, NULL, machine);
out_err:
	free(ev);

	return err;
}