// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <linux/kernel.h>
#include <traceevent/event-parse.h>
#include <api/fs/fs.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "evlist.h"
#include "evsel.h"
#include "memswap.h"
#include "session.h"
#include "tool.h"
#include "sort.h"
#include "util.h"
#include "cpumap.h"
#include "perf_regs.h"
#include "asm/bug.h"
#include "auxtrace.h"
#include "thread.h"
#include "thread-stack.h"
#include "stat.h"
#include "arch/common.h"

static int perf_session__deliver_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_tool *tool,
				       u64 file_offset);

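/*
 * Read and validate the perf.data header. Pipe and stat data carry no
 * evlist to validate; for regular files, check that all events agree on
 * sample_type, sample_id_all and read_format, which per-event parsing
 * relies on.
 */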
static int perf_session__open(struct perf_session *session)
{
	struct perf_data *data = session->data;

	if (perf_session__read_header(session) < 0) {
		pr_err("incompatible file format (rerun with -v to learn more)\n");
		return -1;
	}

	if (perf_data__is_pipe(data))
		return 0;

	if (perf_header__has_feat(&session->header, HEADER_STAT))
		return 0;

	if (!perf_evlist__valid_sample_type(session->evlist)) {
		pr_err("non matching sample_type\n");
		return -1;
	}

	if (!perf_evlist__valid_sample_id_all(session->evlist)) {
		pr_err("non matching sample_id_all\n");
		return -1;
	}

	if (!perf_evlist__valid_read_format(session->evlist)) {
		pr_err("non matching read_format\n");
		return -1;
	}

	return 0;
}

void perf_session__set_id_hdr_size(struct perf_session *session)
{
	u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);

	machines__set_id_hdr_size(&session->machines, id_hdr_size);
}

int perf_session__create_kernel_maps(struct perf_session *session)
{
	int ret = machine__create_kernel_maps(&session->machines.host);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&session->machines);
	return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *session)
{
	machines__destroy_kernel_maps(&session->machines);
}

static bool perf_session__has_comm_exec(struct perf_session *session)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->attr.comm_exec)
			return true;
	}

	return false;
}

static void perf_session__set_comm_exec(struct perf_session *session)
{
	bool comm_exec = perf_session__has_comm_exec(session);

	machines__set_comm_exec(&session->machines, comm_exec);
}

static int ordered_events__deliver_event(struct ordered_events *oe,
					 struct ordered_event *event)
{
	struct perf_session *session = container_of(oe, struct perf_session,
						    ordered_events);

	return perf_session__deliver_event(session, event->event,
					   session->tool, event->file_offset);
}

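/*
 * Allocate and initialize a session. When reading, the header (and hence
 * the evlist) comes from the data; when writing or when there is no data,
 * kernel maps are created up front instead.
 */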
struct perf_session *perf_session__new(struct perf_data *data,
				       bool repipe, struct perf_tool *tool)
{
	struct perf_session *session = zalloc(sizeof(*session));

	if (!session)
		goto out;

	session->repipe = repipe;
	session->tool = tool;
	INIT_LIST_HEAD(&session->auxtrace_index);
	machines__init(&session->machines);
	ordered_events__init(&session->ordered_events,
			     ordered_events__deliver_event, NULL);

	if (data) {
		if (perf_data__open(data))
			goto out_delete;

		session->data = data;

		if (perf_data__is_read(data)) {
			if (perf_session__open(session) < 0)
				goto out_close;

			/*
			 * set session attributes that are present in perf.data
			 * but not in pipe-mode.
			 */
			if (!data->is_pipe) {
				perf_session__set_id_hdr_size(session);
				perf_session__set_comm_exec(session);
			}
		}
	} else {
		session->machines.host.env = &perf_env;
	}

	session->machines.host.single_address_space =
		perf_env__single_address_space(session->machines.host.env);

	if (!data || perf_data__is_write(data)) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in perf_event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(session) < 0)
			pr_warning("Cannot read kernel map\n");
	}

	/*
	 * In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is
	 * processed, so perf_evlist__sample_id_all is not meaningful here.
	 */
	if ((!data || !data->is_pipe) && tool && tool->ordering_requires_timestamps &&
	    tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		tool->ordered_events = false;
	}

	return session;

 out_close:
	perf_data__close(data);
 out_delete:
	perf_session__delete(session);
 out:
	return NULL;
}


static void perf_session__delete_threads(struct perf_session *session)
{
	machine__delete_threads(&session->machines.host);
}

void perf_session__delete(struct perf_session *session)
{
	if (session == NULL)
		return;
	auxtrace__free(session);
	auxtrace_index__free(&session->auxtrace_index);
	perf_session__destroy_kernel_maps(session);
	perf_session__delete_threads(session);
	perf_env__exit(&session->header.env);
	machines__exit(&session->machines);
	if (session->data)
		perf_data__close(session->data);
	free(session);
}

static int process_event_synth_tracing_data_stub(struct perf_session *session
						 __maybe_unused,
						 union perf_event *event
						 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
					 union perf_event *event __maybe_unused,
					 struct perf_evlist **pevlist
					 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_event_update_stub(struct perf_tool *tool __maybe_unused,
						 union perf_event *event __maybe_unused,
						 struct perf_evlist **pevlist
						 __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_event_update(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
				     union perf_event *event __maybe_unused,
				     struct perf_sample *sample __maybe_unused,
				     struct perf_evsel *evsel __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_stub(struct perf_tool *tool __maybe_unused,
			      union perf_event *event __maybe_unused,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
				       union perf_event *event __maybe_unused,
				       struct ordered_events *oe __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event,
				  struct ordered_events *oe);

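/*
 * Consume and discard n bytes from fd. Used for pipe input, where unhandled
 * auxtrace data cannot be lseek'ed over.
 */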
static int skipn(int fd, off_t n)
{
	char buf[4096];
	ssize_t ret;

	while (n > 0) {
		ret = read(fd, buf, min(n, (off_t)sizeof(buf)));
		if (ret <= 0)
			return ret;
		n -= ret;
	}

	return 0;
}

static s64 process_event_auxtrace_stub(struct perf_session *session __maybe_unused,
				       union perf_event *event)
{
	dump_printf(": unhandled!\n");
	if (perf_data__is_pipe(session->data))
		skipn(perf_data__fd(session->data), event->auxtrace.size);
	return event->auxtrace.size;
}

static int process_event_op2_stub(struct perf_session *session __maybe_unused,
				  union perf_event *event __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static
int process_event_thread_map_stub(struct perf_session *session __maybe_unused,
				  union perf_event *event __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_thread_map(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static
int process_event_cpu_map_stub(struct perf_session *session __maybe_unused,
			       union perf_event *event __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_cpu_map(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static
int process_event_stat_config_stub(struct perf_session *session __maybe_unused,
				   union perf_event *event __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_stat_config(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int process_stat_stub(struct perf_session *perf_session __maybe_unused,
			     union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_stat(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int process_stat_round_stub(struct perf_session *perf_session __maybe_unused,
				   union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_stat_round(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

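/*
 * Fill every callback the tool left NULL with a default handler, so event
 * dispatch never has to check for missing callbacks.
 */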
void perf_tool__fill_defaults(struct perf_tool *tool)
{
	if (tool->sample == NULL)
		tool->sample = process_event_sample_stub;
	if (tool->mmap == NULL)
		tool->mmap = process_event_stub;
	if (tool->mmap2 == NULL)
		tool->mmap2 = process_event_stub;
	if (tool->comm == NULL)
		tool->comm = process_event_stub;
	if (tool->namespaces == NULL)
		tool->namespaces = process_event_stub;
	if (tool->fork == NULL)
		tool->fork = process_event_stub;
	if (tool->exit == NULL)
		tool->exit = process_event_stub;
	if (tool->lost == NULL)
		tool->lost = perf_event__process_lost;
	if (tool->lost_samples == NULL)
		tool->lost_samples = perf_event__process_lost_samples;
	if (tool->aux == NULL)
		tool->aux = perf_event__process_aux;
	if (tool->itrace_start == NULL)
		tool->itrace_start = perf_event__process_itrace_start;
	if (tool->context_switch == NULL)
		tool->context_switch = perf_event__process_switch;
	if (tool->read == NULL)
		tool->read = process_event_sample_stub;
	if (tool->throttle == NULL)
		tool->throttle = process_event_stub;
	if (tool->unthrottle == NULL)
		tool->unthrottle = process_event_stub;
	if (tool->attr == NULL)
		tool->attr = process_event_synth_attr_stub;
	if (tool->event_update == NULL)
		tool->event_update = process_event_synth_event_update_stub;
	if (tool->tracing_data == NULL)
		tool->tracing_data = process_event_synth_tracing_data_stub;
	if (tool->build_id == NULL)
		tool->build_id = process_event_op2_stub;
	if (tool->finished_round == NULL) {
		if (tool->ordered_events)
			tool->finished_round = process_finished_round;
		else
			tool->finished_round = process_finished_round_stub;
	}
	if (tool->id_index == NULL)
		tool->id_index = process_event_op2_stub;
	if (tool->auxtrace_info == NULL)
		tool->auxtrace_info = process_event_op2_stub;
	if (tool->auxtrace == NULL)
		tool->auxtrace = process_event_auxtrace_stub;
	if (tool->auxtrace_error == NULL)
		tool->auxtrace_error = process_event_op2_stub;
	if (tool->thread_map == NULL)
		tool->thread_map = process_event_thread_map_stub;
	if (tool->cpu_map == NULL)
		tool->cpu_map = process_event_cpu_map_stub;
	if (tool->stat_config == NULL)
		tool->stat_config = process_event_stat_config_stub;
	if (tool->stat == NULL)
		tool->stat = process_stat_stub;
	if (tool->stat_round == NULL)
		tool->stat_round = process_stat_round_stub;
	if (tool->time_conv == NULL)
		tool->time_conv = process_event_op2_stub;
	if (tool->feature == NULL)
		tool->feature = process_event_op2_stub;
}

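/*
 * The *_swap() helpers below convert events recorded on a machine of the
 * opposite endianness. The optional trailing sample_id_all block is an
 * array of u64s and is swapped separately via swap_sample_id_all().
 */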
static void swap_sample_id_all(union perf_event *event, void *data)
{
	void *end = (void *) event + event->header.size;
	int size = end - data;

	BUG_ON(size % sizeof(u64));
	mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
				   bool sample_id_all __maybe_unused)
{
	struct perf_event_header *hdr = &event->header;
	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
	event->comm.pid = bswap_32(event->comm.pid);
	event->comm.tid = bswap_32(event->comm.tid);

	if (sample_id_all) {
		void *data = &event->comm.comm;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap_swap(union perf_event *event,
				  bool sample_id_all)
{
	event->mmap.pid = bswap_32(event->mmap.pid);
	event->mmap.tid = bswap_32(event->mmap.tid);
	event->mmap.start = bswap_64(event->mmap.start);
	event->mmap.len = bswap_64(event->mmap.len);
	event->mmap.pgoff = bswap_64(event->mmap.pgoff);

	if (sample_id_all) {
		void *data = &event->mmap.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap2_swap(union perf_event *event,
				   bool sample_id_all)
{
	event->mmap2.pid = bswap_32(event->mmap2.pid);
	event->mmap2.tid = bswap_32(event->mmap2.tid);
	event->mmap2.start = bswap_64(event->mmap2.start);
	event->mmap2.len = bswap_64(event->mmap2.len);
	event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
	event->mmap2.maj = bswap_32(event->mmap2.maj);
	event->mmap2.min = bswap_32(event->mmap2.min);
	event->mmap2.ino = bswap_64(event->mmap2.ino);

	if (sample_id_all) {
		void *data = &event->mmap2.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
	event->fork.pid = bswap_32(event->fork.pid);
	event->fork.tid = bswap_32(event->fork.tid);
	event->fork.ppid = bswap_32(event->fork.ppid);
	event->fork.ptid = bswap_32(event->fork.ptid);
	event->fork.time = bswap_64(event->fork.time);

	if (sample_id_all)
		swap_sample_id_all(event, &event->fork + 1);
}

static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
	event->read.pid = bswap_32(event->read.pid);
	event->read.tid = bswap_32(event->read.tid);
	event->read.value = bswap_64(event->read.value);
	event->read.time_enabled = bswap_64(event->read.time_enabled);
	event->read.time_running = bswap_64(event->read.time_running);
	event->read.id = bswap_64(event->read.id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->read + 1);
}

static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
{
	event->aux.aux_offset = bswap_64(event->aux.aux_offset);
	event->aux.aux_size = bswap_64(event->aux.aux_size);
	event->aux.flags = bswap_64(event->aux.flags);

	if (sample_id_all)
		swap_sample_id_all(event, &event->aux + 1);
}

static void perf_event__itrace_start_swap(union perf_event *event,
					  bool sample_id_all)
{
	event->itrace_start.pid = bswap_32(event->itrace_start.pid);
	event->itrace_start.tid = bswap_32(event->itrace_start.tid);

	if (sample_id_all)
		swap_sample_id_all(event, &event->itrace_start + 1);
}

static void perf_event__switch_swap(union perf_event *event, bool sample_id_all)
{
	if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
		event->context_switch.next_prev_pid =
				bswap_32(event->context_switch.next_prev_pid);
		event->context_switch.next_prev_tid =
				bswap_32(event->context_switch.next_prev_tid);
	}

	if (sample_id_all)
		swap_sample_id_all(event, &event->context_switch + 1);
}

static void perf_event__throttle_swap(union perf_event *event,
				      bool sample_id_all)
{
	event->throttle.time = bswap_64(event->throttle.time);
	event->throttle.id = bswap_64(event->throttle.id);
	event->throttle.stream_id = bswap_64(event->throttle.stream_id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->throttle + 1);
}

static u8 revbyte(u8 b)
{
	int rev = (b >> 4) | ((b & 0xf) << 4);
	rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
	rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
	return (u8) rev;
}

/*
 * XXX this is a hack to carry the flags bitfield through the endian
 * village. The ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above seems to be byte specific, so we need to reverse each byte of
 * the bitfield. This might be implementation specific, and a proper fix
 * would carry the perf_event_attr bitfield flags in a separate data file
 * FEAT_ section. Though this seems to work for now.
 */
static void swap_bitfield(u8 *p, unsigned len)
{
	unsigned i;

	for (i = 0; i < len; i++) {
		*p = revbyte(*p);
		p++;
	}
}

/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
	attr->type = bswap_32(attr->type);
	attr->size = bswap_32(attr->size);

#define bswap_safe(f, n) 					\
	(attr->size > (offsetof(struct perf_event_attr, f) + 	\
		       sizeof(attr->f) * (n)))
#define bswap_field(f, sz) 			\
do { 						\
	if (bswap_safe(f, 0))			\
		attr->f = bswap_##sz(attr->f);	\
} while (0)
#define bswap_field_16(f) bswap_field(f, 16)
#define bswap_field_32(f) bswap_field(f, 32)
#define bswap_field_64(f) bswap_field(f, 64)

	bswap_field_64(config);
	bswap_field_64(sample_period);
	bswap_field_64(sample_type);
	bswap_field_64(read_format);
	bswap_field_32(wakeup_events);
	bswap_field_32(bp_type);
	bswap_field_64(bp_addr);
	bswap_field_64(bp_len);
	bswap_field_64(branch_sample_type);
	bswap_field_64(sample_regs_user);
	bswap_field_32(sample_stack_user);
	bswap_field_32(aux_watermark);
	bswap_field_16(sample_max_stack);

	/*
	 * The bitfields that follow read_format cannot be addressed
	 * individually, so byte-reverse them as a block, but only if
	 * attr->size says they are actually present.
	 */
	if (bswap_safe(read_format, 1))
		swap_bitfield((u8 *) (&attr->read_format + 1),
			      sizeof(u64));
#undef bswap_field_64
#undef bswap_field_32
#undef bswap_field
#undef bswap_safe
}

static void perf_event__hdr_attr_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	size_t size;

	perf_event__attr_swap(&event->attr.attr);

	size = event->header.size;
	size -= (void *)&event->attr.id - (void *)event;
	mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_update_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->event_update.type = bswap_64(event->event_update.type);
	event->event_update.id = bswap_64(event->event_update.id);
}

static void perf_event__event_type_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->event_type.event_type.event_id =
		bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->tracing_data.size = bswap_32(event->tracing_data.size);
}

static void perf_event__auxtrace_info_swap(union perf_event *event,
					   bool sample_id_all __maybe_unused)
{
	size_t size;

	event->auxtrace_info.type = bswap_32(event->auxtrace_info.type);

	size = event->header.size;
	size -= (void *)&event->auxtrace_info.priv - (void *)event;
	mem_bswap_64(event->auxtrace_info.priv, size);
}

static void perf_event__auxtrace_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	event->auxtrace.size = bswap_64(event->auxtrace.size);
	event->auxtrace.offset = bswap_64(event->auxtrace.offset);
	event->auxtrace.reference = bswap_64(event->auxtrace.reference);
	event->auxtrace.idx = bswap_32(event->auxtrace.idx);
	event->auxtrace.tid = bswap_32(event->auxtrace.tid);
	event->auxtrace.cpu = bswap_32(event->auxtrace.cpu);
}

static void perf_event__auxtrace_error_swap(union perf_event *event,
					    bool sample_id_all __maybe_unused)
{
	event->auxtrace_error.type = bswap_32(event->auxtrace_error.type);
	event->auxtrace_error.code = bswap_32(event->auxtrace_error.code);
	event->auxtrace_error.cpu = bswap_32(event->auxtrace_error.cpu);
	event->auxtrace_error.pid = bswap_32(event->auxtrace_error.pid);
	event->auxtrace_error.tid = bswap_32(event->auxtrace_error.tid);
	event->auxtrace_error.ip = bswap_64(event->auxtrace_error.ip);
}

static void perf_event__thread_map_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	unsigned i;

	event->thread_map.nr = bswap_64(event->thread_map.nr);

	for (i = 0; i < event->thread_map.nr; i++)
		event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid);
}

static void perf_event__cpu_map_swap(union perf_event *event,
				     bool sample_id_all __maybe_unused)
{
	struct cpu_map_data *data = &event->cpu_map.data;
	struct cpu_map_entries *cpus;
	struct cpu_map_mask *mask;
	unsigned i;

	data->type = bswap_64(data->type);

	switch (data->type) {
	case PERF_CPU_MAP__CPUS:
		cpus = (struct cpu_map_entries *)data->data;

		cpus->nr = bswap_16(cpus->nr);

		for (i = 0; i < cpus->nr; i++)
			cpus->cpu[i] = bswap_16(cpus->cpu[i]);
		break;
	case PERF_CPU_MAP__MASK:
		mask = (struct cpu_map_mask *) data->data;

		mask->nr = bswap_16(mask->nr);
		mask->long_size = bswap_16(mask->long_size);

		switch (mask->long_size) {
		case 4: mem_bswap_32(&mask->mask, mask->nr); break;
		case 8: mem_bswap_64(&mask->mask, mask->nr); break;
		default:
			pr_err("cpu_map swap: unsupported long size\n");
		}
	default:
		break;
	}
}

static void perf_event__stat_config_swap(union perf_event *event,
					 bool sample_id_all __maybe_unused)
{
	u64 size;

	size = event->stat_config.nr * sizeof(event->stat_config.data[0]);
	size += 1; /* nr item itself */
	mem_bswap_64(&event->stat_config.nr, size);
}

static void perf_event__stat_swap(union perf_event *event,
				  bool sample_id_all __maybe_unused)
{
	event->stat.id = bswap_64(event->stat.id);
	event->stat.thread = bswap_32(event->stat.thread);
	event->stat.cpu = bswap_32(event->stat.cpu);
	event->stat.val = bswap_64(event->stat.val);
	event->stat.ena = bswap_64(event->stat.ena);
	event->stat.run = bswap_64(event->stat.run);
}

static void perf_event__stat_round_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->stat_round.type = bswap_64(event->stat_round.type);
	event->stat_round.time = bswap_64(event->stat_round.time);
}

typedef void (*perf_event__swap_op)(union perf_event *event,
				    bool sample_id_all);

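/*
 * Dispatch table from PERF_RECORD_* type to its swap routine. A NULL entry
 * means only the event header (already swapped by the caller) needs
 * conversion.
 */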
static perf_event__swap_op perf_event__swap_ops[] = {
	[PERF_RECORD_MMAP] = perf_event__mmap_swap,
	[PERF_RECORD_MMAP2] = perf_event__mmap2_swap,
	[PERF_RECORD_COMM] = perf_event__comm_swap,
	[PERF_RECORD_FORK] = perf_event__task_swap,
	[PERF_RECORD_EXIT] = perf_event__task_swap,
	[PERF_RECORD_LOST] = perf_event__all64_swap,
	[PERF_RECORD_READ] = perf_event__read_swap,
	[PERF_RECORD_THROTTLE] = perf_event__throttle_swap,
	[PERF_RECORD_UNTHROTTLE] = perf_event__throttle_swap,
	[PERF_RECORD_SAMPLE] = perf_event__all64_swap,
	[PERF_RECORD_AUX] = perf_event__aux_swap,
	[PERF_RECORD_ITRACE_START] = perf_event__itrace_start_swap,
	[PERF_RECORD_LOST_SAMPLES] = perf_event__all64_swap,
	[PERF_RECORD_SWITCH] = perf_event__switch_swap,
	[PERF_RECORD_SWITCH_CPU_WIDE] = perf_event__switch_swap,
	[PERF_RECORD_HEADER_ATTR] = perf_event__hdr_attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID] = NULL,
	[PERF_RECORD_ID_INDEX] = perf_event__all64_swap,
	[PERF_RECORD_AUXTRACE_INFO] = perf_event__auxtrace_info_swap,
	[PERF_RECORD_AUXTRACE] = perf_event__auxtrace_swap,
	[PERF_RECORD_AUXTRACE_ERROR] = perf_event__auxtrace_error_swap,
	[PERF_RECORD_THREAD_MAP] = perf_event__thread_map_swap,
	[PERF_RECORD_CPU_MAP] = perf_event__cpu_map_swap,
	[PERF_RECORD_STAT_CONFIG] = perf_event__stat_config_swap,
	[PERF_RECORD_STAT] = perf_event__stat_swap,
	[PERF_RECORD_STAT_ROUND] = perf_event__stat_round_swap,
	[PERF_RECORD_EVENT_UPDATE] = perf_event__event_update_swap,
	[PERF_RECORD_TIME_CONV] = perf_event__all64_swap,
	[PERF_RECORD_HEADER_MAX] = NULL,
};

/*
 * When perf record finishes a pass over every buffer, it emits this pseudo
 * event (PERF_RECORD_FINISHED_ROUND).
 *
 * We record the max timestamp t found in pass n. Assuming these timestamps
 * are monotonic across cpus, we know that if a buffer still has events with
 * timestamps below t, they will all be available and read in pass n + 1.
 * Hence when we start reading pass n + 2, we can safely flush every event
 * with a timestamp below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush every event below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush every event below timestamp 7
 *      etc...
 */
static int process_finished_round(struct perf_tool *tool __maybe_unused,
				  union perf_event *event __maybe_unused,
				  struct ordered_events *oe)
{
	if (dump_trace)
		fprintf(stdout, "\n");
	return ordered_events__flush(oe, OE_FLUSH__ROUND);
}

int perf_session__queue_event(struct perf_session *s, union perf_event *event,
			      u64 timestamp, u64 file_offset)
{
	return ordered_events__queue(&s->ordered_events, event, timestamp, file_offset);
}

static void callchain__lbr_callstack_printf(struct perf_sample *sample)
{
	struct ip_callchain *callchain = sample->callchain;
	struct branch_stack *lbr_stack = sample->branch_stack;
	u64 kernel_callchain_nr = callchain->nr;
	unsigned int i;

	for (i = 0; i < kernel_callchain_nr; i++) {
		if (callchain->ips[i] == PERF_CONTEXT_USER)
			break;
	}

	if ((i != kernel_callchain_nr) && lbr_stack->nr) {
		u64 total_nr;
		/*
		 * The LBR callstack only captures the user call chain:
		 * i is the kernel call chain length and the extra 1 is
		 * the PERF_CONTEXT_USER marker.
		 *
		 * The user call chain is stored in LBR registers, which
		 * come in pairs: the caller in the "from" register, the
		 * callee in the "to" register. For a call stack
		 * "A"->"B"->"C"->"D", the LBR records "C"->"D", "B"->"C",
		 * "A"->"B", so the first "to" entry plus all "from"
		 * entries reconstruct the whole stack.
		 */
		total_nr = i + 1 + lbr_stack->nr + 1;
		kernel_callchain_nr = i + 1;

		printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr);

		for (i = 0; i < kernel_callchain_nr; i++)
			printf("..... %2d: %016" PRIx64 "\n",
			       i, callchain->ips[i]);

		printf("..... %2d: %016" PRIx64 "\n",
		       (int)(kernel_callchain_nr), lbr_stack->entries[0].to);
		for (i = 0; i < lbr_stack->nr; i++)
			printf("..... %2d: %016" PRIx64 "\n",
			       (int)(i + kernel_callchain_nr + 1), lbr_stack->entries[i].from);
	}
}

static void callchain__printf(struct perf_evsel *evsel,
			      struct perf_sample *sample)
{
	unsigned int i;
	struct ip_callchain *callchain = sample->callchain;

	if (perf_evsel__has_branch_callstack(evsel))
		callchain__lbr_callstack_printf(sample);

	printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);

	for (i = 0; i < callchain->nr; i++)
		printf("..... %2d: %016" PRIx64 "\n",
		       i, callchain->ips[i]);
}

static void branch_stack__printf(struct perf_sample *sample)
{
	uint64_t i;

	printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr);

	for (i = 0; i < sample->branch_stack->nr; i++) {
		struct branch_entry *e = &sample->branch_stack->entries[i];

		printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x\n",
		       i, e->from, e->to,
		       (unsigned short)e->flags.cycles,
		       e->flags.mispred ? "M" : " ",
		       e->flags.predicted ? "P" : " ",
		       e->flags.abort ? "A" : " ",
		       e->flags.in_tx ? "T" : " ",
		       (unsigned)e->flags.reserved);
	}
}

static void regs_dump__printf(u64 mask, u64 *regs)
{
	unsigned rid, i = 0;

	for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
		u64 val = regs[i++];

		printf(".... %-5s 0x%" PRIx64 "\n",
		       perf_reg_name(rid), val);
	}
}

static const char *regs_abi[] = {
	[PERF_SAMPLE_REGS_ABI_NONE] = "none",
	[PERF_SAMPLE_REGS_ABI_32] = "32-bit",
	[PERF_SAMPLE_REGS_ABI_64] = "64-bit",
};

static inline const char *regs_dump_abi(struct regs_dump *d)
{
	if (d->abi > PERF_SAMPLE_REGS_ABI_64)
		return "unknown";

	return regs_abi[d->abi];
}

static void regs__printf(const char *type, struct regs_dump *regs)
{
	u64 mask = regs->mask;

	printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n",
	       type,
	       mask,
	       regs_dump_abi(regs));

	regs_dump__printf(mask, regs->regs);
}

static void regs_user__printf(struct perf_sample *sample)
{
	struct regs_dump *user_regs = &sample->user_regs;

	if (user_regs->regs)
		regs__printf("user", user_regs);
}

static void regs_intr__printf(struct perf_sample *sample)
{
	struct regs_dump *intr_regs = &sample->intr_regs;

	if (intr_regs->regs)
		regs__printf("intr", intr_regs);
}

static void stack_user__printf(struct stack_dump *dump)
{
	printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
	       dump->size, dump->offset);
}

static void perf_evlist__print_tstamp(struct perf_evlist *evlist,
				      union perf_event *event,
				      struct perf_sample *sample)
{
	u64 sample_type = __perf_evlist__combined_sample_type(evlist);

	if (event->header.type != PERF_RECORD_SAMPLE &&
	    !perf_evlist__sample_id_all(evlist)) {
		fputs("-1 -1 ", stdout);
		return;
	}

	if ((sample_type & PERF_SAMPLE_CPU))
		printf("%u ", sample->cpu);

	if (sample_type & PERF_SAMPLE_TIME)
		printf("%" PRIu64 " ", sample->time);
}

static void sample_read__printf(struct perf_sample *sample, u64 read_format)
{
	printf("... sample_read:\n");

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("...... time enabled %016" PRIx64 "\n",
		       sample->read.time_enabled);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("...... time running %016" PRIx64 "\n",
		       sample->read.time_running);

	if (read_format & PERF_FORMAT_GROUP) {
		u64 i;

		printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);

		for (i = 0; i < sample->read.group.nr; i++) {
			struct sample_read_value *value;

			value = &sample->read.group.values[i];
			printf("..... id %016" PRIx64
			       ", value %016" PRIx64 "\n",
			       value->id, value->value);
		}
	} else
		printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
		       sample->read.one.id, sample->read.one.value);
}

static void dump_event(struct perf_evlist *evlist, union perf_event *event,
		       u64 file_offset, struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("\n%#" PRIx64 " [%#x]: event: %d\n",
	       file_offset, event->header.size, event->header.type);

	trace_event(event);

	if (sample)
		perf_evlist__print_tstamp(evlist, event, sample);

	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
	       event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
			struct perf_sample *sample)
{
	u64 sample_type;

	if (!dump_trace)
		return;

	printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
	       event->header.misc, sample->pid, sample->tid, sample->ip,
	       sample->period, sample->addr);

	sample_type = evsel->attr.sample_type;

	if (evsel__has_callchain(evsel))
		callchain__printf(evsel, sample);

	if ((sample_type & PERF_SAMPLE_BRANCH_STACK) && !perf_evsel__has_branch_callstack(evsel))
		branch_stack__printf(sample);

	if (sample_type & PERF_SAMPLE_REGS_USER)
		regs_user__printf(sample);

	if (sample_type & PERF_SAMPLE_REGS_INTR)
		regs_intr__printf(sample);

	if (sample_type & PERF_SAMPLE_STACK_USER)
		stack_user__printf(&sample->user_stack);

	if (sample_type & PERF_SAMPLE_WEIGHT)
		printf("... weight: %" PRIu64 "\n", sample->weight);

	if (sample_type & PERF_SAMPLE_DATA_SRC)
		printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);

	if (sample_type & PERF_SAMPLE_PHYS_ADDR)
		printf(" .. phys_addr: 0x%"PRIx64"\n", sample->phys_addr);

	if (sample_type & PERF_SAMPLE_TRANSACTION)
		printf("... transaction: %" PRIx64 "\n", sample->transaction);

	if (sample_type & PERF_SAMPLE_READ)
		sample_read__printf(sample, evsel->attr.read_format);
}

static void dump_read(struct perf_evsel *evsel, union perf_event *event)
{
	struct read_event *read_event = &event->read;
	u64 read_format;

	if (!dump_trace)
		return;

	printf(": %d %d %s %" PRIu64 "\n", event->read.pid, event->read.tid,
	       evsel ? perf_evsel__name(evsel) : "FAIL",
	       event->read.value);

	/* Without an evsel there is no read_format to interpret. */
	if (!evsel)
		return;

	read_format = evsel->attr.read_format;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("... time enabled : %" PRIu64 "\n", read_event->time_enabled);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("... time running : %" PRIu64 "\n", read_event->time_running);

	if (read_format & PERF_FORMAT_ID)
		printf("... id : %" PRIu64 "\n", read_event->id);
}

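/*
 * Pick the machine (host or guest) an event belongs to, based on the
 * cpumode recorded with the sample.
 */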
static struct machine *machines__find_for_cpumode(struct machines *machines,
						  union perf_event *event,
						  struct perf_sample *sample)
{
	struct machine *machine;

	if (perf_guest &&
	    ((sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
	     (sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) {
		u32 pid;

		if (event->header.type == PERF_RECORD_MMAP
		    || event->header.type == PERF_RECORD_MMAP2)
			pid = event->mmap.pid;
		else
			pid = sample->pid;

		machine = machines__find(machines, pid);
		if (!machine)
			machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
		return machine;
	}

	return &machines->host;
}

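/*
 * With PERF_SAMPLE_READ, a single sample may carry values for several
 * events (PERF_FORMAT_GROUP). Each value is delivered as its own
 * tool->sample() call, attributed to the evsel its id maps to, with the
 * running count difference used as the period.
 */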
static int deliver_sample_value(struct perf_evlist *evlist,
				struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct sample_read_value *v,
				struct machine *machine)
{
	struct perf_sample_id *sid = perf_evlist__id2sid(evlist, v->id);

	if (sid) {
		sample->id = v->id;
		sample->period = v->value - sid->period;
		sid->period = v->value;
	}

	if (!sid || sid->evsel == NULL) {
		++evlist->stats.nr_unknown_id;
		return 0;
	}

	return tool->sample(tool, event, sample, sid->evsel, machine);
}

static int deliver_sample_group(struct perf_evlist *evlist,
				struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct machine *machine)
{
	int ret = -EINVAL;
	u64 i;

	for (i = 0; i < sample->read.group.nr; i++) {
		ret = deliver_sample_value(evlist, tool, event, sample,
					   &sample->read.group.values[i],
					   machine);
		if (ret)
			break;
	}

	return ret;
}

static int
perf_evlist__deliver_sample(struct perf_evlist *evlist,
			    struct perf_tool *tool,
			    union perf_event *event,
			    struct perf_sample *sample,
			    struct perf_evsel *evsel,
			    struct machine *machine)
{
	/* We know evsel != NULL. */
	u64 sample_type = evsel->attr.sample_type;
	u64 read_format = evsel->attr.read_format;

	/* Standard sample delivery. */
	if (!(sample_type & PERF_SAMPLE_READ))
		return tool->sample(tool, event, sample, evsel, machine);

	/* For PERF_SAMPLE_READ we have either single or group mode. */
	if (read_format & PERF_FORMAT_GROUP)
		return deliver_sample_group(evlist, tool, event, sample,
					    machine);
	else
		return deliver_sample_value(evlist, tool, event, sample,
					    &sample->read.one, machine);
}

static int machines__deliver_event(struct machines *machines,
				   struct perf_evlist *evlist,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct perf_tool *tool, u64 file_offset)
{
	struct perf_evsel *evsel;
	struct machine *machine;

	dump_event(evlist, event, file_offset, sample);

	evsel = perf_evlist__id2evsel(evlist, sample->id);

	machine = machines__find_for_cpumode(machines, event, sample);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		if (evsel == NULL) {
			++evlist->stats.nr_unknown_id;
			return 0;
		}
		dump_sample(evsel, event, sample);
		if (machine == NULL) {
			++evlist->stats.nr_unprocessable_samples;
			return 0;
		}
		return perf_evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
	case PERF_RECORD_MMAP:
		return tool->mmap(tool, event, sample, machine);
	case PERF_RECORD_MMAP2:
		if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT)
			++evlist->stats.nr_proc_map_timeout;
		return tool->mmap2(tool, event, sample, machine);
	case PERF_RECORD_COMM:
		return tool->comm(tool, event, sample, machine);
	case PERF_RECORD_NAMESPACES:
		return tool->namespaces(tool, event, sample, machine);
	case PERF_RECORD_FORK:
		return tool->fork(tool, event, sample, machine);
	case PERF_RECORD_EXIT:
		return tool->exit(tool, event, sample, machine);
	case PERF_RECORD_LOST:
		if (tool->lost == perf_event__process_lost)
			evlist->stats.total_lost += event->lost.lost;
		return tool->lost(tool, event, sample, machine);
	case PERF_RECORD_LOST_SAMPLES:
		if (tool->lost_samples == perf_event__process_lost_samples)
			evlist->stats.total_lost_samples += event->lost_samples.lost;
		return tool->lost_samples(tool, event, sample, machine);
	case PERF_RECORD_READ:
		dump_read(evsel, event);
		return tool->read(tool, event, sample, evsel, machine);
	case PERF_RECORD_THROTTLE:
		return tool->throttle(tool, event, sample, machine);
	case PERF_RECORD_UNTHROTTLE:
		return tool->unthrottle(tool, event, sample, machine);
	case PERF_RECORD_AUX:
		if (tool->aux == perf_event__process_aux) {
			if (event->aux.flags & PERF_AUX_FLAG_TRUNCATED)
				evlist->stats.total_aux_lost += 1;
			if (event->aux.flags & PERF_AUX_FLAG_PARTIAL)
				evlist->stats.total_aux_partial += 1;
		}
		return tool->aux(tool, event, sample, machine);
	case PERF_RECORD_ITRACE_START:
		return tool->itrace_start(tool, event, sample, machine);
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		return tool->context_switch(tool, event, sample, machine);
	default:
		++evlist->stats.nr_unknown_events;
		return -1;
	}
}

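/*
 * Parse the raw event into a perf_sample, give auxtrace a chance to consume
 * it, then hand it to the per-machine dispatcher above.
 */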
static int perf_session__deliver_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_tool *tool,
				       u64 file_offset)
{
	struct perf_sample sample;
	int ret;

	ret = perf_evlist__parse_sample(session->evlist, event, &sample);
	if (ret) {
		pr_err("Can't parse sample, err = %d\n", ret);
		return ret;
	}

	ret = auxtrace__process_event(session, event, &sample, tool);
	if (ret < 0)
		return ret;
	if (ret > 0)
		return 0;

	return machines__deliver_event(&session->machines, session->evlist,
				       event, &sample, tool, file_offset);
}

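/*
 * Synthesized "user" events (types >= PERF_RECORD_USER_TYPE_START) describe
 * the session itself rather than profiled activity.
 */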
static s64 perf_session__process_user_event(struct perf_session *session,
					    union perf_event *event,
					    u64 file_offset)
{
	struct ordered_events *oe = &session->ordered_events;
	struct perf_tool *tool = session->tool;
	struct perf_sample sample = { .time = 0, };
	int fd = perf_data__fd(session->data);
	int err;

	dump_event(session->evlist, event, file_offset, &sample);

	/* These events are processed right away */
	switch (event->header.type) {
	case PERF_RECORD_HEADER_ATTR:
		err = tool->attr(tool, event, &session->evlist);
		if (err == 0) {
			perf_session__set_id_hdr_size(session);
			perf_session__set_comm_exec(session);
		}
		return err;
	case PERF_RECORD_EVENT_UPDATE:
		return tool->event_update(tool, event, &session->evlist);
	case PERF_RECORD_HEADER_EVENT_TYPE:
		/*
		 * Deprecated, but we need to handle it for the sake of old
		 * data files created in pipe mode.
		 */
		return 0;
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* setup for reading amidst mmap */
		lseek(fd, file_offset, SEEK_SET);
		return tool->tracing_data(session, event);
	case PERF_RECORD_HEADER_BUILD_ID:
		return tool->build_id(session, event);
	case PERF_RECORD_FINISHED_ROUND:
		return tool->finished_round(tool, event, oe);
	case PERF_RECORD_ID_INDEX:
		return tool->id_index(session, event);
	case PERF_RECORD_AUXTRACE_INFO:
		return tool->auxtrace_info(session, event);
	case PERF_RECORD_AUXTRACE:
		/* setup for reading amidst mmap */
		lseek(fd, file_offset + event->header.size, SEEK_SET);
		return tool->auxtrace(session, event);
	case PERF_RECORD_AUXTRACE_ERROR:
		perf_session__auxtrace_error_inc(session, event);
		return tool->auxtrace_error(session, event);
	case PERF_RECORD_THREAD_MAP:
		return tool->thread_map(session, event);
	case PERF_RECORD_CPU_MAP:
		return tool->cpu_map(session, event);
	case PERF_RECORD_STAT_CONFIG:
		return tool->stat_config(session, event);
	case PERF_RECORD_STAT:
		return tool->stat(session, event);
	case PERF_RECORD_STAT_ROUND:
		return tool->stat_round(session, event);
	case PERF_RECORD_TIME_CONV:
		session->time_conv = event->time_conv;
		return tool->time_conv(session, event);
	case PERF_RECORD_HEADER_FEATURE:
		return tool->feature(session, event);
	default:
		return -EINVAL;
	}
}

int perf_session__deliver_synth_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample)
{
	struct perf_evlist *evlist = session->evlist;
	struct perf_tool *tool = session->tool;

	events_stats__inc(&evlist->stats, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, 0);

	return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0);
}

static void event_swap(union perf_event *event, bool sample_id_all)
{
	perf_event__swap_op swap;

	swap = perf_event__swap_ops[event->header.type];
	if (swap)
		swap(event, sample_id_all);
}

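/*
 * Read one event (and optionally its sample) from an arbitrary file offset
 * without disturbing normal processing. With a single mmap of the whole
 * file the event is referenced in place, otherwise it is read into the
 * caller-supplied buffer.
 */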
int perf_session__peek_event(struct perf_session *session, off_t file_offset,
			     void *buf, size_t buf_sz,
			     union perf_event **event_ptr,
			     struct perf_sample *sample)
{
	union perf_event *event;
	size_t hdr_sz, rest;
	int fd;

	if (session->one_mmap && !session->header.needs_swap) {
		event = file_offset - session->one_mmap_offset +
			session->one_mmap_addr;
		goto out_parse_sample;
	}

	if (perf_data__is_pipe(session->data))
		return -1;

	fd = perf_data__fd(session->data);
	hdr_sz = sizeof(struct perf_event_header);

	if (buf_sz < hdr_sz)
		return -1;

	if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
	    readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)
		return -1;

	event = (union perf_event *)buf;

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (event->header.size < hdr_sz || event->header.size > buf_sz)
		return -1;

	/* Read the payload after the header we already consumed. */
	buf += hdr_sz;
	rest = event->header.size - hdr_sz;

	if (readn(fd, buf, rest) != (ssize_t)rest)
		return -1;

	if (session->header.needs_swap)
		event_swap(event, perf_evlist__sample_id_all(session->evlist));

out_parse_sample:

	if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
	    perf_evlist__parse_sample(session->evlist, event, sample))
		return -1;

	*event_ptr = event;

	return 0;
}

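/*
 * Entry point for every event read from the file: swap if needed, account
 * it, route user events directly, and queue samples by timestamp when
 * ordered delivery is enabled.
 */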
static s64 perf_session__process_event(struct perf_session *session,
				       union perf_event *event, u64 file_offset)
{
	struct perf_evlist *evlist = session->evlist;
	struct perf_tool *tool = session->tool;
	int ret;

	if (session->header.needs_swap)
		event_swap(event, perf_evlist__sample_id_all(evlist));

	if (event->header.type >= PERF_RECORD_HEADER_MAX)
		return -EINVAL;

	events_stats__inc(&evlist->stats, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, file_offset);

	if (tool->ordered_events) {
		u64 timestamp = -1ULL;

		ret = perf_evlist__parse_sample_timestamp(evlist, event, &timestamp);
		if (ret && ret != -1)
			return ret;

		ret = perf_session__queue_event(session, event, timestamp, file_offset);
		if (ret != -ETIME)
			return ret;
	}

	return perf_session__deliver_event(session, event, tool, file_offset);
}

void perf_event_header__bswap(struct perf_event_header *hdr)
{
	hdr->type = bswap_32(hdr->type);
	hdr->misc = bswap_16(hdr->misc);
	hdr->size = bswap_16(hdr->size);
}

struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
{
	return machine__findnew_thread(&session->machines.host, -1, pid);
}

/*
 * The idle task (pid 0, comm "swapper") has no /proc entry, so it is never
 * synthesized; register it explicitly so that samples hitting pid 0 resolve
 * to a named thread.
 */
int perf_session__register_idle_thread(struct perf_session *session)
{
	struct thread *thread;
	int err = 0;

	thread = machine__findnew_thread(&session->machines.host, 0, 0);
	if (thread == NULL || thread__set_comm(thread, "swapper", 0)) {
		pr_err("problem inserting idle task.\n");
		err = -1;
	}

	if (thread == NULL || thread__set_namespaces(thread, 0, NULL)) {
		pr_err("problem inserting idle task.\n");
		err = -1;
	}

	/* machine__findnew_thread() got the thread, so put it */
	thread__put(thread);
	return err;
}

static void
perf_session__warn_order(const struct perf_session *session)
{
	const struct ordered_events *oe = &session->ordered_events;
	struct perf_evsel *evsel;
	bool should_warn = true;

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->attr.write_backward)
			should_warn = false;
	}

	if (!should_warn)
		return;
	if (oe->nr_unordered_events != 0)
		ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events);
}

static void perf_session__warn_about_errors(const struct perf_session *session)
{
	const struct events_stats *stats = &session->evlist->stats;

	if (session->tool->lost == perf_event__process_lost &&
	    stats->nr_events[PERF_RECORD_LOST] != 0) {
		ui__warning("Processed %d events and lost %d chunks!\n\n"
			    "Check IO/CPU overload!\n\n",
			    stats->nr_events[0],
			    stats->nr_events[PERF_RECORD_LOST]);
	}

	if (session->tool->lost_samples == perf_event__process_lost_samples) {
		double drop_rate;

		drop_rate = (double)stats->total_lost_samples /
			    (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples);
		if (drop_rate > 0.05) {
			ui__warning("Processed %" PRIu64 " samples and lost %3.2f%%!\n\n",
				    stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples,
				    drop_rate * 100.0);
		}
	}

	if (session->tool->aux == perf_event__process_aux &&
	    stats->total_aux_lost != 0) {
		ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n",
			    stats->total_aux_lost,
			    stats->nr_events[PERF_RECORD_AUX]);
	}

	if (session->tool->aux == perf_event__process_aux &&
	    stats->total_aux_partial != 0) {
		bool vmm_exclusive = false;

		(void)sysfs__read_bool("module/kvm_intel/parameters/vmm_exclusive",
				       &vmm_exclusive);

		ui__warning("AUX data had gaps in it %" PRIu64 " times out of %u!\n\n"
			    "Are you running a KVM guest in the background?%s\n\n",
			    stats->total_aux_partial,
			    stats->nr_events[PERF_RECORD_AUX],
			    vmm_exclusive ?
			    "\nReloading kvm_intel module with vmm_exclusive=0\n"
			    "will reduce the gaps to only guest's timeslices." :
			    "");
	}

	if (stats->nr_unknown_events != 0) {
		ui__warning("Found %u unknown events!\n\n"
			    "Is this an older tool processing a perf.data "
			    "file generated by a more recent tool?\n\n"
			    "If that is not the case, consider "
			    "reporting to linux-kernel@vger.kernel.org.\n\n",
			    stats->nr_unknown_events);
	}

	if (stats->nr_unknown_id != 0) {
		ui__warning("%u samples with id not present in the header\n",
			    stats->nr_unknown_id);
	}

	if (stats->nr_invalid_chains != 0) {
		ui__warning("Found invalid callchains!\n\n"
			    "%u out of %u events were discarded for this reason.\n\n"
			    "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
			    stats->nr_invalid_chains,
			    stats->nr_events[PERF_RECORD_SAMPLE]);
	}

	if (stats->nr_unprocessable_samples != 0) {
		ui__warning("%u unprocessable samples recorded.\n"
			    "Do you have a KVM guest running and not using 'perf kvm'?\n",
			    stats->nr_unprocessable_samples);
	}

	perf_session__warn_order(session);

	events_stats__auxtrace_error_warn(stats);

	if (stats->nr_proc_map_timeout != 0) {
		ui__warning("%d map information files for pre-existing threads were\n"
			    "not processed, if there are samples for addresses they\n"
			    "will not be resolved, you may find out which are these\n"
			    "threads by running with -v and redirecting the output\n"
			    "to a file.\n"
			    "The time limit to process proc map is too short?\n"
			    "Increase it by --proc-map-timeout\n",
			    stats->nr_proc_map_timeout);
	}
}

static int perf_session__flush_thread_stack(struct thread *thread,
					    void *p __maybe_unused)
{
	return thread_stack__flush(thread);
}

static int perf_session__flush_thread_stacks(struct perf_session *session)
{
	return machines__for_each_thread(&session->machines,
					 perf_session__flush_thread_stack,
					 NULL);
}

volatile int session_done;

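/*
 * Pipe input can be neither mmapped nor seeked, so read events into a
 * growing buffer one at a time and process them in stream order.
 */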
static int __perf_session__process_pipe_events(struct perf_session *session)
{
	struct ordered_events *oe = &session->ordered_events;
	struct perf_tool *tool = session->tool;
	int fd = perf_data__fd(session->data);
	union perf_event *event;
	uint32_t size, cur_size = 0;
	void *buf = NULL;
	s64 skip = 0;
	u64 head;
	ssize_t err;
	void *p;

	perf_tool__fill_defaults(tool);

	head = 0;
	cur_size = sizeof(union perf_event);

	buf = malloc(cur_size);
	if (!buf)
		return -errno;
	ordered_events__set_copy_on_queue(oe, true);
more:
	event = buf;
	err = readn(fd, event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	size = event->header.size;
	if (size < sizeof(struct perf_event_header)) {
		pr_err("bad event header size\n");
		goto out_err;
	}

	if (size > cur_size) {
		void *new = realloc(buf, size);
		if (!new) {
			pr_err("failed to allocate memory to read event\n");
			goto out_err;
		}
		buf = new;
		cur_size = size;
		event = buf;
	}
	p = event;
	p += sizeof(struct perf_event_header);

	if (size - sizeof(struct perf_event_header)) {
		err = readn(fd, p, size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if ((skip = perf_session__process_event(session, event, head)) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       head, event->header.size, event->header.type);
		err = -EINVAL;
		goto out_err;
	}

	head += size;

	if (skip > 0)
		head += skip;

	if (!session_done())
		goto more;
done:
	/* do the final flush for ordered samples */
	err = ordered_events__flush(oe, OE_FLUSH__FINAL);
	if (err)
		goto out_err;
	err = auxtrace__flush_events(session, tool);
	if (err)
		goto out_err;
	err = perf_session__flush_thread_stacks(session);
out_err:
	free(buf);
	if (!tool->no_warn)
		perf_session__warn_about_errors(session);
	ordered_events__free(&session->ordered_events);
	auxtrace__free_events(session);
	return err;
}

static union perf_event *
fetch_mmaped_event(struct perf_session *session,
		   u64 head, size_t mmap_size, char *buf)
{
	union perf_event *event;

	/*
	 * Ensure we have enough space remaining to read
	 * the size of the event in the header.
	 */
	if (head + sizeof(event->header) > mmap_size)
		return NULL;

	event = (union perf_event *)(buf + head);

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (head + event->header.size > mmap_size) {
		/* We're not fetching the event so swap back again */
		if (session->header.needs_swap)
			perf_event_header__bswap(&event->header);
		return NULL;
	}

	return event;
}

/*
 * On 64bit we can mmap the data file in one go. On 32bit we use 32MB.
 */
#if BITS_PER_LONG == 64
#define MMAP_SIZE ULLONG_MAX
#define NUM_MMAPS 1
#else
#define MMAP_SIZE (32 * 1024 * 1024ULL)
#define NUM_MMAPS 128
#endif

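/*
 * Walk the on-disk events in place through a sliding window of NUM_MMAPS
 * mmaps of MMAP_SIZE bytes; when an event straddles the window end, remap
 * starting at the page that contains it.
 */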
static int __perf_session__process_events(struct perf_session *session,
					  u64 data_offset, u64 data_size,
					  u64 file_size)
{
	struct ordered_events *oe = &session->ordered_events;
	struct perf_tool *tool = session->tool;
	int fd = perf_data__fd(session->data);
	u64 head, page_offset, file_offset, file_pos, size;
	int err, mmap_prot, mmap_flags, map_idx = 0;
	size_t mmap_size;
	char *buf, *mmaps[NUM_MMAPS];
	union perf_event *event;
	struct ui_progress prog;
	s64 skip;

	perf_tool__fill_defaults(tool);

	page_offset = page_size * (data_offset / page_size);
	file_offset = page_offset;
	head = data_offset - page_offset;

	if (data_size == 0)
		goto out;

	if (data_offset + data_size < file_size)
		file_size = data_offset + data_size;

	ui_progress__init_size(&prog, file_size, "Processing events...");

	mmap_size = MMAP_SIZE;
	if (mmap_size > file_size) {
		mmap_size = file_size;
		session->one_mmap = true;
	}

	memset(mmaps, 0, sizeof(mmaps));

	mmap_prot = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (session->header.needs_swap) {
		mmap_prot |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, fd,
		   file_offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}
	mmaps[map_idx] = buf;
	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
	file_pos = file_offset + head;
	if (session->one_mmap) {
		session->one_mmap_addr = buf;
		session->one_mmap_offset = file_offset;
	}

more:
	event = fetch_mmaped_event(session, head, mmap_size, buf);
	if (!event) {
		if (mmaps[map_idx]) {
			munmap(mmaps[map_idx], mmap_size);
			mmaps[map_idx] = NULL;
		}

		page_offset = page_size * (head / page_size);
		file_offset += page_offset;
		head -= page_offset;
		goto remap;
	}

	size = event->header.size;

	if (size < sizeof(struct perf_event_header) ||
	    (skip = perf_session__process_event(session, event, file_pos)) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       file_offset + head, event->header.size,
		       event->header.type);
		err = -EINVAL;
		goto out_err;
	}

	if (skip)
		size += skip;

	head += size;
	file_pos += size;

	ui_progress__update(&prog, size);

	if (session_done())
		goto out;

	if (file_pos < file_size)
		goto more;

out:
	/* do the final flush for ordered samples */
	err = ordered_events__flush(oe, OE_FLUSH__FINAL);
	if (err)
		goto out_err;
	err = auxtrace__flush_events(session, tool);
	if (err)
		goto out_err;
	err = perf_session__flush_thread_stacks(session);
out_err:
	ui_progress__finish();
	if (!tool->no_warn)
		perf_session__warn_about_errors(session);
	/*
	 * We may be switching perf.data output, make ordered_events
	 * reusable.
	 */
	ordered_events__reinit(&session->ordered_events);
	auxtrace__free_events(session);
	session->one_mmap = false;
	return err;
}

int perf_session__process_events(struct perf_session *session)
{
	u64 size = perf_data__size(session->data);
	int err;

	if (perf_session__register_idle_thread(session) < 0)
		return -ENOMEM;

	if (!perf_data__is_pipe(session->data))
		err = __perf_session__process_events(session,
						     session->header.data_offset,
						     session->header.data_size, size);
	else
		err = __perf_session__process_pipe_events(session);

	return err;
}

bool perf_session__has_traces(struct perf_session *session, const char *msg)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT)
			return true;
	}

	pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
	return false;
}

int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name, u64 addr)
{
	char *bracket;
	struct ref_reloc_sym *ref;
	struct kmap *kmap;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	kmap = map__kmap(map);
	if (kmap)
		kmap->ref_reloc_sym = ref;

	return 0;
}

size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
{
	return machines__fprintf_dsos(&session->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
					  bool (skip)(struct dso *dso, int parm), int parm)
{
	return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
}

size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
{
	size_t ret;
	const char *msg = "";

	if (perf_header__has_feat(&session->header, HEADER_AUXTRACE))
		msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)";

	ret = fprintf(fp, "\nAggregated stats:%s\n", msg);

	ret += events_stats__fprintf(&session->evlist->stats, fp);
	return ret;
}

size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
{
	/*
	 * FIXME: Here we have to actually print all the machines in this
	 * session, not just the host...
	 */
	return machine__fprintf(&session->machines.host, fp);
}

struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
						   unsigned int type)
{
	struct perf_evsel *pos;

	evlist__for_each_entry(session->evlist, pos) {
		if (pos->attr.type == type)
			return pos;
	}
	return NULL;
}

int perf_session__cpu_bitmap(struct perf_session *session,
			     const char *cpu_list, unsigned long *cpu_bitmap)
{
	int i, err = -1;
	struct cpu_map *map;

	for (i = 0; i < PERF_TYPE_MAX; ++i) {
		struct perf_evsel *evsel;

		evsel = perf_session__find_first_evtype(session, i);
		if (!evsel)
			continue;

		if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
			pr_err("File does not contain CPU events. "
			       "Remove -C option to proceed.\n");
			return -1;
		}
	}

	map = cpu_map__new(cpu_list);
	if (map == NULL) {
		pr_err("Invalid cpu_list\n");
		return -1;
	}

	for (i = 0; i < map->nr; i++) {
		int cpu = map->map[i];

		if (cpu >= MAX_NR_CPUS) {
			pr_err("Requested CPU %d too large. "
			       "Consider raising MAX_NR_CPUS\n", cpu);
			goto out_delete_map;
		}

		set_bit(cpu, cpu_bitmap);
	}

	err = 0;

out_delete_map:
	cpu_map__put(map);
	return err;
}

void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
				bool full)
{
	if (session == NULL || fp == NULL)
		return;

	fprintf(fp, "# ========\n");
	perf_header__fprintf_info(session, fp, full);
	fprintf(fp, "# ========\n#\n");
}

int __perf_session__set_tracepoints_handlers(struct perf_session *session,
					     const struct perf_evsel_str_handler *assocs,
					     size_t nr_assocs)
{
	struct perf_evsel *evsel;
	size_t i;
	int err;

	for (i = 0; i < nr_assocs; i++) {
		/*
		 * Adding a handler for an event not in the session,
		 * just ignore it.
		 */
		evsel = perf_evlist__find_tracepoint_by_name(session->evlist, assocs[i].name);
		if (evsel == NULL)
			continue;

		err = -EEXIST;
		if (evsel->handler != NULL)
			goto out;
		evsel->handler = assocs[i].handler;
	}

	err = 0;
out:
	return err;
}

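/*
 * Process a PERF_RECORD_ID_INDEX event: for each sample id, record which
 * mmap index, cpu and tid it was recorded on.
 */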
int perf_event__process_id_index(struct perf_session *session,
				 union perf_event *event)
{
	struct perf_evlist *evlist = session->evlist;
	struct id_index_event *ie = &event->id_index;
	size_t i, nr, max_nr;

	max_nr = (ie->header.size - sizeof(struct id_index_event)) /
		 sizeof(struct id_index_entry);
	nr = ie->nr;
	if (nr > max_nr)
		return -EINVAL;

	if (dump_trace)
		fprintf(stdout, " nr: %zu\n", nr);

	for (i = 0; i < nr; i++) {
		struct id_index_entry *e = &ie->entries[i];
		struct perf_sample_id *sid;

		if (dump_trace) {
			fprintf(stdout, " ... id: %"PRIu64, e->id);
			fprintf(stdout, "  idx: %"PRIu64, e->idx);
			fprintf(stdout, "  cpu: %"PRId64, e->cpu);
			fprintf(stdout, "  tid: %"PRId64"\n", e->tid);
		}

		sid = perf_evlist__id2sid(evlist, e->id);
		if (!sid)
			return -ENOENT;
		sid->idx = e->idx;
		sid->cpu = e->cpu;
		sid->tid = e->tid;
	}
	return 0;
}

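/*
 * Synthesize PERF_RECORD_ID_INDEX events covering every id in the evlist,
 * flushing an intermediate event whenever the entry count would no longer
 * fit in the u16 header size.
 */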
int perf_event__synthesize_id_index(struct perf_tool *tool,
				    perf_event__handler_t process,
				    struct perf_evlist *evlist,
				    struct machine *machine)
{
	union perf_event *ev;
	struct perf_evsel *evsel;
	size_t nr = 0, i = 0, sz, max_nr, n;
	int err;

	pr_debug2("Synthesizing id index\n");

	max_nr = (UINT16_MAX - sizeof(struct id_index_event)) /
		 sizeof(struct id_index_entry);

	evlist__for_each_entry(evlist, evsel)
		nr += evsel->ids;

	n = nr > max_nr ? max_nr : nr;
	sz = sizeof(struct id_index_event) + n * sizeof(struct id_index_entry);
	ev = zalloc(sz);
	if (!ev)
		return -ENOMEM;

	ev->id_index.header.type = PERF_RECORD_ID_INDEX;
	ev->id_index.header.size = sz;
	ev->id_index.nr = n;

	evlist__for_each_entry(evlist, evsel) {
		u32 j;

		for (j = 0; j < evsel->ids; j++) {
			struct id_index_entry *e;
			struct perf_sample_id *sid;

			if (i >= n) {
				err = process(tool, ev, NULL, machine);
				if (err)
					goto out_err;
				nr -= n;
				i = 0;
			}

			e = &ev->id_index.entries[i++];

			e->id = evsel->id[j];

			sid = perf_evlist__id2sid(evlist, e->id);
			if (!sid) {
				free(ev);
				return -ENOENT;
			}

			e->idx = sid->idx;
			e->cpu = sid->cpu;
			e->tid = sid->tid;
		}
	}

	sz = sizeof(struct id_index_event) + nr * sizeof(struct id_index_entry);
	ev->id_index.header.size = sz;
	ev->id_index.nr = nr;

	err = process(tool, ev, NULL, machine);
out_err:
	free(ev);

	return err;
}