#define _FILE_OFFSET_BITS 64

#include <linux/kernel.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "evlist.h"
#include "evsel.h"
#include "session.h"
#include "sort.h"
#include "util.h"
#include "cpumap.h"

static int perf_session__open(struct perf_session *self, bool force)
{
	struct stat input_stat;

	if (!strcmp(self->filename, "-")) {
		self->fd_pipe = true;
		self->fd = STDIN_FILENO;

		if (perf_session__read_header(self, self->fd) < 0)
			pr_err("incompatible file format\n");

		return 0;
	}

	self->fd = open(self->filename, O_RDONLY);
	if (self->fd < 0) {
		int err = errno;

		pr_err("failed to open %s: %s", self->filename, strerror(err));
		if (err == ENOENT && !strcmp(self->filename, "perf.data"))
			pr_err(" (try 'perf record' first)");
		pr_err("\n");
		/* Return the saved errno: pr_err may have clobbered errno. */
		return -err;
	}

	if (fstat(self->fd, &input_stat) < 0)
		goto out_close;

	if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
		pr_err("file %s not owned by current user or root\n",
		       self->filename);
		goto out_close;
	}

	if (!input_stat.st_size) {
		pr_info("zero-sized file (%s), nothing to do!\n",
			self->filename);
		goto out_close;
	}

	if (perf_session__read_header(self, self->fd) < 0) {
		pr_err("incompatible file format\n");
		goto out_close;
	}

	if (!perf_evlist__valid_sample_type(self->evlist)) {
		pr_err("non matching sample_type\n");
		goto out_close;
	}

	if (!perf_evlist__valid_sample_id_all(self->evlist)) {
		pr_err("non matching sample_id_all\n");
		goto out_close;
	}

	self->size = input_stat.st_size;
	return 0;

out_close:
	close(self->fd);
	self->fd = -1;
	return -1;
}

static void perf_session__id_header_size(struct perf_session *session)
{
	struct perf_sample *data;
	u64 sample_type = session->sample_type;
	u16 size = 0;

	if (!session->sample_id_all)
		goto out;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;
out:
	session->id_hdr_size = size;
}

void perf_session__update_sample_type(struct perf_session *self)
{
	self->sample_type = perf_evlist__sample_type(self->evlist);
	self->sample_size = __perf_evsel__sample_size(self->sample_type);
	self->sample_id_all = perf_evlist__sample_id_all(self->evlist);
	perf_session__id_header_size(self);
}

int perf_session__create_kernel_maps(struct perf_session *self)
{
	int ret = machine__create_kernel_maps(&self->host_machine);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&self->machines);
	return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *self)
{
	machine__destroy_kernel_maps(&self->host_machine);
	machines__destroy_guest_kernel_maps(&self->machines);
}

struct perf_session *perf_session__new(const char *filename, int mode,
				       bool force, bool repipe,
				       struct perf_event_ops *ops)
{
	size_t len = filename ? strlen(filename) + 1 : 0;
	struct perf_session *self = zalloc(sizeof(*self) + len);

	if (self == NULL)
		goto out;

	memcpy(self->filename, filename, len);
	self->threads = RB_ROOT;
	INIT_LIST_HEAD(&self->dead_threads);
	self->last_match = NULL;
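	/*
	 * On 64-bit we can mmap the whole data file in one go; on 32-bit
	 * we fall back to mapping a 32MB window at a time.
	 */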
#if BITS_PER_LONG == 64
	self->mmap_window = ULLONG_MAX;
#else
	self->mmap_window = 32 * 1024 * 1024ULL;
#endif
	self->machines = RB_ROOT;
	self->repipe = repipe;
	INIT_LIST_HEAD(&self->ordered_samples.samples);
	INIT_LIST_HEAD(&self->ordered_samples.sample_cache);
	INIT_LIST_HEAD(&self->ordered_samples.to_free);
	machine__init(&self->host_machine, "", HOST_KERNEL_ID);

	if (mode == O_RDONLY) {
		if (perf_session__open(self, force) < 0)
			goto out_delete;
		perf_session__update_sample_type(self);
	} else if (mode == O_WRONLY) {
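		/*
		 * A new perf.data file is being written: there is no header
		 * to read, but the kernel maps must be created so kernel
		 * addresses can be resolved.
		 */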
		if (perf_session__create_kernel_maps(self) < 0)
			goto out_delete;
	}

	if (ops && ops->ordering_requires_timestamps &&
	    ops->ordered_samples && !self->sample_id_all) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		ops->ordered_samples = false;
	}

out:
	return self;
out_delete:
	perf_session__delete(self);
	return NULL;
}

static void perf_session__delete_dead_threads(struct perf_session *self)
{
	struct thread *n, *t;

	list_for_each_entry_safe(t, n, &self->dead_threads, node) {
		list_del(&t->node);
		thread__delete(t);
	}
}

static void perf_session__delete_threads(struct perf_session *self)
{
	struct rb_node *nd = rb_first(&self->threads);

	while (nd) {
		struct thread *t = rb_entry(nd, struct thread, rb_node);

		/* Grab the next node before erasing the current one. */
		nd = rb_next(nd);
		rb_erase(&t->rb_node, &self->threads);
		thread__delete(t);
	}
}

void perf_session__delete(struct perf_session *self)
{
	perf_session__destroy_kernel_maps(self);
	perf_session__delete_dead_threads(self);
	perf_session__delete_threads(self);
	machine__exit(&self->host_machine);
	close(self->fd);
	free(self);
}

void perf_session__remove_thread(struct perf_session *self, struct thread *th)
{
	self->last_match = NULL;
	rb_erase(&th->rb_node, &self->threads);
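	/*
	 * Other code may still hold references to this thread, e.g. in
	 * hist entries, so just move it to the dead_threads list instead
	 * of freeing it right away.
	 */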
	list_add_tail(&th->node, &self->dead_threads);
}

static bool symbol__match_parent_regex(struct symbol *sym)
{
	if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
		return true;

	return false;
}

int perf_session__resolve_callchain(struct perf_session *self,
				    struct thread *thread,
				    struct ip_callchain *chain,
				    struct symbol **parent)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	unsigned int i;
	int err;

	callchain_cursor_reset(&self->callchain_cursor);

	for (i = 0; i < chain->nr; i++) {
		u64 ip;
		struct addr_location al;

		if (callchain_param.order == ORDER_CALLEE)
			ip = chain->ips[i];
		else
			ip = chain->ips[chain->nr - i - 1];

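		/*
		 * Values at or above PERF_CONTEXT_MAX are context markers
		 * that switch the cpumode for the entries that follow.
		 */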
		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR;
				break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL;
				break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER;
				break;
			default:
				break;
			}
			continue;
		}

		al.filtered = false;
		thread__find_addr_location(thread, self, cpumode,
					   MAP__FUNCTION, thread->pid, ip, &al, NULL);
		if (al.sym != NULL) {
			if (sort__has_parent && !*parent &&
			    symbol__match_parent_regex(al.sym))
				*parent = al.sym;
			if (!symbol_conf.use_callchain)
				break;
		}

		err = callchain_cursor_append(&self->callchain_cursor,
					      ip, al.map, al.sym);
		if (err)
			return err;
	}

	return 0;
}

static int process_event_synth_stub(union perf_event *event __used,
				    struct perf_session *session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_sample_stub(union perf_event *event __used,
				     struct perf_sample *sample __used,
				     struct perf_evsel *evsel __used,
				     struct perf_session *session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_stub(union perf_event *event __used,
			      struct perf_sample *sample __used,
			      struct perf_session *session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(union perf_event *event __used,
				       struct perf_session *session __used,
				       struct perf_event_ops *ops __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(union perf_event *event,
				  struct perf_session *session,
				  struct perf_event_ops *ops);

static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
{
	if (handler->sample == NULL)
		handler->sample = process_event_sample_stub;
	if (handler->mmap == NULL)
		handler->mmap = process_event_stub;
	if (handler->comm == NULL)
		handler->comm = process_event_stub;
	if (handler->fork == NULL)
		handler->fork = process_event_stub;
	if (handler->exit == NULL)
		handler->exit = process_event_stub;
	if (handler->lost == NULL)
		handler->lost = perf_event__process_lost;
	if (handler->read == NULL)
		handler->read = process_event_stub;
	if (handler->throttle == NULL)
		handler->throttle = process_event_stub;
	if (handler->unthrottle == NULL)
		handler->unthrottle = process_event_stub;
	if (handler->attr == NULL)
		handler->attr = process_event_synth_stub;
	if (handler->event_type == NULL)
		handler->event_type = process_event_synth_stub;
	if (handler->tracing_data == NULL)
		handler->tracing_data = process_event_synth_stub;
	if (handler->build_id == NULL)
		handler->build_id = process_event_synth_stub;
	if (handler->finished_round == NULL) {
		if (handler->ordered_samples)
			handler->finished_round = process_finished_round;
		else
			handler->finished_round = process_finished_round_stub;
	}
}

void mem_bswap_64(void *src, int byte_size)
{
	u64 *m = src;

	while (byte_size > 0) {
		*m = bswap_64(*m);
		byte_size -= sizeof(u64);
		++m;
	}
}

static void perf_event__all64_swap(union perf_event *event)
{
	struct perf_event_header *hdr = &event->header;

	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event)
{
	event->comm.pid = bswap_32(event->comm.pid);
	event->comm.tid = bswap_32(event->comm.tid);
}

static void perf_event__mmap_swap(union perf_event *event)
{
	event->mmap.pid = bswap_32(event->mmap.pid);
	event->mmap.tid = bswap_32(event->mmap.tid);
	event->mmap.start = bswap_64(event->mmap.start);
	event->mmap.len = bswap_64(event->mmap.len);
	event->mmap.pgoff = bswap_64(event->mmap.pgoff);
}

static void perf_event__task_swap(union perf_event *event)
{
	event->fork.pid = bswap_32(event->fork.pid);
	event->fork.tid = bswap_32(event->fork.tid);
	event->fork.ppid = bswap_32(event->fork.ppid);
	event->fork.ptid = bswap_32(event->fork.ptid);
	event->fork.time = bswap_64(event->fork.time);
}

static void perf_event__read_swap(union perf_event *event)
{
	event->read.pid = bswap_32(event->read.pid);
	event->read.tid = bswap_32(event->read.tid);
	event->read.value = bswap_64(event->read.value);
	event->read.time_enabled = bswap_64(event->read.time_enabled);
	event->read.time_running = bswap_64(event->read.time_running);
	event->read.id = bswap_64(event->read.id);
}

void perf_event__attr_swap(struct perf_event_attr *attr)
{
	attr->type = bswap_32(attr->type);
	attr->size = bswap_32(attr->size);
	attr->config = bswap_64(attr->config);
	attr->sample_period = bswap_64(attr->sample_period);
	attr->sample_type = bswap_64(attr->sample_type);
	attr->read_format = bswap_64(attr->read_format);
	attr->wakeup_events = bswap_32(attr->wakeup_events);
	attr->bp_type = bswap_32(attr->bp_type);
	attr->bp_addr = bswap_64(attr->bp_addr);
	attr->bp_len = bswap_64(attr->bp_len);
}

static void perf_event__hdr_attr_swap(union perf_event *event)
{
	size_t size;

	perf_event__attr_swap(&event->attr.attr);

	size = event->header.size;
	size -= (void *)&event->attr.id - (void *)event;
	mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_type_swap(union perf_event *event)
{
	event->event_type.event_type.event_id =
		bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event)
{
	event->tracing_data.size = bswap_32(event->tracing_data.size);
}

typedef void (*perf_event__swap_op)(union perf_event *event);

static perf_event__swap_op perf_event__swap_ops[] = {
	[PERF_RECORD_MMAP] = perf_event__mmap_swap,
	[PERF_RECORD_COMM] = perf_event__comm_swap,
	[PERF_RECORD_FORK] = perf_event__task_swap,
	[PERF_RECORD_EXIT] = perf_event__task_swap,
	[PERF_RECORD_LOST] = perf_event__all64_swap,
	[PERF_RECORD_READ] = perf_event__read_swap,
	[PERF_RECORD_SAMPLE] = perf_event__all64_swap,
	[PERF_RECORD_HEADER_ATTR] = perf_event__hdr_attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID] = NULL,
	[PERF_RECORD_HEADER_MAX] = NULL,
};

struct sample_queue {
	u64 timestamp;
	u64 file_offset;
	union perf_event *event;
	struct list_head list;
};

static void perf_session_free_sample_buffers(struct perf_session *session)
{
	struct ordered_samples *os = &session->ordered_samples;

	while (!list_empty(&os->to_free)) {
		struct sample_queue *sq;

		sq = list_entry(os->to_free.next, struct sample_queue, list);
		list_del(&sq->list);
		free(sq);
	}
}

static int perf_session_deliver_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample,
				      struct perf_event_ops *ops,
				      u64 file_offset);

static void flush_sample_queue(struct perf_session *s,
			       struct perf_event_ops *ops)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct list_head *head = &os->samples;
	struct sample_queue *tmp, *iter;
	struct perf_sample sample;
	u64 limit = os->next_flush;
	u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL;
	unsigned idx = 0, progress_next = os->nr_samples / 16;
	int ret;

	if (!ops->ordered_samples || !limit)
		return;

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (iter->timestamp > limit)
			break;

		ret = perf_session__parse_sample(s, iter->event, &sample);
		if (ret)
			pr_err("Can't parse sample, err = %d\n", ret);
		else
			perf_session_deliver_event(s, iter->event, &sample, ops,
						   iter->file_offset);

		os->last_flush = iter->timestamp;
		list_del(&iter->list);
		list_add(&iter->list, &os->sample_cache);
		if (++idx >= progress_next) {
			progress_next += os->nr_samples / 16;
			ui_progress__update(idx, os->nr_samples,
					    "Processing time ordered events...");
		}
	}

	if (list_empty(head)) {
		os->last_sample = NULL;
	} else if (last_ts <= limit) {
		os->last_sample =
			list_entry(head->prev, struct sample_queue, list);
	}

	os->nr_samples = 0;
}
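
/*
 * When perf record finishes a pass over every buffer, it emits a
 * PERF_RECORD_FINISHED_ROUND pseudo event. Assuming timestamps are
 * monotonic across CPUs, once round n + 1 completes, every event with a
 * timestamp below round n's maximum has been read, so the queue can be
 * flushed up to that point. process_finished_round() therefore flushes
 * up to the previous round's maximum timestamp and records the current
 * round's maximum as the next flush limit.
 */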
static int process_finished_round(union perf_event *event __used,
				  struct perf_session *session,
				  struct perf_event_ops *ops)
{
	flush_sample_queue(session, ops);
	session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;

	return 0;
}

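/* The queue is ordered by time */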
static void __queue_event(struct sample_queue *new, struct perf_session *s)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct sample_queue *sample = os->last_sample;
	u64 timestamp = new->timestamp;
	struct list_head *p;

	++os->nr_samples;
	os->last_sample = new;

	if (!sample) {
		list_add(&new->list, &os->samples);
		os->max_timestamp = timestamp;
		return;
	}

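	/*
	 * last_sample may point anywhere in the list, as it is simply the
	 * most recently queued event. The new event is expected to land
	 * close to it, so walk from there in the right direction.
	 */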
	if (sample->timestamp <= timestamp) {
		while (sample->timestamp <= timestamp) {
			p = sample->list.next;
			if (p == &os->samples) {
				list_add_tail(&new->list, &os->samples);
				os->max_timestamp = timestamp;
				return;
			}
			sample = list_entry(p, struct sample_queue, list);
		}
		list_add_tail(&new->list, &sample->list);
	} else {
		while (sample->timestamp > timestamp) {
			p = sample->list.prev;
			if (p == &os->samples) {
				list_add(&new->list, &os->samples);
				return;
			}
			sample = list_entry(p, struct sample_queue, list);
		}
		list_add(&new->list, &sample->list);
	}
}

#define MAX_SAMPLE_BUFFER	(64 * 1024 / sizeof(struct sample_queue))

static int perf_session_queue_event(struct perf_session *s, union perf_event *event,
				    struct perf_sample *sample, u64 file_offset)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct list_head *sc = &os->sample_cache;
	u64 timestamp = sample->time;
	struct sample_queue *new;

	if (!timestamp || timestamp == ~0ULL)
		return -ETIME;

	if (timestamp < s->ordered_samples.last_flush) {
		printf("Warning: Timestamp below last timeslice flush\n");
		return -EINVAL;
	}

	if (!list_empty(sc)) {
		new = list_entry(sc->next, struct sample_queue, list);
		list_del(&new->list);
	} else if (os->sample_buffer) {
		new = os->sample_buffer + os->sample_buffer_idx;
		if (++os->sample_buffer_idx == MAX_SAMPLE_BUFFER)
			os->sample_buffer = NULL;
	} else {
		os->sample_buffer = malloc(MAX_SAMPLE_BUFFER * sizeof(*new));
		if (!os->sample_buffer)
			return -ENOMEM;
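		/*
		 * Element 0 of a fresh buffer is used only to link the
		 * buffer onto the to_free list, so start handing out
		 * entries at element 1.
		 */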
		list_add(&os->sample_buffer->list, &os->to_free);
		os->sample_buffer_idx = 2;
		new = os->sample_buffer + 1;
	}

	new->timestamp = timestamp;
	new->file_offset = file_offset;
	new->event = event;

	__queue_event(new, s);

	return 0;
}

static void callchain__printf(struct perf_sample *sample)
{
	unsigned int i;

	printf("... chain: nr:%" PRIu64 "\n", sample->callchain->nr);

	for (i = 0; i < sample->callchain->nr; i++)
		printf("..... %2d: %016" PRIx64 "\n",
		       i, sample->callchain->ips[i]);
}

static void perf_session__print_tstamp(struct perf_session *session,
				       union perf_event *event,
				       struct perf_sample *sample)
{
	if (event->header.type != PERF_RECORD_SAMPLE &&
	    !session->sample_id_all) {
		fputs("-1 -1 ", stdout);
		return;
	}

	if ((session->sample_type & PERF_SAMPLE_CPU))
		printf("%u ", sample->cpu);

	if (session->sample_type & PERF_SAMPLE_TIME)
		printf("%" PRIu64 " ", sample->time);
}

static void dump_event(struct perf_session *session, union perf_event *event,
		       u64 file_offset, struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("\n%#" PRIx64 " [%#x]: event: %d\n",
	       file_offset, event->header.size, event->header.type);

	trace_event(event);

	if (sample)
		perf_session__print_tstamp(session, event, sample);

	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
	       event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct perf_session *session, union perf_event *event,
			struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("(IP, %d): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
	       event->header.misc, sample->pid, sample->tid, sample->ip,
	       sample->period, sample->addr);

	if (session->sample_type & PERF_SAMPLE_CALLCHAIN)
		callchain__printf(sample);
}

static int perf_session_deliver_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample,
				      struct perf_event_ops *ops,
				      u64 file_offset)
{
	struct perf_evsel *evsel;

	dump_event(session, event, file_offset, sample);

	evsel = perf_evlist__id2evsel(session->evlist, sample->id);
	if (evsel != NULL && event->header.type != PERF_RECORD_SAMPLE) {
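		/*
		 * Account non-sample events to the evsel here. Samples are
		 * deliberately left out: the tools may still filter some of
		 * them, so counting samples before filtering would skew the
		 * per-evsel stats.
		 */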
		hists__inc_nr_events(&evsel->hists, event->header.type);
	}

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		dump_sample(session, event, sample);
		if (evsel == NULL) {
			++session->hists.stats.nr_unknown_id;
			return -1;
		}
		return ops->sample(event, sample, evsel, session);
	case PERF_RECORD_MMAP:
		return ops->mmap(event, sample, session);
	case PERF_RECORD_COMM:
		return ops->comm(event, sample, session);
	case PERF_RECORD_FORK:
		return ops->fork(event, sample, session);
	case PERF_RECORD_EXIT:
		return ops->exit(event, sample, session);
	case PERF_RECORD_LOST:
		return ops->lost(event, sample, session);
	case PERF_RECORD_READ:
		return ops->read(event, sample, session);
	case PERF_RECORD_THROTTLE:
		return ops->throttle(event, sample, session);
	case PERF_RECORD_UNTHROTTLE:
		return ops->unthrottle(event, sample, session);
	default:
		++session->hists.stats.nr_unknown_events;
		return -1;
	}
}

static int perf_session__preprocess_sample(struct perf_session *session,
					   union perf_event *event, struct perf_sample *sample)
{
	if (event->header.type != PERF_RECORD_SAMPLE ||
	    !(session->sample_type & PERF_SAMPLE_CALLCHAIN))
		return 0;

	if (!ip_callchain__valid(sample->callchain, event)) {
		pr_debug("call-chain problem with event, skipping it.\n");
		++session->hists.stats.nr_invalid_chains;
		session->hists.stats.total_invalid_chains += sample->period;
		return -EINVAL;
	}
	return 0;
}

static int perf_session__process_user_event(struct perf_session *session, union perf_event *event,
					    struct perf_event_ops *ops, u64 file_offset)
{
	dump_event(session, event, file_offset, NULL);
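
	/* These events are processed right away */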
	switch (event->header.type) {
	case PERF_RECORD_HEADER_ATTR:
		return ops->attr(event, session);
	case PERF_RECORD_HEADER_EVENT_TYPE:
		return ops->event_type(event, session);
	case PERF_RECORD_HEADER_TRACING_DATA:
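		/* Set up the file position for reading amidst the mmap'ed data */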
		lseek(session->fd, file_offset, SEEK_SET);
		return ops->tracing_data(event, session);
	case PERF_RECORD_HEADER_BUILD_ID:
		return ops->build_id(event, session);
	case PERF_RECORD_FINISHED_ROUND:
		return ops->finished_round(event, session, ops);
	default:
		return -EINVAL;
	}
}

static int perf_session__process_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_event_ops *ops,
				       u64 file_offset)
{
	struct perf_sample sample;
	int ret;

	/* Bounds-check the type before using it to index swap_ops. */
	if (event->header.type >= PERF_RECORD_HEADER_MAX)
		return -EINVAL;

	if (session->header.needs_swap &&
	    perf_event__swap_ops[event->header.type])
		perf_event__swap_ops[event->header.type](event);

	hists__inc_nr_events(&session->hists, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, ops, file_offset);
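
	/* For all kernel events we get the sample data */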
	ret = perf_session__parse_sample(session, event, &sample);
	if (ret)
		return ret;

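	/* Preprocess sample records - precheck callchains */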
	if (perf_session__preprocess_sample(session, event, &sample))
		return 0;

	if (ops->ordered_samples) {
		ret = perf_session_queue_event(session, event, &sample,
					       file_offset);
		if (ret != -ETIME)
			return ret;
	}

	return perf_session_deliver_event(session, event, &sample, ops,
					  file_offset);
}

void perf_event_header__bswap(struct perf_event_header *self)
{
	self->type = bswap_32(self->type);
	self->misc = bswap_16(self->misc);
	self->size = bswap_16(self->size);
}

static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
	struct thread *thread = perf_session__findnew(self, 0);

	if (thread == NULL || thread__set_comm(thread, "swapper")) {
		pr_err("problem inserting idle task.\n");
		thread = NULL;
	}

	return thread;
}

static void perf_session__warn_about_errors(const struct perf_session *session,
					    const struct perf_event_ops *ops)
{
	if (ops->lost == perf_event__process_lost &&
	    session->hists.stats.nr_events[PERF_RECORD_LOST] != 0) {
		ui__warning("Processed %d events and lost %d chunks!\n\n"
			    "Check IO/CPU overload!\n\n",
			    session->hists.stats.nr_events[0],
			    session->hists.stats.nr_events[PERF_RECORD_LOST]);
	}

	if (session->hists.stats.nr_unknown_events != 0) {
		ui__warning("Found %u unknown events!\n\n"
			    "Is this an older tool processing a perf.data "
			    "file generated by a more recent tool?\n\n"
			    "If that is not the case, consider "
			    "reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->hists.stats.nr_unknown_events);
	}

	if (session->hists.stats.nr_unknown_id != 0) {
		ui__warning("%u samples with id not present in the header\n",
			    session->hists.stats.nr_unknown_id);
	}

	if (session->hists.stats.nr_invalid_chains != 0) {
		ui__warning("Found invalid callchains!\n\n"
			    "%u out of %u events were discarded for this reason.\n\n"
			    "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->hists.stats.nr_invalid_chains,
			    session->hists.stats.nr_events[PERF_RECORD_SAMPLE]);
	}
}

#define session_done()	(*(volatile int *)(&session_done))
volatile int session_done;

static int __perf_session__process_pipe_events(struct perf_session *self,
					       struct perf_event_ops *ops)
{
	union perf_event event;
	uint32_t size;
	int skip = 0;
	u64 head;
	int err;
	void *p;

	perf_event_ops__fill_defaults(ops);

	head = 0;
more:
	err = readn(self->fd, &event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (self->header.needs_swap)
		perf_event_header__bswap(&event.header);

	size = event.header.size;
	if (size == 0)
		size = 8;

	p = &event;
	p += sizeof(struct perf_event_header);

	if (size - sizeof(struct perf_event_header)) {
		err = readn(self->fd, p, size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if (size == 0 ||
	    (skip = perf_session__process_event(self, &event, ops, head)) < 0) {
		dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n",
			    head, event.header.size, event.header.type);
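
		/*
		 * Assume we lost track of the stream: realign head to a
		 * u64 boundary and skip 8 bytes in the hope of catching
		 * on again soon.
		 */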
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	if (skip > 0)
		head += skip;

	if (!session_done())
		goto more;
done:
	err = 0;
out_err:
	perf_session__warn_about_errors(self, ops);
	perf_session_free_sample_buffers(self);
	return err;
}

static union perf_event *
fetch_mmaped_event(struct perf_session *session,
		   u64 head, size_t mmap_size, char *buf)
{
	union perf_event *event;

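	/*
	 * Ensure we have enough space remaining to read
	 * the size of the event in the headers.
	 */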
	if (head + sizeof(event->header) > mmap_size)
		return NULL;

	event = (union perf_event *)(buf + head);

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (head + event->header.size > mmap_size)
		return NULL;

	return event;
}

int __perf_session__process_events(struct perf_session *session,
				   u64 data_offset, u64 data_size,
				   u64 file_size, struct perf_event_ops *ops)
{
	u64 head, page_offset, file_offset, file_pos, progress_next;
	int err, mmap_prot, mmap_flags, map_idx = 0;
	size_t page_size, mmap_size;
	char *buf, *mmaps[8];
	union perf_event *event;
	uint32_t size;

	perf_event_ops__fill_defaults(ops);

	page_size = sysconf(_SC_PAGESIZE);

	page_offset = page_size * (data_offset / page_size);
	file_offset = page_offset;
	head = data_offset - page_offset;

	if (data_offset + data_size < file_size)
		file_size = data_offset + data_size;

	progress_next = file_size / 16;

	mmap_size = session->mmap_window;
	if (mmap_size > file_size)
		mmap_size = file_size;

	memset(mmaps, 0, sizeof(mmaps));

	mmap_prot = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (session->header.needs_swap) {
		mmap_prot |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, session->fd,
		   file_offset);
	if (buf == MAP_FAILED) {
		/* Save errno before pr_err can clobber it. */
		err = -errno;
		pr_err("failed to mmap file\n");
		goto out_err;
	}
	mmaps[map_idx] = buf;
	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
	file_pos = file_offset + head;

more:
	event = fetch_mmaped_event(session, head, mmap_size, buf);
	if (!event) {
		if (mmaps[map_idx]) {
			munmap(mmaps[map_idx], mmap_size);
			mmaps[map_idx] = NULL;
		}

		page_offset = page_size * (head / page_size);
		file_offset += page_offset;
		head -= page_offset;
		goto remap;
	}

	size = event->header.size;

	if (size == 0 ||
	    perf_session__process_event(session, event, ops, file_pos) < 0) {
		dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n",
			    file_offset + head, event->header.size,
			    event->header.type);
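
		/*
		 * Assume we lost track of the stream: realign head to a
		 * u64 boundary and skip 8 bytes in the hope of catching
		 * on again soon.
		 */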
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;
	file_pos += size;

	if (file_pos >= progress_next) {
		progress_next += file_size / 16;
		ui_progress__update(file_pos, file_size,
				    "Processing events...");
	}

	if (file_pos < file_size)
		goto more;

	err = 0;
	/* do the final flush for ordered samples */
	session->ordered_samples.next_flush = ULLONG_MAX;
	flush_sample_queue(session, ops);
out_err:
	perf_session__warn_about_errors(session, ops);
	perf_session_free_sample_buffers(session);
	return err;
}

int perf_session__process_events(struct perf_session *self,
				 struct perf_event_ops *ops)
{
	int err;

	if (perf_session__register_idle_thread(self) == NULL)
		return -ENOMEM;

	if (!self->fd_pipe)
		err = __perf_session__process_events(self,
						     self->header.data_offset,
						     self->header.data_size,
						     self->size, ops);
	else
		err = __perf_session__process_pipe_events(self, ops);

	return err;
}

bool perf_session__has_traces(struct perf_session *self, const char *msg)
{
	if (!(self->sample_type & PERF_SAMPLE_RAW)) {
		pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
		return false;
	}

	return true;
}

int perf_session__set_kallsyms_ref_reloc_sym(struct map **maps,
					     const char *symbol_name,
					     u64 addr)
{
	char *bracket;
	enum map_type i;
	struct ref_reloc_sym *ref;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct kmap *kmap = map__kmap(maps[i]);
		kmap->ref_reloc_sym = ref;
	}

	return 0;
}

size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
{
	return __dsos__fprintf(&self->host_machine.kernel_dsos, fp) +
	       __dsos__fprintf(&self->host_machine.user_dsos, fp) +
	       machines__fprintf_dsos(&self->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
					  bool with_hits)
{
	size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, with_hits);

	return ret + machines__fprintf_dsos_buildid(&self->machines, fp, with_hits);
}

size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = fprintf(fp, "Aggregated stats:\n");

	ret += hists__fprintf_nr_events(&session->hists, fp);

	list_for_each_entry(pos, &session->evlist->entries, node) {
		ret += fprintf(fp, "%s stats:\n", event_name(pos));
		ret += hists__fprintf_nr_events(&pos->hists, fp);
	}

	return ret;
}

struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
						   unsigned int type)
{
	struct perf_evsel *pos;

	list_for_each_entry(pos, &session->evlist->entries, node) {
		if (pos->attr.type == type)
			return pos;
	}
	return NULL;
}

void perf_session__print_ip(union perf_event *event,
			    struct perf_sample *sample,
			    struct perf_session *session,
			    int print_sym, int print_dso)
{
	struct addr_location al;
	const char *symname, *dsoname;
	struct callchain_cursor *cursor = &session->callchain_cursor;
	struct callchain_cursor_node *node;

	if (perf_event__preprocess_sample(event, session, &al, sample,
					  NULL) < 0) {
		error("problem processing %d event, skipping it.\n",
		      event->header.type);
		return;
	}

	if (symbol_conf.use_callchain && sample->callchain) {
		if (perf_session__resolve_callchain(session, al.thread,
						    sample->callchain, NULL) != 0) {
			if (verbose)
				error("Failed to resolve callchain. Skipping\n");
			return;
		}
		callchain_cursor_commit(cursor);

		while (1) {
			node = callchain_cursor_current(cursor);
			if (!node)
				break;

			printf("\t%16" PRIx64, node->ip);
			if (print_sym) {
				if (node->sym && node->sym->name)
					symname = node->sym->name;
				else
					symname = "";

				printf(" %s", symname);
			}
			if (print_dso) {
				if (node->map && node->map->dso && node->map->dso->name)
					dsoname = node->map->dso->name;
				else
					dsoname = "";

				printf(" (%s)", dsoname);
			}
			printf("\n");

			callchain_cursor_advance(cursor);
		}
	} else {
		printf("%16" PRIx64, sample->ip);
		if (print_sym) {
			if (al.sym && al.sym->name)
				symname = al.sym->name;
			else
				symname = "";

			printf(" %s", symname);
		}

		if (print_dso) {
			if (al.map && al.map->dso && al.map->dso->name)
				dsoname = al.map->dso->name;
			else
				dsoname = "";

			printf(" (%s)", dsoname);
		}
	}
}

int perf_session__cpu_bitmap(struct perf_session *session,
			     const char *cpu_list, unsigned long *cpu_bitmap)
{
	int i;
	struct cpu_map *map;

	for (i = 0; i < PERF_TYPE_MAX; ++i) {
		struct perf_evsel *evsel;

		evsel = perf_session__find_first_evtype(session, i);
		if (!evsel)
			continue;

		if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
			pr_err("File does not contain CPU events. "
			       "Remove -c option to proceed.\n");
			return -1;
		}
	}

	map = cpu_map__new(cpu_list);
	if (map == NULL) {
		pr_err("Invalid cpu_list\n");
		return -1;
	}

	for (i = 0; i < map->nr; i++) {
		int cpu = map->map[i];

		if (cpu >= MAX_NR_CPUS) {
			pr_err("Requested CPU %d too large. "
			       "Consider raising MAX_NR_CPUS\n", cpu);
			return -1;
		}

		set_bit(cpu, cpu_bitmap);
	}

	return 0;
}

void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
				bool full)
{
	struct stat st;
	int ret;

	if (session == NULL || fp == NULL)
		return;

	ret = fstat(session->fd, &st);
	if (ret == -1)
		return;

	fprintf(fp, "# ========\n");
	fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));
	perf_header__fprintf_info(session, fp, full);
	fprintf(fp, "# ========\n#\n");
}