#define _FILE_OFFSET_BITS 64

#include <linux/kernel.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "evlist.h"
#include "evsel.h"
#include "session.h"
#include "tool.h"
#include "sort.h"
#include "util.h"
#include "cpumap.h"
#include "event-parse.h"

static int perf_session__open(struct perf_session *self, bool force)
{
	struct stat input_stat;

	if (!strcmp(self->filename, "-")) {
		self->fd_pipe = true;
		self->fd = STDIN_FILENO;

		if (perf_session__read_header(self, self->fd) < 0) {
			pr_err("incompatible file format (rerun with -v to learn more)\n");
			return -1;
		}

		return 0;
	}

	self->fd = open(self->filename, O_RDONLY);
	if (self->fd < 0) {
		int err = errno;

		pr_err("failed to open %s: %s", self->filename, strerror(err));
		if (err == ENOENT && !strcmp(self->filename, "perf.data"))
			pr_err(" (try 'perf record' first)");
		pr_err("\n");
		return -err;
	}

	if (fstat(self->fd, &input_stat) < 0)
		goto out_close;

	if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
		pr_err("file %s not owned by current user or root\n",
		       self->filename);
		goto out_close;
	}

	if (!input_stat.st_size) {
		pr_info("zero-sized file (%s), nothing to do!\n",
			self->filename);
		goto out_close;
	}

	if (perf_session__read_header(self, self->fd) < 0) {
		pr_err("incompatible file format (rerun with -v to learn more)\n");
		goto out_close;
	}

	if (!perf_evlist__valid_sample_type(self->evlist)) {
		pr_err("non matching sample_type\n");
		goto out_close;
	}

	if (!perf_evlist__valid_sample_id_all(self->evlist)) {
		pr_err("non matching sample_id_all\n");
		goto out_close;
	}

	self->size = input_stat.st_size;
	return 0;

out_close:
	close(self->fd);
	self->fd = -1;
	return -1;
}

void perf_session__set_id_hdr_size(struct perf_session *session)
{
	u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);

	session->host_machine.id_hdr_size = id_hdr_size;
	machines__set_id_hdr_size(&session->machines, id_hdr_size);
}

int perf_session__create_kernel_maps(struct perf_session *self)
{
	int ret = machine__create_kernel_maps(&self->host_machine);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&self->machines);
	return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *self)
{
	machine__destroy_kernel_maps(&self->host_machine);
	machines__destroy_guest_kernel_maps(&self->machines);
}

struct perf_session *perf_session__new(const char *filename, int mode,
				       bool force, bool repipe,
				       struct perf_tool *tool)
{
	struct perf_session *self;
	struct stat st;
	size_t len;

	if (!filename || !strlen(filename)) {
		if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
			filename = "-";
		else
			filename = "perf.data";
	}

	len = strlen(filename);
	self = zalloc(sizeof(*self) + len);

	if (self == NULL)
		goto out;

	memcpy(self->filename, filename, len);
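	/*
	 * On 64-bit we can mmap the whole data file in one go, so use an
	 * unbounded window; on 32-bit fall back to a 32MB sliding window.
	 */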
#if BITS_PER_LONG == 64
	self->mmap_window = ULLONG_MAX;
#else
	self->mmap_window = 32 * 1024 * 1024ULL;
#endif
	self->machines = RB_ROOT;
	self->repipe = repipe;
	INIT_LIST_HEAD(&self->ordered_samples.samples);
	INIT_LIST_HEAD(&self->ordered_samples.sample_cache);
	INIT_LIST_HEAD(&self->ordered_samples.to_free);
	machine__init(&self->host_machine, "", HOST_KERNEL_ID);
	hists__init(&self->hists);

	if (mode == O_RDONLY) {
		if (perf_session__open(self, force) < 0)
			goto out_delete;
		perf_session__set_id_hdr_size(self);
	} else if (mode == O_WRONLY) {
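		/*
		 * In O_RDONLY mode this is done when reading the kernel
		 * MMAP event, in perf_event__process_mmap().
		 */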
		if (perf_session__create_kernel_maps(self) < 0)
			goto out_delete;
	}

	if (tool && tool->ordering_requires_timestamps &&
	    tool->ordered_samples && !perf_evlist__sample_id_all(self->evlist)) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		tool->ordered_samples = false;
	}

out:
	return self;
out_delete:
	perf_session__delete(self);
	return NULL;
}

static void machine__delete_dead_threads(struct machine *machine)
{
	struct thread *n, *t;

	list_for_each_entry_safe(t, n, &machine->dead_threads, node) {
		list_del(&t->node);
		thread__delete(t);
	}
}

static void perf_session__delete_dead_threads(struct perf_session *session)
{
	machine__delete_dead_threads(&session->host_machine);
}

static void machine__delete_threads(struct machine *self)
{
	struct rb_node *nd = rb_first(&self->threads);

	while (nd) {
		struct thread *t = rb_entry(nd, struct thread, rb_node);

		/* grab the successor before the node is unlinked */
		nd = rb_next(nd);
		rb_erase(&t->rb_node, &self->threads);
		thread__delete(t);
	}
}

static void perf_session__delete_threads(struct perf_session *session)
{
	machine__delete_threads(&session->host_machine);
}

void perf_session__delete(struct perf_session *self)
{
	perf_session__destroy_kernel_maps(self);
	perf_session__delete_dead_threads(self);
	perf_session__delete_threads(self);
	machine__exit(&self->host_machine);
	close(self->fd);
	free(self);
}

void machine__remove_thread(struct machine *self, struct thread *th)
{
	self->last_match = NULL;
	rb_erase(&th->rb_node, &self->threads);
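	/*
	 * There may still be references to this thread, e.g. from
	 * hist entries, so don't free it, just park it on the
	 * dead_threads list.
	 */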
	list_add_tail(&th->node, &self->dead_threads);
}

static bool symbol__match_parent_regex(struct symbol *sym)
{
	if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
		return true;

	return false;
}

static const u8 cpumodes[] = {
	PERF_RECORD_MISC_USER,
	PERF_RECORD_MISC_KERNEL,
	PERF_RECORD_MISC_GUEST_USER,
	PERF_RECORD_MISC_GUEST_KERNEL
};
#define NCPUMODES (sizeof(cpumodes)/sizeof(u8))

static void ip__resolve_ams(struct machine *self, struct thread *thread,
			    struct addr_map_symbol *ams,
			    u64 ip)
{
	struct addr_location al;
	size_t i;
	u8 m;

	memset(&al, 0, sizeof(al));

	for (i = 0; i < NCPUMODES; i++) {
		m = cpumodes[i];
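		/*
		 * We cannot use the header.misc hint to determine whether a
		 * branch stack address is user, kernel, guest or hypervisor:
		 * branches may straddle those boundaries. So try each
		 * cpumode in turn until the address resolves to a symbol;
		 * otherwise it stays unknown.
		 */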
		thread__find_addr_location(thread, self, m, MAP__FUNCTION,
					   ip, &al, NULL);
		if (al.sym)
			goto found;
	}
found:
	ams->addr = ip;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
}

struct branch_info *machine__resolve_bstack(struct machine *self,
					    struct thread *thr,
					    struct branch_stack *bs)
{
	struct branch_info *bi;
	unsigned int i;

	bi = calloc(bs->nr, sizeof(struct branch_info));
	if (!bi)
		return NULL;

	for (i = 0; i < bs->nr; i++) {
		ip__resolve_ams(self, thr, &bi[i].to, bs->entries[i].to);
		ip__resolve_ams(self, thr, &bi[i].from, bs->entries[i].from);
		bi[i].flags = bs->entries[i].flags;
	}
	return bi;
}

int machine__resolve_callchain(struct machine *self,
			       struct thread *thread,
			       struct ip_callchain *chain,
			       struct symbol **parent)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	unsigned int i;
	int err;

	callchain_cursor_reset(&callchain_cursor);

	if (chain->nr > PERF_MAX_STACK_DEPTH) {
		pr_warning("corrupted callchain. skipping...\n");
		return 0;
	}

	for (i = 0; i < chain->nr; i++) {
		u64 ip;
		struct addr_location al;

		if (callchain_param.order == ORDER_CALLEE)
			ip = chain->ips[i];
		else
			ip = chain->ips[chain->nr - i - 1];

		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR;
				break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL;
				break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER;
				break;
			default:
				pr_debug("invalid callchain context: "
					 "%"PRId64"\n", (s64) ip);
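				/*
				 * The callchain looks corrupted, discard
				 * everything gathered so far.
				 */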
				callchain_cursor_reset(&callchain_cursor);
				return 0;
			}
			continue;
		}

		al.filtered = false;
		thread__find_addr_location(thread, self, cpumode,
					   MAP__FUNCTION, ip, &al, NULL);
		if (al.sym != NULL) {
			if (sort__has_parent && !*parent &&
			    symbol__match_parent_regex(al.sym))
				*parent = al.sym;
			if (!symbol_conf.use_callchain)
				break;
		}

		err = callchain_cursor_append(&callchain_cursor,
					      ip, al.map, al.sym);
		if (err)
			return err;
	}

	return 0;
}

static int process_event_synth_tracing_data_stub(union perf_event *event __used,
						 struct perf_session *session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_attr_stub(union perf_event *event __used,
					 struct perf_evlist **pevlist __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __used,
				     union perf_event *event __used,
				     struct perf_sample *sample __used,
				     struct perf_evsel *evsel __used,
				     struct machine *machine __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_stub(struct perf_tool *tool __used,
			      union perf_event *event __used,
			      struct perf_sample *sample __used,
			      struct machine *machine __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __used,
				       union perf_event *event __used,
				       struct perf_session *perf_session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_type_stub(struct perf_tool *tool __used,
				   union perf_event *event __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event,
				  struct perf_session *session);

static void perf_tool__fill_defaults(struct perf_tool *tool)
{
	if (tool->sample == NULL)
		tool->sample = process_event_sample_stub;
	if (tool->mmap == NULL)
		tool->mmap = process_event_stub;
	if (tool->comm == NULL)
		tool->comm = process_event_stub;
	if (tool->fork == NULL)
		tool->fork = process_event_stub;
	if (tool->exit == NULL)
		tool->exit = process_event_stub;
	if (tool->lost == NULL)
		tool->lost = perf_event__process_lost;
	if (tool->read == NULL)
		tool->read = process_event_sample_stub;
	if (tool->throttle == NULL)
		tool->throttle = process_event_stub;
	if (tool->unthrottle == NULL)
		tool->unthrottle = process_event_stub;
	if (tool->attr == NULL)
		tool->attr = process_event_synth_attr_stub;
	if (tool->event_type == NULL)
		tool->event_type = process_event_type_stub;
	if (tool->tracing_data == NULL)
		tool->tracing_data = process_event_synth_tracing_data_stub;
	if (tool->build_id == NULL)
		tool->build_id = process_finished_round_stub;
	if (tool->finished_round == NULL) {
		if (tool->ordered_samples)
			tool->finished_round = process_finished_round;
		else
			tool->finished_round = process_finished_round_stub;
	}
}

void mem_bswap_32(void *src, int byte_size)
{
	u32 *m = src;

	while (byte_size > 0) {
		*m = bswap_32(*m);
		byte_size -= sizeof(u32);
		++m;
	}
}

void mem_bswap_64(void *src, int byte_size)
{
	u64 *m = src;

	while (byte_size > 0) {
		*m = bswap_64(*m);
		byte_size -= sizeof(u64);
		++m;
	}
}

static void swap_sample_id_all(union perf_event *event, void *data)
{
	void *end = (void *) event + event->header.size;
	int size = end - data;

	BUG_ON(size % sizeof(u64));
	mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
				   bool sample_id_all __used)
{
	struct perf_event_header *hdr = &event->header;

	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
	event->comm.pid = bswap_32(event->comm.pid);
	event->comm.tid = bswap_32(event->comm.tid);

	if (sample_id_all) {
		void *data = &event->comm.comm;

		data += ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap_swap(union perf_event *event,
				  bool sample_id_all)
{
	event->mmap.pid = bswap_32(event->mmap.pid);
	event->mmap.tid = bswap_32(event->mmap.tid);
	event->mmap.start = bswap_64(event->mmap.start);
	event->mmap.len = bswap_64(event->mmap.len);
	event->mmap.pgoff = bswap_64(event->mmap.pgoff);

	if (sample_id_all) {
		void *data = &event->mmap.filename;

		data += ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
	event->fork.pid = bswap_32(event->fork.pid);
	event->fork.tid = bswap_32(event->fork.tid);
	event->fork.ppid = bswap_32(event->fork.ppid);
	event->fork.ptid = bswap_32(event->fork.ptid);
	event->fork.time = bswap_64(event->fork.time);

	if (sample_id_all)
		swap_sample_id_all(event, &event->fork + 1);
}

static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
	event->read.pid = bswap_32(event->read.pid);
	event->read.tid = bswap_32(event->read.tid);
	event->read.value = bswap_64(event->read.value);
	event->read.time_enabled = bswap_64(event->read.time_enabled);
	event->read.time_running = bswap_64(event->read.time_running);
	event->read.id = bswap_64(event->read.id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->read + 1);
}

static u8 revbyte(u8 b)
{
	int rev = (b >> 4) | ((b & 0xf) << 4);

	rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
	rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
	return (u8) rev;
}
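/*
 * XXX this is a hack to carry the perf_event_attr flags bitfield across
 * endianness. The ABI allocates bit-fields from right to left (least to
 * most significant) on little-endian implementations and from left to
 * right on big-endian ones. That allocation appears to be byte granular,
 * so reversing each byte of the bitfield works here; a proper fix would
 * carry the attr flags in a separate FEAT_ section of the data file.
 */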
static void swap_bitfield(u8 *p, unsigned len)
{
	unsigned i;

	for (i = 0; i < len; i++) {
		*p = revbyte(*p);
		p++;
	}
}

void perf_event__attr_swap(struct perf_event_attr *attr)
{
	attr->type		= bswap_32(attr->type);
	attr->size		= bswap_32(attr->size);
	attr->config		= bswap_64(attr->config);
	attr->sample_period	= bswap_64(attr->sample_period);
	attr->sample_type	= bswap_64(attr->sample_type);
	attr->read_format	= bswap_64(attr->read_format);
	attr->wakeup_events	= bswap_32(attr->wakeup_events);
	attr->bp_type		= bswap_32(attr->bp_type);
	attr->bp_addr		= bswap_64(attr->bp_addr);
	attr->bp_len		= bswap_64(attr->bp_len);

	swap_bitfield((u8 *) (&attr->read_format + 1), sizeof(u64));
}

static void perf_event__hdr_attr_swap(union perf_event *event,
				      bool sample_id_all __used)
{
	size_t size;

	perf_event__attr_swap(&event->attr.attr);

	size = event->header.size;
	size -= (void *)&event->attr.id - (void *)event;
	mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_type_swap(union perf_event *event,
					bool sample_id_all __used)
{
	event->event_type.event_type.event_id =
		bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event,
					  bool sample_id_all __used)
{
	event->tracing_data.size = bswap_32(event->tracing_data.size);
}

typedef void (*perf_event__swap_op)(union perf_event *event,
				    bool sample_id_all);

static perf_event__swap_op perf_event__swap_ops[] = {
	[PERF_RECORD_MMAP]		  = perf_event__mmap_swap,
	[PERF_RECORD_COMM]		  = perf_event__comm_swap,
	[PERF_RECORD_FORK]		  = perf_event__task_swap,
	[PERF_RECORD_EXIT]		  = perf_event__task_swap,
	[PERF_RECORD_LOST]		  = perf_event__all64_swap,
	[PERF_RECORD_READ]		  = perf_event__read_swap,
	[PERF_RECORD_SAMPLE]		  = perf_event__all64_swap,
	[PERF_RECORD_HEADER_ATTR]	  = perf_event__hdr_attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE]	  = perf_event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID]	  = NULL,
	[PERF_RECORD_HEADER_MAX]	  = NULL,
};

struct sample_queue {
	u64			timestamp;
	u64			file_offset;
	union perf_event	*event;
	struct list_head	list;
};

static void perf_session_free_sample_buffers(struct perf_session *session)
{
	struct ordered_samples *os = &session->ordered_samples;

	while (!list_empty(&os->to_free)) {
		struct sample_queue *sq;

		sq = list_entry(os->to_free.next, struct sample_queue, list);
		list_del(&sq->list);
		free(sq);
	}
}

static int perf_session_deliver_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample,
				      struct perf_tool *tool,
				      u64 file_offset);

static void flush_sample_queue(struct perf_session *s,
			       struct perf_tool *tool)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct list_head *head = &os->samples;
	struct sample_queue *tmp, *iter;
	struct perf_sample sample;
	u64 limit = os->next_flush;
	u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL;
	unsigned idx = 0, progress_next = os->nr_samples / 16;
	int ret;

	if (!tool->ordered_samples || !limit)
		return;

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (iter->timestamp > limit)
			break;

		ret = perf_evlist__parse_sample(s->evlist, iter->event, &sample,
						s->header.needs_swap);
		if (ret)
			pr_err("Can't parse sample, err = %d\n", ret);
		else
			perf_session_deliver_event(s, iter->event, &sample, tool,
						   iter->file_offset);

		os->last_flush = iter->timestamp;
		list_del(&iter->list);
		list_add(&iter->list, &os->sample_cache);
		if (++idx >= progress_next) {
			progress_next += os->nr_samples / 16;
			ui_progress__update(idx, os->nr_samples,
					    "Processing time ordered events...");
		}
	}

	if (list_empty(head)) {
		os->last_sample = NULL;
	} else if (last_ts <= limit) {
		os->last_sample =
			list_entry(head->prev, struct sample_queue, list);
	}

	os->nr_samples = 0;
}

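/*
 * When perf record finishes a pass on every buffer, it records this pseudo
 * event. We record the max timestamp t found in the pass n. Assuming these
 * timestamps are monotonic across cpus, we know that if a buffer still has
 * events with timestamps below t, they will all be available and then read
 * in the pass n + 1. Hence when we start to read the pass n + 2, we can
 * safely flush every event with a timestamp below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush every event below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush every event below timestamp 7
 *      etc...
 */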
static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event __used,
				  struct perf_session *session)
{
	flush_sample_queue(session, tool);
	session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;

	return 0;
}

static void __queue_event(struct sample_queue *new, struct perf_session *s)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct sample_queue *sample = os->last_sample;
	u64 timestamp = new->timestamp;
	struct list_head *p;

	++os->nr_samples;
	os->last_sample = new;

	if (!sample) {
		list_add(&new->list, &os->samples);
		os->max_timestamp = timestamp;
		return;
	}

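	/*
	 * last_sample might point to some random place in the list as it's
	 * the last queued event. We expect that the new event is close to
	 * this, so walk forwards or backwards from there.
	 */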
	if (sample->timestamp <= timestamp) {
		while (sample->timestamp <= timestamp) {
			p = sample->list.next;
			if (p == &os->samples) {
				list_add_tail(&new->list, &os->samples);
				os->max_timestamp = timestamp;
				return;
			}
			sample = list_entry(p, struct sample_queue, list);
		}
		list_add_tail(&new->list, &sample->list);
	} else {
		while (sample->timestamp > timestamp) {
			p = sample->list.prev;
			if (p == &os->samples) {
				list_add(&new->list, &os->samples);
				return;
			}
			sample = list_entry(p, struct sample_queue, list);
		}
		list_add(&new->list, &sample->list);
	}
}

#define MAX_SAMPLE_BUFFER	(64 * 1024 / sizeof(struct sample_queue))

static int perf_session_queue_event(struct perf_session *s, union perf_event *event,
				    struct perf_sample *sample, u64 file_offset)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct list_head *sc = &os->sample_cache;
	u64 timestamp = sample->time;
	struct sample_queue *new;

	if (!timestamp || timestamp == ~0ULL)
		return -ETIME;

	if (timestamp < s->ordered_samples.last_flush) {
		printf("Warning: Timestamp below last timeslice flush\n");
		return -EINVAL;
	}

	if (!list_empty(sc)) {
		new = list_entry(sc->next, struct sample_queue, list);
		list_del(&new->list);
	} else if (os->sample_buffer) {
		new = os->sample_buffer + os->sample_buffer_idx;
		if (++os->sample_buffer_idx == MAX_SAMPLE_BUFFER)
			os->sample_buffer = NULL;
	} else {
		os->sample_buffer = malloc(MAX_SAMPLE_BUFFER * sizeof(*new));
		if (!os->sample_buffer)
			return -ENOMEM;
		/*
		 * Entry 0 of each buffer is only used to link the buffer on
		 * the to_free list, so hand out entries starting at index 1.
		 */
		list_add(&os->sample_buffer->list, &os->to_free);
		os->sample_buffer_idx = 2;
		new = os->sample_buffer + 1;
	}

	new->timestamp = timestamp;
	new->file_offset = file_offset;
	new->event = event;

	__queue_event(new, s);

	return 0;
}

static void callchain__printf(struct perf_sample *sample)
{
	unsigned int i;

	printf("... chain: nr:%" PRIu64 "\n", sample->callchain->nr);

	for (i = 0; i < sample->callchain->nr; i++)
		printf("..... %2u: %016" PRIx64 "\n",
		       i, sample->callchain->ips[i]);
}

static void branch_stack__printf(struct perf_sample *sample)
{
	uint64_t i;

	printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr);

	for (i = 0; i < sample->branch_stack->nr; i++)
		printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 "\n",
		       i, sample->branch_stack->entries[i].from,
		       sample->branch_stack->entries[i].to);
}

static void perf_session__print_tstamp(struct perf_session *session,
				       union perf_event *event,
				       struct perf_sample *sample)
{
	u64 sample_type = perf_evlist__sample_type(session->evlist);

	if (event->header.type != PERF_RECORD_SAMPLE &&
	    !perf_evlist__sample_id_all(session->evlist)) {
		fputs("-1 -1 ", stdout);
		return;
	}

	if ((sample_type & PERF_SAMPLE_CPU))
		printf("%u ", sample->cpu);

	if (sample_type & PERF_SAMPLE_TIME)
		printf("%" PRIu64 " ", sample->time);
}

static void dump_event(struct perf_session *session, union perf_event *event,
		       u64 file_offset, struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("\n%#" PRIx64 " [%#x]: event: %d\n",
	       file_offset, event->header.size, event->header.type);

	trace_event(event);

	if (sample)
		perf_session__print_tstamp(session, event, sample);

	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
	       event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct perf_session *session, union perf_event *event,
			struct perf_sample *sample)
{
	u64 sample_type;

	if (!dump_trace)
		return;

	printf("(IP, %d): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
	       event->header.misc, sample->pid, sample->tid, sample->ip,
	       sample->period, sample->addr);

	sample_type = perf_evlist__sample_type(session->evlist);

	if (sample_type & PERF_SAMPLE_CALLCHAIN)
		callchain__printf(sample);

	if (sample_type & PERF_SAMPLE_BRANCH_STACK)
		branch_stack__printf(sample);
}

static struct machine *
	perf_session__find_machine_for_cpumode(struct perf_session *session,
					       union perf_event *event)
{
	const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	if (perf_guest &&
	    ((cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
	     (cpumode == PERF_RECORD_MISC_GUEST_USER))) {
		u32 pid;

		if (event->header.type == PERF_RECORD_MMAP)
			pid = event->mmap.pid;
		else
			pid = event->ip.pid;

		return perf_session__findnew_machine(session, pid);
	}

	return perf_session__find_host_machine(session);
}

static int perf_session_deliver_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample,
				      struct perf_tool *tool,
				      u64 file_offset)
{
	struct perf_evsel *evsel;
	struct machine *machine;

	dump_event(session, event, file_offset, sample);

	evsel = perf_evlist__id2evsel(session->evlist, sample->id);
	if (evsel != NULL && event->header.type != PERF_RECORD_SAMPLE) {
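		/*
		 * XXX We're leaving PERF_RECORD_SAMPLE unaccounted here
		 * because the tools may apply filters that discard some of
		 * the samples; counting them here would make the per-evsel
		 * totals inconsistent with what is actually delivered.
		 */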
		hists__inc_nr_events(&evsel->hists, event->header.type);
	}

	machine = perf_session__find_machine_for_cpumode(session, event);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		dump_sample(session, event, sample);
		if (evsel == NULL) {
			++session->hists.stats.nr_unknown_id;
			return 0;
		}
		if (machine == NULL) {
			++session->hists.stats.nr_unprocessable_samples;
			return 0;
		}
		return tool->sample(tool, event, sample, evsel, machine);
	case PERF_RECORD_MMAP:
		return tool->mmap(tool, event, sample, machine);
	case PERF_RECORD_COMM:
		return tool->comm(tool, event, sample, machine);
	case PERF_RECORD_FORK:
		return tool->fork(tool, event, sample, machine);
	case PERF_RECORD_EXIT:
		return tool->exit(tool, event, sample, machine);
	case PERF_RECORD_LOST:
		if (tool->lost == perf_event__process_lost)
			session->hists.stats.total_lost += event->lost.lost;
		return tool->lost(tool, event, sample, machine);
	case PERF_RECORD_READ:
		return tool->read(tool, event, sample, evsel, machine);
	case PERF_RECORD_THROTTLE:
		return tool->throttle(tool, event, sample, machine);
	case PERF_RECORD_UNTHROTTLE:
		return tool->unthrottle(tool, event, sample, machine);
	default:
		++session->hists.stats.nr_unknown_events;
		return -1;
	}
}

static int perf_session__preprocess_sample(struct perf_session *session,
					   union perf_event *event, struct perf_sample *sample)
{
	if (event->header.type != PERF_RECORD_SAMPLE ||
	    !(perf_evlist__sample_type(session->evlist) & PERF_SAMPLE_CALLCHAIN))
		return 0;

	if (!ip_callchain__valid(sample->callchain, event)) {
		pr_debug("call-chain problem with event, skipping it.\n");
		++session->hists.stats.nr_invalid_chains;
		session->hists.stats.total_invalid_chains += sample->period;
		return -EINVAL;
	}
	return 0;
}

static int perf_session__process_user_event(struct perf_session *session, union perf_event *event,
					    struct perf_tool *tool, u64 file_offset)
{
	int err;

	dump_event(session, event, file_offset, NULL);

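	/* These events are processed right away */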
	switch (event->header.type) {
	case PERF_RECORD_HEADER_ATTR:
		err = tool->attr(event, &session->evlist);
		if (err == 0)
			perf_session__set_id_hdr_size(session);
		return err;
	case PERF_RECORD_HEADER_EVENT_TYPE:
		return tool->event_type(tool, event);
	case PERF_RECORD_HEADER_TRACING_DATA:
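		/* setup for reading amidst mmap */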
		lseek(session->fd, file_offset, SEEK_SET);
		return tool->tracing_data(event, session);
	case PERF_RECORD_HEADER_BUILD_ID:
		return tool->build_id(tool, event, session);
	case PERF_RECORD_FINISHED_ROUND:
		return tool->finished_round(tool, event, session);
	default:
		return -EINVAL;
	}
}

static void event_swap(union perf_event *event, bool sample_id_all)
{
	perf_event__swap_op swap;

	swap = perf_event__swap_ops[event->header.type];
	if (swap)
		swap(event, sample_id_all);
}

static int perf_session__process_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_tool *tool,
				       u64 file_offset)
{
	struct perf_sample sample;
	int ret;

	if (session->header.needs_swap)
		event_swap(event, perf_evlist__sample_id_all(session->evlist));

	if (event->header.type >= PERF_RECORD_HEADER_MAX)
		return -EINVAL;

	hists__inc_nr_events(&session->hists, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, tool, file_offset);

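	/*
	 * For all kernel events we get the sample data
	 */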
	ret = perf_evlist__parse_sample(session->evlist, event, &sample,
					session->header.needs_swap);
	if (ret)
		return ret;

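	/* Preprocess sample records - precheck callchains */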
	if (perf_session__preprocess_sample(session, event, &sample))
		return 0;

	if (tool->ordered_samples) {
		ret = perf_session_queue_event(session, event, &sample,
					       file_offset);
		if (ret != -ETIME)
			return ret;
	}

	return perf_session_deliver_event(session, event, &sample, tool,
					  file_offset);
}

void perf_event_header__bswap(struct perf_event_header *self)
{
	self->type = bswap_32(self->type);
	self->misc = bswap_16(self->misc);
	self->size = bswap_16(self->size);
}

struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
{
	return machine__findnew_thread(&session->host_machine, pid);
}

static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
	struct thread *thread = perf_session__findnew(self, 0);

	if (thread == NULL || thread__set_comm(thread, "swapper")) {
		pr_err("problem inserting idle task.\n");
		thread = NULL;
	}

	return thread;
}

static void perf_session__warn_about_errors(const struct perf_session *session,
					    const struct perf_tool *tool)
{
	if (tool->lost == perf_event__process_lost &&
	    session->hists.stats.nr_events[PERF_RECORD_LOST] != 0) {
		ui__warning("Processed %d events and lost %d chunks!\n\n"
			    "Check IO/CPU overload!\n\n",
			    session->hists.stats.nr_events[0],
			    session->hists.stats.nr_events[PERF_RECORD_LOST]);
	}

	if (session->hists.stats.nr_unknown_events != 0) {
		ui__warning("Found %u unknown events!\n\n"
			    "Is this an older tool processing a perf.data "
			    "file generated by a more recent tool?\n\n"
			    "If that is not the case, consider "
			    "reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->hists.stats.nr_unknown_events);
	}

	if (session->hists.stats.nr_unknown_id != 0) {
		ui__warning("%u samples with id not present in the header\n",
			    session->hists.stats.nr_unknown_id);
	}

	if (session->hists.stats.nr_invalid_chains != 0) {
		ui__warning("Found invalid callchains!\n\n"
			    "%u out of %u events were discarded for this reason.\n\n"
			    "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->hists.stats.nr_invalid_chains,
			    session->hists.stats.nr_events[PERF_RECORD_SAMPLE]);
	}

	if (session->hists.stats.nr_unprocessable_samples != 0) {
		ui__warning("%u unprocessable samples recorded.\n"
			    "Do you have a KVM guest running and not using 'perf kvm'?\n",
			    session->hists.stats.nr_unprocessable_samples);
	}
}

#define session_done()	(*(volatile int *)(&session_done))
volatile int session_done;

static int __perf_session__process_pipe_events(struct perf_session *self,
					       struct perf_tool *tool)
{
	union perf_event *event;
	uint32_t size, cur_size = 0;
	void *buf = NULL;
	int skip = 0;
	u64 head;
	int err;
	void *p;

	perf_tool__fill_defaults(tool);

	head = 0;
	cur_size = sizeof(union perf_event);

	buf = malloc(cur_size);
	if (!buf)
		return -errno;
more:
	event = buf;
	err = readn(self->fd, event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (self->header.needs_swap)
		perf_event_header__bswap(&event->header);

	size = event->header.size;
	if (size == 0)
		size = 8;

	if (size > cur_size) {
		void *new = realloc(buf, size);

		if (!new) {
			pr_err("failed to allocate memory to read event\n");
			goto out_err;
		}
		buf = new;
		cur_size = size;
		event = buf;
	}
	p = event;
	p += sizeof(struct perf_event_header);

	if (size - sizeof(struct perf_event_header)) {
		err = readn(self->fd, p, size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if ((skip = perf_session__process_event(self, event, tool, head)) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       head, event->header.size, event->header.type);
		err = -EINVAL;
		goto out_err;
	}

	head += size;

	if (skip > 0)
		head += skip;

	if (!session_done())
		goto more;
done:
	err = 0;
out_err:
	free(buf);
	perf_session__warn_about_errors(self, tool);
	perf_session_free_sample_buffers(self);
	return err;
}

static union perf_event *
fetch_mmaped_event(struct perf_session *session,
		   u64 head, size_t mmap_size, char *buf)
{
	union perf_event *event;

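	/*
	 * Ensure we have enough space remaining to read
	 * the size of the event in the headers.
	 */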
	if (head + sizeof(event->header) > mmap_size)
		return NULL;

	event = (union perf_event *)(buf + head);

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (head + event->header.size > mmap_size)
		return NULL;

	return event;
}

int __perf_session__process_events(struct perf_session *session,
				   u64 data_offset, u64 data_size,
				   u64 file_size, struct perf_tool *tool)
{
	u64 head, page_offset, file_offset, file_pos, progress_next;
	int err, mmap_prot, mmap_flags, map_idx = 0;
	size_t page_size, mmap_size;
	char *buf, *mmaps[8];
	union perf_event *event;
	uint32_t size;

	perf_tool__fill_defaults(tool);

	page_size = sysconf(_SC_PAGESIZE);

	page_offset = page_size * (data_offset / page_size);
	file_offset = page_offset;
	head = data_offset - page_offset;

	if (data_offset + data_size < file_size)
		file_size = data_offset + data_size;

	progress_next = file_size / 16;

	mmap_size = session->mmap_window;
	if (mmap_size > file_size)
		mmap_size = file_size;

	memset(mmaps, 0, sizeof(mmaps));

	mmap_prot  = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (session->header.needs_swap) {
		mmap_prot  |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, session->fd,
		   file_offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}
	mmaps[map_idx] = buf;
	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
	file_pos = file_offset + head;

more:
	event = fetch_mmaped_event(session, head, mmap_size, buf);
	if (!event) {
		if (mmaps[map_idx]) {
			munmap(mmaps[map_idx], mmap_size);
			mmaps[map_idx] = NULL;
		}

		page_offset = page_size * (head / page_size);
		file_offset += page_offset;
		head -= page_offset;
		goto remap;
	}

	size = event->header.size;

	if (size == 0 ||
	    perf_session__process_event(session, event, tool, file_pos) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       file_offset + head, event->header.size,
		       event->header.type);
		err = -EINVAL;
		goto out_err;
	}

	head += size;
	file_pos += size;

	if (file_pos >= progress_next) {
		progress_next += file_size / 16;
		ui_progress__update(file_pos, file_size,
				    "Processing events...");
	}

	if (file_pos < file_size)
		goto more;

	err = 0;
	/* do the final flush for ordered samples */
	session->ordered_samples.next_flush = ULLONG_MAX;
	flush_sample_queue(session, tool);
out_err:
	perf_session__warn_about_errors(session, tool);
	perf_session_free_sample_buffers(session);
	return err;
}

int perf_session__process_events(struct perf_session *self,
				 struct perf_tool *tool)
{
	int err;

	if (perf_session__register_idle_thread(self) == NULL)
		return -ENOMEM;

	if (!self->fd_pipe)
		err = __perf_session__process_events(self,
						     self->header.data_offset,
						     self->header.data_size,
						     self->size, tool);
	else
		err = __perf_session__process_pipe_events(self, tool);

	return err;
}

bool perf_session__has_traces(struct perf_session *session, const char *msg)
{
	if (!(perf_evlist__sample_type(session->evlist) & PERF_SAMPLE_RAW)) {
		pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
		return false;
	}

	return true;
}

int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
				     const char *symbol_name, u64 addr)
{
	char *bracket;
	enum map_type i;
	struct ref_reloc_sym *ref;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct kmap *kmap = map__kmap(maps[i]);

		kmap->ref_reloc_sym = ref;
	}

	return 0;
}

size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
{
	return __dsos__fprintf(&self->host_machine.kernel_dsos, fp) +
	       __dsos__fprintf(&self->host_machine.user_dsos, fp) +
	       machines__fprintf_dsos(&self->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
					  bool with_hits)
{
	size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, with_hits);

	return ret + machines__fprintf_dsos_buildid(&self->machines, fp, with_hits);
}

size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = fprintf(fp, "Aggregated stats:\n");

	ret += hists__fprintf_nr_events(&session->hists, fp);

	list_for_each_entry(pos, &session->evlist->entries, node) {
		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
		ret += hists__fprintf_nr_events(&pos->hists, fp);
	}

	return ret;
}

size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
{
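	/*
	 * FIXME: this should print all the machines in the session, not
	 * just the host one.
	 */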
	return machine__fprintf(&session->host_machine, fp);
}

void perf_session__remove_thread(struct perf_session *session,
				 struct thread *th)
{
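	/*
	 * FIXME: a session can have many machines, so the thread should be
	 * removed from the machine it actually belongs to, not always from
	 * host_machine.
	 */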
	machine__remove_thread(&session->host_machine, th);
}

struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
						   unsigned int type)
{
	struct perf_evsel *pos;

	list_for_each_entry(pos, &session->evlist->entries, node) {
		if (pos->attr.type == type)
			return pos;
	}
	return NULL;
}

void perf_event__print_ip(union perf_event *event, struct perf_sample *sample,
			  struct machine *machine, int print_sym,
			  int print_dso, int print_symoffset)
{
	struct addr_location al;
	struct callchain_cursor_node *node;

	if (perf_event__preprocess_sample(event, machine, &al, sample,
					  NULL) < 0) {
		error("problem processing %d event, skipping it.\n",
		      event->header.type);
		return;
	}

	if (symbol_conf.use_callchain && sample->callchain) {

		if (machine__resolve_callchain(machine, al.thread,
					       sample->callchain, NULL) != 0) {
			if (verbose)
				error("Failed to resolve callchain. Skipping\n");
			return;
		}
		callchain_cursor_commit(&callchain_cursor);

		while (1) {
			node = callchain_cursor_current(&callchain_cursor);
			if (!node)
				break;

			printf("\t%16" PRIx64, node->ip);
			if (print_sym) {
				printf(" ");
				symbol__fprintf_symname(node->sym, stdout);
			}
			if (print_dso) {
				printf(" (");
				map__fprintf_dsoname(node->map, stdout);
				printf(")");
			}
			printf("\n");

			callchain_cursor_advance(&callchain_cursor);
		}

	} else {
		printf("%16" PRIx64, sample->ip);
		if (print_sym) {
			printf(" ");
			if (print_symoffset)
				symbol__fprintf_symname_offs(al.sym, &al,
							     stdout);
			else
				symbol__fprintf_symname(al.sym, stdout);
		}

		if (print_dso) {
			printf(" (");
			map__fprintf_dsoname(al.map, stdout);
			printf(")");
		}
	}
}

int perf_session__cpu_bitmap(struct perf_session *session,
			     const char *cpu_list, unsigned long *cpu_bitmap)
{
	int i;
	struct cpu_map *map;

	for (i = 0; i < PERF_TYPE_MAX; ++i) {
		struct perf_evsel *evsel;

		evsel = perf_session__find_first_evtype(session, i);
		if (!evsel)
			continue;

		if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
			pr_err("File does not contain CPU events. "
			       "Remove -c option to proceed.\n");
			return -1;
		}
	}

	map = cpu_map__new(cpu_list);
	if (map == NULL) {
		pr_err("Invalid cpu_list\n");
		return -1;
	}

	for (i = 0; i < map->nr; i++) {
		int cpu = map->map[i];

		if (cpu >= MAX_NR_CPUS) {
			pr_err("Requested CPU %d too large. "
			       "Consider raising MAX_NR_CPUS\n", cpu);
			return -1;
		}

		set_bit(cpu, cpu_bitmap);
	}

	return 0;
}

void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
				bool full)
{
	struct stat st;
	int ret;

	if (session == NULL || fp == NULL)
		return;

	ret = fstat(session->fd, &st);
	if (ret == -1)
		return;

	fprintf(fp, "# ========\n");
	fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));
	perf_header__fprintf_info(session, fp, full);
	fprintf(fp, "# ========\n#\n");
}

int __perf_session__set_tracepoints_handlers(struct perf_session *session,
					     const struct perf_evsel_str_handler *assocs,
					     size_t nr_assocs)
{
	struct perf_evlist *evlist = session->evlist;
	struct event_format *format;
	struct perf_evsel *evsel;
	char *tracepoint, *name;
	size_t i;
	int err;

	for (i = 0; i < nr_assocs; i++) {
		err = -ENOMEM;
		tracepoint = strdup(assocs[i].name);
		if (tracepoint == NULL)
			goto out;

		err = -ENOENT;
		name = strchr(tracepoint, ':');
		if (name == NULL)
			goto out_free;

		*name++ = '\0';
		format = pevent_find_event_by_name(session->pevent,
						   tracepoint, name);
		if (format == NULL) {
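			/*
			 * Adding a handler for an event not in the
			 * session, just ignore it.
			 */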
			goto next;
		}

		evsel = perf_evlist__find_tracepoint_by_id(evlist, format->id);
		if (evsel == NULL)
			goto next;

		err = -EEXIST;
		if (evsel->handler.func != NULL)
			goto out_free;
		evsel->handler.func = assocs[i].handler;
next:
		free(tracepoint);
	}

	err = 0;
out:
	return err;

out_free:
	free(tracepoint);
	goto out;
}