1
2
3
4
5
6
7
8
9
10#include <byteswap.h>
11#include <linux/bitops.h>
12#include <api/fs/debugfs.h>
13#include <traceevent/event-parse.h>
14#include <linux/hw_breakpoint.h>
15#include <linux/perf_event.h>
16#include <sys/resource.h>
17#include "asm/bug.h"
18#include "callchain.h"
19#include "cgroup.h"
20#include "evsel.h"
21#include "evlist.h"
22#include "util.h"
23#include "cpumap.h"
24#include "thread_map.h"
25#include "target.h"
26#include "perf_regs.h"
27#include "debug.h"
28#include "trace-event.h"
29#include "stat.h"
30
/*
 * Kernel feature probes: each flag is set when perf_event_open() is found
 * to reject the corresponding perf_event_attr feature, so later opens can
 * strip it and fall back gracefully (see the fallback paths in
 * __perf_evsel__open()).
 */
static struct {
	bool sample_id_all;
	bool exclude_guest;
	bool mmap2;
	bool cloexec;
	bool clockid;
	bool clockid_wrong;
} perf_missing_features;

/* Last clock id requested via record opts; set in perf_evsel__config(). */
static clockid_t clockid;
41
/* Default per-evsel init hook: no embedded object, nothing to set up. */
static int perf_evsel__no_extra_init(struct perf_evsel *evsel __maybe_unused)
{
	return 0;
}
46
/* Default per-evsel fini hook: no embedded object, nothing to tear down. */
static void perf_evsel__no_extra_fini(struct perf_evsel *evsel __maybe_unused)
{
}
50
/*
 * Allocation descriptor for evsels.  Tools that embed struct perf_evsel in
 * a larger object can grow @size and override the @init/@fini hooks via
 * perf_evsel__object_config(); perf_evsel__new_idx() allocates @size bytes
 * and perf_evsel__init()/perf_evsel__exit() invoke the hooks.
 */
static struct {
	size_t	size;
	int	(*init)(struct perf_evsel *evsel);
	void	(*fini)(struct perf_evsel *evsel);
} perf_evsel__object = {
	.size = sizeof(struct perf_evsel),
	.init = perf_evsel__no_extra_init,
	.fini = perf_evsel__no_extra_fini,
};
60
61int perf_evsel__object_config(size_t object_size,
62 int (*init)(struct perf_evsel *evsel),
63 void (*fini)(struct perf_evsel *evsel))
64{
65
66 if (object_size == 0)
67 goto set_methods;
68
69 if (perf_evsel__object.size > object_size)
70 return -EINVAL;
71
72 perf_evsel__object.size = object_size;
73
74set_methods:
75 if (init != NULL)
76 perf_evsel__object.init = init;
77
78 if (fini != NULL)
79 perf_evsel__object.fini = fini;
80
81 return 0;
82}
83
/* Cell accessor: the (cpu, thread) file descriptor slot inside e->fd. */
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
85
86int __perf_evsel__sample_size(u64 sample_type)
87{
88 u64 mask = sample_type & PERF_SAMPLE_MASK;
89 int size = 0;
90 int i;
91
92 for (i = 0; i < 64; i++) {
93 if (mask & (1ULL << i))
94 size++;
95 }
96
97 size *= sizeof(u64);
98
99 return size;
100}
101
102
103
104
105
106
107
108
109
/*
 * Position (in u64 words from the start of a sample) of the ID field,
 * derived purely from @sample_type.  The if-chain below mirrors the order
 * in which the kernel lays out sample fields, so each preceding enabled
 * field pushes the ID one word further.  Returns -1 when no ID is sampled.
 */
static int __perf_evsel__calc_id_pos(u64 sample_type)
{
	int idx = 0;

	/* PERF_SAMPLE_IDENTIFIER is always the very first word. */
	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		return 0;

	if (!(sample_type & PERF_SAMPLE_ID))
		return -1;

	if (sample_type & PERF_SAMPLE_IP)
		idx += 1;

	if (sample_type & PERF_SAMPLE_TID)
		idx += 1;

	if (sample_type & PERF_SAMPLE_TIME)
		idx += 1;

	if (sample_type & PERF_SAMPLE_ADDR)
		idx += 1;

	return idx;
}
134
135
136
137
138
139
140
141
142
/*
 * Position of the ID field counted in u64 words from the *end* of a
 * non-sample record (where the sample_id trailer lives).  As above, the
 * check order mirrors the kernel's field layout.  Returns -1 when no ID
 * is present.
 */
static int __perf_evsel__calc_is_pos(u64 sample_type)
{
	int idx = 1;

	/* PERF_SAMPLE_IDENTIFIER is always the last word of the trailer. */
	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		return 1;

	if (!(sample_type & PERF_SAMPLE_ID))
		return -1;

	if (sample_type & PERF_SAMPLE_CPU)
		idx += 1;

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		idx += 1;

	return idx;
}
161
/* Recompute the cached id_pos/is_pos after attr.sample_type changes. */
void perf_evsel__calc_id_pos(struct perf_evsel *evsel)
{
	evsel->id_pos = __perf_evsel__calc_id_pos(evsel->attr.sample_type);
	evsel->is_pos = __perf_evsel__calc_is_pos(evsel->attr.sample_type);
}
167
/*
 * Enable one sample_type bit, keeping the cached sample_size and
 * id_pos/is_pos consistent.  No-op if the bit is already set.
 */
void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
				  enum perf_event_sample_format bit)
{
	if (!(evsel->attr.sample_type & bit)) {
		evsel->attr.sample_type |= bit;
		evsel->sample_size += sizeof(u64);
		perf_evsel__calc_id_pos(evsel);
	}
}
177
/*
 * Disable one sample_type bit, keeping the cached sample_size and
 * id_pos/is_pos consistent.  No-op if the bit is already clear.
 */
void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
				    enum perf_event_sample_format bit)
{
	if (evsel->attr.sample_type & bit) {
		evsel->attr.sample_type &= ~bit;
		evsel->sample_size -= sizeof(u64);
		perf_evsel__calc_id_pos(evsel);
	}
}
187
/*
 * Request per-record IDs: prefer PERF_SAMPLE_IDENTIFIER when the kernel
 * supports it (fixed position, works for mixed-evsel mmaps), otherwise
 * fall back to PERF_SAMPLE_ID.  Also asks for the ID in read() results.
 */
void perf_evsel__set_sample_id(struct perf_evsel *evsel,
			       bool can_sample_identifier)
{
	if (can_sample_identifier) {
		perf_evsel__reset_sample_bit(evsel, ID);
		perf_evsel__set_sample_bit(evsel, IDENTIFIER);
	} else {
		perf_evsel__set_sample_bit(evsel, ID);
	}
	evsel->attr.read_format |= PERF_FORMAT_ID;
}
199
/*
 * Initialize an already-allocated evsel from @attr.  @idx is the position
 * in the evlist; the first evsel (idx == 0) becomes the "tracking" event
 * that carries mmap/comm/task side-band data.  Also runs the configured
 * per-object init hook.
 */
void perf_evsel__init(struct perf_evsel *evsel,
		      struct perf_event_attr *attr, int idx)
{
	evsel->idx	   = idx;
	evsel->tracking	   = !idx;
	evsel->attr	   = *attr;
	evsel->leader	   = evsel;	/* its own group leader until grouped */
	evsel->unit	   = "";
	evsel->scale	   = 1.0;
	evsel->evlist	   = NULL;
	INIT_LIST_HEAD(&evsel->node);
	INIT_LIST_HEAD(&evsel->config_terms);
	perf_evsel__object.init(evsel);
	evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
	perf_evsel__calc_id_pos(evsel);
	evsel->cmdline_group_boundary = false;
}
217
218struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
219{
220 struct perf_evsel *evsel = zalloc(perf_evsel__object.size);
221
222 if (evsel != NULL)
223 perf_evsel__init(evsel, attr, idx);
224
225 return evsel;
226}
227
/*
 * Allocate a new tracepoint evsel for @sys:@name.  Looks up the event
 * format via tracefs to obtain the tracepoint id, builds a raw-sample
 * perf_event_attr and initializes the evsel.  Returns NULL on allocation
 * or format-lookup failure.
 */
struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx)
{
	struct perf_evsel *evsel = zalloc(perf_evsel__object.size);

	if (evsel != NULL) {
		struct perf_event_attr attr = {
			.type	       = PERF_TYPE_TRACEPOINT,
			.sample_type   = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
					  PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
		};

		/* Human-readable "sys:name" used by perf_evsel__name(). */
		if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
			goto out_free;

		evsel->tp_format = trace_event__tp_format(sys, name);
		if (evsel->tp_format == NULL)
			goto out_free;

		event_attr_init(&attr);
		attr.config = evsel->tp_format->id;
		attr.sample_period = 1;	/* every tracepoint hit is sampled */
		perf_evsel__init(evsel, &attr, idx);
	}

	return evsel;

out_free:
	zfree(&evsel->name);
	free(evsel);
	return NULL;
}
259
/* Canonical names for PERF_TYPE_HARDWARE events, indexed by config. */
const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
	"cycles",
	"instructions",
	"cache-references",
	"cache-misses",
	"branches",
	"branch-misses",
	"bus-cycles",
	"stalled-cycles-frontend",
	"stalled-cycles-backend",
	"ref-cycles",
};
272
273static const char *__perf_evsel__hw_name(u64 config)
274{
275 if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
276 return perf_evsel__hw_names[config];
277
278 return "unknown-hardware";
279}
280
281static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t size)
282{
283 int colon = 0, r = 0;
284 struct perf_event_attr *attr = &evsel->attr;
285 bool exclude_guest_default = false;
286
287#define MOD_PRINT(context, mod) do { \
288 if (!attr->exclude_##context) { \
289 if (!colon) colon = ++r; \
290 r += scnprintf(bf + r, size - r, "%c", mod); \
291 } } while(0)
292
293 if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
294 MOD_PRINT(kernel, 'k');
295 MOD_PRINT(user, 'u');
296 MOD_PRINT(hv, 'h');
297 exclude_guest_default = true;
298 }
299
300 if (attr->precise_ip) {
301 if (!colon)
302 colon = ++r;
303 r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
304 exclude_guest_default = true;
305 }
306
307 if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
308 MOD_PRINT(host, 'H');
309 MOD_PRINT(guest, 'G');
310 }
311#undef MOD_PRINT
312 if (colon)
313 bf[colon - 1] = ':';
314 return r;
315}
316
/* Format "name[:modifiers]" for a hardware event into @bf. */
static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}
322
/* Canonical names for PERF_TYPE_SOFTWARE events, indexed by config. */
const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
	"cpu-clock",
	"task-clock",
	"page-faults",
	"context-switches",
	"cpu-migrations",
	"minor-faults",
	"major-faults",
	"alignment-faults",
	"emulation-faults",
	"dummy",
};
335
/* Name for a PERF_TYPE_SOFTWARE config, or a placeholder if unknown. */
static const char *__perf_evsel__sw_name(u64 config)
{
	if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config])
		return perf_evsel__sw_names[config];
	return "unknown-software";
}
342
/* Format "name[:modifiers]" for a software event into @bf. */
static int perf_evsel__sw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}
348
/*
 * Format a breakpoint event as "mem:0xADDR:rwx", appending one letter per
 * requested access type.  Returns the number of characters written.
 */
static int __perf_evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
{
	int r;

	r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);

	if (type & HW_BREAKPOINT_R)
		r += scnprintf(bf + r, size - r, "r");

	if (type & HW_BREAKPOINT_W)
		r += scnprintf(bf + r, size - r, "w");

	if (type & HW_BREAKPOINT_X)
		r += scnprintf(bf + r, size - r, "x");

	return r;
}
366
/* Format "mem:0xADDR:rwx[:modifiers]" for a breakpoint event into @bf. */
static int perf_evsel__bp_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	struct perf_event_attr *attr = &evsel->attr;
	int r = __perf_evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}
373
/*
 * Alias tables for PERF_TYPE_HW_CACHE events.  Index [x][0] is the
 * canonical spelling used when printing; the remaining entries are
 * accepted aliases for parsing.
 */
const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
				[PERF_EVSEL__MAX_ALIASES] = {
 { "L1-dcache",	"l1-d",		"l1d",		"L1-data",		},
 { "L1-icache",	"l1-i",		"l1i",		"L1-instruction",	},
 { "LLC",	"L2",							},
 { "dTLB",	"d-tlb",	"Data-TLB",				},
 { "iTLB",	"i-tlb",	"Instruction-TLB",			},
 { "branch",	"branches",	"bpu",		"btb",		"bpc",	},
 { "node",								},
};

/* Cache operation aliases; [op][1] is used for the short two-part name. */
const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
				   [PERF_EVSEL__MAX_ALIASES] = {
 { "load",	"loads",	"read",					},
 { "store",	"stores",	"write",				},
 { "prefetch",	"prefetches",	"speculative-read", "speculative-load",	},
};

/* Cache result aliases. */
const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
				       [PERF_EVSEL__MAX_ALIASES] = {
 { "refs",	"Reference",	"ops",		"access",		},
 { "misses",	"miss",						},
};
397
#define C(x)		PERF_COUNT_HW_CACHE_##x
#define CACHE_READ	(1 << C(OP_READ))
#define CACHE_WRITE	(1 << C(OP_WRITE))
#define CACHE_PREFETCH	(1 << C(OP_PREFETCH))
#define COP(x)		(1 << x)

/*
 * Per cache type, a bitmask of which operations (read/write/prefetch)
 * form a meaningful event; consulted by perf_evsel__is_cache_op_valid().
 */
static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = {
 [C(L1D)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(L1I)]	= (CACHE_READ | CACHE_PREFETCH),
 [C(LL)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(DTLB)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(ITLB)]	= (CACHE_READ),
 [C(BPU)]	= (CACHE_READ),
 [C(NODE)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
};
418
419bool perf_evsel__is_cache_op_valid(u8 type, u8 op)
420{
421 if (perf_evsel__hw_cache_stat[type] & COP(op))
422 return true;
423 else
424 return false;
425}
426
/*
 * Compose a cache event name from its components.  With a non-zero result
 * the long "type-op-result" form is used; otherwise the short "type-op"
 * form, which deliberately takes the *second* op alias (e.g. "loads").
 */
int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
					    char *bf, size_t size)
{
	if (result) {
		return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0],
				 perf_evsel__hw_cache_op[op][0],
				 perf_evsel__hw_cache_result[result][0]);
	}

	return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0],
			 perf_evsel__hw_cache_op[op][1]);
}
439
440static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)
441{
442 u8 op, result, type = (config >> 0) & 0xff;
443 const char *err = "unknown-ext-hardware-cache-type";
444
445 if (type > PERF_COUNT_HW_CACHE_MAX)
446 goto out_err;
447
448 op = (config >> 8) & 0xff;
449 err = "unknown-ext-hardware-cache-op";
450 if (op > PERF_COUNT_HW_CACHE_OP_MAX)
451 goto out_err;
452
453 result = (config >> 16) & 0xff;
454 err = "unknown-ext-hardware-cache-result";
455 if (result > PERF_COUNT_HW_CACHE_RESULT_MAX)
456 goto out_err;
457
458 err = "invalid-cache";
459 if (!perf_evsel__is_cache_op_valid(type, op))
460 goto out_err;
461
462 return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
463out_err:
464 return scnprintf(bf, size, "%s", err);
465}
466
/* Format "cache-name[:modifiers]" for a HW_CACHE event into @bf. */
static int perf_evsel__hw_cache_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int ret = __perf_evsel__hw_cache_name(evsel->attr.config, bf, size);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}
472
/* Format "raw 0xCONFIG[:modifiers]" for a PERF_TYPE_RAW event into @bf. */
static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}
478
/*
 * Return (and cache in evsel->name) a human-readable name for the event,
 * synthesizing one from attr.type/attr.config on first use.  Tracepoint
 * evsels are expected to have their name set at creation time
 * (perf_evsel__newtp_idx()); hitting that case here yields a placeholder.
 * Never returns NULL: falls back to "unknown" if strdup() fails.
 */
const char *perf_evsel__name(struct perf_evsel *evsel)
{
	char bf[128];

	if (evsel->name)
		return evsel->name;

	switch (evsel->attr.type) {
	case PERF_TYPE_RAW:
		perf_evsel__raw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HARDWARE:
		perf_evsel__hw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HW_CACHE:
		perf_evsel__hw_cache_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_SOFTWARE:
		perf_evsel__sw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_TRACEPOINT:
		scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
		break;

	case PERF_TYPE_BREAKPOINT:
		perf_evsel__bp_name(evsel, bf, sizeof(bf));
		break;

	default:
		scnprintf(bf, sizeof(bf), "unknown attr type: %d",
			  evsel->attr.type);
		break;
	}

	evsel->name = strdup(bf);

	return evsel->name ?: "unknown";
}
521
522const char *perf_evsel__group_name(struct perf_evsel *evsel)
523{
524 return evsel->group_name ?: "anon group";
525}
526
/*
 * Format "group_name { leader, member, ... }" into @buf, where @evsel is
 * the group leader.  Returns the number of characters written.
 */
int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size)
{
	int ret;
	struct perf_evsel *pos;
	const char *group_name = perf_evsel__group_name(evsel);

	ret = scnprintf(buf, size, "%s", group_name);

	ret += scnprintf(buf + ret, size - ret, " { %s",
			 perf_evsel__name(evsel));

	for_each_group_member(pos, evsel)
		ret += scnprintf(buf + ret, size - ret, ", %s",
				 perf_evsel__name(pos));

	ret += scnprintf(buf + ret, size - ret, " }");

	return ret;
}
546
/*
 * Enable callchain collection on @evsel per @param->record_mode, degrading
 * to frame pointers (plain PERF_SAMPLE_CALLCHAIN) with a warning when the
 * requested mode (LBR, DWARF) cannot be used with the current options or
 * event type.
 */
static void
perf_evsel__config_callgraph(struct perf_evsel *evsel,
			     struct record_opts *opts,
			     struct callchain_param *param)
{
	bool function = perf_evsel__is_function_event(evsel);
	struct perf_event_attr *attr = &evsel->attr;

	perf_evsel__set_sample_bit(evsel, CALLCHAIN);

	if (param->record_mode == CALLCHAIN_LBR) {
		/* LBR callstack cannot coexist with explicit branch-stack sampling. */
		if (!opts->branch_stack) {
			if (attr->exclude_user) {
				pr_warning("LBR callstack option is only available "
					   "to get user callchain information. "
					   "Falling back to framepointers.\n");
			} else {
				perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
				attr->branch_sample_type = PERF_SAMPLE_BRANCH_USER |
							PERF_SAMPLE_BRANCH_CALL_STACK;
			}
		} else
			 pr_warning("Cannot use LBR callstack with branch stack. "
				    "Falling back to framepointers.\n");
	}

	if (param->record_mode == CALLCHAIN_DWARF) {
		if (!function) {
			/* DWARF unwind needs user regs + a user stack dump per sample. */
			perf_evsel__set_sample_bit(evsel, REGS_USER);
			perf_evsel__set_sample_bit(evsel, STACK_USER);
			attr->sample_regs_user = PERF_REGS_MASK;
			attr->sample_stack_user = param->dump_size;
			attr->exclude_callchain_user = 1;
		} else {
			pr_info("Cannot use DWARF unwind for function trace event,"
				" falling back to framepointers.\n");
		}
	}

	if (function) {
		pr_info("Disabling user space callchains for function trace event.\n");
		attr->exclude_callchain_user = 1;
	}
}
591
/*
 * Undo what perf_evsel__config_callgraph() set for @param->record_mode,
 * so a per-event callgraph setting can replace the global one cleanly.
 */
static void
perf_evsel__reset_callgraph(struct perf_evsel *evsel,
			    struct callchain_param *param)
{
	struct perf_event_attr *attr = &evsel->attr;

	perf_evsel__reset_sample_bit(evsel, CALLCHAIN);
	if (param->record_mode == CALLCHAIN_LBR) {
		perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
		attr->branch_sample_type &= ~(PERF_SAMPLE_BRANCH_USER |
					      PERF_SAMPLE_BRANCH_CALL_STACK);
	}
	if (param->record_mode == CALLCHAIN_DWARF) {
		perf_evsel__reset_sample_bit(evsel, REGS_USER);
		perf_evsel__reset_sample_bit(evsel, STACK_USER);
	}
}
609
610static void apply_config_terms(struct perf_evsel *evsel,
611 struct record_opts *opts)
612{
613 struct perf_evsel_config_term *term;
614 struct list_head *config_terms = &evsel->config_terms;
615 struct perf_event_attr *attr = &evsel->attr;
616 struct callchain_param param;
617 u32 dump_size = 0;
618 char *callgraph_buf = NULL;
619
620
621 param.record_mode = callchain_param.record_mode;
622
623 list_for_each_entry(term, config_terms, list) {
624 switch (term->type) {
625 case PERF_EVSEL__CONFIG_TERM_PERIOD:
626 attr->sample_period = term->val.period;
627 attr->freq = 0;
628 break;
629 case PERF_EVSEL__CONFIG_TERM_FREQ:
630 attr->sample_freq = term->val.freq;
631 attr->freq = 1;
632 break;
633 case PERF_EVSEL__CONFIG_TERM_TIME:
634 if (term->val.time)
635 perf_evsel__set_sample_bit(evsel, TIME);
636 else
637 perf_evsel__reset_sample_bit(evsel, TIME);
638 break;
639 case PERF_EVSEL__CONFIG_TERM_CALLGRAPH:
640 callgraph_buf = term->val.callgraph;
641 break;
642 case PERF_EVSEL__CONFIG_TERM_STACK_USER:
643 dump_size = term->val.stack_user;
644 break;
645 default:
646 break;
647 }
648 }
649
650
651 if ((callgraph_buf != NULL) || (dump_size > 0)) {
652
653
654 if (callgraph_buf != NULL) {
655 if (!strcmp(callgraph_buf, "no")) {
656 param.enabled = false;
657 param.record_mode = CALLCHAIN_NONE;
658 } else {
659 param.enabled = true;
660 if (parse_callchain_record(callgraph_buf, ¶m)) {
661 pr_err("per-event callgraph setting for %s failed. "
662 "Apply callgraph global setting for it\n",
663 evsel->name);
664 return;
665 }
666 }
667 }
668 if (dump_size > 0) {
669 dump_size = round_up(dump_size, sizeof(u64));
670 param.dump_size = dump_size;
671 }
672
673
674 if (callchain_param.enabled)
675 perf_evsel__reset_callgraph(evsel, &callchain_param);
676
677
678 if (param.enabled)
679 perf_evsel__config_callgraph(evsel, opts, ¶m);
680 }
681}
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
/*
 * perf_evsel__config - derive evsel->attr from the record options.
 *
 * Fills in the sample layout (sample_type bits, period vs. frequency,
 * read_format) and the side-band tracking bits (mmap/comm/task/...), then
 * applies per-event config terms, which take precedence over the global
 * settings.  The statement order matters: later options build on bits set
 * by earlier ones.
 */
void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
{
	struct perf_evsel *leader = evsel->leader;
	struct perf_event_attr *attr = &evsel->attr;
	int track = evsel->tracking;
	bool per_cpu = opts->target.default_per_cpu && !opts->target.per_thread;

	attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1;
	attr->inherit	    = !opts->no_inherit;

	/* IP and TID are always sampled. */
	perf_evsel__set_sample_bit(evsel, IP);
	perf_evsel__set_sample_bit(evsel, TID);

	if (evsel->sample_read) {
		perf_evsel__set_sample_bit(evsel, READ);

		/*
		 * Need ID even in case of single event, because
		 * PERF_SAMPLE_READ process ID specific data.
		 */
		perf_evsel__set_sample_id(evsel, false);

		/*
		 * Apply group format only if we belong to group
		 * with more than one members.
		 */
		if (leader->nr_members > 1) {
			attr->read_format |= PERF_FORMAT_GROUP;
			attr->inherit = 0;
		}
	}

	/*
	 * We default some events to have a default interval. But keep
	 * it a weak assumption overridable by the user.
	 */
	if (!attr->sample_period || (opts->user_freq != UINT_MAX ||
				     opts->user_interval != ULLONG_MAX)) {
		if (opts->freq) {
			perf_evsel__set_sample_bit(evsel, PERIOD);
			attr->freq		= 1;
			attr->sample_freq	= opts->freq;
		} else {
			attr->sample_period = opts->default_interval;
		}
	}

	/*
	 * Disable sampling for all group members other
	 * than leader in case leader 'leads' the sampling.
	 */
	if ((leader != evsel) && leader->sample_read) {
		attr->sample_freq   = 0;
		attr->sample_period = 0;
	}

	if (opts->no_samples)
		attr->sample_freq = 0;

	if (opts->inherit_stat)
		attr->inherit_stat = 1;

	if (opts->sample_address) {
		perf_evsel__set_sample_bit(evsel, ADDR);
		attr->mmap_data = track;
	}

	/*
	 * We don't allow user space callchains for function trace
	 * event, due to issues with page faults while tracing page
	 * fault handler and its overall trickiness nature.
	 */
	if (perf_evsel__is_function_event(evsel))
		evsel->attr.exclude_callchain_user = 1;

	if (callchain_param.enabled && !evsel->no_aux_samples)
		perf_evsel__config_callgraph(evsel, opts, &callchain_param);

	if (opts->sample_intr_regs) {
		attr->sample_regs_intr = opts->sample_intr_regs;
		perf_evsel__set_sample_bit(evsel, REGS_INTR);
	}

	if (target__has_cpu(&opts->target))
		perf_evsel__set_sample_bit(evsel, CPU);

	if (opts->period)
		perf_evsel__set_sample_bit(evsel, PERIOD);

	/*
	 * When the user explicitly disabled time don't force it here.
	 */
	if (opts->sample_time &&
	    (!perf_missing_features.sample_id_all &&
	    (!opts->no_inherit || target__has_cpu(&opts->target) || per_cpu ||
	     opts->sample_time_set)))
		perf_evsel__set_sample_bit(evsel, TIME);

	if (opts->raw_samples && !evsel->no_aux_samples) {
		perf_evsel__set_sample_bit(evsel, TIME);
		perf_evsel__set_sample_bit(evsel, RAW);
		perf_evsel__set_sample_bit(evsel, CPU);
	}

	if (opts->sample_address)
		perf_evsel__set_sample_bit(evsel, DATA_SRC);

	if (opts->no_buffering) {
		attr->watermark = 0;
		attr->wakeup_events = 1;
	}
	if (opts->branch_stack && !evsel->no_aux_samples) {
		perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
		attr->branch_sample_type = opts->branch_stack;
	}

	if (opts->sample_weight)
		perf_evsel__set_sample_bit(evsel, WEIGHT);

	/* Side-band tracking records only on the designated tracking evsel. */
	attr->task  = track;
	attr->mmap  = track;
	attr->mmap2 = track && !perf_missing_features.mmap2;
	attr->comm  = track;

	if (opts->record_switch_events)
		attr->context_switch = track;

	if (opts->sample_transaction)
		perf_evsel__set_sample_bit(evsel, TRANSACTION);

	if (opts->running_time) {
		evsel->attr.read_format |=
			PERF_FORMAT_TOTAL_TIME_ENABLED |
			PERF_FORMAT_TOTAL_TIME_RUNNING;
	}

	/*
	 * XXX see the function comment above
	 *
	 * Disabling only independent events or group leaders,
	 * keeping group members enabled.
	 */
	if (perf_evsel__is_group_leader(evsel))
		attr->disabled = 1;

	/*
	 * Setting enable_on_exec for independent events and
	 * group leaders for traced executed by perf.
	 */
	if (target__none(&opts->target) && perf_evsel__is_group_leader(evsel) &&
		!opts->initial_delay)
		attr->enable_on_exec = 1;

	if (evsel->immediate) {
		attr->disabled = 0;
		attr->enable_on_exec = 0;
	}

	clockid = opts->clockid;
	if (opts->use_clockid) {
		attr->use_clockid = 1;
		attr->clockid = opts->clockid;
	}

	/*
	 * Apply event specific term settings,
	 * it overloads any global configuration.
	 */
	apply_config_terms(evsel, opts);
}
881
/*
 * Allocate the (ncpus x nthreads) fd matrix, initializing every slot to
 * -1 (not open).  System-wide evsels use a single thread column.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	if (evsel->system_wide)
		nthreads = 1;

	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));

	if (evsel->fd) {
		for (cpu = 0; cpu < ncpus; cpu++) {
			for (thread = 0; thread < nthreads; thread++) {
				FD(evsel, cpu, thread) = -1;
			}
		}
	}

	return evsel->fd != NULL ? 0 : -ENOMEM;
}
901
902static int perf_evsel__run_ioctl(struct perf_evsel *evsel, int ncpus, int nthreads,
903 int ioc, void *arg)
904{
905 int cpu, thread;
906
907 if (evsel->system_wide)
908 nthreads = 1;
909
910 for (cpu = 0; cpu < ncpus; cpu++) {
911 for (thread = 0; thread < nthreads; thread++) {
912 int fd = FD(evsel, cpu, thread),
913 err = ioctl(fd, ioc, arg);
914
915 if (err)
916 return err;
917 }
918 }
919
920 return 0;
921}
922
/* Push @filter to the kernel for every open fd of the evsel. */
int perf_evsel__apply_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
			     const char *filter)
{
	return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
				     PERF_EVENT_IOC_SET_FILTER,
				     (void *)filter);
}
930
931int perf_evsel__set_filter(struct perf_evsel *evsel, const char *filter)
932{
933 char *new_filter = strdup(filter);
934
935 if (new_filter != NULL) {
936 free(evsel->filter);
937 evsel->filter = new_filter;
938 return 0;
939 }
940
941 return -1;
942}
943
944int perf_evsel__append_filter(struct perf_evsel *evsel,
945 const char *op, const char *filter)
946{
947 char *new_filter;
948
949 if (evsel->filter == NULL)
950 return perf_evsel__set_filter(evsel, filter);
951
952 if (asprintf(&new_filter,"(%s) %s (%s)", evsel->filter, op, filter) > 0) {
953 free(evsel->filter);
954 evsel->filter = new_filter;
955 return 0;
956 }
957
958 return -1;
959}
960
/* Enable counting on every open fd of the evsel. */
int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
				     PERF_EVENT_IOC_ENABLE,
				     0);
}
967
/*
 * Allocate the per-(cpu, thread) sample_id matrix and the flat id array.
 * A zero dimension is a no-op; system-wide evsels use a single thread
 * column.  On failure everything allocated here is rolled back and
 * -ENOMEM is returned.
 */
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	if (ncpus == 0 || nthreads == 0)
		return 0;

	if (evsel->system_wide)
		nthreads = 1;

	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	if (evsel->sample_id == NULL)
		return -ENOMEM;

	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
	if (evsel->id == NULL) {
		xyarray__delete(evsel->sample_id);
		evsel->sample_id = NULL;
		return -ENOMEM;
	}

	return 0;
}
989
/* Release the fd matrix (does not close the descriptors themselves). */
static void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}
995
/* Release the sample_id matrix and the flat id array. */
static void perf_evsel__free_id(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;
	zfree(&evsel->id);
}
1002
/* Unlink and free every per-event config term attached to the evsel. */
static void perf_evsel__free_config_terms(struct perf_evsel *evsel)
{
	struct perf_evsel_config_term *term, *h;

	list_for_each_entry_safe(term, h, &evsel->config_terms, list) {
		list_del(&term->list);
		free(term);
	}
}
1012
/*
 * Close every open fd of the evsel and mark the slots as -1.  System-wide
 * evsels have a single thread column.
 */
void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	if (evsel->system_wide)
		nthreads = 1;

	for (cpu = 0; cpu < ncpus; cpu++)
		for (thread = 0; thread < nthreads; ++thread) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
}
1026
1027void perf_evsel__exit(struct perf_evsel *evsel)
1028{
1029 assert(list_empty(&evsel->node));
1030 assert(evsel->evlist == NULL);
1031 perf_evsel__free_fd(evsel);
1032 perf_evsel__free_id(evsel);
1033 perf_evsel__free_config_terms(evsel);
1034 close_cgroup(evsel->cgrp);
1035 cpu_map__put(evsel->cpus);
1036 cpu_map__put(evsel->own_cpus);
1037 thread_map__put(evsel->threads);
1038 zfree(&evsel->group_name);
1039 zfree(&evsel->name);
1040 perf_evsel__object.fini(evsel);
1041}
1042
/* Tear down the evsel's resources and free the evsel itself. */
void perf_evsel__delete(struct perf_evsel *evsel)
{
	perf_evsel__exit(evsel);
	free(evsel);
}
1048
/*
 * Turn an absolute counter reading into a delta against the previous
 * reading, stashing the new absolute value as the next baseline.
 * cpu == -1 selects the aggregated slot.  No-op when no baseline
 * storage (prev_raw_counts) has been allocated.
 */
void perf_evsel__compute_deltas(struct perf_evsel *evsel, int cpu, int thread,
				struct perf_counts_values *count)
{
	struct perf_counts_values tmp;

	if (!evsel->prev_raw_counts)
		return;

	if (cpu == -1) {
		tmp = evsel->prev_raw_counts->aggr;
		evsel->prev_raw_counts->aggr = *count;
	} else {
		tmp = *perf_counts(evsel->prev_raw_counts, cpu, thread);
		*perf_counts(evsel->prev_raw_counts, cpu, thread) = *count;
	}

	count->val = count->val - tmp.val;
	count->ena = count->ena - tmp.ena;
	count->run = count->run - tmp.run;
}
1069
1070void perf_counts_values__scale(struct perf_counts_values *count,
1071 bool scale, s8 *pscaled)
1072{
1073 s8 scaled = 0;
1074
1075 if (scale) {
1076 if (count->run == 0) {
1077 scaled = -1;
1078 count->val = 0;
1079 } else if (count->run < count->ena) {
1080 scaled = 1;
1081 count->val = (u64)((double) count->val * count->ena / count->run + 0.5);
1082 }
1083 } else
1084 count->ena = count->run = 0;
1085
1086 if (pscaled)
1087 *pscaled = scaled;
1088}
1089
/*
 * read() the raw counter values for one (cpu, thread) into @count.
 * Returns 0 on success, -EINVAL if that fd was never opened, or -errno
 * from the failed read.
 */
int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
		     struct perf_counts_values *count)
{
	memset(count, 0, sizeof(*count));

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (readn(FD(evsel, cpu, thread), count, sizeof(*count)) < 0)
		return -errno;

	return 0;
}
1103
/*
 * Read one (cpu, thread) counter into evsel->counts, lazily allocating
 * the counts storage, computing the delta against the previous reading
 * and (optionally) scaling by ena/run.  @scale implies the fd was opened
 * with the TOTAL_TIME_ENABLED/RUNNING read_format (3 u64s vs. 1).
 */
int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale)
{
	struct perf_counts_values count;
	size_t nv = scale ? 3 : 1;

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1, thread + 1) < 0)
		return -ENOMEM;

	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
		return -errno;

	perf_evsel__compute_deltas(evsel, cpu, thread, &count);
	perf_counts_values__scale(&count, scale, NULL);
	*perf_counts(evsel->counts, cpu, thread) = count;
	return 0;
}
1124
/*
 * group_fd argument for perf_event_open(): -1 for a group leader, else
 * the already-open fd of the leader for the same (cpu, thread).  The
 * BUG_ONs document the invariant that leaders are opened before members.
 */
static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread)
{
	struct perf_evsel *leader = evsel->leader;
	int fd;

	if (perf_evsel__is_group_leader(evsel))
		return -1;

	/*
	 * Leader must be already processed/open,
	 * if not it's a bug.
	 */
	BUG_ON(!leader->fd);

	fd = FD(leader, cpu, thread);
	BUG_ON(fd == -1);

	return fd;
}
1144
/* One flag bit and its printable name; tables end with name == NULL. */
struct bit_names {
	int bit;
	const char *name;
};
1149
1150static void __p_bits(char *buf, size_t size, u64 value, struct bit_names *bits)
1151{
1152 bool first_bit = true;
1153 int i = 0;
1154
1155 do {
1156 if (value & bits[i].bit) {
1157 buf += scnprintf(buf, size, "%s%s", first_bit ? "" : "|", bits[i].name);
1158 first_bit = false;
1159 }
1160 } while (bits[++i].name != NULL);
1161}
1162
/* Render a sample_type mask as '|'-joined PERF_SAMPLE_* names. */
static void __p_sample_type(char *buf, size_t size, u64 value)
{
#define bit_name(n) { PERF_SAMPLE_##n, #n }
	struct bit_names bits[] = {
		bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(ADDR),
		bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU),
		bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW),
		bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
		bit_name(IDENTIFIER), bit_name(REGS_INTR),
		{ .name = NULL, }
	};
#undef bit_name
	__p_bits(buf, size, value, bits);
}
1177
/* Render a read_format mask as '|'-joined PERF_FORMAT_* names. */
static void __p_read_format(char *buf, size_t size, u64 value)
{
#define bit_name(n) { PERF_FORMAT_##n, #n }
	struct bit_names bits[] = {
		bit_name(TOTAL_TIME_ENABLED), bit_name(TOTAL_TIME_RUNNING),
		bit_name(ID), bit_name(GROUP),
		{ .name = NULL, }
	};
#undef bit_name
	__p_bits(buf, size, value, bits);
}
1189
#define BUF_SIZE 1024

/* Field formatters for perf_event_attr__fprintf(); each fills 'buf'. */
#define p_hex(val)		snprintf(buf, BUF_SIZE, "%#"PRIx64, (uint64_t)(val))
#define p_unsigned(val)		snprintf(buf, BUF_SIZE, "%"PRIu64, (uint64_t)(val))
#define p_signed(val)		snprintf(buf, BUF_SIZE, "%"PRId64, (int64_t)(val))
#define p_sample_type(val)	__p_sample_type(buf, BUF_SIZE, val)
#define p_read_format(val)	__p_read_format(buf, BUF_SIZE, val)

/* Print attr field _f under label _n, but only when the field is non-zero. */
#define PRINT_ATTRn(_n, _f, _p)				\
do {							\
	if (attr->_f) {					\
		_p(attr->_f);				\
		ret += attr__fprintf(fp, _n, buf, priv);\
	}						\
} while (0)

/* Shorthand: the label is the field name itself. */
#define PRINT_ATTRf(_f, _p)	PRINT_ATTRn(#_f, _f, _p)
1207
/*
 * Dump every non-zero perf_event_attr field through the @attr__fprintf
 * callback (one call per field, with a formatted value string).  Returns
 * the accumulated callback return values.
 */
int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
			     attr__fprintf_f attr__fprintf, void *priv)
{
	char buf[BUF_SIZE];
	int ret = 0;

	PRINT_ATTRf(type, p_unsigned);
	PRINT_ATTRf(size, p_unsigned);
	PRINT_ATTRf(config, p_hex);
	PRINT_ATTRn("{ sample_period, sample_freq }", sample_period, p_unsigned);
	PRINT_ATTRf(sample_type, p_sample_type);
	PRINT_ATTRf(read_format, p_read_format);

	PRINT_ATTRf(disabled, p_unsigned);
	PRINT_ATTRf(inherit, p_unsigned);
	PRINT_ATTRf(pinned, p_unsigned);
	PRINT_ATTRf(exclusive, p_unsigned);
	PRINT_ATTRf(exclude_user, p_unsigned);
	PRINT_ATTRf(exclude_kernel, p_unsigned);
	PRINT_ATTRf(exclude_hv, p_unsigned);
	PRINT_ATTRf(exclude_idle, p_unsigned);
	PRINT_ATTRf(mmap, p_unsigned);
	PRINT_ATTRf(comm, p_unsigned);
	PRINT_ATTRf(freq, p_unsigned);
	PRINT_ATTRf(inherit_stat, p_unsigned);
	PRINT_ATTRf(enable_on_exec, p_unsigned);
	PRINT_ATTRf(task, p_unsigned);
	PRINT_ATTRf(watermark, p_unsigned);
	PRINT_ATTRf(precise_ip, p_unsigned);
	PRINT_ATTRf(mmap_data, p_unsigned);
	PRINT_ATTRf(sample_id_all, p_unsigned);
	PRINT_ATTRf(exclude_host, p_unsigned);
	PRINT_ATTRf(exclude_guest, p_unsigned);
	PRINT_ATTRf(exclude_callchain_kernel, p_unsigned);
	PRINT_ATTRf(exclude_callchain_user, p_unsigned);
	PRINT_ATTRf(mmap2, p_unsigned);
	PRINT_ATTRf(comm_exec, p_unsigned);
	PRINT_ATTRf(use_clockid, p_unsigned);
	PRINT_ATTRf(context_switch, p_unsigned);

	/* union members share storage, hence the combined labels */
	PRINT_ATTRn("{ wakeup_events, wakeup_watermark }", wakeup_events, p_unsigned);
	PRINT_ATTRf(bp_type, p_unsigned);
	PRINT_ATTRn("{ bp_addr, config1 }", bp_addr, p_hex);
	PRINT_ATTRn("{ bp_len, config2 }", bp_len, p_hex);
	PRINT_ATTRf(sample_regs_user, p_hex);
	PRINT_ATTRf(sample_stack_user, p_unsigned);
	PRINT_ATTRf(clockid, p_signed);
	PRINT_ATTRf(sample_regs_intr, p_hex);
	PRINT_ATTRf(aux_watermark, p_unsigned);

	return ret;
}
1260
/*
 * attr__fprintf_f callback used when dumping the attr before opening:
 * prints one "name value" line; the context pointer is unused.
 */
static int __open_attr__fprintf(FILE *fp, const char *name, const char *val,
				void *priv __attribute__((unused)))
{
	int printed = fprintf(fp, " %-32s %s\n", name, val);

	return printed;
}
1266
/*
 * Open the event on every requested cpu/thread combination via
 * sys_perf_event_open(), retrying with progressively older attr layouts
 * when the running kernel rejects features it does not know about.
 * Returns 0 on success or a negative errno; on failure every fd opened
 * so far is closed again.
 */
static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
			      struct thread_map *threads)
{
	int cpu, thread, nthreads;
	unsigned long flags = PERF_FLAG_FD_CLOEXEC;
	int pid = -1, err;
	enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE;

	/* A system-wide event is opened once per cpu, not once per thread. */
	if (evsel->system_wide)
		nthreads = 1;
	else
		nthreads = threads->nr;

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, nthreads) < 0)
		return -ENOMEM;

	/* With a cgroup, the cgroup fd is passed in place of a pid. */
	if (evsel->cgrp) {
		flags |= PERF_FLAG_PID_CGROUP;
		pid = evsel->cgrp->fd;
	}

fallback_missing_features:
	/*
	 * Strip out whatever a previous open attempt proved this kernel
	 * does not support, then try again.
	 */
	if (perf_missing_features.clockid_wrong)
		evsel->attr.clockid = CLOCK_MONOTONIC; /* fallback clock */
	if (perf_missing_features.clockid) {
		evsel->attr.use_clockid = 0;
		evsel->attr.clockid = 0;
	}
	if (perf_missing_features.cloexec)
		flags &= ~(unsigned long)PERF_FLAG_FD_CLOEXEC;
	if (perf_missing_features.mmap2)
		evsel->attr.mmap2 = 0;
	if (perf_missing_features.exclude_guest)
		evsel->attr.exclude_guest = evsel->attr.exclude_host = 0;
retry_sample_id:
	if (perf_missing_features.sample_id_all)
		evsel->attr.sample_id_all = 0;

	if (verbose >= 2) {
		fprintf(stderr, "%.60s\n", graph_dotted_line);
		fprintf(stderr, "perf_event_attr:\n");
		perf_event_attr__fprintf(stderr, &evsel->attr, __open_attr__fprintf, NULL);
		fprintf(stderr, "%.60s\n", graph_dotted_line);
	}

	for (cpu = 0; cpu < cpus->nr; cpu++) {

		for (thread = 0; thread < nthreads; thread++) {
			int group_fd;

			if (!evsel->cgrp && !evsel->system_wide)
				pid = thread_map__pid(threads, thread);

			group_fd = get_group_fd(evsel, cpu, thread);
retry_open:
			pr_debug2("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx\n",
				  pid, cpus->map[cpu], group_fd, flags);

			FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
								     pid,
								     cpus->map[cpu],
								     group_fd, flags);
			if (FD(evsel, cpu, thread) < 0) {
				err = -errno;
				pr_debug2("sys_perf_event_open failed, error %d\n",
					  err);
				goto try_fallback;
			}
			set_rlimit = NO_CHANGE;

			/*
			 * The open may have succeeded only because the
			 * clockid request was dropped above; in that case
			 * fail anyway so perf_evsel__open_strerror() can
			 * tell the user the clockid was not usable.
			 */
			if (perf_missing_features.clockid ||
			    perf_missing_features.clockid_wrong) {
				err = -EINVAL;
				goto out_close;
			}
		}
	}

	return 0;

try_fallback:
	/*
	 * Out of file descriptors: first raise the soft RLIMIT_NOFILE to
	 * the hard limit, then try bumping the hard limit itself (which
	 * only works with sufficient privilege), then retry the open.
	 */
	if (err == -EMFILE && set_rlimit < INCREASED_MAX) {
		struct rlimit l;
		int old_errno = errno;

		if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
			if (set_rlimit == NO_CHANGE)
				l.rlim_cur = l.rlim_max;
			else {
				l.rlim_cur = l.rlim_max + 1000;
				l.rlim_max = l.rlim_cur;
			}
			if (setrlimit(RLIMIT_NOFILE, &l) == 0) {
				set_rlimit++;
				errno = old_errno;
				goto retry_open;
			}
		}
		errno = old_errno;
	}

	/* Only probe for missing features on the very first failed open. */
	if (err != -EINVAL || cpu > 0 || thread > 0)
		goto out_close;

	/*
	 * EINVAL on the first open: assume the newest attr feature we
	 * requested is unknown to this kernel, mark it missing and retry.
	 * Checks go newest-feature-first so each retry peels off one more.
	 */
	if (!perf_missing_features.clockid_wrong && evsel->attr.use_clockid) {
		perf_missing_features.clockid_wrong = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.clockid && evsel->attr.use_clockid) {
		perf_missing_features.clockid = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.cloexec && (flags & PERF_FLAG_FD_CLOEXEC)) {
		perf_missing_features.cloexec = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.mmap2 && evsel->attr.mmap2) {
		perf_missing_features.mmap2 = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.exclude_guest &&
		   (evsel->attr.exclude_guest || evsel->attr.exclude_host)) {
		perf_missing_features.exclude_guest = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.sample_id_all) {
		perf_missing_features.sample_id_all = true;
		goto retry_sample_id;
	}

out_close:
	/*
	 * Close everything opened so far: the first 'thread' fds of the
	 * current cpu, then all nthreads fds of every earlier cpu.
	 */
	do {
		while (--thread >= 0) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
		thread = nthreads;
	} while (--cpu >= 0);
	return err;
}
1416
1417void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads)
1418{
1419 if (evsel->fd == NULL)
1420 return;
1421
1422 perf_evsel__close_fd(evsel, ncpus, nthreads);
1423 perf_evsel__free_fd(evsel);
1424}
1425
/*
 * Single-entry dummy cpu map meaning "any cpu" (-1), used when the
 * caller passes no cpu map.  The cpus[1] member provides the storage
 * behind the map's entry array — assumes struct cpu_map ends in a
 * trailing/flexible array; confirm against cpumap.h.
 */
static struct {
	struct cpu_map map;
	int cpus[1];
} empty_cpu_map = {
	.map.nr = 1,
	.cpus = { -1, },
};
1433
/*
 * Single-entry dummy thread map meaning "any thread" (-1), used when
 * the caller passes no thread map.  Mirrors empty_cpu_map: threads[1]
 * backs the map's entry array — assumes struct thread_map ends in a
 * trailing/flexible array; confirm against thread_map.h.
 */
static struct {
	struct thread_map map;
	int threads[1];
} empty_thread_map = {
	.map.nr = 1,
	.threads = { -1, },
};
1441
1442int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
1443 struct thread_map *threads)
1444{
1445 if (cpus == NULL) {
1446
1447 cpus = &empty_cpu_map.map;
1448 }
1449
1450 if (threads == NULL)
1451 threads = &empty_thread_map.map;
1452
1453 return __perf_evsel__open(evsel, cpus, threads);
1454}
1455
/* Open the event per cpu only, using the dummy "any thread" map. */
int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
			     struct cpu_map *cpus)
{
	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map);
}
1461
/* Open the event per thread only, using the dummy "any cpu" map. */
int perf_evsel__open_per_thread(struct perf_evsel *evsel,
				struct thread_map *threads)
{
	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads);
}
1467
/*
 * Parse the sample_id_all trailer appended to non-SAMPLE events.  The
 * id fields sit at the *end* of the record, so start at the last u64
 * and walk backwards, consuming fields in the reverse of the order the
 * kernel wrote them.
 */
static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel,
				       const union perf_event *event,
				       struct perf_sample *sample)
{
	u64 type = evsel->attr.sample_type;
	const u64 *array = event->sample.array;
	bool swapped = evsel->needs_swap;
	union u64_swap u;

	/* Point at the last u64 of the record. */
	array += ((event->header.size -
		   sizeof(event->header)) / sizeof(u64)) - 1;

	if (type & PERF_SAMPLE_IDENTIFIER) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val64 = *array;
		if (swapped) {
			/* undo the byte-swap of the u64, then swap the u32 we use */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		sample->cpu = u.val32[0];
		array--;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		sample->stream_id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_ID) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TIME) {
		sample->time = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo the byte-swap of the u64, then swap each u32 half */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		sample->pid = u.val32[0];
		sample->tid = u.val32[1];
		array--;
	}

	return 0;
}
1528
1529static inline bool overflow(const void *endp, u16 max_size, const void *offset,
1530 u64 size)
1531{
1532 return size > max_size || offset + size > endp;
1533}
1534
/*
 * Return -EFAULT from the enclosing function if reading 'size' bytes at
 * 'offset' would overrun the event.  Relies on 'endp' being in scope at
 * the expansion site (as set up in perf_evsel__parse_sample()).
 */
#define OVERFLOW_CHECK(offset, size, max_size)				\
	do {								\
		if (overflow(endp, (max_size), (offset), (size)))	\
			return -EFAULT;					\
	} while (0)

/* Shorthand for the common case: bounds-check a single u64 read. */
#define OVERFLOW_CHECK_u64(offset) \
	OVERFLOW_CHECK(offset, sizeof(u64), sizeof(u64))
1543
/*
 * Decode a raw perf event record into 'data' according to the evsel's
 * sample_type.  Fields are consumed front-to-back in the exact order
 * the kernel emits them; variable-sized fields are bounds-checked with
 * OVERFLOW_CHECK before being referenced in place (no copies are made:
 * pointers in 'data' alias the event buffer).
 * Returns 0 on success, -EFAULT on a malformed/truncated record.
 */
int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
			     struct perf_sample *data)
{
	u64 type = evsel->attr.sample_type;
	bool swapped = evsel->needs_swap;
	const u64 *array;
	u16 max_size = event->header.size;
	const void *endp = (void *)event + max_size;
	u64 sz;

	/*
	 * Scratch union used for cross-endian handling of u64 slots that
	 * actually hold a pair of u32 values (tid/pid, cpu, raw size).
	 */
	union u64_swap u;

	memset(data, 0, sizeof(*data));
	data->cpu = data->pid = data->tid = -1;
	data->stream_id = data->id = data->time = -1ULL;
	data->period = evsel->attr.sample_period;
	data->weight = 0;

	/* Non-SAMPLE events only carry the optional sample_id_all trailer. */
	if (event->header.type != PERF_RECORD_SAMPLE) {
		if (!evsel->attr.sample_id_all)
			return 0;
		return perf_evsel__parse_id_sample(evsel, event, data);
	}

	array = event->sample.array;

	/*
	 * evsel->sample_size covers the fixed-size fields (those in
	 * PERF_SAMPLE_MASK); make sure they all fit before reading them
	 * unchecked.  Everything after that is variable-sized and goes
	 * through OVERFLOW_CHECK.
	 */
	if (evsel->sample_size + sizeof(event->header) > event->header.size)
		return -EFAULT;

	data->id = -1ULL;
	if (type & PERF_SAMPLE_IDENTIFIER) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_IP) {
		data->ip = *array;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo the byte-swap of the u64, then swap each u32 half */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		data->pid = u.val32[0];
		data->tid = u.val32[1];
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		data->time = *array;
		array++;
	}

	data->addr = 0;
	if (type & PERF_SAMPLE_ADDR) {
		data->addr = *array;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		/* cpu lives in the low u32 of this slot */
		u.val64 = *array;
		if (swapped) {
			/* undo the byte-swap of the u64, then swap the u32 we use */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		data->cpu = u.val32[0];
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		data->period = *array;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		u64 read_format = evsel->attr.read_format;

		OVERFLOW_CHECK_u64(array);
		if (read_format & PERF_FORMAT_GROUP)
			data->read.group.nr = *array;
		else
			data->read.one.value = *array;

		array++;

		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
			OVERFLOW_CHECK_u64(array);
			data->read.time_enabled = *array;
			array++;
		}

		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
			OVERFLOW_CHECK_u64(array);
			data->read.time_running = *array;
			array++;
		}

		/* group read: nr entries of { value, id } follow in place */
		if (read_format & PERF_FORMAT_GROUP) {
			const u64 max_group_nr = UINT64_MAX /
					sizeof(struct sample_read_value);

			/* guard the nr * size multiplication against overflow */
			if (data->read.group.nr > max_group_nr)
				return -EFAULT;
			sz = data->read.group.nr *
			     sizeof(struct sample_read_value);
			OVERFLOW_CHECK(array, sz, max_size);
			data->read.group.values =
					(struct sample_read_value *)array;
			array = (void *)array + sz;
		} else {
			OVERFLOW_CHECK_u64(array);
			data->read.one.id = *array;
			array++;
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		const u64 max_callchain_nr = UINT64_MAX / sizeof(u64);

		OVERFLOW_CHECK_u64(array);
		data->callchain = (struct ip_callchain *)array++;
		/* guard the nr * sizeof(u64) multiplication against overflow */
		if (data->callchain->nr > max_callchain_nr)
			return -EFAULT;
		sz = data->callchain->nr * sizeof(u64);
		OVERFLOW_CHECK(array, sz, max_size);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		OVERFLOW_CHECK_u64(array);
		u.val64 = *array;
		/* raw data is opaque to us: its endianness cannot be fixed here */
		if (WARN_ONCE(swapped,
			      "Endianness of raw data not corrected!\n")) {
			/* undo the byte-swap of the u64, then swap each u32 half */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}
		data->raw_size = u.val32[0];
		/* the size is a u32; the payload starts right after it */
		array = (void *)array + sizeof(u32);

		OVERFLOW_CHECK(array, data->raw_size, max_size);
		data->raw_data = (void *)array;
		array = (void *)array + data->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		const u64 max_branch_nr = UINT64_MAX /
					  sizeof(struct branch_entry);

		OVERFLOW_CHECK_u64(array);
		data->branch_stack = (struct branch_stack *)array++;

		/* guard the nr * entry-size multiplication against overflow */
		if (data->branch_stack->nr > max_branch_nr)
			return -EFAULT;
		sz = data->branch_stack->nr * sizeof(struct branch_entry);
		OVERFLOW_CHECK(array, sz, max_size);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		OVERFLOW_CHECK_u64(array);
		data->user_regs.abi = *array;
		array++;

		/* abi == 0 means no registers were captured for this sample */
		if (data->user_regs.abi) {
			u64 mask = evsel->attr.sample_regs_user;

			/* one u64 per bit set in the requested register mask */
			sz = hweight_long(mask) * sizeof(u64);
			OVERFLOW_CHECK(array, sz, max_size);
			data->user_regs.mask = mask;
			data->user_regs.regs = (u64 *)array;
			array = (void *)array + sz;
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		OVERFLOW_CHECK_u64(array);
		sz = *array++;

		data->user_stack.offset = ((char *)(array - 1)
					  - (char *) event);

		if (!sz) {
			data->user_stack.size = 0;
		} else {
			/* dump area of sz bytes, then the actual dumped size */
			OVERFLOW_CHECK(array, sz, max_size);
			data->user_stack.data = (char *)array;
			array = (void *)array + sz;
			OVERFLOW_CHECK_u64(array);
			data->user_stack.size = *array++;
			if (WARN_ONCE(data->user_stack.size > sz,
				      "user stack dump failure\n"))
				return -EFAULT;
		}
	}

	data->weight = 0;
	if (type & PERF_SAMPLE_WEIGHT) {
		OVERFLOW_CHECK_u64(array);
		data->weight = *array;
		array++;
	}

	data->data_src = PERF_MEM_DATA_SRC_NONE;
	if (type & PERF_SAMPLE_DATA_SRC) {
		OVERFLOW_CHECK_u64(array);
		data->data_src = *array;
		array++;
	}

	data->transaction = 0;
	if (type & PERF_SAMPLE_TRANSACTION) {
		OVERFLOW_CHECK_u64(array);
		data->transaction = *array;
		array++;
	}

	data->intr_regs.abi = PERF_SAMPLE_REGS_ABI_NONE;
	if (type & PERF_SAMPLE_REGS_INTR) {
		OVERFLOW_CHECK_u64(array);
		data->intr_regs.abi = *array;
		array++;

		if (data->intr_regs.abi != PERF_SAMPLE_REGS_ABI_NONE) {
			u64 mask = evsel->attr.sample_regs_intr;

			/* one u64 per bit set in the requested register mask */
			sz = hweight_long(mask) * sizeof(u64);
			OVERFLOW_CHECK(array, sz, max_size);
			data->intr_regs.mask = mask;
			data->intr_regs.regs = (u64 *)array;
			array = (void *)array + sz;
		}
	}

	return 0;
}
1810
/*
 * Compute the on-disk size of a sample record for the given sample_type
 * and read_format.  Must account for fields in exactly the same order
 * and layout that perf_event__synthesize_sample() writes them.
 */
size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
				     u64 read_format)
{
	size_t sz, result = sizeof(struct sample_event);

	if (type & PERF_SAMPLE_IDENTIFIER)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_IP)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TIME)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_ADDR)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_ID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_STREAM_ID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_CPU)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_PERIOD)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_READ) {
		result += sizeof(u64);
		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
			result += sizeof(u64);
		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
			result += sizeof(u64);

		/* group read: one { value, id } pair per member, else one id */
		if (read_format & PERF_FORMAT_GROUP) {
			sz = sample->read.group.nr *
			     sizeof(struct sample_read_value);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		/* nr itself plus nr entries */
		sz = (sample->callchain->nr + 1) * sizeof(u64);
		result += sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		/* u32 size prefix followed by the raw payload */
		result += sizeof(u32);
		result += sample->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
		sz += sizeof(u64); /* the nr field */
		result += sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		if (sample->user_regs.abi) {
			/* abi word plus one u64 per set bit in the mask */
			result += sizeof(u64);
			sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
			result += sz;
		} else {
			/* just the abi word (zero: no registers captured) */
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		sz = sample->user_stack.size;
		result += sizeof(u64); /* requested dump size */
		if (sz) {
			result += sz;
			result += sizeof(u64); /* actual dumped size */
		}
	}

	if (type & PERF_SAMPLE_WEIGHT)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_DATA_SRC)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TRANSACTION)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_REGS_INTR) {
		if (sample->intr_regs.abi) {
			/* abi word plus one u64 per set bit in the mask */
			result += sizeof(u64);
			sz = hweight_long(sample->intr_regs.mask) * sizeof(u64);
			result += sz;
		} else {
			/* just the abi word (zero: no registers captured) */
			result += sizeof(u64);
		}
	}

	return result;
}
1915
/*
 * Serialize a struct perf_sample back into the wire format the kernel
 * uses, writing into 'event'.  The inverse of perf_evsel__parse_sample():
 * fields are emitted in the same order, and the caller must have sized
 * 'event' with perf_event__sample_event_size().
 */
int perf_event__synthesize_sample(union perf_event *event, u64 type,
				  u64 read_format,
				  const struct perf_sample *sample,
				  bool swapped)
{
	u64 *array;
	size_t sz;

	/*
	 * Scratch union for u64 slots holding two u32s; see the matching
	 * handling in perf_evsel__parse_sample().
	 */
	union u64_swap u;

	array = event->sample.array;

	if (type & PERF_SAMPLE_IDENTIFIER) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_IP) {
		*array = sample->ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val32[0] = sample->pid;
		u.val32[1] = sample->tid;
		if (swapped) {
			/*
			 * Inverse of the swap done when parsing: swap each
			 * u32 half, then the containing u64.
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
			u.val64 = bswap_64(u.val64);
		}

		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		*array = sample->time;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		*array = sample->addr;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		*array = sample->stream_id;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val32[0] = sample->cpu;
		if (swapped) {
			/*
			 * Inverse of the swap done when parsing: swap the
			 * used u32, then the containing u64.
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val64 = bswap_64(u.val64);
		}
		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		*array = sample->period;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		if (read_format & PERF_FORMAT_GROUP)
			*array = sample->read.group.nr;
		else
			*array = sample->read.one.value;
		array++;

		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
			*array = sample->read.time_enabled;
			array++;
		}

		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
			*array = sample->read.time_running;
			array++;
		}

		/* group read: copy the nr { value, id } pairs, else one id */
		if (read_format & PERF_FORMAT_GROUP) {
			sz = sample->read.group.nr *
			     sizeof(struct sample_read_value);
			memcpy(array, sample->read.group.values, sz);
			array = (void *)array + sz;
		} else {
			*array = sample->read.one.id;
			array++;
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		/* nr itself plus nr entries */
		sz = (sample->callchain->nr + 1) * sizeof(u64);
		memcpy(array, sample->callchain, sz);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		u.val32[0] = sample->raw_size;
		/* raw data is opaque to us: its endianness cannot be fixed here */
		if (WARN_ONCE(swapped,
			      "Endianness of raw data not corrected!\n")) {
			/*
			 * Inverse of the swap done when parsing: swap each
			 * u32 half, then the containing u64.
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
			u.val64 = bswap_64(u.val64);
		}
		*array = u.val64;
		/* the size prefix is a u32; the payload starts right after it */
		array = (void *)array + sizeof(u32);

		memcpy(array, sample->raw_data, sample->raw_size);
		array = (void *)array + sample->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
		sz += sizeof(u64); /* the nr field */
		memcpy(array, sample->branch_stack, sz);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		if (sample->user_regs.abi) {
			*array++ = sample->user_regs.abi;
			/* one u64 per set bit in the register mask */
			sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
			memcpy(array, sample->user_regs.regs, sz);
			array = (void *)array + sz;
		} else {
			*array++ = 0; /* abi 0: no registers captured */
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		sz = sample->user_stack.size;
		*array++ = sz;
		if (sz) {
			memcpy(array, sample->user_stack.data, sz);
			array = (void *)array + sz;
			*array++ = sz; /* trailing actual-dump size */
		}
	}

	if (type & PERF_SAMPLE_WEIGHT) {
		*array = sample->weight;
		array++;
	}

	if (type & PERF_SAMPLE_DATA_SRC) {
		*array = sample->data_src;
		array++;
	}

	if (type & PERF_SAMPLE_TRANSACTION) {
		*array = sample->transaction;
		array++;
	}

	if (type & PERF_SAMPLE_REGS_INTR) {
		if (sample->intr_regs.abi) {
			*array++ = sample->intr_regs.abi;
			/* one u64 per set bit in the register mask */
			sz = hweight_long(sample->intr_regs.mask) * sizeof(u64);
			memcpy(array, sample->intr_regs.regs, sz);
			array = (void *)array + sz;
		} else {
			*array++ = 0; /* abi 0: no registers captured */
		}
	}

	return 0;
}
2104
/*
 * Look up a field of this tracepoint evsel's format by name.
 * Returns NULL when the field does not exist.
 */
struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name)
{
	return pevent_find_field(evsel->tp_format, name);
}
2109
2110void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
2111 const char *name)
2112{
2113 struct format_field *field = perf_evsel__field(evsel, name);
2114 int offset;
2115
2116 if (!field)
2117 return NULL;
2118
2119 offset = field->offset;
2120
2121 if (field->flags & FIELD_IS_DYNAMIC) {
2122 offset = *(int *)(sample->raw_data + field->offset);
2123 offset &= 0xffff;
2124 }
2125
2126 return sample->raw_data + offset;
2127}
2128
2129u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
2130 const char *name)
2131{
2132 struct format_field *field = perf_evsel__field(evsel, name);
2133 void *ptr;
2134 u64 value;
2135
2136 if (!field)
2137 return 0;
2138
2139 ptr = sample->raw_data + field->offset;
2140
2141 switch (field->size) {
2142 case 1:
2143 return *(u8 *)ptr;
2144 case 2:
2145 value = *(u16 *)ptr;
2146 break;
2147 case 4:
2148 value = *(u32 *)ptr;
2149 break;
2150 case 8:
2151 memcpy(&value, ptr, sizeof(u64));
2152 break;
2153 default:
2154 return 0;
2155 }
2156
2157 if (!evsel->needs_swap)
2158 return value;
2159
2160 switch (field->size) {
2161 case 2:
2162 return bswap_16(value);
2163 case 4:
2164 return bswap_32(value);
2165 case 8:
2166 return bswap_64(value);
2167 default:
2168 return 0;
2169 }
2170
2171 return 0;
2172}
2173
/*
 * fprintf() wrapper that prefixes each printed item with a separator:
 * ":" before the very first item, "," before every later one.  Clears
 * *first after the first call.  Returns the number of characters
 * written, including the separator.
 */
static int comma_fprintf(FILE *fp, bool *first, const char *fmt, ...)
{
	va_list ap;
	int printed;

	if (*first) {
		printed = fprintf(fp, ":");
		*first = false;
	} else {
		printed = fprintf(fp, ",");
	}

	va_start(ap, fmt);
	printed += vfprintf(fp, fmt, ap);
	va_end(ap);

	return printed;
}
2191
/*
 * perf_event_attr__fprintf() callback for the one-line verbose listing:
 * emits ", name: val" pairs, with 'priv' pointing at the bool tracking
 * whether this is the first pair (which gets ":" instead of ",").
 */
static int __print_attr__fprintf(FILE *fp, const char *name, const char *val, void *priv)
{
	return comma_fprintf(fp, (bool *)priv, " %s: %s", name, val);
}
2196
/*
 * Print a one-line description of the evsel to 'fp' according to
 * 'details': either the event-group form "leader{member,...}", or the
 * event name optionally followed by the full attr dump (-v) or just the
 * sample freq/period.  Returns the number of characters printed
 * (including the trailing newline).
 */
int perf_evsel__fprintf(struct perf_evsel *evsel,
			struct perf_attr_details *details, FILE *fp)
{
	bool first = true;
	int printed = 0;

	if (details->event_group) {
		struct perf_evsel *pos;

		/* Members are printed by their leader; skip them here. */
		if (!perf_evsel__is_group_leader(evsel))
			return 0;

		if (evsel->nr_members > 1)
			printed += fprintf(fp, "%s{", evsel->group_name ?: "");

		printed += fprintf(fp, "%s", perf_evsel__name(evsel));
		for_each_group_member(pos, evsel)
			printed += fprintf(fp, ",%s", perf_evsel__name(pos));

		if (evsel->nr_members > 1)
			printed += fprintf(fp, "}");
		goto out;
	}

	printed += fprintf(fp, "%s", perf_evsel__name(evsel));

	if (details->verbose) {
		printed += perf_event_attr__fprintf(fp, &evsel->attr,
						    __print_attr__fprintf, &first);
	} else if (details->freq) {
		const char *term = "sample_freq";

		/* attr.freq selects frequency vs. period mode */
		if (!evsel->attr.freq)
			term = "sample_period";

		/* sample_freq and sample_period share a union in the attr */
		printed += comma_fprintf(fp, &first, " %s=%" PRIu64,
					 term, (u64)evsel->attr.sample_freq);
	}
out:
	fputc('\n', fp);
	return ++printed;
}
2239
2240bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
2241 char *msg, size_t msgsize)
2242{
2243 if ((err == ENOENT || err == ENXIO || err == ENODEV) &&
2244 evsel->attr.type == PERF_TYPE_HARDWARE &&
2245 evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
2246
2247
2248
2249
2250
2251
2252
2253
2254 scnprintf(msg, msgsize, "%s",
2255"The cycles event is not supported, trying to fall back to cpu-clock-ticks");
2256
2257 evsel->attr.type = PERF_TYPE_SOFTWARE;
2258 evsel->attr.config = PERF_COUNT_SW_CPU_CLOCK;
2259
2260 zfree(&evsel->name);
2261 return true;
2262 }
2263
2264 return false;
2265}
2266
/*
 * Format a user-facing explanation of why opening this evsel failed,
 * based on the errno value and what we know about the target and the
 * features this kernel is missing.  Returns the number of characters
 * written into 'msg'.
 */
int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
			      int err, char *msg, size_t size)
{
	char sbuf[STRERR_BUFSIZE];

	switch (err) {
	case EPERM:
	case EACCES:
		return scnprintf(msg, size,
		 "You may not have permission to collect %sstats.\n"
		 "Consider tweaking /proc/sys/kernel/perf_event_paranoid:\n"
		 " -1 - Not paranoid at all\n"
		 "  0 - Disallow raw tracepoint access for unpriv\n"
		 "  1 - Disallow cpu events for unpriv\n"
		 "  2 - Disallow kernel profiling for unpriv",
				 target->system_wide ? "system-wide " : "");
	case ENOENT:
		return scnprintf(msg, size, "The %s event is not supported.",
				 perf_evsel__name(evsel));
	case EMFILE:
		return scnprintf(msg, size, "%s",
			 "Too many events are opened.\n"
			 "Probably the maximum number of open file descriptors has been reached.\n"
			 "Hint: Try again after reducing the number of events.\n"
			 "Hint: Try increasing the limit with 'ulimit -n <limit>'");
	case ENODEV:
		if (target->cpu_list)
			return scnprintf(msg, size, "%s",
	 "No such device - did you specify an out-of-range profile CPU?\n");
		break;
	case EOPNOTSUPP:
		if (evsel->attr.precise_ip)
			return scnprintf(msg, size, "%s",
	"\'precise\' request may not be supported. Try removing 'p' modifier.");
#if defined(__i386__) || defined(__x86_64__)
		if (evsel->attr.type == PERF_TYPE_HARDWARE)
			return scnprintf(msg, size, "%s",
	"No hardware sampling interrupt available.\n"
	"No APIC? If so then you can boot the kernel with the \"lapic\" boot parameter to force-enable it.");
#endif
		break;
	case EBUSY:
		/* oprofile grabs the PMU exclusively */
		if (find_process("oprofiled"))
			return scnprintf(msg, size,
	"The PMU counters are busy/taken by another profiler.\n"
	"We found oprofile daemon running, please stop it and try again.");
		break;
	case EINVAL:
		/* EINVAL may stem from a clockid request we had to drop. */
		if (perf_missing_features.clockid)
			return scnprintf(msg, size, "clockid feature not supported.");
		if (perf_missing_features.clockid_wrong)
			return scnprintf(msg, size, "wrong clockid (%d).", clockid);
		break;
	default:
		break;
	}

	/* Generic catch-all message for everything not handled above. */
	return scnprintf(msg, size,
	"The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n"
	"/bin/dmesg may provide additional information.\n"
	"No CONFIG_PERF_EVENTS=y kernel support configured?\n",
			 err, strerror_r(err, sbuf, sizeof(sbuf)),
			 perf_evsel__name(evsel));
}
2331