#include <byteswap.h>
#include <errno.h>
#include <inttypes.h>
#include <linux/bitops.h>
#include <api/fs/fs.h>
#include <api/fs/tracing_path.h>
#include <traceevent/event-parse.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/zalloc.h>
#include <sys/ioctl.h>
#include <sys/resource.h>
#include <sys/types.h>
#include <dirent.h>
#include <stdlib.h>
#include <perf/evsel.h>
#include "asm/bug.h"
#include "bpf_counter.h"
#include "callchain.h"
#include "cgroup.h"
#include "counts.h"
#include "event.h"
#include "evsel.h"
#include "util/env.h"
#include "util/evsel_config.h"
#include "util/evsel_fprintf.h"
#include "evlist.h"
#include <perf/cpumap.h>
#include "thread_map.h"
#include "target.h"
#include "perf_regs.h"
#include "record.h"
#include "debug.h"
#include "trace-event.h"
#include "stat.h"
#include "string2.h"
#include "memswap.h"
#include "util.h"
#include "hashmap.h"
#include "pmu-hybrid.h"
#include "../perf-sys.h"
#include "util/parse-branch-options.h"
#include <internal/xyarray.h>
#include <internal/lib.h>

#include <linux/ctype.h>

struct perf_missing_features perf_missing_features;

static clockid_t clockid;

static int evsel__no_extra_init(struct evsel *evsel __maybe_unused)
{
	return 0;
}

void __weak test_attr__ready(void) { }

static void evsel__no_extra_fini(struct evsel *evsel __maybe_unused)
{
}

static struct {
	size_t	size;
	int	(*init)(struct evsel *evsel);
	void	(*fini)(struct evsel *evsel);
} perf_evsel__object = {
	.size = sizeof(struct evsel),
	.init = evsel__no_extra_init,
	.fini = evsel__no_extra_fini,
};

/*
 * Let tools embed 'struct evsel' in a larger, tool specific object and hook
 * extra init/fini work.  The requested size may only grow, never shrink.
 */
int evsel__object_config(size_t object_size, int (*init)(struct evsel *evsel),
			 void (*fini)(struct evsel *evsel))
{

	if (object_size == 0)
		goto set_methods;

	if (perf_evsel__object.size > object_size)
		return -EINVAL;

	perf_evsel__object.size = object_size;

set_methods:
	if (init != NULL)
		perf_evsel__object.init = init;

	if (fini != NULL)
		perf_evsel__object.fini = fini;

	return 0;
}

#define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y))

int __evsel__sample_size(u64 sample_type)
{
	u64 mask = sample_type & PERF_SAMPLE_MASK;
	int size = 0;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & (1ULL << i))
			size++;
	}

	size *= sizeof(u64);

	return size;
}
/**
 * __perf_evsel__calc_id_pos - calculate id_pos.
 * @sample_type: sample type
 *
 * This function returns the position of the event id (PERF_SAMPLE_ID or
 * PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of struct
 * perf_record_sample.
 */
static int __perf_evsel__calc_id_pos(u64 sample_type)
{
	int idx = 0;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		return 0;

	if (!(sample_type & PERF_SAMPLE_ID))
		return -1;

	if (sample_type & PERF_SAMPLE_IP)
		idx += 1;

	if (sample_type & PERF_SAMPLE_TID)
		idx += 1;

	if (sample_type & PERF_SAMPLE_TIME)
		idx += 1;

	if (sample_type & PERF_SAMPLE_ADDR)
		idx += 1;

	return idx;
}

/**
 * __perf_evsel__calc_is_pos - calculate is_pos.
 * @sample_type: sample type
 *
 * This function returns the position (counting backwards) of the event id
 * (PERF_SAMPLE_ID or PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if
 * sample_id_all is used there is an id sample appended to non-sample events.
 */
static int __perf_evsel__calc_is_pos(u64 sample_type)
{
	int idx = 1;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		return 1;

	if (!(sample_type & PERF_SAMPLE_ID))
		return -1;

	if (sample_type & PERF_SAMPLE_CPU)
		idx += 1;

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		idx += 1;

	return idx;
}

void evsel__calc_id_pos(struct evsel *evsel)
{
	evsel->id_pos = __perf_evsel__calc_id_pos(evsel->core.attr.sample_type);
	evsel->is_pos = __perf_evsel__calc_is_pos(evsel->core.attr.sample_type);
}

void __evsel__set_sample_bit(struct evsel *evsel,
			     enum perf_event_sample_format bit)
{
	if (!(evsel->core.attr.sample_type & bit)) {
		evsel->core.attr.sample_type |= bit;
		evsel->sample_size += sizeof(u64);
		evsel__calc_id_pos(evsel);
	}
}

void __evsel__reset_sample_bit(struct evsel *evsel,
			       enum perf_event_sample_format bit)
{
	if (evsel->core.attr.sample_type & bit) {
		evsel->core.attr.sample_type &= ~bit;
		evsel->sample_size -= sizeof(u64);
		evsel__calc_id_pos(evsel);
	}
}

void evsel__set_sample_id(struct evsel *evsel,
			  bool can_sample_identifier)
{
	if (can_sample_identifier) {
		evsel__reset_sample_bit(evsel, ID);
		evsel__set_sample_bit(evsel, IDENTIFIER);
	} else {
		evsel__set_sample_bit(evsel, ID);
	}
	evsel->core.attr.read_format |= PERF_FORMAT_ID;
}

/**
 * evsel__is_function_event - Return whether given evsel is a function
 * trace event
 *
 * @evsel - evsel selector to be tested
 *
 * Return %true if event is function trace event
 */
bool evsel__is_function_event(struct evsel *evsel)
{
#define FUNCTION_EVENT "ftrace:function"

	return evsel->name &&
	       !strncmp(FUNCTION_EVENT, evsel->name, sizeof(FUNCTION_EVENT));

#undef FUNCTION_EVENT
}
void evsel__init(struct evsel *evsel,
		 struct perf_event_attr *attr, int idx)
{
	perf_evsel__init(&evsel->core, attr, idx);
	evsel->tracking = !idx;
	evsel->unit = strdup("");
	evsel->scale = 1.0;
	evsel->max_events = ULONG_MAX;
	evsel->evlist = NULL;
	evsel->bpf_obj = NULL;
	evsel->bpf_fd = -1;
	INIT_LIST_HEAD(&evsel->config_terms);
	INIT_LIST_HEAD(&evsel->bpf_counter_list);
	perf_evsel__object.init(evsel);
	evsel->sample_size = __evsel__sample_size(attr->sample_type);
	evsel__calc_id_pos(evsel);
	evsel->cmdline_group_boundary = false;
	evsel->metric_expr = NULL;
	evsel->metric_name = NULL;
	evsel->metric_events = NULL;
	evsel->per_pkg_mask = NULL;
	evsel->collect_stat = false;
	evsel->pmu_name = NULL;
}

struct evsel *evsel__new_idx(struct perf_event_attr *attr, int idx)
{
	struct evsel *evsel = zalloc(perf_evsel__object.size);

	if (!evsel)
		return NULL;
	evsel__init(evsel, attr, idx);

	if (evsel__is_bpf_output(evsel)) {
		evsel->core.attr.sample_type |= (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
					    PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
		evsel->core.attr.sample_period = 1;
	}

	if (evsel__is_clock(evsel)) {
		free((char *)evsel->unit);
		evsel->unit = strdup("msec");
		evsel->scale = 1e-6;
	}

	return evsel;
}

static bool perf_event_can_profile_kernel(void)
{
	return perf_event_paranoid_check(1);
}

struct evsel *evsel__new_cycles(bool precise __maybe_unused, __u32 type, __u64 config)
{
	struct perf_event_attr attr = {
		.type	= type,
		.config	= config,
		.exclude_kernel	= !perf_event_can_profile_kernel(),
	};
	struct evsel *evsel;

	event_attr_init(&attr);

	/*
	 * Now let the usual logic to set up the perf_event_attr defaults
	 * to kick in when we return and before perf_evsel__open() is called.
	 */
	evsel = evsel__new(&attr);
	if (evsel == NULL)
		goto out;

	arch_evsel__fixup_new_cycles(&evsel->core.attr);

	evsel->precise_max = true;

	/* use asprintf() because free(evsel) assumes name is allocated */
	if (asprintf(&evsel->name, "cycles%s%s%.*s",
		     (attr.precise_ip || attr.exclude_kernel) ? ":" : "",
		     attr.exclude_kernel ? "u" : "",
		     attr.precise_ip ? attr.precise_ip + 1 : 0, "ppp") < 0)
		goto error_free;
out:
	return evsel;
error_free:
	evsel__delete(evsel);
	evsel = NULL;
	goto out;
}
int copy_config_terms(struct list_head *dst, struct list_head *src)
{
	struct evsel_config_term *pos, *tmp;

	list_for_each_entry(pos, src, list) {
		tmp = malloc(sizeof(*tmp));
		if (tmp == NULL)
			return -ENOMEM;

		*tmp = *pos;
		if (tmp->free_str) {
			tmp->val.str = strdup(pos->val.str);
			if (tmp->val.str == NULL) {
				free(tmp);
				return -ENOMEM;
			}
		}
		list_add_tail(&tmp->list, dst);
	}
	return 0;
}

static int evsel__copy_config_terms(struct evsel *dst, struct evsel *src)
{
	return copy_config_terms(&dst->config_terms, &src->config_terms);
}

/**
 * evsel__clone - create a new evsel copied from @orig
 * @orig: original evsel
 *
 * The assumption is that @orig is not configured nor opened yet.
 * So we only care about the attributes that can be set while it's parsed.
 */
struct evsel *evsel__clone(struct evsel *orig)
{
	struct evsel *evsel;

	BUG_ON(orig->core.fd);
	BUG_ON(orig->counts);
	BUG_ON(orig->priv);
	BUG_ON(orig->per_pkg_mask);

	/* cannot handle BPF objects for now */
	if (orig->bpf_obj)
		return NULL;

	evsel = evsel__new(&orig->core.attr);
	if (evsel == NULL)
		return NULL;

	evsel->core.cpus = perf_cpu_map__get(orig->core.cpus);
	evsel->core.own_cpus = perf_cpu_map__get(orig->core.own_cpus);
	evsel->core.threads = perf_thread_map__get(orig->core.threads);
	evsel->core.nr_members = orig->core.nr_members;
	evsel->core.system_wide = orig->core.system_wide;

	if (orig->name) {
		evsel->name = strdup(orig->name);
		if (evsel->name == NULL)
			goto out_err;
	}
	if (orig->group_name) {
		evsel->group_name = strdup(orig->group_name);
		if (evsel->group_name == NULL)
			goto out_err;
	}
	if (orig->pmu_name) {
		evsel->pmu_name = strdup(orig->pmu_name);
		if (evsel->pmu_name == NULL)
			goto out_err;
	}
	if (orig->filter) {
		evsel->filter = strdup(orig->filter);
		if (evsel->filter == NULL)
			goto out_err;
	}
	if (orig->metric_id) {
		evsel->metric_id = strdup(orig->metric_id);
		if (evsel->metric_id == NULL)
			goto out_err;
	}
	evsel->cgrp = cgroup__get(orig->cgrp);
	evsel->tp_format = orig->tp_format;
	evsel->handler = orig->handler;
	evsel->core.leader = orig->core.leader;

	evsel->max_events = orig->max_events;
	evsel->tool_event = orig->tool_event;
	free((char *)evsel->unit);
	evsel->unit = strdup(orig->unit);
	if (evsel->unit == NULL)
		goto out_err;

	evsel->scale = orig->scale;
	evsel->snapshot = orig->snapshot;
	evsel->per_pkg = orig->per_pkg;
	evsel->percore = orig->percore;
	evsel->precise_max = orig->precise_max;
	evsel->use_uncore_alias = orig->use_uncore_alias;
	evsel->is_libpfm_event = orig->is_libpfm_event;

	evsel->exclude_GH = orig->exclude_GH;
	evsel->sample_read = orig->sample_read;
	evsel->auto_merge_stats = orig->auto_merge_stats;
	evsel->collect_stat = orig->collect_stat;
	evsel->weak_group = orig->weak_group;
	evsel->use_config_name = orig->use_config_name;

	if (evsel__copy_config_terms(evsel, orig) < 0)
		goto out_err;

	return evsel;

out_err:
	evsel__delete(evsel);
	return NULL;
}

/*
 * Returns pointer with encoded error via <linux/err.h> interface.
 */
451struct evsel *evsel__newtp_idx(const char *sys, const char *name, int idx)
452{
453 struct evsel *evsel = zalloc(perf_evsel__object.size);
454 int err = -ENOMEM;
455
456 if (evsel == NULL) {
457 goto out_err;
458 } else {
459 struct perf_event_attr attr = {
460 .type = PERF_TYPE_TRACEPOINT,
461 .sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
462 PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
463 };
464
465 if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
466 goto out_free;
467
468 evsel->tp_format = trace_event__tp_format(sys, name);
469 if (IS_ERR(evsel->tp_format)) {
470 err = PTR_ERR(evsel->tp_format);
471 goto out_free;
472 }
473
474 event_attr_init(&attr);
475 attr.config = evsel->tp_format->id;
476 attr.sample_period = 1;
477 evsel__init(evsel, &attr, idx);
478 }
479
480 return evsel;
481
482out_free:
483 zfree(&evsel->name);
484 free(evsel);
485out_err:
486 return ERR_PTR(err);
487}
488
489const char *evsel__hw_names[PERF_COUNT_HW_MAX] = {
490 "cycles",
491 "instructions",
492 "cache-references",
493 "cache-misses",
494 "branches",
495 "branch-misses",
496 "bus-cycles",
497 "stalled-cycles-frontend",
498 "stalled-cycles-backend",
499 "ref-cycles",
500};
501
502char *evsel__bpf_counter_events;
503
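/*
 * Returns true if @name appears as a full, comma-separated token in the
 * user supplied evsel__bpf_counter_events list; a substring match inside
 * a longer event name does not count.
 */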
504bool evsel__match_bpf_counter_events(const char *name)
505{
506 int name_len;
507 bool match;
508 char *ptr;
509
510 if (!evsel__bpf_counter_events)
511 return false;
512
513 ptr = strstr(evsel__bpf_counter_events, name);
514 name_len = strlen(name);
515
516
517 match = (ptr != NULL) &&
518 ((ptr == evsel__bpf_counter_events) || (*(ptr - 1) == ',')) &&
519 ((*(ptr + name_len) == ',') || (*(ptr + name_len) == '\0'));
520
521 return match;
522}
523
524static const char *__evsel__hw_name(u64 config)
525{
526 if (config < PERF_COUNT_HW_MAX && evsel__hw_names[config])
527 return evsel__hw_names[config];
528
529 return "unknown-hardware";
530}
531
532static int evsel__add_modifiers(struct evsel *evsel, char *bf, size_t size)
533{
534 int colon = 0, r = 0;
535 struct perf_event_attr *attr = &evsel->core.attr;
536 bool exclude_guest_default = false;
537
538#define MOD_PRINT(context, mod) do { \
539 if (!attr->exclude_##context) { \
540 if (!colon) colon = ++r; \
541 r += scnprintf(bf + r, size - r, "%c", mod); \
542 } } while(0)
543
544 if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
545 MOD_PRINT(kernel, 'k');
546 MOD_PRINT(user, 'u');
547 MOD_PRINT(hv, 'h');
548 exclude_guest_default = true;
549 }
550
551 if (attr->precise_ip) {
552 if (!colon)
553 colon = ++r;
554 r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
555 exclude_guest_default = true;
556 }
557
558 if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
559 MOD_PRINT(host, 'H');
560 MOD_PRINT(guest, 'G');
561 }
562#undef MOD_PRINT
563 if (colon)
564 bf[colon - 1] = ':';
565 return r;
566}
567
568static int evsel__hw_name(struct evsel *evsel, char *bf, size_t size)
569{
570 int r = scnprintf(bf, size, "%s", __evsel__hw_name(evsel->core.attr.config));
571 return r + evsel__add_modifiers(evsel, bf + r, size - r);
572}
573
574const char *evsel__sw_names[PERF_COUNT_SW_MAX] = {
575 "cpu-clock",
576 "task-clock",
577 "page-faults",
578 "context-switches",
579 "cpu-migrations",
580 "minor-faults",
581 "major-faults",
582 "alignment-faults",
583 "emulation-faults",
584 "dummy",
585};
586
587static const char *__evsel__sw_name(u64 config)
588{
589 if (config < PERF_COUNT_SW_MAX && evsel__sw_names[config])
590 return evsel__sw_names[config];
591 return "unknown-software";
592}
593
594static int evsel__sw_name(struct evsel *evsel, char *bf, size_t size)
595{
596 int r = scnprintf(bf, size, "%s", __evsel__sw_name(evsel->core.attr.config));
597 return r + evsel__add_modifiers(evsel, bf + r, size - r);
598}
599
600static int __evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
601{
602 int r;
603
604 r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);
605
606 if (type & HW_BREAKPOINT_R)
607 r += scnprintf(bf + r, size - r, "r");
608
609 if (type & HW_BREAKPOINT_W)
610 r += scnprintf(bf + r, size - r, "w");
611
612 if (type & HW_BREAKPOINT_X)
613 r += scnprintf(bf + r, size - r, "x");
614
615 return r;
616}
617
618static int evsel__bp_name(struct evsel *evsel, char *bf, size_t size)
619{
620 struct perf_event_attr *attr = &evsel->core.attr;
621 int r = __evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);
622 return r + evsel__add_modifiers(evsel, bf + r, size - r);
623}
624
625const char *evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX][EVSEL__MAX_ALIASES] = {
626 { "L1-dcache", "l1-d", "l1d", "L1-data", },
627 { "L1-icache", "l1-i", "l1i", "L1-instruction", },
628 { "LLC", "L2", },
629 { "dTLB", "d-tlb", "Data-TLB", },
630 { "iTLB", "i-tlb", "Instruction-TLB", },
631 { "branch", "branches", "bpu", "btb", "bpc", },
632 { "node", },
633};
634
635const char *evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX][EVSEL__MAX_ALIASES] = {
636 { "load", "loads", "read", },
637 { "store", "stores", "write", },
638 { "prefetch", "prefetches", "speculative-read", "speculative-load", },
639};
640
641const char *evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX][EVSEL__MAX_ALIASES] = {
642 { "refs", "Reference", "ops", "access", },
643 { "misses", "miss", },
644};
645
646#define C(x) PERF_COUNT_HW_CACHE_##x
647#define CACHE_READ (1 << C(OP_READ))
648#define CACHE_WRITE (1 << C(OP_WRITE))
649#define CACHE_PREFETCH (1 << C(OP_PREFETCH))
650#define COP(x) (1 << x)
651
652
653
654
655
656
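/*
 * Bitmask of valid cache operations (read/write/prefetch) per cache level,
 * used by evsel__is_cache_op_valid() to reject impossible combinations
 * such as prefetches on the branch predictor.
 */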
657static unsigned long evsel__hw_cache_stat[C(MAX)] = {
658 [C(L1D)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
659 [C(L1I)] = (CACHE_READ | CACHE_PREFETCH),
660 [C(LL)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
661 [C(DTLB)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
662 [C(ITLB)] = (CACHE_READ),
663 [C(BPU)] = (CACHE_READ),
664 [C(NODE)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
665};
666
667bool evsel__is_cache_op_valid(u8 type, u8 op)
668{
669 if (evsel__hw_cache_stat[type] & COP(op))
670 return true;
671 else
672 return false;
673}
674
675int __evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result, char *bf, size_t size)
676{
677 if (result) {
678 return scnprintf(bf, size, "%s-%s-%s", evsel__hw_cache[type][0],
679 evsel__hw_cache_op[op][0],
680 evsel__hw_cache_result[result][0]);
681 }
682
683 return scnprintf(bf, size, "%s-%s", evsel__hw_cache[type][0],
684 evsel__hw_cache_op[op][1]);
685}
686
687static int __evsel__hw_cache_name(u64 config, char *bf, size_t size)
688{
689 u8 op, result, type = (config >> 0) & 0xff;
690 const char *err = "unknown-ext-hardware-cache-type";
691
692 if (type >= PERF_COUNT_HW_CACHE_MAX)
693 goto out_err;
694
695 op = (config >> 8) & 0xff;
696 err = "unknown-ext-hardware-cache-op";
697 if (op >= PERF_COUNT_HW_CACHE_OP_MAX)
698 goto out_err;
699
700 result = (config >> 16) & 0xff;
701 err = "unknown-ext-hardware-cache-result";
702 if (result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
703 goto out_err;
704
705 err = "invalid-cache";
706 if (!evsel__is_cache_op_valid(type, op))
707 goto out_err;
708
709 return __evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
710out_err:
711 return scnprintf(bf, size, "%s", err);
712}
713
714static int evsel__hw_cache_name(struct evsel *evsel, char *bf, size_t size)
715{
716 int ret = __evsel__hw_cache_name(evsel->core.attr.config, bf, size);
717 return ret + evsel__add_modifiers(evsel, bf + ret, size - ret);
718}
719
720static int evsel__raw_name(struct evsel *evsel, char *bf, size_t size)
721{
722 int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->core.attr.config);
723 return ret + evsel__add_modifiers(evsel, bf + ret, size - ret);
724}
725
726static int evsel__tool_name(char *bf, size_t size)
727{
728 int ret = scnprintf(bf, size, "duration_time");
729 return ret;
730}
731
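/*
 * Returns a human readable name for the event.  If no name was set, one is
 * synthesized from the attr type/config, cached in evsel->name and reused
 * on subsequent calls.
 */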
732const char *evsel__name(struct evsel *evsel)
733{
734 char bf[128];
735
736 if (!evsel)
737 goto out_unknown;
738
739 if (evsel->name)
740 return evsel->name;
741
742 switch (evsel->core.attr.type) {
743 case PERF_TYPE_RAW:
744 evsel__raw_name(evsel, bf, sizeof(bf));
745 break;
746
747 case PERF_TYPE_HARDWARE:
748 evsel__hw_name(evsel, bf, sizeof(bf));
749 break;
750
751 case PERF_TYPE_HW_CACHE:
752 evsel__hw_cache_name(evsel, bf, sizeof(bf));
753 break;
754
755 case PERF_TYPE_SOFTWARE:
756 if (evsel->tool_event)
757 evsel__tool_name(bf, sizeof(bf));
758 else
759 evsel__sw_name(evsel, bf, sizeof(bf));
760 break;
761
762 case PERF_TYPE_TRACEPOINT:
763 scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
764 break;
765
766 case PERF_TYPE_BREAKPOINT:
767 evsel__bp_name(evsel, bf, sizeof(bf));
768 break;
769
770 default:
771 scnprintf(bf, sizeof(bf), "unknown attr type: %d",
772 evsel->core.attr.type);
773 break;
774 }
775
776 evsel->name = strdup(bf);
777
778 if (evsel->name)
779 return evsel->name;
780out_unknown:
781 return "unknown";
782}
783
784const char *evsel__metric_id(const struct evsel *evsel)
785{
786 if (evsel->metric_id)
787 return evsel->metric_id;
788
789 if (evsel->core.attr.type == PERF_TYPE_SOFTWARE && evsel->tool_event)
790 return "duration_time";
791
792 return "unknown";
793}
794
795const char *evsel__group_name(struct evsel *evsel)
796{
797 return evsel->group_name ?: "anon group";
798}
799
800
801
802
803
804
805
806
807
808
809
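/*
 * Format "<group name> { leader, member, ... }" into @buf for the group
 * led by @evsel; when the leader was forced (not a user specified group)
 * the group name and braces are omitted.
 */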
810int evsel__group_desc(struct evsel *evsel, char *buf, size_t size)
811{
812 int ret = 0;
813 struct evsel *pos;
814 const char *group_name = evsel__group_name(evsel);
815
816 if (!evsel->forced_leader)
817 ret = scnprintf(buf, size, "%s { ", group_name);
818
819 ret += scnprintf(buf + ret, size - ret, "%s", evsel__name(evsel));
820
821 for_each_group_member(pos, evsel)
822 ret += scnprintf(buf + ret, size - ret, ", %s", evsel__name(pos));
823
824 if (!evsel->forced_leader)
825 ret += scnprintf(buf + ret, size - ret, " }");
826
827 return ret;
828}
829
830static void __evsel__config_callchain(struct evsel *evsel, struct record_opts *opts,
831 struct callchain_param *param)
832{
833 bool function = evsel__is_function_event(evsel);
834 struct perf_event_attr *attr = &evsel->core.attr;
835
836 evsel__set_sample_bit(evsel, CALLCHAIN);
837
838 attr->sample_max_stack = param->max_stack;
839
840 if (opts->kernel_callchains)
841 attr->exclude_callchain_user = 1;
842 if (opts->user_callchains)
843 attr->exclude_callchain_kernel = 1;
844 if (param->record_mode == CALLCHAIN_LBR) {
845 if (!opts->branch_stack) {
846 if (attr->exclude_user) {
847 pr_warning("LBR callstack option is only available "
848 "to get user callchain information. "
849 "Falling back to framepointers.\n");
850 } else {
851 evsel__set_sample_bit(evsel, BRANCH_STACK);
852 attr->branch_sample_type = PERF_SAMPLE_BRANCH_USER |
853 PERF_SAMPLE_BRANCH_CALL_STACK |
854 PERF_SAMPLE_BRANCH_NO_CYCLES |
855 PERF_SAMPLE_BRANCH_NO_FLAGS |
856 PERF_SAMPLE_BRANCH_HW_INDEX;
857 }
858 } else
859 pr_warning("Cannot use LBR callstack with branch stack. "
860 "Falling back to framepointers.\n");
861 }
862
863 if (param->record_mode == CALLCHAIN_DWARF) {
864 if (!function) {
865 evsel__set_sample_bit(evsel, REGS_USER);
866 evsel__set_sample_bit(evsel, STACK_USER);
867 if (opts->sample_user_regs && DWARF_MINIMAL_REGS != PERF_REGS_MASK) {
868 attr->sample_regs_user |= DWARF_MINIMAL_REGS;
869 pr_warning("WARNING: The use of --call-graph=dwarf may require all the user registers, "
870 "specifying a subset with --user-regs may render DWARF unwinding unreliable, "
871 "so the minimal registers set (IP, SP) is explicitly forced.\n");
872 } else {
873 attr->sample_regs_user |= PERF_REGS_MASK;
874 }
875 attr->sample_stack_user = param->dump_size;
876 attr->exclude_callchain_user = 1;
877 } else {
878 pr_info("Cannot use DWARF unwind for function trace event,"
879 " falling back to framepointers.\n");
880 }
881 }
882
883 if (function) {
884 pr_info("Disabling user space callchains for function trace event.\n");
885 attr->exclude_callchain_user = 1;
886 }
887}
888
889void evsel__config_callchain(struct evsel *evsel, struct record_opts *opts,
890 struct callchain_param *param)
891{
892 if (param->enabled)
893 return __evsel__config_callchain(evsel, opts, param);
894}
895
896static void evsel__reset_callgraph(struct evsel *evsel, struct callchain_param *param)
897{
898 struct perf_event_attr *attr = &evsel->core.attr;
899
900 evsel__reset_sample_bit(evsel, CALLCHAIN);
901 if (param->record_mode == CALLCHAIN_LBR) {
902 evsel__reset_sample_bit(evsel, BRANCH_STACK);
903 attr->branch_sample_type &= ~(PERF_SAMPLE_BRANCH_USER |
904 PERF_SAMPLE_BRANCH_CALL_STACK |
905 PERF_SAMPLE_BRANCH_HW_INDEX);
906 }
907 if (param->record_mode == CALLCHAIN_DWARF) {
908 evsel__reset_sample_bit(evsel, REGS_USER);
909 evsel__reset_sample_bit(evsel, STACK_USER);
910 }
911}
912
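/*
 * Apply the per-event config terms collected at parse time (e.g.
 * "cycles/period=100000,call-graph=dwarf/") on top of the attr that was
 * already set up from the global record options.
 */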
913static void evsel__apply_config_terms(struct evsel *evsel,
914 struct record_opts *opts, bool track)
915{
916 struct evsel_config_term *term;
917 struct list_head *config_terms = &evsel->config_terms;
918 struct perf_event_attr *attr = &evsel->core.attr;
919
920 struct callchain_param param = {
921 .record_mode = callchain_param.record_mode,
922 };
923 u32 dump_size = 0;
924 int max_stack = 0;
925 const char *callgraph_buf = NULL;
926
927 list_for_each_entry(term, config_terms, list) {
928 switch (term->type) {
929 case EVSEL__CONFIG_TERM_PERIOD:
930 if (!(term->weak && opts->user_interval != ULLONG_MAX)) {
931 attr->sample_period = term->val.period;
932 attr->freq = 0;
933 evsel__reset_sample_bit(evsel, PERIOD);
934 }
935 break;
936 case EVSEL__CONFIG_TERM_FREQ:
937 if (!(term->weak && opts->user_freq != UINT_MAX)) {
938 attr->sample_freq = term->val.freq;
939 attr->freq = 1;
940 evsel__set_sample_bit(evsel, PERIOD);
941 }
942 break;
943 case EVSEL__CONFIG_TERM_TIME:
944 if (term->val.time)
945 evsel__set_sample_bit(evsel, TIME);
946 else
947 evsel__reset_sample_bit(evsel, TIME);
948 break;
949 case EVSEL__CONFIG_TERM_CALLGRAPH:
950 callgraph_buf = term->val.str;
951 break;
952 case EVSEL__CONFIG_TERM_BRANCH:
953 if (term->val.str && strcmp(term->val.str, "no")) {
954 evsel__set_sample_bit(evsel, BRANCH_STACK);
955 parse_branch_str(term->val.str,
956 &attr->branch_sample_type);
957 } else
958 evsel__reset_sample_bit(evsel, BRANCH_STACK);
959 break;
960 case EVSEL__CONFIG_TERM_STACK_USER:
961 dump_size = term->val.stack_user;
962 break;
963 case EVSEL__CONFIG_TERM_MAX_STACK:
964 max_stack = term->val.max_stack;
965 break;
966 case EVSEL__CONFIG_TERM_MAX_EVENTS:
967 evsel->max_events = term->val.max_events;
968 break;
969 case EVSEL__CONFIG_TERM_INHERIT:
970
971
972
973
974
975
976 attr->inherit = term->val.inherit ? 1 : 0;
977 break;
978 case EVSEL__CONFIG_TERM_OVERWRITE:
979 attr->write_backward = term->val.overwrite ? 1 : 0;
980 break;
981 case EVSEL__CONFIG_TERM_DRV_CFG:
982 break;
983 case EVSEL__CONFIG_TERM_PERCORE:
984 break;
985 case EVSEL__CONFIG_TERM_AUX_OUTPUT:
986 attr->aux_output = term->val.aux_output ? 1 : 0;
987 break;
988 case EVSEL__CONFIG_TERM_AUX_SAMPLE_SIZE:
989
990 break;
991 case EVSEL__CONFIG_TERM_CFG_CHG:
992 break;
993 default:
994 break;
995 }
996 }
997
	/* User explicitly set per-event callgraph, clear the old setting and reset. */
	if ((callgraph_buf != NULL) || (dump_size > 0) || max_stack) {
		bool sample_address = false;

		if (max_stack) {
			param.max_stack = max_stack;
			if (callgraph_buf == NULL)
				callgraph_buf = "fp";
		}

		/* parse callgraph parameters */
		if (callgraph_buf != NULL) {
			if (!strcmp(callgraph_buf, "no")) {
				param.enabled = false;
				param.record_mode = CALLCHAIN_NONE;
			} else {
				param.enabled = true;
				if (parse_callchain_record(callgraph_buf, &param)) {
					pr_err("per-event callgraph setting for %s failed. "
					       "Apply callgraph global setting for it\n",
					       evsel->name);
					return;
				}
				if (param.record_mode == CALLCHAIN_DWARF)
					sample_address = true;
			}
		}
		if (dump_size > 0) {
			dump_size = round_up(dump_size, sizeof(u64));
			param.dump_size = dump_size;
		}

		/* If global callgraph set, clear it */
		if (callchain_param.enabled)
			evsel__reset_callgraph(evsel, &callchain_param);

		/* set perf-event callgraph */
		if (param.enabled) {
			if (sample_address) {
				evsel__set_sample_bit(evsel, ADDR);
				evsel__set_sample_bit(evsel, DATA_SRC);
				evsel->core.attr.mmap_data = track;
			}
			evsel__config_callchain(evsel, opts, &param);
		}
	}
}
1045
1046struct evsel_config_term *__evsel__get_config_term(struct evsel *evsel, enum evsel_term_type type)
1047{
1048 struct evsel_config_term *term, *found_term = NULL;
1049
1050 list_for_each_entry(term, &evsel->config_terms, list) {
1051 if (term->type == type)
1052 found_term = term;
1053 }
1054
1055 return found_term;
1056}
1057
1058void __weak arch_evsel__set_sample_weight(struct evsel *evsel)
1059{
1060 evsel__set_sample_bit(evsel, WEIGHT);
1061}
1062
1063void __weak arch_evsel__fixup_new_cycles(struct perf_event_attr *attr __maybe_unused)
1064{
1065}
1066
1067static void evsel__set_default_freq_period(struct record_opts *opts,
1068 struct perf_event_attr *attr)
1069{
1070 if (opts->freq) {
1071 attr->freq = 1;
1072 attr->sample_freq = opts->freq;
1073 } else {
1074 attr->sample_period = opts->default_interval;
1075 }
1076}
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
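/*
 * Central place where a struct perf_event_attr is prepared for opening:
 * sample bits, read_format, frequency/period, callchains, tracking events
 * (mmap/comm/task), clockid and per-event config terms are all derived
 * here from the record options and the global callchain parameters.
 */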
1106void evsel__config(struct evsel *evsel, struct record_opts *opts,
1107 struct callchain_param *callchain)
1108{
1109 struct evsel *leader = evsel__leader(evsel);
1110 struct perf_event_attr *attr = &evsel->core.attr;
1111 int track = evsel->tracking;
1112 bool per_cpu = opts->target.default_per_cpu && !opts->target.per_thread;
1113
1114 attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1;
1115 attr->inherit = !opts->no_inherit;
1116 attr->write_backward = opts->overwrite ? 1 : 0;
1117
1118 evsel__set_sample_bit(evsel, IP);
1119 evsel__set_sample_bit(evsel, TID);
1120
1121 if (evsel->sample_read) {
1122 evsel__set_sample_bit(evsel, READ);
1123
1124
1125
1126
1127
1128 evsel__set_sample_id(evsel, false);
1129
1130
1131
1132
1133
1134 if (leader->core.nr_members > 1) {
1135 attr->read_format |= PERF_FORMAT_GROUP;
1136 attr->inherit = 0;
1137 }
1138 }
1139
1140
1141
1142
1143
1144 if ((evsel->is_libpfm_event && !attr->sample_period) ||
1145 (!evsel->is_libpfm_event && (!attr->sample_period ||
1146 opts->user_freq != UINT_MAX ||
1147 opts->user_interval != ULLONG_MAX)))
1148 evsel__set_default_freq_period(opts, attr);
1149
1150
1151
1152
1153
1154 if (attr->freq)
1155 evsel__set_sample_bit(evsel, PERIOD);
1156
1157 if (opts->no_samples)
1158 attr->sample_freq = 0;
1159
1160 if (opts->inherit_stat) {
1161 evsel->core.attr.read_format |=
1162 PERF_FORMAT_TOTAL_TIME_ENABLED |
1163 PERF_FORMAT_TOTAL_TIME_RUNNING |
1164 PERF_FORMAT_ID;
1165 attr->inherit_stat = 1;
1166 }
1167
1168 if (opts->sample_address) {
1169 evsel__set_sample_bit(evsel, ADDR);
1170 attr->mmap_data = track;
1171 }
1172
1173
1174
1175
1176
1177
1178 if (evsel__is_function_event(evsel))
1179 evsel->core.attr.exclude_callchain_user = 1;
1180
1181 if (callchain && callchain->enabled && !evsel->no_aux_samples)
1182 evsel__config_callchain(evsel, opts, callchain);
1183
1184 if (opts->sample_intr_regs && !evsel->no_aux_samples &&
1185 !evsel__is_dummy_event(evsel)) {
1186 attr->sample_regs_intr = opts->sample_intr_regs;
1187 evsel__set_sample_bit(evsel, REGS_INTR);
1188 }
1189
1190 if (opts->sample_user_regs && !evsel->no_aux_samples &&
1191 !evsel__is_dummy_event(evsel)) {
1192 attr->sample_regs_user |= opts->sample_user_regs;
1193 evsel__set_sample_bit(evsel, REGS_USER);
1194 }
1195
1196 if (target__has_cpu(&opts->target) || opts->sample_cpu)
1197 evsel__set_sample_bit(evsel, CPU);
1198
1199
1200
1201
1202 if (opts->sample_time &&
1203 (!perf_missing_features.sample_id_all &&
1204 (!opts->no_inherit || target__has_cpu(&opts->target) || per_cpu ||
1205 opts->sample_time_set)))
1206 evsel__set_sample_bit(evsel, TIME);
1207
1208 if (opts->raw_samples && !evsel->no_aux_samples) {
1209 evsel__set_sample_bit(evsel, TIME);
1210 evsel__set_sample_bit(evsel, RAW);
1211 evsel__set_sample_bit(evsel, CPU);
1212 }
1213
1214 if (opts->sample_address)
1215 evsel__set_sample_bit(evsel, DATA_SRC);
1216
1217 if (opts->sample_phys_addr)
1218 evsel__set_sample_bit(evsel, PHYS_ADDR);
1219
1220 if (opts->no_buffering) {
1221 attr->watermark = 0;
1222 attr->wakeup_events = 1;
1223 }
1224 if (opts->branch_stack && !evsel->no_aux_samples) {
1225 evsel__set_sample_bit(evsel, BRANCH_STACK);
1226 attr->branch_sample_type = opts->branch_stack;
1227 }
1228
1229 if (opts->sample_weight)
1230 arch_evsel__set_sample_weight(evsel);
1231
1232 attr->task = track;
1233 attr->mmap = track;
1234 attr->mmap2 = track && !perf_missing_features.mmap2;
1235 attr->comm = track;
1236 attr->build_id = track && opts->build_id;
1237
1238
1239
1240
1241
1242 if (!opts->text_poke)
1243 attr->ksymbol = track && !perf_missing_features.ksymbol;
1244 attr->bpf_event = track && !opts->no_bpf_event && !perf_missing_features.bpf;
1245
1246 if (opts->record_namespaces)
1247 attr->namespaces = track;
1248
1249 if (opts->record_cgroup) {
1250 attr->cgroup = track && !perf_missing_features.cgroup;
1251 evsel__set_sample_bit(evsel, CGROUP);
1252 }
1253
1254 if (opts->sample_data_page_size)
1255 evsel__set_sample_bit(evsel, DATA_PAGE_SIZE);
1256
1257 if (opts->sample_code_page_size)
1258 evsel__set_sample_bit(evsel, CODE_PAGE_SIZE);
1259
1260 if (opts->record_switch_events)
1261 attr->context_switch = track;
1262
1263 if (opts->sample_transaction)
1264 evsel__set_sample_bit(evsel, TRANSACTION);
1265
1266 if (opts->running_time) {
1267 evsel->core.attr.read_format |=
1268 PERF_FORMAT_TOTAL_TIME_ENABLED |
1269 PERF_FORMAT_TOTAL_TIME_RUNNING;
1270 }
1271
1272
1273
1274
1275
1276
1277
1278 if (evsel__is_group_leader(evsel))
1279 attr->disabled = 1;
1280
1281
1282
1283
1284
1285 if (target__none(&opts->target) && evsel__is_group_leader(evsel) &&
1286 !opts->initial_delay)
1287 attr->enable_on_exec = 1;
1288
1289 if (evsel->immediate) {
1290 attr->disabled = 0;
1291 attr->enable_on_exec = 0;
1292 }
1293
1294 clockid = opts->clockid;
1295 if (opts->use_clockid) {
1296 attr->use_clockid = 1;
1297 attr->clockid = opts->clockid;
1298 }
1299
1300 if (evsel->precise_max)
1301 attr->precise_ip = 3;
1302
1303 if (opts->all_user) {
1304 attr->exclude_kernel = 1;
1305 attr->exclude_user = 0;
1306 }
1307
1308 if (opts->all_kernel) {
1309 attr->exclude_kernel = 0;
1310 attr->exclude_user = 1;
1311 }
1312
1313 if (evsel->core.own_cpus || evsel->unit)
1314 evsel->core.attr.read_format |= PERF_FORMAT_ID;
1315
1316
1317
1318
1319
1320 evsel__apply_config_terms(evsel, opts, track);
1321
1322 evsel->ignore_missing_thread = opts->ignore_missing_thread;
1323
1324
1325 if (opts->period_set) {
1326 if (opts->period)
1327 evsel__set_sample_bit(evsel, PERIOD);
1328 else
1329 evsel__reset_sample_bit(evsel, PERIOD);
1330 }
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340 if (evsel__is_dummy_event(evsel))
1341 evsel__reset_sample_bit(evsel, BRANCH_STACK);
1342}
1343
1344int evsel__set_filter(struct evsel *evsel, const char *filter)
1345{
1346 char *new_filter = strdup(filter);
1347
1348 if (new_filter != NULL) {
1349 free(evsel->filter);
1350 evsel->filter = new_filter;
1351 return 0;
1352 }
1353
1354 return -1;
1355}
1356
1357static int evsel__append_filter(struct evsel *evsel, const char *fmt, const char *filter)
1358{
1359 char *new_filter;
1360
1361 if (evsel->filter == NULL)
1362 return evsel__set_filter(evsel, filter);
1363
1364 if (asprintf(&new_filter, fmt, evsel->filter, filter) > 0) {
1365 free(evsel->filter);
1366 evsel->filter = new_filter;
1367 return 0;
1368 }
1369
1370 return -1;
1371}
1372
1373int evsel__append_tp_filter(struct evsel *evsel, const char *filter)
1374{
1375 return evsel__append_filter(evsel, "(%s) && (%s)", filter);
1376}
1377
1378int evsel__append_addr_filter(struct evsel *evsel, const char *filter)
1379{
1380 return evsel__append_filter(evsel, "%s,%s", filter);
1381}
1382
1383
1384int evsel__enable_cpu(struct evsel *evsel, int cpu_map_idx)
1385{
1386 return perf_evsel__enable_cpu(&evsel->core, cpu_map_idx);
1387}
1388
1389int evsel__enable(struct evsel *evsel)
1390{
1391 int err = perf_evsel__enable(&evsel->core);
1392
1393 if (!err)
1394 evsel->disabled = false;
1395 return err;
1396}
1397
1398
1399int evsel__disable_cpu(struct evsel *evsel, int cpu_map_idx)
1400{
1401 return perf_evsel__disable_cpu(&evsel->core, cpu_map_idx);
1402}
1403
1404int evsel__disable(struct evsel *evsel)
1405{
1406 int err = perf_evsel__disable(&evsel->core);
1407
1408
1409
1410
1411
1412
1413 if (!err)
1414 evsel->disabled = true;
1415
1416 return err;
1417}
1418
1419void free_config_terms(struct list_head *config_terms)
1420{
1421 struct evsel_config_term *term, *h;
1422
1423 list_for_each_entry_safe(term, h, config_terms, list) {
1424 list_del_init(&term->list);
1425 if (term->free_str)
1426 zfree(&term->val.str);
1427 free(term);
1428 }
1429}
1430
1431static void evsel__free_config_terms(struct evsel *evsel)
1432{
1433 free_config_terms(&evsel->config_terms);
1434}
1435
1436void evsel__exit(struct evsel *evsel)
1437{
1438 assert(list_empty(&evsel->core.node));
1439 assert(evsel->evlist == NULL);
1440 bpf_counter__destroy(evsel);
1441 evsel__free_counts(evsel);
1442 perf_evsel__free_fd(&evsel->core);
1443 perf_evsel__free_id(&evsel->core);
1444 evsel__free_config_terms(evsel);
1445 cgroup__put(evsel->cgrp);
1446 perf_cpu_map__put(evsel->core.cpus);
1447 perf_cpu_map__put(evsel->core.own_cpus);
1448 perf_thread_map__put(evsel->core.threads);
1449 zfree(&evsel->group_name);
1450 zfree(&evsel->name);
1451 zfree(&evsel->pmu_name);
1452 zfree(&evsel->unit);
1453 zfree(&evsel->metric_id);
1454 evsel__zero_per_pkg(evsel);
1455 hashmap__free(evsel->per_pkg_mask);
1456 evsel->per_pkg_mask = NULL;
1457 zfree(&evsel->metric_events);
1458 perf_evsel__object.fini(evsel);
1459}
1460
1461void evsel__delete(struct evsel *evsel)
1462{
1463 evsel__exit(evsel);
1464 free(evsel);
1465}
1466
1467void evsel__compute_deltas(struct evsel *evsel, int cpu_map_idx, int thread,
1468 struct perf_counts_values *count)
1469{
1470 struct perf_counts_values tmp;
1471
1472 if (!evsel->prev_raw_counts)
1473 return;
1474
1475 if (cpu_map_idx == -1) {
1476 tmp = evsel->prev_raw_counts->aggr;
1477 evsel->prev_raw_counts->aggr = *count;
1478 } else {
1479 tmp = *perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread);
1480 *perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread) = *count;
1481 }
1482
1483 count->val = count->val - tmp.val;
1484 count->ena = count->ena - tmp.ena;
1485 count->run = count->run - tmp.run;
1486}
1487
1488static int evsel__read_one(struct evsel *evsel, int cpu_map_idx, int thread)
1489{
1490 struct perf_counts_values *count = perf_counts(evsel->counts, cpu_map_idx, thread);
1491
1492 return perf_evsel__read(&evsel->core, cpu_map_idx, thread, count);
1493}
1494
1495static void evsel__set_count(struct evsel *counter, int cpu_map_idx, int thread,
1496 u64 val, u64 ena, u64 run)
1497{
1498 struct perf_counts_values *count;
1499
1500 count = perf_counts(counter->counts, cpu_map_idx, thread);
1501
1502 count->val = val;
1503 count->ena = ena;
1504 count->run = run;
1505
1506 perf_counts__set_loaded(counter->counts, cpu_map_idx, thread, true);
1507}
1508
1509static int evsel__process_group_data(struct evsel *leader, int cpu_map_idx, int thread, u64 *data)
1510{
1511 u64 read_format = leader->core.attr.read_format;
1512 struct sample_read_value *v;
1513 u64 nr, ena = 0, run = 0, i;
1514
1515 nr = *data++;
1516
1517 if (nr != (u64) leader->core.nr_members)
1518 return -EINVAL;
1519
1520 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1521 ena = *data++;
1522
1523 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1524 run = *data++;
1525
1526 v = (struct sample_read_value *) data;
1527
1528 evsel__set_count(leader, cpu_map_idx, thread, v[0].value, ena, run);
1529
1530 for (i = 1; i < nr; i++) {
1531 struct evsel *counter;
1532
1533 counter = evlist__id2evsel(leader->evlist, v[i].id);
1534 if (!counter)
1535 return -EINVAL;
1536
1537 evsel__set_count(counter, cpu_map_idx, thread, v[i].value, ena, run);
1538 }
1539
1540 return 0;
1541}
1542
1543static int evsel__read_group(struct evsel *leader, int cpu_map_idx, int thread)
1544{
1545 struct perf_stat_evsel *ps = leader->stats;
1546 u64 read_format = leader->core.attr.read_format;
1547 int size = perf_evsel__read_size(&leader->core);
1548 u64 *data = ps->group_data;
1549
1550 if (!(read_format & PERF_FORMAT_ID))
1551 return -EINVAL;
1552
1553 if (!evsel__is_group_leader(leader))
1554 return -EINVAL;
1555
1556 if (!data) {
1557 data = zalloc(size);
1558 if (!data)
1559 return -ENOMEM;
1560
1561 ps->group_data = data;
1562 }
1563
1564 if (FD(leader, cpu_map_idx, thread) < 0)
1565 return -EINVAL;
1566
1567 if (readn(FD(leader, cpu_map_idx, thread), data, size) <= 0)
1568 return -errno;
1569
1570 return evsel__process_group_data(leader, cpu_map_idx, thread, data);
1571}
1572
1573int evsel__read_counter(struct evsel *evsel, int cpu_map_idx, int thread)
1574{
1575 u64 read_format = evsel->core.attr.read_format;
1576
1577 if (read_format & PERF_FORMAT_GROUP)
1578 return evsel__read_group(evsel, cpu_map_idx, thread);
1579
1580 return evsel__read_one(evsel, cpu_map_idx, thread);
1581}
1582
1583int __evsel__read_on_cpu(struct evsel *evsel, int cpu_map_idx, int thread, bool scale)
1584{
1585 struct perf_counts_values count;
1586 size_t nv = scale ? 3 : 1;
1587
1588 if (FD(evsel, cpu_map_idx, thread) < 0)
1589 return -EINVAL;
1590
1591 if (evsel->counts == NULL && evsel__alloc_counts(evsel) < 0)
1592 return -ENOMEM;
1593
1594 if (readn(FD(evsel, cpu_map_idx, thread), &count, nv * sizeof(u64)) <= 0)
1595 return -errno;
1596
1597 evsel__compute_deltas(evsel, cpu_map_idx, thread, &count);
1598 perf_counts_values__scale(&count, scale, NULL);
1599 *perf_counts(evsel->counts, cpu_map_idx, thread) = count;
1600 return 0;
1601}
1602
1603static int evsel__match_other_cpu(struct evsel *evsel, struct evsel *other,
1604 int cpu_map_idx)
1605{
1606 struct perf_cpu cpu;
1607
1608 cpu = perf_cpu_map__cpu(evsel->core.cpus, cpu_map_idx);
1609 return perf_cpu_map__idx(other->core.cpus, cpu);
1610}
1611
1612static int evsel__hybrid_group_cpu_map_idx(struct evsel *evsel, int cpu_map_idx)
1613{
1614 struct evsel *leader = evsel__leader(evsel);
1615
1616 if ((evsel__is_hybrid(evsel) && !evsel__is_hybrid(leader)) ||
1617 (!evsel__is_hybrid(evsel) && evsel__is_hybrid(leader))) {
1618 return evsel__match_other_cpu(evsel, leader, cpu_map_idx);
1619 }
1620
1621 return cpu_map_idx;
1622}
1623
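/*
 * A group member passes the fd of its group leader to
 * sys_perf_event_open(); the leader itself (and events outside any group)
 * pass -1.  The leader must therefore already have been opened on this
 * cpu/thread.
 */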
1624static int get_group_fd(struct evsel *evsel, int cpu_map_idx, int thread)
1625{
1626 struct evsel *leader = evsel__leader(evsel);
1627 int fd;
1628
1629 if (evsel__is_group_leader(evsel))
1630 return -1;
1631
1632
1633
1634
1635
1636 BUG_ON(!leader->core.fd);
1637
1638 cpu_map_idx = evsel__hybrid_group_cpu_map_idx(evsel, cpu_map_idx);
1639 if (cpu_map_idx == -1)
1640 return -1;
1641
1642 fd = FD(leader, cpu_map_idx, thread);
1643 BUG_ON(fd == -1);
1644
1645 return fd;
1646}
1647
1648static void evsel__remove_fd(struct evsel *pos, int nr_cpus, int nr_threads, int thread_idx)
1649{
1650 for (int cpu = 0; cpu < nr_cpus; cpu++)
1651 for (int thread = thread_idx; thread < nr_threads - 1; thread++)
1652 FD(pos, cpu, thread) = FD(pos, cpu, thread + 1);
1653}
1654
1655static int update_fds(struct evsel *evsel,
1656 int nr_cpus, int cpu_map_idx,
1657 int nr_threads, int thread_idx)
1658{
1659 struct evsel *pos;
1660
1661 if (cpu_map_idx >= nr_cpus || thread_idx >= nr_threads)
1662 return -EINVAL;
1663
1664 evlist__for_each_entry(evsel->evlist, pos) {
1665 nr_cpus = pos != evsel ? nr_cpus : cpu_map_idx;
1666
1667 evsel__remove_fd(pos, nr_cpus, nr_threads, thread_idx);
1668
1669
1670
1671
1672
1673 if (pos == evsel)
1674 break;
1675 }
1676 return 0;
1677}
1678
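/*
 * A monitored thread can exit between the time the thread map was built
 * and the sys_perf_event_open() call, making the open fail with -ESRCH.
 * If the user asked for it (evsel->ignore_missing_thread), drop that
 * thread from the maps and fd arrays so the caller can retry.
 */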
1679static bool evsel__ignore_missing_thread(struct evsel *evsel,
1680 int nr_cpus, int cpu_map_idx,
1681 struct perf_thread_map *threads,
1682 int thread, int err)
1683{
1684 pid_t ignore_pid = perf_thread_map__pid(threads, thread);
1685
1686 if (!evsel->ignore_missing_thread)
1687 return false;
1688
1689
1690 if (evsel->core.system_wide)
1691 return false;
1692
1693
1694 if (err != -ESRCH)
1695 return false;
1696
1697
1698 if (threads->nr == 1)
1699 return false;
1700
1701
1702
1703
1704
1705 if (update_fds(evsel, nr_cpus, cpu_map_idx, threads->nr, thread))
1706 return false;
1707
1708 if (thread_map__remove(threads, thread))
1709 return false;
1710
1711 pr_warning("WARNING: Ignored open failure for pid %d\n",
1712 ignore_pid);
1713 return true;
1714}
1715
1716static int __open_attr__fprintf(FILE *fp, const char *name, const char *val,
1717 void *priv __maybe_unused)
1718{
1719 return fprintf(fp, " %-32s %s\n", name, val);
1720}
1721
1722static void display_attr(struct perf_event_attr *attr)
1723{
1724 if (verbose >= 2 || debug_peo_args) {
1725 fprintf(stderr, "%.60s\n", graph_dotted_line);
1726 fprintf(stderr, "perf_event_attr:\n");
1727 perf_event_attr__fprintf(stderr, attr, __open_attr__fprintf, NULL);
1728 fprintf(stderr, "%.60s\n", graph_dotted_line);
1729 }
1730}
1731
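/*
 * When precise_max is set, step attr.precise_ip down one level after each
 * failed open and restore the originally requested value once it reaches
 * zero, so later errors are reported against the requested precision.
 */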
1732bool evsel__precise_ip_fallback(struct evsel *evsel)
1733{
1734
1735 if (!evsel->precise_max)
1736 return false;
1737
1738
1739
1740
1741
1742 if (!evsel->core.attr.precise_ip) {
1743 evsel->core.attr.precise_ip = evsel->precise_ip_original;
1744 return false;
1745 }
1746
1747 if (!evsel->precise_ip_original)
1748 evsel->precise_ip_original = evsel->core.attr.precise_ip;
1749
1750 evsel->core.attr.precise_ip--;
1751 pr_debug2_peo("decreasing precise_ip by one (%d)\n", evsel->core.attr.precise_ip);
1752 display_attr(&evsel->core.attr);
1753 return true;
1754}
1755
1756static struct perf_cpu_map *empty_cpu_map;
1757static struct perf_thread_map *empty_thread_map;
1758
1759static int __evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus,
1760 struct perf_thread_map *threads)
1761{
1762 int nthreads;
1763
1764 if ((perf_missing_features.write_backward && evsel->core.attr.write_backward) ||
1765 (perf_missing_features.aux_output && evsel->core.attr.aux_output))
1766 return -EINVAL;
1767
1768 if (cpus == NULL) {
1769 if (empty_cpu_map == NULL) {
1770 empty_cpu_map = perf_cpu_map__dummy_new();
1771 if (empty_cpu_map == NULL)
1772 return -ENOMEM;
1773 }
1774
1775 cpus = empty_cpu_map;
1776 }
1777
1778 if (threads == NULL) {
1779 if (empty_thread_map == NULL) {
1780 empty_thread_map = thread_map__new_by_tid(-1);
1781 if (empty_thread_map == NULL)
1782 return -ENOMEM;
1783 }
1784
1785 threads = empty_thread_map;
1786 }
1787
1788 if (evsel->core.system_wide)
1789 nthreads = 1;
1790 else
1791 nthreads = threads->nr;
1792
1793 if (evsel->core.fd == NULL &&
1794 perf_evsel__alloc_fd(&evsel->core, perf_cpu_map__nr(cpus), nthreads) < 0)
1795 return -ENOMEM;
1796
1797 evsel->open_flags = PERF_FLAG_FD_CLOEXEC;
1798 if (evsel->cgrp)
1799 evsel->open_flags |= PERF_FLAG_PID_CGROUP;
1800
1801 return 0;
1802}
1803
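/*
 * Clear attr bits for features that earlier opens showed the running
 * kernel (or PMU) does not support, as recorded in perf_missing_features.
 */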
1804static void evsel__disable_missing_features(struct evsel *evsel)
1805{
1806 if (perf_missing_features.weight_struct) {
1807 evsel__set_sample_bit(evsel, WEIGHT);
1808 evsel__reset_sample_bit(evsel, WEIGHT_STRUCT);
1809 }
1810 if (perf_missing_features.clockid_wrong)
1811 evsel->core.attr.clockid = CLOCK_MONOTONIC;
1812 if (perf_missing_features.clockid) {
1813 evsel->core.attr.use_clockid = 0;
1814 evsel->core.attr.clockid = 0;
1815 }
1816 if (perf_missing_features.cloexec)
1817 evsel->open_flags &= ~(unsigned long)PERF_FLAG_FD_CLOEXEC;
1818 if (perf_missing_features.mmap2)
1819 evsel->core.attr.mmap2 = 0;
1820 if (evsel->pmu && evsel->pmu->missing_features.exclude_guest)
1821 evsel->core.attr.exclude_guest = evsel->core.attr.exclude_host = 0;
1822 if (perf_missing_features.lbr_flags)
1823 evsel->core.attr.branch_sample_type &= ~(PERF_SAMPLE_BRANCH_NO_FLAGS |
1824 PERF_SAMPLE_BRANCH_NO_CYCLES);
1825 if (perf_missing_features.group_read && evsel->core.attr.inherit)
1826 evsel->core.attr.read_format &= ~(PERF_FORMAT_GROUP|PERF_FORMAT_ID);
1827 if (perf_missing_features.ksymbol)
1828 evsel->core.attr.ksymbol = 0;
1829 if (perf_missing_features.bpf)
1830 evsel->core.attr.bpf_event = 0;
1831 if (perf_missing_features.branch_hw_idx)
1832 evsel->core.attr.branch_sample_type &= ~PERF_SAMPLE_BRANCH_HW_INDEX;
1833 if (perf_missing_features.sample_id_all)
1834 evsel->core.attr.sample_id_all = 0;
1835}
1836
1837int evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus,
1838 struct perf_thread_map *threads)
1839{
1840 int err;
1841
1842 err = __evsel__prepare_open(evsel, cpus, threads);
1843 if (err)
1844 return err;
1845
1846 evsel__disable_missing_features(evsel);
1847
1848 return err;
1849}
1850
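/*
 * Called when sys_perf_event_open() fails with EINVAL on the very first
 * cpu/thread: flip at most one perf_missing_features flag per call and
 * return true to ask the caller to retry with that feature disabled, or
 * false if no retry is possible.
 */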
1851bool evsel__detect_missing_features(struct evsel *evsel)
1852{
1853
1854
1855
1856
1857 if (!perf_missing_features.weight_struct &&
1858 (evsel->core.attr.sample_type & PERF_SAMPLE_WEIGHT_STRUCT)) {
1859 perf_missing_features.weight_struct = true;
1860 pr_debug2("switching off weight struct support\n");
1861 return true;
1862 } else if (!perf_missing_features.code_page_size &&
1863 (evsel->core.attr.sample_type & PERF_SAMPLE_CODE_PAGE_SIZE)) {
1864 perf_missing_features.code_page_size = true;
1865 pr_debug2_peo("Kernel has no PERF_SAMPLE_CODE_PAGE_SIZE support, bailing out\n");
1866 return false;
1867 } else if (!perf_missing_features.data_page_size &&
1868 (evsel->core.attr.sample_type & PERF_SAMPLE_DATA_PAGE_SIZE)) {
1869 perf_missing_features.data_page_size = true;
1870 pr_debug2_peo("Kernel has no PERF_SAMPLE_DATA_PAGE_SIZE support, bailing out\n");
1871 return false;
1872 } else if (!perf_missing_features.cgroup && evsel->core.attr.cgroup) {
1873 perf_missing_features.cgroup = true;
1874 pr_debug2_peo("Kernel has no cgroup sampling support, bailing out\n");
1875 return false;
1876 } else if (!perf_missing_features.branch_hw_idx &&
1877 (evsel->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_HW_INDEX)) {
1878 perf_missing_features.branch_hw_idx = true;
1879 pr_debug2("switching off branch HW index support\n");
1880 return true;
1881 } else if (!perf_missing_features.aux_output && evsel->core.attr.aux_output) {
1882 perf_missing_features.aux_output = true;
1883 pr_debug2_peo("Kernel has no attr.aux_output support, bailing out\n");
1884 return false;
1885 } else if (!perf_missing_features.bpf && evsel->core.attr.bpf_event) {
1886 perf_missing_features.bpf = true;
1887 pr_debug2_peo("switching off bpf_event\n");
1888 return true;
1889 } else if (!perf_missing_features.ksymbol && evsel->core.attr.ksymbol) {
1890 perf_missing_features.ksymbol = true;
1891 pr_debug2_peo("switching off ksymbol\n");
1892 return true;
1893 } else if (!perf_missing_features.write_backward && evsel->core.attr.write_backward) {
1894 perf_missing_features.write_backward = true;
1895 pr_debug2_peo("switching off write_backward\n");
1896 return false;
1897 } else if (!perf_missing_features.clockid_wrong && evsel->core.attr.use_clockid) {
1898 perf_missing_features.clockid_wrong = true;
1899 pr_debug2_peo("switching off clockid\n");
1900 return true;
1901 } else if (!perf_missing_features.clockid && evsel->core.attr.use_clockid) {
1902 perf_missing_features.clockid = true;
1903 pr_debug2_peo("switching off use_clockid\n");
1904 return true;
1905 } else if (!perf_missing_features.cloexec && (evsel->open_flags & PERF_FLAG_FD_CLOEXEC)) {
1906 perf_missing_features.cloexec = true;
1907 pr_debug2_peo("switching off cloexec flag\n");
1908 return true;
1909 } else if (!perf_missing_features.mmap2 && evsel->core.attr.mmap2) {
1910 perf_missing_features.mmap2 = true;
1911 pr_debug2_peo("switching off mmap2\n");
1912 return true;
1913 } else if ((evsel->core.attr.exclude_guest || evsel->core.attr.exclude_host) &&
1914 (evsel->pmu == NULL || evsel->pmu->missing_features.exclude_guest)) {
1915 if (evsel->pmu == NULL) {
1916 evsel->pmu = evsel__find_pmu(evsel);
1917 if (evsel->pmu)
1918 evsel->pmu->missing_features.exclude_guest = true;
1919 else {
1920
1921 evsel->core.attr.exclude_host = false;
1922 evsel->core.attr.exclude_guest = false;
1923 }
1924 }
1925
1926 if (evsel->exclude_GH) {
1927 pr_debug2_peo("PMU has no exclude_host/guest support, bailing out\n");
1928 return false;
1929 }
1930 if (!perf_missing_features.exclude_guest) {
1931 perf_missing_features.exclude_guest = true;
1932 pr_debug2_peo("switching off exclude_guest, exclude_host\n");
1933 }
1934 return true;
1935 } else if (!perf_missing_features.sample_id_all) {
1936 perf_missing_features.sample_id_all = true;
1937 pr_debug2_peo("switching off sample_id_all\n");
1938 return true;
1939 } else if (!perf_missing_features.lbr_flags &&
1940 (evsel->core.attr.branch_sample_type &
1941 (PERF_SAMPLE_BRANCH_NO_CYCLES |
1942 PERF_SAMPLE_BRANCH_NO_FLAGS))) {
1943 perf_missing_features.lbr_flags = true;
1944 pr_debug2_peo("switching off branch sample type no (cycles/flags)\n");
1945 return true;
1946 } else if (!perf_missing_features.group_read &&
1947 evsel->core.attr.inherit &&
1948 (evsel->core.attr.read_format & PERF_FORMAT_GROUP) &&
1949 evsel__is_group_leader(evsel)) {
1950 perf_missing_features.group_read = true;
1951 pr_debug2_peo("switching off group read\n");
1952 return true;
1953 } else {
1954 return false;
1955 }
1956}
1957
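/*
 * Opening needs one fd per cpu/thread/event; when sys_perf_event_open()
 * fails with EMFILE try raising RLIMIT_NOFILE, first to the hard limit
 * and then once more above it, before giving up.
 */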
1958bool evsel__increase_rlimit(enum rlimit_action *set_rlimit)
1959{
1960 int old_errno;
1961 struct rlimit l;
1962
1963 if (*set_rlimit < INCREASED_MAX) {
1964 old_errno = errno;
1965
1966 if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
1967 if (*set_rlimit == NO_CHANGE) {
1968 l.rlim_cur = l.rlim_max;
1969 } else {
1970 l.rlim_cur = l.rlim_max + 1000;
1971 l.rlim_max = l.rlim_cur;
1972 }
1973 if (setrlimit(RLIMIT_NOFILE, &l) == 0) {
1974 (*set_rlimit) += 1;
1975 errno = old_errno;
1976 return true;
1977 }
1978 }
1979 errno = old_errno;
1980 }
1981
1982 return false;
1983}
1984
1985static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus,
1986 struct perf_thread_map *threads,
1987 int start_cpu_map_idx, int end_cpu_map_idx)
1988{
1989 int idx, thread, nthreads;
1990 int pid = -1, err, old_errno;
1991 enum rlimit_action set_rlimit = NO_CHANGE;
1992
1993 err = __evsel__prepare_open(evsel, cpus, threads);
1994 if (err)
1995 return err;
1996
1997 if (cpus == NULL)
1998 cpus = empty_cpu_map;
1999
2000 if (threads == NULL)
2001 threads = empty_thread_map;
2002
2003 if (evsel->core.system_wide)
2004 nthreads = 1;
2005 else
2006 nthreads = threads->nr;
2007
2008 if (evsel->cgrp)
2009 pid = evsel->cgrp->fd;
2010
2011fallback_missing_features:
2012 evsel__disable_missing_features(evsel);
2013
2014 display_attr(&evsel->core.attr);
2015
2016 for (idx = start_cpu_map_idx; idx < end_cpu_map_idx; idx++) {
2017
2018 for (thread = 0; thread < nthreads; thread++) {
2019 int fd, group_fd;
2020retry_open:
2021 if (thread >= nthreads)
2022 break;
2023
2024 if (!evsel->cgrp && !evsel->core.system_wide)
2025 pid = perf_thread_map__pid(threads, thread);
2026
2027 group_fd = get_group_fd(evsel, idx, thread);
2028
2029 test_attr__ready();
2030
2031 pr_debug2_peo("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx",
2032 pid, perf_cpu_map__cpu(cpus, idx).cpu, group_fd, evsel->open_flags);
2033
2034 fd = sys_perf_event_open(&evsel->core.attr, pid,
2035 perf_cpu_map__cpu(cpus, idx).cpu,
2036 group_fd, evsel->open_flags);
2037
2038 FD(evsel, idx, thread) = fd;
2039
2040 if (fd < 0) {
2041 err = -errno;
2042
2043 pr_debug2_peo("\nsys_perf_event_open failed, error %d\n",
2044 err);
2045 goto try_fallback;
2046 }
2047
2048 bpf_counter__install_pe(evsel, idx, fd);
2049
2050 if (unlikely(test_attr__enabled)) {
2051 test_attr__open(&evsel->core.attr, pid,
2052 perf_cpu_map__cpu(cpus, idx),
2053 fd, group_fd, evsel->open_flags);
2054 }
2055
2056 pr_debug2_peo(" = %d\n", fd);
2057
2058 if (evsel->bpf_fd >= 0) {
2059 int evt_fd = fd;
2060 int bpf_fd = evsel->bpf_fd;
2061
2062 err = ioctl(evt_fd,
2063 PERF_EVENT_IOC_SET_BPF,
2064 bpf_fd);
2065 if (err && errno != EEXIST) {
2066 pr_err("failed to attach bpf fd %d: %s\n",
2067 bpf_fd, strerror(errno));
2068 err = -EINVAL;
2069 goto out_close;
2070 }
2071 }
2072
2073 set_rlimit = NO_CHANGE;
2074
2075
2076
2077
2078
2079 if (perf_missing_features.clockid ||
2080 perf_missing_features.clockid_wrong) {
2081 err = -EINVAL;
2082 goto out_close;
2083 }
2084 }
2085 }
2086
2087 return 0;
2088
2089try_fallback:
2090 if (evsel__precise_ip_fallback(evsel))
2091 goto retry_open;
2092
2093 if (evsel__ignore_missing_thread(evsel, perf_cpu_map__nr(cpus),
2094 idx, threads, thread, err)) {
2095
2096 nthreads--;
2097
2098
2099 err = 0;
2100 goto retry_open;
2101 }
2102
2103
2104
2105
2106 if (err == -EMFILE && evsel__increase_rlimit(&set_rlimit))
2107 goto retry_open;
2108
2109 if (err != -EINVAL || idx > 0 || thread > 0)
2110 goto out_close;
2111
2112 if (evsel__detect_missing_features(evsel))
2113 goto fallback_missing_features;
2114out_close:
2115 if (err)
2116 threads->err_thread = thread;
2117
2118 old_errno = errno;
2119 do {
2120 while (--thread >= 0) {
2121 if (FD(evsel, idx, thread) >= 0)
2122 close(FD(evsel, idx, thread));
2123 FD(evsel, idx, thread) = -1;
2124 }
2125 thread = nthreads;
2126 } while (--idx >= 0);
2127 errno = old_errno;
2128 return err;
2129}
2130
2131int evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus,
2132 struct perf_thread_map *threads)
2133{
2134 return evsel__open_cpu(evsel, cpus, threads, 0, perf_cpu_map__nr(cpus));
2135}
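/*
 * Typical lifecycle around evsel__open(), as an illustrative sketch only
 * (error handling and cpu/thread map setup omitted):
 *
 *	struct evsel *evsel = evsel__new(&attr);
 *
 *	evsel__open(evsel, cpus, threads);
 *	evsel__read_counter(evsel, cpu_map_idx, thread);
 *	evsel__close(evsel);
 *	evsel__delete(evsel);
 */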
2136
2137void evsel__close(struct evsel *evsel)
2138{
2139 perf_evsel__close(&evsel->core);
2140 perf_evsel__free_id(&evsel->core);
2141}
2142
2143int evsel__open_per_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, int cpu_map_idx)
2144{
2145 if (cpu_map_idx == -1)
2146 return evsel__open_cpu(evsel, cpus, NULL, 0, perf_cpu_map__nr(cpus));
2147
2148 return evsel__open_cpu(evsel, cpus, NULL, cpu_map_idx, cpu_map_idx + 1);
2149}
2150
2151int evsel__open_per_thread(struct evsel *evsel, struct perf_thread_map *threads)
2152{
2153 return evsel__open(evsel, NULL, threads);
2154}
2155
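/*
 * Parse the trailing sample_id_all block of a non-sample record, walking
 * the u64 array backwards from the end of the event.
 */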
2156static int perf_evsel__parse_id_sample(const struct evsel *evsel,
2157 const union perf_event *event,
2158 struct perf_sample *sample)
2159{
2160 u64 type = evsel->core.attr.sample_type;
2161 const __u64 *array = event->sample.array;
2162 bool swapped = evsel->needs_swap;
2163 union u64_swap u;
2164
2165 array += ((event->header.size -
2166 sizeof(event->header)) / sizeof(u64)) - 1;
2167
2168 if (type & PERF_SAMPLE_IDENTIFIER) {
2169 sample->id = *array;
2170 array--;
2171 }
2172
2173 if (type & PERF_SAMPLE_CPU) {
2174 u.val64 = *array;
2175 if (swapped) {
2176
2177 u.val64 = bswap_64(u.val64);
2178 u.val32[0] = bswap_32(u.val32[0]);
2179 }
2180
2181 sample->cpu = u.val32[0];
2182 array--;
2183 }
2184
2185 if (type & PERF_SAMPLE_STREAM_ID) {
2186 sample->stream_id = *array;
2187 array--;
2188 }
2189
2190 if (type & PERF_SAMPLE_ID) {
2191 sample->id = *array;
2192 array--;
2193 }
2194
2195 if (type & PERF_SAMPLE_TIME) {
2196 sample->time = *array;
2197 array--;
2198 }
2199
2200 if (type & PERF_SAMPLE_TID) {
2201 u.val64 = *array;
2202 if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
2204 u.val64 = bswap_64(u.val64);
2205 u.val32[0] = bswap_32(u.val32[0]);
2206 u.val32[1] = bswap_32(u.val32[1]);
2207 }
2208
2209 sample->pid = u.val32[0];
2210 sample->tid = u.val32[1];
2211 array--;
2212 }
2213
2214 return 0;
2215}
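
/*
 * Worked example (illustrative): with sample_id_all set and a sample_type of
 * TID | TIME | ID | CPU | IDENTIFIER, a non-sample record is laid out as
 * { <record payload> ..., pid/tid, time, id, cpu/res, identifier }, so the
 * function above starts at the last u64 of the record and walks the array
 * pointer backwards, peeling off one trailer field per sample_type flag.
 */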
2216
2217static inline bool overflow(const void *endp, u16 max_size, const void *offset,
2218 u64 size)
2219{
2220 return size > max_size || offset + size > endp;
2221}
2222
2223#define OVERFLOW_CHECK(offset, size, max_size) \
2224 do { \
2225 if (overflow(endp, (max_size), (offset), (size))) \
2226 return -EFAULT; \
2227 } while (0)
2228
2229#define OVERFLOW_CHECK_u64(offset) \
2230 OVERFLOW_CHECK(offset, sizeof(u64), sizeof(u64))
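
/*
 * Typical pattern in the parser below (shown only as a reminder of how the
 * two macros cooperate): first make sure the length word itself fits in the
 * record, then make sure the variable-sized payload it describes fits too.
 *
 *	OVERFLOW_CHECK_u64(array);		// the size word
 *	sz = *array++;
 *	OVERFLOW_CHECK(array, sz, max_size);	// the payload
 *	array = (void *)array + sz;
 */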
2231
2232static int
2233perf_event__check_size(union perf_event *event, unsigned int sample_size)
2234{
	/*
	 * evsel->sample_size only covers the fixed-size fields selected by
	 * PERF_SAMPLE_MASK; make sure at least that much payload fits in the
	 * record. Variable-sized fields are checked later with OVERFLOW_CHECK().
	 */
2240 if (sample_size + sizeof(event->header) > event->header.size)
2241 return -EFAULT;
2242
2243 return 0;
2244}
2245
2246void __weak arch_perf_parse_sample_weight(struct perf_sample *data,
2247 const __u64 *array,
2248 u64 type __maybe_unused)
2249{
2250 data->weight = *array;
2251}
2252
2253u64 evsel__bitfield_swap_branch_flags(u64 value)
2254{
2255 u64 new_val = 0;
2256
	/*
	 * branch_flags is a u64 worth of bit fields:
	 *
	 *	mispred:1	target mispredicted
	 *	predicted:1	target predicted
	 *	in_tx:1		in transaction
	 *	abort:1		transaction abort
	 *	cycles:16	cycle count to last branch
	 *	type:4		branch type
	 *	reserved:40
	 *
	 * A plain bswap_64() of the whole value would scramble the fields, so
	 * instead move each field to the bit position it occupies on the other
	 * endianness.
	 */
2280 if (tep_is_bigendian()) {
2281 new_val = bitfield_swap(value, 0, 1);
2282 new_val |= bitfield_swap(value, 1, 1);
2283 new_val |= bitfield_swap(value, 2, 1);
2284 new_val |= bitfield_swap(value, 3, 1);
2285 new_val |= bitfield_swap(value, 4, 16);
2286 new_val |= bitfield_swap(value, 20, 4);
2287 new_val |= bitfield_swap(value, 24, 40);
2288 } else {
2289 new_val = bitfield_swap(value, 63, 1);
2290 new_val |= bitfield_swap(value, 62, 1);
2291 new_val |= bitfield_swap(value, 61, 1);
2292 new_val |= bitfield_swap(value, 60, 1);
2293 new_val |= bitfield_swap(value, 44, 16);
2294 new_val |= bitfield_swap(value, 40, 4);
2295 new_val |= bitfield_swap(value, 0, 40);
2296 }
2297
2298 return new_val;
2299}
2300
2301int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
2302 struct perf_sample *data)
2303{
2304 u64 type = evsel->core.attr.sample_type;
2305 bool swapped = evsel->needs_swap;
2306 const __u64 *array;
2307 u16 max_size = event->header.size;
2308 const void *endp = (void *)event + max_size;
2309 u64 sz;
2310
	/*
	 * Scratch union for byte-swapping packed u32 pairs (pid/tid, cpu,
	 * raw size) when the recorded data comes from the other endianness.
	 */
2315 union u64_swap u;
2316
2317 memset(data, 0, sizeof(*data));
2318 data->cpu = data->pid = data->tid = -1;
2319 data->stream_id = data->id = data->time = -1ULL;
2320 data->period = evsel->core.attr.sample_period;
2321 data->cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
2322 data->misc = event->header.misc;
2323 data->id = -1ULL;
2324 data->data_src = PERF_MEM_DATA_SRC_NONE;
2325
2326 if (event->header.type != PERF_RECORD_SAMPLE) {
2327 if (!evsel->core.attr.sample_id_all)
2328 return 0;
2329 return perf_evsel__parse_id_sample(evsel, event, data);
2330 }
2331
2332 array = event->sample.array;
2333
2334 if (perf_event__check_size(event, evsel->sample_size))
2335 return -EFAULT;
2336
2337 if (type & PERF_SAMPLE_IDENTIFIER) {
2338 data->id = *array;
2339 array++;
2340 }
2341
2342 if (type & PERF_SAMPLE_IP) {
2343 data->ip = *array;
2344 array++;
2345 }
2346
2347 if (type & PERF_SAMPLE_TID) {
2348 u.val64 = *array;
2349 if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
2351 u.val64 = bswap_64(u.val64);
2352 u.val32[0] = bswap_32(u.val32[0]);
2353 u.val32[1] = bswap_32(u.val32[1]);
2354 }
2355
2356 data->pid = u.val32[0];
2357 data->tid = u.val32[1];
2358 array++;
2359 }
2360
2361 if (type & PERF_SAMPLE_TIME) {
2362 data->time = *array;
2363 array++;
2364 }
2365
2366 if (type & PERF_SAMPLE_ADDR) {
2367 data->addr = *array;
2368 array++;
2369 }
2370
2371 if (type & PERF_SAMPLE_ID) {
2372 data->id = *array;
2373 array++;
2374 }
2375
2376 if (type & PERF_SAMPLE_STREAM_ID) {
2377 data->stream_id = *array;
2378 array++;
2379 }
2380
2381 if (type & PERF_SAMPLE_CPU) {
2382
2383 u.val64 = *array;
2384 if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
2386 u.val64 = bswap_64(u.val64);
2387 u.val32[0] = bswap_32(u.val32[0]);
2388 }
2389
2390 data->cpu = u.val32[0];
2391 array++;
2392 }
2393
2394 if (type & PERF_SAMPLE_PERIOD) {
2395 data->period = *array;
2396 array++;
2397 }
2398
2399 if (type & PERF_SAMPLE_READ) {
2400 u64 read_format = evsel->core.attr.read_format;
2401
2402 OVERFLOW_CHECK_u64(array);
2403 if (read_format & PERF_FORMAT_GROUP)
2404 data->read.group.nr = *array;
2405 else
2406 data->read.one.value = *array;
2407
2408 array++;
2409
2410 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
2411 OVERFLOW_CHECK_u64(array);
2412 data->read.time_enabled = *array;
2413 array++;
2414 }
2415
2416 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
2417 OVERFLOW_CHECK_u64(array);
2418 data->read.time_running = *array;
2419 array++;
2420 }
2421
2422
2423 if (read_format & PERF_FORMAT_GROUP) {
2424 const u64 max_group_nr = UINT64_MAX /
2425 sizeof(struct sample_read_value);
2426
2427 if (data->read.group.nr > max_group_nr)
2428 return -EFAULT;
2429 sz = data->read.group.nr *
2430 sizeof(struct sample_read_value);
2431 OVERFLOW_CHECK(array, sz, max_size);
2432 data->read.group.values =
2433 (struct sample_read_value *)array;
2434 array = (void *)array + sz;
2435 } else {
2436 OVERFLOW_CHECK_u64(array);
2437 data->read.one.id = *array;
2438 array++;
2439 }
2440 }
2441
2442 if (type & PERF_SAMPLE_CALLCHAIN) {
2443 const u64 max_callchain_nr = UINT64_MAX / sizeof(u64);
2444
2445 OVERFLOW_CHECK_u64(array);
2446 data->callchain = (struct ip_callchain *)array++;
2447 if (data->callchain->nr > max_callchain_nr)
2448 return -EFAULT;
2449 sz = data->callchain->nr * sizeof(u64);
2450 OVERFLOW_CHECK(array, sz, max_size);
2451 array = (void *)array + sz;
2452 }
2453
2454 if (type & PERF_SAMPLE_RAW) {
2455 OVERFLOW_CHECK_u64(array);
2456 u.val64 = *array;
		/*
		 * The first u64 of the raw area carries the u32 size in its
		 * low half. Undo the record-level u64 swap, then swap the two
		 * u32s individually to recover the size.
		 */
2464 if (swapped) {
2465 u.val64 = bswap_64(u.val64);
2466 u.val32[0] = bswap_32(u.val32[0]);
2467 u.val32[1] = bswap_32(u.val32[1]);
2468 }
2469 data->raw_size = u.val32[0];
		/*
		 * The raw area is 64-bit aligned (including the leading u32
		 * size), so mem_bswap_64() can be used on the whole area.
		 */
2475 if (swapped)
2476 mem_bswap_64((void *) array, data->raw_size);
2477
2478 array = (void *)array + sizeof(u32);
2479
2480 OVERFLOW_CHECK(array, data->raw_size, max_size);
2481 data->raw_data = (void *)array;
2482 array = (void *)array + data->raw_size;
2483 }
2484
2485 if (type & PERF_SAMPLE_BRANCH_STACK) {
2486 const u64 max_branch_nr = UINT64_MAX /
2487 sizeof(struct branch_entry);
2488 struct branch_entry *e;
2489 unsigned int i;
2490
2491 OVERFLOW_CHECK_u64(array);
2492 data->branch_stack = (struct branch_stack *)array++;
2493
2494 if (data->branch_stack->nr > max_branch_nr)
2495 return -EFAULT;
2496
2497 sz = data->branch_stack->nr * sizeof(struct branch_entry);
2498 if (evsel__has_branch_hw_idx(evsel)) {
2499 sz += sizeof(u64);
2500 e = &data->branch_stack->entries[0];
2501 } else {
2502 data->no_hw_idx = true;
			/*
			 * Without PERF_SAMPLE_BRANCH_HW_INDEX the kernel emits
			 * only nr and entries[], so the first entry sits where
			 * hw_idx would otherwise be.
			 */
2507 e = (struct branch_entry *)&data->branch_stack->hw_idx;
2508 }
2509
2510 if (swapped) {
			/*
			 * branch_flags is a bit field, so bswap_64() on the
			 * whole value cannot fix it up. Swap each field to its
			 * host-endian bit position instead, see
			 * evsel__bitfield_swap_branch_flags().
			 */
2521 for (i = 0; i < data->branch_stack->nr; i++, e++)
2522 e->flags.value = evsel__bitfield_swap_branch_flags(e->flags.value);
2523 }
2524
2525 OVERFLOW_CHECK(array, sz, max_size);
2526 array = (void *)array + sz;
2527 }
2528
2529 if (type & PERF_SAMPLE_REGS_USER) {
2530 OVERFLOW_CHECK_u64(array);
2531 data->user_regs.abi = *array;
2532 array++;
2533
2534 if (data->user_regs.abi) {
2535 u64 mask = evsel->core.attr.sample_regs_user;
2536
2537 sz = hweight64(mask) * sizeof(u64);
2538 OVERFLOW_CHECK(array, sz, max_size);
2539 data->user_regs.mask = mask;
2540 data->user_regs.regs = (u64 *)array;
2541 array = (void *)array + sz;
2542 }
2543 }
2544
2545 if (type & PERF_SAMPLE_STACK_USER) {
2546 OVERFLOW_CHECK_u64(array);
2547 sz = *array++;
2548
2549 data->user_stack.offset = ((char *)(array - 1)
2550 - (char *) event);
2551
2552 if (!sz) {
2553 data->user_stack.size = 0;
2554 } else {
2555 OVERFLOW_CHECK(array, sz, max_size);
2556 data->user_stack.data = (char *)array;
2557 array = (void *)array + sz;
2558 OVERFLOW_CHECK_u64(array);
2559 data->user_stack.size = *array++;
2560 if (WARN_ONCE(data->user_stack.size > sz,
2561 "user stack dump failure\n"))
2562 return -EFAULT;
2563 }
2564 }
2565
2566 if (type & PERF_SAMPLE_WEIGHT_TYPE) {
2567 OVERFLOW_CHECK_u64(array);
2568 arch_perf_parse_sample_weight(data, array, type);
2569 array++;
2570 }
2571
2572 if (type & PERF_SAMPLE_DATA_SRC) {
2573 OVERFLOW_CHECK_u64(array);
2574 data->data_src = *array;
2575 array++;
2576 }
2577
2578 if (type & PERF_SAMPLE_TRANSACTION) {
2579 OVERFLOW_CHECK_u64(array);
2580 data->transaction = *array;
2581 array++;
2582 }
2583
2584 data->intr_regs.abi = PERF_SAMPLE_REGS_ABI_NONE;
2585 if (type & PERF_SAMPLE_REGS_INTR) {
2586 OVERFLOW_CHECK_u64(array);
2587 data->intr_regs.abi = *array;
2588 array++;
2589
2590 if (data->intr_regs.abi != PERF_SAMPLE_REGS_ABI_NONE) {
2591 u64 mask = evsel->core.attr.sample_regs_intr;
2592
2593 sz = hweight64(mask) * sizeof(u64);
2594 OVERFLOW_CHECK(array, sz, max_size);
2595 data->intr_regs.mask = mask;
2596 data->intr_regs.regs = (u64 *)array;
2597 array = (void *)array + sz;
2598 }
2599 }
2600
2601 data->phys_addr = 0;
2602 if (type & PERF_SAMPLE_PHYS_ADDR) {
2603 data->phys_addr = *array;
2604 array++;
2605 }
2606
2607 data->cgroup = 0;
2608 if (type & PERF_SAMPLE_CGROUP) {
2609 data->cgroup = *array;
2610 array++;
2611 }
2612
2613 data->data_page_size = 0;
2614 if (type & PERF_SAMPLE_DATA_PAGE_SIZE) {
2615 data->data_page_size = *array;
2616 array++;
2617 }
2618
2619 data->code_page_size = 0;
2620 if (type & PERF_SAMPLE_CODE_PAGE_SIZE) {
2621 data->code_page_size = *array;
2622 array++;
2623 }
2624
2625 if (type & PERF_SAMPLE_AUX) {
2626 OVERFLOW_CHECK_u64(array);
2627 sz = *array++;
2628
2629 OVERFLOW_CHECK(array, sz, max_size);
2630
2631 if (swapped)
2632 mem_bswap_64((char *)array, sz);
2633 data->aux_sample.size = sz;
2634 data->aux_sample.data = (char *)array;
2635 array = (void *)array + sz;
2636 }
2637
2638 return 0;
2639}
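
/*
 * Illustrative call site (a sketch, not taken from this file): consumers of
 * the ring buffer usually parse each PERF_RECORD_SAMPLE into a struct
 * perf_sample on the stack before handing it to their own logic.
 *
 *	struct perf_sample sample;
 *
 *	if (evsel__parse_sample(evsel, event, &sample))
 *		return -EFAULT;		// truncated or malformed record
 *	// sample.ip, sample.tid, sample.period, ... are now valid for the
 *	// bits requested in evsel->core.attr.sample_type.
 */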
2640
2641int evsel__parse_sample_timestamp(struct evsel *evsel, union perf_event *event,
2642 u64 *timestamp)
2643{
2644 u64 type = evsel->core.attr.sample_type;
2645 const __u64 *array;
2646
2647 if (!(type & PERF_SAMPLE_TIME))
2648 return -1;
2649
2650 if (event->header.type != PERF_RECORD_SAMPLE) {
2651 struct perf_sample data = {
2652 .time = -1ULL,
2653 };
2654
2655 if (!evsel->core.attr.sample_id_all)
2656 return -1;
2657 if (perf_evsel__parse_id_sample(evsel, event, &data))
2658 return -1;
2659
2660 *timestamp = data.time;
2661 return 0;
2662 }
2663
2664 array = event->sample.array;
2665
2666 if (perf_event__check_size(event, evsel->sample_size))
2667 return -EFAULT;
2668
2669 if (type & PERF_SAMPLE_IDENTIFIER)
2670 array++;
2671
2672 if (type & PERF_SAMPLE_IP)
2673 array++;
2674
2675 if (type & PERF_SAMPLE_TID)
2676 array++;
2677
2678 if (type & PERF_SAMPLE_TIME)
2679 *timestamp = *array;
2680
2681 return 0;
2682}
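
/*
 * Sketch of the intended use: callers that only need to order events by time
 * (for example when queueing records) can avoid a full evsel__parse_sample():
 *
 *	u64 timestamp;
 *
 *	if (evsel__parse_sample_timestamp(evsel, event, &timestamp) == 0)
 *		queue_event(event, timestamp);	// queue_event() is hypothetical
 */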
2683
2684struct tep_format_field *evsel__field(struct evsel *evsel, const char *name)
2685{
2686 return tep_find_field(evsel->tp_format, name);
2687}
2688
2689void *evsel__rawptr(struct evsel *evsel, struct perf_sample *sample, const char *name)
2690{
2691 struct tep_format_field *field = evsel__field(evsel, name);
2692 int offset;
2693
2694 if (!field)
2695 return NULL;
2696
2697 offset = field->offset;
2698
2699 if (field->flags & TEP_FIELD_IS_DYNAMIC) {
2700 offset = *(int *)(sample->raw_data + field->offset);
2701 offset &= 0xffff;
2702 if (field->flags & TEP_FIELD_IS_RELATIVE)
2703 offset += field->offset + field->size;
2704 }
2705
2706 return sample->raw_data + offset;
2707}
2708
2709u64 format_field__intval(struct tep_format_field *field, struct perf_sample *sample,
2710 bool needs_swap)
2711{
2712 u64 value;
2713 void *ptr = sample->raw_data + field->offset;
2714
2715 switch (field->size) {
2716 case 1:
2717 return *(u8 *)ptr;
2718 case 2:
2719 value = *(u16 *)ptr;
2720 break;
2721 case 4:
2722 value = *(u32 *)ptr;
2723 break;
2724 case 8:
2725 memcpy(&value, ptr, sizeof(u64));
2726 break;
2727 default:
2728 return 0;
2729 }
2730
2731 if (!needs_swap)
2732 return value;
2733
2734 switch (field->size) {
2735 case 2:
2736 return bswap_16(value);
2737 case 4:
2738 return bswap_32(value);
2739 case 8:
2740 return bswap_64(value);
2741 default:
2742 return 0;
2743 }
2744
2745 return 0;
2746}
2747
2748u64 evsel__intval(struct evsel *evsel, struct perf_sample *sample, const char *name)
2749{
2750 struct tep_format_field *field = evsel__field(evsel, name);
2751
2752 if (!field)
2753 return 0;
2754
	return format_field__intval(field, sample, evsel->needs_swap);
2756}
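
/*
 * Illustrative tracepoint field access (the field names are examples and are
 * not guaranteed to exist for any given event): evsel__intval() is the usual
 * way to pull an integer field out of a tracepoint sample, evsel__rawptr()
 * the way to reach dynamic (string/array) payloads.
 *
 *	u64 pid = evsel__intval(evsel, sample, "pid");
 *	const char *fname = evsel__rawptr(evsel, sample, "filename");
 */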
2757
2758bool evsel__fallback(struct evsel *evsel, int err, char *msg, size_t msgsize)
2759{
2760 int paranoid;
2761
2762 if ((err == ENOENT || err == ENXIO || err == ENODEV) &&
2763 evsel->core.attr.type == PERF_TYPE_HARDWARE &&
2764 evsel->core.attr.config == PERF_COUNT_HW_CPU_CYCLES) {
		/*
		 * The PMU has no usable cycles counter (ENOENT, ENXIO or
		 * ENODEV), so fall back to the hrtimer based cpu-clock
		 * software event, which is available even without PMU support.
		 */
2773 scnprintf(msg, msgsize, "%s",
2774"The cycles event is not supported, trying to fall back to cpu-clock-ticks");
2775
2776 evsel->core.attr.type = PERF_TYPE_SOFTWARE;
2777 evsel->core.attr.config = PERF_COUNT_SW_CPU_CLOCK;
2778
2779 zfree(&evsel->name);
2780 return true;
2781 } else if (err == EACCES && !evsel->core.attr.exclude_kernel &&
2782 (paranoid = perf_event_paranoid()) > 1) {
2783 const char *name = evsel__name(evsel);
2784 char *new_name;
2785 const char *sep = ":";
2786
		/* If the event already excludes user space, there is nothing left to fall back to. */
2788 if (evsel->core.attr.exclude_user)
2789 return false;
2790
		/* Is the modifier separator already part of the name? */
2792 if (strchr(name, '/') ||
2793 (strchr(name, ':') && !evsel->is_libpfm_event))
2794 sep = "";
2795
2796 if (asprintf(&new_name, "%s%su", name, sep) < 0)
2797 return false;
2798
2799 if (evsel->name)
2800 free(evsel->name);
2801 evsel->name = new_name;
		scnprintf(msg, msgsize, "kernel.perf_event_paranoid=%d, trying "
			  "to fall back to excluding kernel and hypervisor "
			  "samples", paranoid);
2805 evsel->core.attr.exclude_kernel = 1;
2806 evsel->core.attr.exclude_hv = 1;
2807
2808 return true;
2809 }
2810
2811 return false;
2812}
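
/*
 * Typical retry loop around evsel__open() (a sketch, assuming the caller
 * wants the automatic downgrades this helper provides):
 *
 *	char msg[512];
 *
 *	while (evsel__open(evsel, cpus, threads) < 0) {
 *		if (evsel__fallback(evsel, errno, msg, sizeof(msg)))
 *			continue;	// attr was adjusted, try again
 *		// give up: format a message with evsel__open_strerror()
 *		break;
 *	}
 */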
2813
2814static bool find_process(const char *name)
2815{
2816 size_t len = strlen(name);
2817 DIR *dir;
2818 struct dirent *d;
2819 int ret = -1;
2820
2821 dir = opendir(procfs__mountpoint());
2822 if (!dir)
2823 return false;
2824
	/* Walk /proc and compare each task's comm with the requested name. */
2826 while (ret && (d = readdir(dir)) != NULL) {
2827 char path[PATH_MAX];
2828 char *data;
2829 size_t size;
2830
2831 if ((d->d_type != DT_DIR) ||
2832 !strcmp(".", d->d_name) ||
2833 !strcmp("..", d->d_name))
2834 continue;
2835
2836 scnprintf(path, sizeof(path), "%s/%s/comm",
2837 procfs__mountpoint(), d->d_name);
2838
2839 if (filename__read_str(path, &data, &size))
2840 continue;
2841
2842 ret = strncmp(name, data, len);
2843 free(data);
2844 }
2845
2846 closedir(dir);
2847 return ret ? false : true;
2848}
2849
2850int evsel__open_strerror(struct evsel *evsel, struct target *target,
2851 int err, char *msg, size_t size)
2852{
2853 char sbuf[STRERR_BUFSIZE];
2854 int printed = 0, enforced = 0;
2855
2856 switch (err) {
2857 case EPERM:
2858 case EACCES:
2859 printed += scnprintf(msg + printed, size - printed,
2860 "Access to performance monitoring and observability operations is limited.\n");
2861
2862 if (!sysfs__read_int("fs/selinux/enforce", &enforced)) {
2863 if (enforced) {
2864 printed += scnprintf(msg + printed, size - printed,
2865 "Enforced MAC policy settings (SELinux) can limit access to performance\n"
2866 "monitoring and observability operations. Inspect system audit records for\n"
2867 "more perf_event access control information and adjusting the policy.\n");
2868 }
2869 }
2870
2871 if (err == EPERM)
2872 printed += scnprintf(msg, size,
2873 "No permission to enable %s event.\n\n", evsel__name(evsel));
2874
2875 return scnprintf(msg + printed, size - printed,
2876 "Consider adjusting /proc/sys/kernel/perf_event_paranoid setting to open\n"
2877 "access to performance monitoring and observability operations for processes\n"
2878 "without CAP_PERFMON, CAP_SYS_PTRACE or CAP_SYS_ADMIN Linux capability.\n"
2879 "More information can be found at 'Perf events and tool security' document:\n"
2880 "https://www.kernel.org/doc/html/latest/admin-guide/perf-security.html\n"
2881 "perf_event_paranoid setting is %d:\n"
2882 " -1: Allow use of (almost) all events by all users\n"
2883 " Ignore mlock limit after perf_event_mlock_kb without CAP_IPC_LOCK\n"
2884 ">= 0: Disallow raw and ftrace function tracepoint access\n"
2885 ">= 1: Disallow CPU event access\n"
2886 ">= 2: Disallow kernel profiling\n"
2887 "To make the adjusted perf_event_paranoid setting permanent preserve it\n"
2888 "in /etc/sysctl.conf (e.g. kernel.perf_event_paranoid = <setting>)",
2889 perf_event_paranoid());
2890 case ENOENT:
2891 return scnprintf(msg, size, "The %s event is not supported.", evsel__name(evsel));
2892 case EMFILE:
2893 return scnprintf(msg, size, "%s",
2894 "Too many events are opened.\n"
2895 "Probably the maximum number of open file descriptors has been reached.\n"
2896 "Hint: Try again after reducing the number of events.\n"
2897 "Hint: Try increasing the limit with 'ulimit -n <limit>'");
2898 case ENOMEM:
2899 if (evsel__has_callchain(evsel) &&
2900 access("/proc/sys/kernel/perf_event_max_stack", F_OK) == 0)
2901 return scnprintf(msg, size,
2902 "Not enough memory to setup event with callchain.\n"
2903 "Hint: Try tweaking /proc/sys/kernel/perf_event_max_stack\n"
2904 "Hint: Current value: %d", sysctl__max_stack());
2905 break;
2906 case ENODEV:
2907 if (target->cpu_list)
2908 return scnprintf(msg, size, "%s",
2909 "No such device - did you specify an out-of-range profile CPU?");
2910 break;
2911 case EOPNOTSUPP:
2912 if (evsel->core.attr.aux_output)
2913 return scnprintf(msg, size,
2914 "%s: PMU Hardware doesn't support 'aux_output' feature",
2915 evsel__name(evsel));
2916 if (evsel->core.attr.sample_period != 0)
2917 return scnprintf(msg, size,
2918 "%s: PMU Hardware doesn't support sampling/overflow-interrupts. Try 'perf stat'",
2919 evsel__name(evsel));
2920 if (evsel->core.attr.precise_ip)
2921 return scnprintf(msg, size, "%s",
2922 "\'precise\' request may not be supported. Try removing 'p' modifier.");
2923#if defined(__i386__) || defined(__x86_64__)
2924 if (evsel->core.attr.type == PERF_TYPE_HARDWARE)
2925 return scnprintf(msg, size, "%s",
2926 "No hardware sampling interrupt available.\n");
2927#endif
2928 break;
2929 case EBUSY:
2930 if (find_process("oprofiled"))
2931 return scnprintf(msg, size,
2932 "The PMU counters are busy/taken by another profiler.\n"
2933 "We found oprofile daemon running, please stop it and try again.");
2934 break;
2935 case EINVAL:
2936 if (evsel->core.attr.sample_type & PERF_SAMPLE_CODE_PAGE_SIZE && perf_missing_features.code_page_size)
2937 return scnprintf(msg, size, "Asking for the code page size isn't supported by this kernel.");
2938 if (evsel->core.attr.sample_type & PERF_SAMPLE_DATA_PAGE_SIZE && perf_missing_features.data_page_size)
2939 return scnprintf(msg, size, "Asking for the data page size isn't supported by this kernel.");
2940 if (evsel->core.attr.write_backward && perf_missing_features.write_backward)
2941 return scnprintf(msg, size, "Reading from overwrite event is not supported by this kernel.");
2942 if (perf_missing_features.clockid)
2943 return scnprintf(msg, size, "clockid feature not supported.");
2944 if (perf_missing_features.clockid_wrong)
2945 return scnprintf(msg, size, "wrong clockid (%d).", clockid);
2946 if (perf_missing_features.aux_output)
2947 return scnprintf(msg, size, "The 'aux_output' feature is not supported, update the kernel.");
2948 if (!target__has_cpu(target))
2949 return scnprintf(msg, size,
2950 "Invalid event (%s) in per-thread mode, enable system wide with '-a'.",
2951 evsel__name(evsel));
2952 break;
2953 case ENODATA:
2954 return scnprintf(msg, size, "Cannot collect data source with the load latency event alone. "
2955 "Please add an auxiliary event in front of the load latency event.");
2956 default:
2957 break;
2958 }
2959
2960 return scnprintf(msg, size,
2961 "The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n"
2962 "/bin/dmesg | grep -i perf may provide additional information.\n",
2963 err, str_error_r(err, sbuf, sizeof(sbuf)), evsel__name(evsel));
2964}
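
/*
 * Example of turning a failed open into a user-visible diagnostic (sketch;
 * "target" is whatever struct target the caller configured):
 *
 *	char errbuf[BUFSIZ];
 *
 *	if (evsel__open(evsel, cpus, threads) < 0) {
 *		evsel__open_strerror(evsel, &target, errno, errbuf, sizeof(errbuf));
 *		pr_err("%s\n", errbuf);
 *	}
 */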
2965
2966struct perf_env *evsel__env(struct evsel *evsel)
2967{
2968 if (evsel && evsel->evlist)
2969 return evsel->evlist->env;
2970 return &perf_env;
2971}
2972
2973static int store_evsel_ids(struct evsel *evsel, struct evlist *evlist)
2974{
2975 int cpu_map_idx, thread;
2976
2977 for (cpu_map_idx = 0; cpu_map_idx < xyarray__max_x(evsel->core.fd); cpu_map_idx++) {
2978 for (thread = 0; thread < xyarray__max_y(evsel->core.fd);
2979 thread++) {
2980 int fd = FD(evsel, cpu_map_idx, thread);
2981
2982 if (perf_evlist__id_add_fd(&evlist->core, &evsel->core,
2983 cpu_map_idx, thread, fd) < 0)
2984 return -1;
2985 }
2986 }
2987
2988 return 0;
2989}
2990
2991int evsel__store_ids(struct evsel *evsel, struct evlist *evlist)
2992{
2993 struct perf_cpu_map *cpus = evsel->core.cpus;
2994 struct perf_thread_map *threads = evsel->core.threads;
2995
2996 if (perf_evsel__alloc_id(&evsel->core, perf_cpu_map__nr(cpus), threads->nr))
2997 return -ENOMEM;
2998
2999 return store_evsel_ids(evsel, evlist);
3000}
3001
3002void evsel__zero_per_pkg(struct evsel *evsel)
3003{
3004 struct hashmap_entry *cur;
3005 size_t bkt;
3006
3007 if (evsel->per_pkg_mask) {
3008 hashmap__for_each_entry(evsel->per_pkg_mask, cur, bkt)
3009 free((char *)cur->key);
3010
3011 hashmap__clear(evsel->per_pkg_mask);
3012 }
3013}
3014
3015bool evsel__is_hybrid(struct evsel *evsel)
3016{
3017 return evsel->pmu_name && perf_pmu__is_hybrid(evsel->pmu_name);
3018}
3019
3020struct evsel *evsel__leader(struct evsel *evsel)
3021{
3022 return container_of(evsel->core.leader, struct evsel, core);
3023}
3024
3025bool evsel__has_leader(struct evsel *evsel, struct evsel *leader)
3026{
3027 return evsel->core.leader == &leader->core;
3028}
3029
3030bool evsel__is_leader(struct evsel *evsel)
3031{
3032 return evsel__has_leader(evsel, evsel);
3033}
3034
3035void evsel__set_leader(struct evsel *evsel, struct evsel *leader)
3036{
3037 evsel->core.leader = &leader->core;
3038}
3039
3040int evsel__source_count(const struct evsel *evsel)
3041{
3042 struct evsel *pos;
3043 int count = 0;
3044
3045 evlist__for_each_entry(evsel->evlist, pos) {
3046 if (pos->metric_leader == evsel)
3047 count++;
3048 }
3049 return count;
3050}
3051