// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2019 Facebook */

#include <assert.h>
#include <limits.h>
#include <unistd.h>
#include <sys/file.h>
#include <sys/time.h>
#include <linux/err.h>
#include <linux/zalloc.h>
#include <api/fs/fs.h>
#include <perf/bpf_perf.h>

#include "bpf_counter.h"
#include "bpf-utils.h"
#include "counts.h"
#include "debug.h"
#include "evsel.h"
#include "evlist.h"
#include "target.h"
#include "cgroup.h"
#include "cpumap.h"
#include "thread_map.h"

#include "bpf_skel/bpf_prog_profiler.skel.h"
#include "bpf_skel/bperf_u.h"
#include "bpf_skel/bperf_leader.skel.h"
#include "bpf_skel/bperf_follower.skel.h"

/* max number of distinct perf_event_attr, i.e. leader programs, in the pinned attr_map */
#define ATTR_MAP_SIZE 16

/* bpf_prog_info carries kernel pointers as __u64; convert for local dereference */
static inline void *u64_to_ptr(__u64 ptr)
{
	return (void *)(unsigned long)ptr;
}

static struct bpf_counter *bpf_counter_alloc(void)
{
	struct bpf_counter *counter;

	counter = zalloc(sizeof(*counter));
	if (counter)
		INIT_LIST_HEAD(&counter->list);
	return counter;
}

static int bpf_program_profiler__destroy(struct evsel *evsel)
{
	struct bpf_counter *counter, *tmp;

	list_for_each_entry_safe(counter, tmp,
				 &evsel->bpf_counter_list, list) {
		list_del_init(&counter->list);
		bpf_prog_profiler_bpf__destroy(counter->skel);
		free(counter);
	}
	assert(list_empty(&evsel->bpf_counter_list));

	return 0;
}

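/*
 * Fetch the name of the target program's first function (func_info[0])
 * from its BTF; bpf_program__set_attach_target() needs a function name
 * to hook the profiler's fentry/fexit programs onto it.
 */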
static char *bpf_target_prog_name(int tgt_fd)
{
	struct bpf_func_info *func_info;
	struct perf_bpil *info_linear;
	const struct btf_type *t;
	struct btf *btf = NULL;
	char *name = NULL;

	info_linear = get_bpf_prog_info_linear(tgt_fd, 1UL << PERF_BPIL_FUNC_INFO);
	if (IS_ERR_OR_NULL(info_linear)) {
		pr_debug("failed to get info_linear for prog FD %d\n", tgt_fd);
		return NULL;
	}

	if (info_linear->info.btf_id == 0) {
		pr_debug("prog FD %d doesn't have valid btf\n", tgt_fd);
		goto out;
	}

	btf = btf__load_from_kernel_by_id(info_linear->info.btf_id);
	if (libbpf_get_error(btf)) {
		pr_debug("failed to load btf for prog FD %d\n", tgt_fd);
		goto out;
	}

	func_info = u64_to_ptr(info_linear->info.func_info);
	t = btf__type_by_id(btf, func_info[0].type_id);
	if (!t) {
		pr_debug("btf %d doesn't have type %d\n",
			 info_linear->info.btf_id, func_info[0].type_id);
		goto out;
	}
	name = strdup(btf__name_by_offset(btf, t->name_off));
out:
	btf__free(btf);
	free(info_linear);
	return name;
}

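/*
 * Set up profiling for one target BPF program: open the profiler skeleton,
 * point every fentry/fexit program in it at the target's entry function,
 * and load it. Each target program gets its own struct bpf_counter on
 * evsel->bpf_counter_list.
 */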
static int bpf_program_profiler_load_one(struct evsel *evsel, u32 prog_id)
{
	struct bpf_prog_profiler_bpf *skel;
	struct bpf_counter *counter;
	struct bpf_program *prog;
	char *prog_name = NULL;
	int prog_fd;
	int err;

	prog_fd = bpf_prog_get_fd_by_id(prog_id);
	if (prog_fd < 0) {
		pr_err("Failed to open fd for bpf prog %u\n", prog_id);
		return -1;
	}
	counter = bpf_counter_alloc();
	if (!counter) {
		close(prog_fd);
		return -1;
	}

	skel = bpf_prog_profiler_bpf__open();
	if (!skel) {
		pr_err("Failed to open bpf skeleton\n");
		goto err_out;
	}

	skel->rodata->num_cpu = evsel__nr_cpus(evsel);

	bpf_map__set_max_entries(skel->maps.events, evsel__nr_cpus(evsel));
	bpf_map__set_max_entries(skel->maps.fentry_readings, 1);
	bpf_map__set_max_entries(skel->maps.accum_readings, 1);

	prog_name = bpf_target_prog_name(prog_fd);
	if (!prog_name) {
		pr_err("Failed to get program name for bpf prog %u. Does it have BTF?\n", prog_id);
		goto err_out;
	}

	bpf_object__for_each_program(prog, skel->obj) {
		err = bpf_program__set_attach_target(prog, prog_fd, prog_name);
		if (err) {
			pr_err("bpf_program__set_attach_target failed.\n"
			       "Does bpf prog %u have BTF?\n", prog_id);
			goto err_out;
		}
	}
	/* the name is only needed for setting attach targets */
	free(prog_name);
	prog_name = NULL;
	set_max_rlimit();
	err = bpf_prog_profiler_bpf__load(skel);
	if (err) {
		pr_err("bpf_prog_profiler_bpf__load failed\n");
		goto err_out;
	}

	assert(skel != NULL);
	counter->skel = skel;
	list_add(&counter->list, &evsel->bpf_counter_list);
	close(prog_fd);
	return 0;
err_out:
	free(prog_name);
	bpf_prog_profiler_bpf__destroy(skel);
	free(counter);
	close(prog_fd);
	return -1;
}

static int bpf_program_profiler__load(struct evsel *evsel, struct target *target)
{
	char *bpf_str, *bpf_str_, *tok, *saveptr = NULL, *p;
	u32 prog_id;
	int ret;

	bpf_str_ = bpf_str = strdup(target->bpf_str);
	if (!bpf_str)
		return -1;

	while ((tok = strtok_r(bpf_str, ",", &saveptr)) != NULL) {
		prog_id = strtoul(tok, &p, 10);
		if (prog_id == 0 || prog_id == UINT_MAX ||
		    (*p != '\0' && *p != ',')) {
			pr_err("Failed to parse bpf prog ids %s\n",
			       target->bpf_str);
			free(bpf_str_);
			return -1;
		}

		ret = bpf_program_profiler_load_one(evsel, prog_id);
		if (ret) {
			bpf_program_profiler__destroy(evsel);
			free(bpf_str_);
			return -1;
		}
		bpf_str = NULL;
	}
	free(bpf_str_);
	return 0;
}

static int bpf_program_profiler__enable(struct evsel *evsel)
{
	struct bpf_counter *counter;
	int ret;

	list_for_each_entry(counter, &evsel->bpf_counter_list, list) {
		assert(counter->skel != NULL);
		ret = bpf_prog_profiler_bpf__attach(counter->skel);
		if (ret) {
			bpf_program_profiler__destroy(evsel);
			return ret;
		}
	}
	return 0;
}

static int bpf_program_profiler__disable(struct evsel *evsel)
{
	struct bpf_counter *counter;

	list_for_each_entry(counter, &evsel->bpf_counter_list, list) {
		assert(counter->skel != NULL);
		bpf_prog_profiler_bpf__detach(counter->skel);
	}
	return 0;
}

static int bpf_program_profiler__read(struct evsel *evsel)
{
	int num_cpu = evsel__nr_cpus(evsel);
	/*
	 * BPF_MAP_TYPE_PERCPU_ARRAY values are sized by the number of
	 * possible CPUs, which may exceed the number of CPUs the evsel
	 * counts on.
	 */
	int num_cpu_bpf = libbpf_num_possible_cpus();
	struct bpf_perf_event_value values[num_cpu_bpf];
	struct bpf_counter *counter;
	int reading_map_fd;
	__u32 key = 0;
	int err, cpu;

	if (list_empty(&evsel->bpf_counter_list))
		return -EAGAIN;

	for (cpu = 0; cpu < num_cpu; cpu++) {
		perf_counts(evsel->counts, cpu, 0)->val = 0;
		perf_counts(evsel->counts, cpu, 0)->ena = 0;
		perf_counts(evsel->counts, cpu, 0)->run = 0;
	}
	list_for_each_entry(counter, &evsel->bpf_counter_list, list) {
		struct bpf_prog_profiler_bpf *skel = counter->skel;

		assert(skel != NULL);
		reading_map_fd = bpf_map__fd(skel->maps.accum_readings);

		err = bpf_map_lookup_elem(reading_map_fd, &key, values);
		if (err) {
			pr_err("failed to read value\n");
			return err;
		}

		for (cpu = 0; cpu < num_cpu; cpu++) {
			perf_counts(evsel->counts, cpu, 0)->val += values[cpu].counter;
			perf_counts(evsel->counts, cpu, 0)->ena += values[cpu].enabled;
			perf_counts(evsel->counts, cpu, 0)->run += values[cpu].running;
		}
	}
	return 0;
}

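/*
 * install_pe callback: called back from the evsel open path with each
 * freshly opened perf_event fd, which is dropped into the skeleton's
 * events map (a BPF_MAP_TYPE_PERF_EVENT_ARRAY) so the BPF program can
 * bpf_perf_event_read() it.
 */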
static int bpf_program_profiler__install_pe(struct evsel *evsel, int cpu_map_idx,
					    int fd)
{
	struct bpf_prog_profiler_bpf *skel;
	struct bpf_counter *counter;
	int ret;

	list_for_each_entry(counter, &evsel->bpf_counter_list, list) {
		skel = counter->skel;
		assert(skel != NULL);

		ret = bpf_map_update_elem(bpf_map__fd(skel->maps.events),
					  &cpu_map_idx, &fd, BPF_ANY);
		if (ret)
			return ret;
	}
	return 0;
}

struct bpf_counter_ops bpf_program_profiler_ops = {
	.load       = bpf_program_profiler__load,
	.enable     = bpf_program_profiler__enable,
	.disable    = bpf_program_profiler__disable,
	.read       = bpf_program_profiler__read,
	.destroy    = bpf_program_profiler__destroy,
	.install_pe = bpf_program_profiler__install_pe,
};

static bool bperf_attr_map_compatible(int attr_map_fd)
{
	struct bpf_map_info map_info = {0};
	__u32 map_info_len = sizeof(map_info);
	int err;

	err = bpf_obj_get_info_by_fd(attr_map_fd, &map_info, &map_info_len);
	if (err)
		return false;
	return (map_info.key_size == sizeof(struct perf_event_attr)) &&
	       (map_info.value_size == sizeof(struct perf_event_attr_map_entry));
}

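/*
 * Fallback for builds against a libbpf too old to provide bpf_map_create():
 * the weak symbol loses to the real libbpf one when it exists; otherwise we
 * route through the deprecated bpf_create_map().
 */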
int __weak
bpf_map_create(enum bpf_map_type map_type,
	       const char *map_name __maybe_unused,
	       __u32 key_size,
	       __u32 value_size,
	       __u32 max_entries,
	       const struct bpf_map_create_opts *opts __maybe_unused)
{
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
	return bpf_create_map(map_type, key_size, value_size, max_entries, 0);
#pragma GCC diagnostic pop
}

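/*
 * The attr_map tracks all shared leader programs. It is a BPF hashmap
 * pinned in bpffs (default <sysfs>/fs/bpf/BPF_PERF_DEFAULT_ATTR_MAP_PATH,
 * overridable with --bpf-attr-map), keyed by struct perf_event_attr with
 * struct perf_event_attr_map_entry { link_id, diff_map_id } values.
 * flock() on the map fd serializes sessions racing to create a leader.
 */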
static int bperf_lock_attr_map(struct target *target)
{
	char path[PATH_MAX];
	int map_fd, err;

	if (target->attr_map) {
		scnprintf(path, PATH_MAX, "%s", target->attr_map);
	} else {
		scnprintf(path, PATH_MAX, "%s/fs/bpf/%s", sysfs__mountpoint(),
			  BPF_PERF_DEFAULT_ATTR_MAP_PATH);
	}

	if (access(path, F_OK)) {
		map_fd = bpf_map_create(BPF_MAP_TYPE_HASH, NULL,
					sizeof(struct perf_event_attr),
					sizeof(struct perf_event_attr_map_entry),
					ATTR_MAP_SIZE, NULL);
		if (map_fd < 0)
			return -1;

		err = bpf_obj_pin(map_fd, path);
		if (err) {
			/* someone pinned the map in parallel? */
			close(map_fd);
			map_fd = bpf_obj_get(path);
			if (map_fd < 0)
				return -1;
		}
	} else {
		map_fd = bpf_obj_get(path);
		if (map_fd < 0)
			return -1;
	}

	if (!bperf_attr_map_compatible(map_fd)) {
		close(map_fd);
		return -1;
	}
	err = flock(map_fd, LOCK_EX);
	if (err) {
		close(map_fd);
		return -1;
	}
	return map_fd;
}

static int bperf_check_target(struct evsel *evsel,
			      struct target *target,
			      enum bperf_filter_type *filter_type,
			      __u32 *filter_entry_cnt)
{
	if (evsel->core.leader->nr_members > 1) {
		pr_err("bpf managed perf events do not yet support groups.\n");
		return -1;
	}

	/* determine filter type based on target */
	if (target->system_wide) {
		*filter_type = BPERF_FILTER_GLOBAL;
		*filter_entry_cnt = 1;
	} else if (target->cpu_list) {
		*filter_type = BPERF_FILTER_CPU;
		*filter_entry_cnt = perf_cpu_map__nr(evsel__cpus(evsel));
	} else if (target->tid) {
		*filter_type = BPERF_FILTER_PID;
		*filter_entry_cnt = perf_thread_map__nr(evsel->core.threads);
	} else if (target->pid || evsel->evlist->workload.pid != -1) {
		*filter_type = BPERF_FILTER_TGID;
		*filter_entry_cnt = perf_thread_map__nr(evsel->core.threads);
	} else {
		pr_err("bpf managed perf events do not yet support these targets.\n");
		return -1;
	}

	return 0;
}

static struct perf_cpu_map *all_cpu_map;

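/*
 * Load a fresh leader: attach bperf_leader_bpf's on_switch program to the
 * context switch tracepoint, then publish its bpf_link id and diff_readings
 * map id in the attr_map so other sessions can reuse it. The leader stays
 * alive as long as some session holds an fd on the link; once the last one
 * exits, the next session ends up here to reload it.
 */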
static int bperf_reload_leader_program(struct evsel *evsel, int attr_map_fd,
				       struct perf_event_attr_map_entry *entry)
{
	struct bperf_leader_bpf *skel = bperf_leader_bpf__open();
	int link_fd, diff_map_fd, err;
	struct bpf_link *link = NULL;

	if (!skel) {
		pr_err("Failed to open leader skeleton\n");
		return -1;
	}

	bpf_map__set_max_entries(skel->maps.events, libbpf_num_possible_cpus());
	err = bperf_leader_bpf__load(skel);
	if (err) {
		pr_err("Failed to load leader skeleton\n");
		goto out;
	}

	link = bpf_program__attach(skel->progs.on_switch);
	if (IS_ERR(link)) {
		pr_err("Failed to attach leader program\n");
		err = PTR_ERR(link);
		goto out;
	}

	link_fd = bpf_link__fd(link);
	diff_map_fd = bpf_map__fd(skel->maps.diff_readings);
	entry->link_id = bpf_link_get_id(link_fd);
	entry->diff_map_id = bpf_map_get_id(diff_map_fd);
	err = bpf_map_update_elem(attr_map_fd, &evsel->core.attr, entry, BPF_ANY);
	assert(err == 0);

	evsel->bperf_leader_link_fd = bpf_link_get_fd_by_id(entry->link_id);
	assert(evsel->bperf_leader_link_fd >= 0);

	/*
	 * save leader_skel for install_pe, which is called within
	 * following evsel__open_per_cpu call
	 */
	evsel->leader_skel = skel;
	evsel__open_per_cpu(evsel, all_cpu_map, -1);

out:
	/* on success, the link survives via bperf_leader_link_fd; the local copies can go */
	bperf_leader_bpf__destroy(skel);
	bpf_link__destroy(link);
	return err;
}

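/*
 * bperf__load() in two steps (see also the overview comment further down):
 * Step 1: under flock() on the pinned attr_map, look up this
 *         perf_event_attr; reuse the existing leader via its link id, or
 *         (re)load one with bperf_reload_leader_program().
 * Step 2: open a private follower skeleton, fexit-attach it to the
 *         leader's on_switch program, reuse the leader's diff_readings
 *         map, and size/populate the filter map for this target.
 */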
static int bperf__load(struct evsel *evsel, struct target *target)
{
	struct perf_event_attr_map_entry entry = {0xffffffff, 0xffffffff};
	int attr_map_fd, diff_map_fd = -1, err;
	enum bperf_filter_type filter_type;
	__u32 filter_entry_cnt, i;

	if (bperf_check_target(evsel, target, &filter_type, &filter_entry_cnt))
		return -1;

	if (!all_cpu_map) {
		all_cpu_map = perf_cpu_map__new(NULL);
		if (!all_cpu_map)
			return -1;
	}

	evsel->bperf_leader_prog_fd = -1;
	evsel->bperf_leader_link_fd = -1;

	/*
	 * Step 1: hold a fd on the leader program and the bpf_link, if
	 * the program is not already gone, reload the program.
	 * Use flock() to ensure exclusive access to the perf_event_attr
	 * map.
	 */
	attr_map_fd = bperf_lock_attr_map(target);
	if (attr_map_fd < 0) {
		pr_err("Failed to lock perf_event_attr map\n");
		return -1;
	}

	err = bpf_map_lookup_elem(attr_map_fd, &evsel->core.attr, &entry);
	if (err) {
		err = bpf_map_update_elem(attr_map_fd, &evsel->core.attr, &entry, BPF_ANY);
		if (err)
			goto out;
	}

	evsel->bperf_leader_link_fd = bpf_link_get_fd_by_id(entry.link_id);
	if (evsel->bperf_leader_link_fd < 0 &&
	    bperf_reload_leader_program(evsel, attr_map_fd, &entry)) {
		err = -1;
		goto out;
	}

	/*
	 * The bpf_link holds a reference to the leader program, and the
	 * leader program holds a reference to the maps. Therefore, if
	 * link_id is valid, diff_map_id should also be valid.
	 */
	evsel->bperf_leader_prog_fd = bpf_prog_get_fd_by_id(
		bpf_link_get_prog_id(evsel->bperf_leader_link_fd));
	assert(evsel->bperf_leader_prog_fd >= 0);

	diff_map_fd = bpf_map_get_fd_by_id(entry.diff_map_id);
	assert(diff_map_fd >= 0);

	/*
	 * bperf uses BPF_PROG_TEST_RUN to get accurate readings. Check
	 * whether the kernel supports it.
	 */
	err = bperf_trigger_reading(evsel->bperf_leader_prog_fd, 0);
	if (err) {
		pr_err("The kernel does not support test_run for raw_tp BPF programs.\n"
		       "Therefore, --use-bpf might show inaccurate readings\n");
		goto out;
	}

	/* Step 2: load the follower skeleton */
	evsel->follower_skel = bperf_follower_bpf__open();
	if (!evsel->follower_skel) {
		err = -1;
		pr_err("Failed to open follower skeleton\n");
		goto out;
	}

	/* attach fexit program to the leader program */
	bpf_program__set_attach_target(evsel->follower_skel->progs.fexit_XXX,
				       evsel->bperf_leader_prog_fd, "on_switch");

	/* connect to leader diff_readings map */
	bpf_map__reuse_fd(evsel->follower_skel->maps.diff_readings, diff_map_fd);

	/* set up reading map */
	bpf_map__set_max_entries(evsel->follower_skel->maps.accum_readings,
				 filter_entry_cnt);
	/* set up follower filter based on target */
	bpf_map__set_max_entries(evsel->follower_skel->maps.filter,
				 filter_entry_cnt);
	err = bperf_follower_bpf__load(evsel->follower_skel);
	if (err) {
		pr_err("Failed to load follower skeleton\n");
		bperf_follower_bpf__destroy(evsel->follower_skel);
		evsel->follower_skel = NULL;
		goto out;
	}

	for (i = 0; i < filter_entry_cnt; i++) {
		int filter_map_fd;
		__u32 key;

		if (filter_type == BPERF_FILTER_PID ||
		    filter_type == BPERF_FILTER_TGID)
			key = evsel->core.threads->map[i].pid;
		else if (filter_type == BPERF_FILTER_CPU)
			key = evsel->core.cpus->map[i].cpu;
		else
			break;

		filter_map_fd = bpf_map__fd(evsel->follower_skel->maps.filter);
		bpf_map_update_elem(filter_map_fd, &key, &i, BPF_ANY);
	}

	evsel->follower_skel->bss->type = filter_type;

	err = bperf_follower_bpf__attach(evsel->follower_skel);

out:
	if (err && evsel->bperf_leader_link_fd >= 0)
		close(evsel->bperf_leader_link_fd);
	if (err && evsel->bperf_leader_prog_fd >= 0)
		close(evsel->bperf_leader_prog_fd);
	if (diff_map_fd >= 0)
		close(diff_map_fd);

	flock(attr_map_fd, LOCK_UN);
	close(attr_map_fd);

	return err;
}

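/*
 * The shared perf_events opened in bperf_reload_leader_program() land in
 * the *leader* skeleton's events map (leader_skel was saved there for this
 * callback); followers never own perf_events themselves.
 */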
static int bperf__install_pe(struct evsel *evsel, int cpu_map_idx, int fd)
{
	struct bperf_leader_bpf *skel = evsel->leader_skel;

	return bpf_map_update_elem(bpf_map__fd(skel->maps.events),
				   &cpu_map_idx, &fd, BPF_ANY);
}

/*
 * trigger the leader prog on each cpu, so the accum_readings map could get
 * the latest readings
 */
static int bperf_sync_counters(struct evsel *evsel)
{
	int num_cpu, i, cpu;

	num_cpu = all_cpu_map->nr;
	for (i = 0; i < num_cpu; i++) {
		cpu = all_cpu_map->map[i].cpu;
		bperf_trigger_reading(evsel->bperf_leader_prog_fd, cpu);
	}
	return 0;
}

static int bperf__enable(struct evsel *evsel)
{
	evsel->follower_skel->bss->enabled = 1;
	return 0;
}

static int bperf__disable(struct evsel *evsel)
{
	evsel->follower_skel->bss->enabled = 0;
	return 0;
}

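/*
 * Read back the counts: sync the leader on all cpus so accum_readings is
 * current, then fold the per-cpu values into evsel->counts. GLOBAL and CPU
 * filters map readings per cpu; PID/TGID filters sum across cpus per task.
 */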
static int bperf__read(struct evsel *evsel)
{
	struct bperf_follower_bpf *skel = evsel->follower_skel;
	__u32 num_cpu_bpf = cpu__max_cpu().cpu;
	struct bpf_perf_event_value values[num_cpu_bpf];
	int reading_map_fd, err = 0;
	__u32 i;
	int j;

	bperf_sync_counters(evsel);
	reading_map_fd = bpf_map__fd(skel->maps.accum_readings);

	for (i = 0; i < bpf_map__max_entries(skel->maps.accum_readings); i++) {
		struct perf_cpu entry;
		__u32 cpu;

		err = bpf_map_lookup_elem(reading_map_fd, &i, values);
		if (err)
			goto out;
		switch (evsel->follower_skel->bss->type) {
		case BPERF_FILTER_GLOBAL:
			assert(i == 0);

			perf_cpu_map__for_each_cpu(entry, j, all_cpu_map) {
				cpu = entry.cpu;
				perf_counts(evsel->counts, cpu, 0)->val = values[cpu].counter;
				perf_counts(evsel->counts, cpu, 0)->ena = values[cpu].enabled;
				perf_counts(evsel->counts, cpu, 0)->run = values[cpu].running;
			}
			break;
		case BPERF_FILTER_CPU:
			cpu = evsel->core.cpus->map[i].cpu;
			perf_counts(evsel->counts, i, 0)->val = values[cpu].counter;
			perf_counts(evsel->counts, i, 0)->ena = values[cpu].enabled;
			perf_counts(evsel->counts, i, 0)->run = values[cpu].running;
			break;
		case BPERF_FILTER_PID:
		case BPERF_FILTER_TGID:
			perf_counts(evsel->counts, 0, i)->val = 0;
			perf_counts(evsel->counts, 0, i)->ena = 0;
			perf_counts(evsel->counts, 0, i)->run = 0;

			for (cpu = 0; cpu < num_cpu_bpf; cpu++) {
				perf_counts(evsel->counts, 0, i)->val += values[cpu].counter;
				perf_counts(evsel->counts, 0, i)->ena += values[cpu].enabled;
				perf_counts(evsel->counts, 0, i)->run += values[cpu].running;
			}
			break;
		default:
			break;
		}
	}
out:
	return err;
}

static int bperf__destroy(struct evsel *evsel)
{
	bperf_follower_bpf__destroy(evsel->follower_skel);
	close(evsel->bperf_leader_prog_fd);
	close(evsel->bperf_leader_link_fd);
	return 0;
}

/*
 * bperf: share hardware PMCs with BPF
 *
 * perf uses performance monitoring counters (PMCs) to monitor system
 * performance. PMCs are limited hardware resources; modern data center
 * systems use them in many different ways (system level monitoring,
 * possibly nested container level monitoring, per process monitoring,
 * profiling in sample mode, etc.), so there can easily be more active
 * perf_events than available hardware PMCs, forcing expensive time
 * multiplexing of events.
 *
 * At the same time, many monitoring tools count the same common metrics
 * (cycles, instructions), and it is wasteful for multiple tools to create
 * multiple perf_events of "cycles" and occupy multiple PMCs.
 *
 * bperf reduces this waste by letting multiple perf_events of "cycles" or
 * "instructions" (at different scopes) share PMCs. Instead of having each
 * perf-stat session read its own perf_events, bperf uses BPF programs to
 * read the perf_events and aggregate the readings into BPF maps, which the
 * perf-stat session(s) then read.
 *
 * Concretely: whichever session comes first creates the shared perf_events
 * and the leader program (bperf_leader.bpf.c), which snapshots the counters
 * on every context switch into its prev/diff readings maps. Every session,
 * including the first, loads a private follower (bperf_follower.bpf.c)
 * that runs on fexit of the leader's on_switch program and accumulates the
 * diffs it is interested in (per cpu, pid, tgid, or globally, see the
 * filter map) into its own accum_readings map, which bperf__read()
 * consumes from user space.
 */
struct bpf_counter_ops bperf_ops = {
	.load       = bperf__load,
	.enable     = bperf__enable,
	.disable    = bperf__disable,
	.read       = bperf__read,
	.install_pe = bperf__install_pe,
	.destroy    = bperf__destroy,
};

extern struct bpf_counter_ops bperf_cgrp_ops;

static inline bool bpf_counter_skip(struct evsel *evsel)
{
	return list_empty(&evsel->bpf_counter_list) &&
	       evsel->follower_skel == NULL;
}

int bpf_counter__install_pe(struct evsel *evsel, int cpu_map_idx, int fd)
{
	if (bpf_counter_skip(evsel))
		return 0;
	return evsel->bpf_counter_ops->install_pe(evsel, cpu_map_idx, fd);
}

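/*
 * Entry points for the rest of perf. Backend selection below is, roughly:
 *   perf stat --bpf-prog 1234,5678 ...             -> bpf_program_profiler_ops
 *   perf stat --bpf-counters --for-each-cgroup ... -> bperf_cgrp_ops
 *   perf stat --bpf-counters ... / -e cycles:b     -> bperf_ops
 * With none of these, bpf_counter_ops stays NULL and counting falls back
 * to regular perf_events.
 */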
int bpf_counter__load(struct evsel *evsel, struct target *target)
{
	if (target->bpf_str)
		evsel->bpf_counter_ops = &bpf_program_profiler_ops;
	else if (cgrp_event_expanded && target->use_bpf)
		evsel->bpf_counter_ops = &bperf_cgrp_ops;
	else if (target->use_bpf || evsel->bpf_counter ||
		 evsel__match_bpf_counter_events(evsel->name))
		evsel->bpf_counter_ops = &bperf_ops;

	if (evsel->bpf_counter_ops)
		return evsel->bpf_counter_ops->load(evsel, target);
	return 0;
}

int bpf_counter__enable(struct evsel *evsel)
{
	if (bpf_counter_skip(evsel))
		return 0;
	return evsel->bpf_counter_ops->enable(evsel);
}

int bpf_counter__disable(struct evsel *evsel)
{
	if (bpf_counter_skip(evsel))
		return 0;
	return evsel->bpf_counter_ops->disable(evsel);
}

int bpf_counter__read(struct evsel *evsel)
{
	if (bpf_counter_skip(evsel))
		return -EAGAIN;
	return evsel->bpf_counter_ops->read(evsel);
}

void bpf_counter__destroy(struct evsel *evsel)
{
	if (bpf_counter_skip(evsel))
		return;
	evsel->bpf_counter_ops->destroy(evsel);
	evsel->bpf_counter_ops = NULL;
}