#include <assert.h>
#include <limits.h>
#include <unistd.h>
#include <sys/file.h>
#include <sys/time.h>
#include <linux/err.h>
#include <linux/zalloc.h>
#include <api/fs/fs.h>
#include <perf/bpf_perf.h>

#include "bpf_counter.h"
#include "counts.h"
#include "debug.h"
#include "evsel.h"
#include "evlist.h"
#include "target.h"
#include "cgroup.h"
#include "cpumap.h"
#include "thread_map.h"

#include "bpf_skel/bpf_prog_profiler.skel.h"
#include "bpf_skel/bperf_u.h"
#include "bpf_skel/bperf_leader.skel.h"
#include "bpf_skel/bperf_follower.skel.h"

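/*
 * Size of the pinned perf_event_attr map that lets concurrent perf-stat
 * sessions share bperf leader programs (see bperf_lock_attr_map() below).
 */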
#define ATTR_MAP_SIZE 16

static inline void *u64_to_ptr(__u64 ptr)
{
	return (void *)(unsigned long)ptr;
}

static struct bpf_counter *bpf_counter_alloc(void)
{
	struct bpf_counter *counter;

	counter = zalloc(sizeof(*counter));
	if (counter)
		INIT_LIST_HEAD(&counter->list);
	return counter;
}

static int bpf_program_profiler__destroy(struct evsel *evsel)
{
	struct bpf_counter *counter, *tmp;

	list_for_each_entry_safe(counter, tmp,
				 &evsel->bpf_counter_list, list) {
		list_del_init(&counter->list);
		bpf_prog_profiler_bpf__destroy(counter->skel);
		free(counter);
	}
	assert(list_empty(&evsel->bpf_counter_list));

	return 0;
}

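/*
 * Look up the name of the target program's entry function through its
 * BTF func_info, so the profiler's fentry/fexit programs can be attached
 * to it.
 */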
static char *bpf_target_prog_name(int tgt_fd)
{
	struct bpf_prog_info_linear *info_linear;
	struct bpf_func_info *func_info;
	const struct btf_type *t;
	struct btf *btf = NULL;
	char *name = NULL;

	info_linear = bpf_program__get_prog_info_linear(
		tgt_fd, 1UL << BPF_PROG_INFO_FUNC_INFO);
	if (IS_ERR_OR_NULL(info_linear)) {
		pr_debug("failed to get info_linear for prog FD %d\n", tgt_fd);
		return NULL;
	}

	if (info_linear->info.btf_id == 0) {
		pr_debug("prog FD %d doesn't have valid btf\n", tgt_fd);
		goto out;
	}

	btf = btf__load_from_kernel_by_id(info_linear->info.btf_id);
	if (libbpf_get_error(btf)) {
		pr_debug("failed to load btf for prog FD %d\n", tgt_fd);
		goto out;
	}

	func_info = u64_to_ptr(info_linear->info.func_info);
	t = btf__type_by_id(btf, func_info[0].type_id);
	if (!t) {
		pr_debug("btf %d doesn't have type %d\n",
			 info_linear->info.btf_id, func_info[0].type_id);
		goto out;
	}
	name = strdup(btf__name_by_offset(btf, t->name_off));
out:
	btf__free(btf);
	free(info_linear);
	return name;
}

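/*
 * Load one profiler skeleton for the program identified by prog_id and
 * add it to the evsel's bpf_counter_list. Every program in the skeleton
 * is retargeted at the target program's entry function before loading.
 */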
static int bpf_program_profiler_load_one(struct evsel *evsel, u32 prog_id)
{
	struct bpf_prog_profiler_bpf *skel;
	struct bpf_counter *counter;
	struct bpf_program *prog;
	char *prog_name;
	int prog_fd;
	int err;

	prog_fd = bpf_prog_get_fd_by_id(prog_id);
	if (prog_fd < 0) {
		pr_err("Failed to open fd for bpf prog %u\n", prog_id);
		return -1;
	}
	counter = bpf_counter_alloc();
	if (!counter) {
		close(prog_fd);
		return -1;
	}

	skel = bpf_prog_profiler_bpf__open();
	if (!skel) {
		pr_err("Failed to open bpf skeleton\n");
		goto err_out;
	}

	skel->rodata->num_cpu = evsel__nr_cpus(evsel);

	bpf_map__resize(skel->maps.events, evsel__nr_cpus(evsel));
	bpf_map__resize(skel->maps.fentry_readings, 1);
	bpf_map__resize(skel->maps.accum_readings, 1);

	prog_name = bpf_target_prog_name(prog_fd);
	if (!prog_name) {
		pr_err("Failed to get program name for bpf prog %u. Does it have BTF?\n", prog_id);
		goto err_out;
	}

	bpf_object__for_each_program(prog, skel->obj) {
		err = bpf_program__set_attach_target(prog, prog_fd, prog_name);
		if (err) {
			pr_err("bpf_program__set_attach_target failed.\n"
			       "Does bpf prog %u have BTF?\n", prog_id);
			free(prog_name);
			goto err_out;
		}
	}
	/* prog_name was strdup()'d by bpf_target_prog_name() */
	free(prog_name);
	set_max_rlimit();
	err = bpf_prog_profiler_bpf__load(skel);
	if (err) {
		pr_err("bpf_prog_profiler_bpf__load failed\n");
		goto err_out;
	}

	assert(skel != NULL);
	counter->skel = skel;
	list_add(&counter->list, &evsel->bpf_counter_list);
	close(prog_fd);
	return 0;
err_out:
	bpf_prog_profiler_bpf__destroy(skel);
	free(counter);
	close(prog_fd);
	return -1;
}

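/*
 * target->bpf_str is a comma-separated list of BPF program ids; load one
 * profiler instance per id.
 */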
static int bpf_program_profiler__load(struct evsel *evsel, struct target *target)
{
	char *bpf_str, *bpf_str_, *tok, *saveptr = NULL, *p;
	u32 prog_id;
	int ret;

	bpf_str_ = bpf_str = strdup(target->bpf_str);
	if (!bpf_str)
		return -1;

	while ((tok = strtok_r(bpf_str, ",", &saveptr)) != NULL) {
		prog_id = strtoul(tok, &p, 10);
		if (prog_id == 0 || prog_id == UINT_MAX ||
		    (*p != '\0' && *p != ',')) {
			pr_err("Failed to parse bpf prog ids %s\n",
			       target->bpf_str);
			free(bpf_str_);
			return -1;
		}

		ret = bpf_program_profiler_load_one(evsel, prog_id);
		if (ret) {
			bpf_program_profiler__destroy(evsel);
			free(bpf_str_);
			return -1;
		}
		bpf_str = NULL;
	}
	free(bpf_str_);
	return 0;
}

static int bpf_program_profiler__enable(struct evsel *evsel)
{
	struct bpf_counter *counter;
	int ret;

	list_for_each_entry(counter, &evsel->bpf_counter_list, list) {
		assert(counter->skel != NULL);
		ret = bpf_prog_profiler_bpf__attach(counter->skel);
		if (ret) {
			bpf_program_profiler__destroy(evsel);
			return ret;
		}
	}
	return 0;
}

static int bpf_program_profiler__disable(struct evsel *evsel)
{
	struct bpf_counter *counter;

	list_for_each_entry(counter, &evsel->bpf_counter_list, list) {
		assert(counter->skel != NULL);
		bpf_prog_profiler_bpf__detach(counter->skel);
	}
	return 0;
}

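/*
 * Sum the accumulated readings of all profiled programs into
 * evsel->counts. accum_readings is a per-cpu array map, so one lookup
 * returns a bpf_perf_event_value for every possible CPU.
 */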
static int bpf_program_profiler__read(struct evsel *evsel)
{
	/* number of CPUs the evsel counts on */
	int num_cpu = evsel__nr_cpus(evsel);
	/* BPF_MAP_TYPE_PERCPU_ARRAY values are sized by possible CPUs */
	int num_cpu_bpf = libbpf_num_possible_cpus();
	struct bpf_perf_event_value values[num_cpu_bpf];
	struct bpf_counter *counter;
	int reading_map_fd;
	__u32 key = 0;
	int err, cpu;

	if (list_empty(&evsel->bpf_counter_list))
		return -EAGAIN;

	for (cpu = 0; cpu < num_cpu; cpu++) {
		perf_counts(evsel->counts, cpu, 0)->val = 0;
		perf_counts(evsel->counts, cpu, 0)->ena = 0;
		perf_counts(evsel->counts, cpu, 0)->run = 0;
	}
	list_for_each_entry(counter, &evsel->bpf_counter_list, list) {
		struct bpf_prog_profiler_bpf *skel = counter->skel;

		assert(skel != NULL);
		reading_map_fd = bpf_map__fd(skel->maps.accum_readings);

		err = bpf_map_lookup_elem(reading_map_fd, &key, values);
		if (err) {
			pr_err("failed to read value\n");
			return err;
		}

		for (cpu = 0; cpu < num_cpu; cpu++) {
			perf_counts(evsel->counts, cpu, 0)->val += values[cpu].counter;
			perf_counts(evsel->counts, cpu, 0)->ena += values[cpu].enabled;
			perf_counts(evsel->counts, cpu, 0)->run += values[cpu].running;
		}
	}
	return 0;
}

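/*
 * Save the perf_event fd for this cpu in each profiler's events map, so
 * the BPF programs can read the counter with bpf_perf_event_read_value().
 */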
static int bpf_program_profiler__install_pe(struct evsel *evsel, int cpu,
					    int fd)
{
	struct bpf_prog_profiler_bpf *skel;
	struct bpf_counter *counter;
	int ret;

	list_for_each_entry(counter, &evsel->bpf_counter_list, list) {
		skel = counter->skel;
		assert(skel != NULL);

		ret = bpf_map_update_elem(bpf_map__fd(skel->maps.events),
					  &cpu, &fd, BPF_ANY);
		if (ret)
			return ret;
	}
	return 0;
}

struct bpf_counter_ops bpf_program_profiler_ops = {
	.load       = bpf_program_profiler__load,
	.enable     = bpf_program_profiler__enable,
	.disable    = bpf_program_profiler__disable,
	.read       = bpf_program_profiler__read,
	.destroy    = bpf_program_profiler__destroy,
	.install_pe = bpf_program_profiler__install_pe,
};

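/*
 * The attr map may have been pinned by a different perf build; only
 * reuse it when the key/value sizes match what this binary expects.
 */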
static bool bperf_attr_map_compatible(int attr_map_fd)
{
	struct bpf_map_info map_info = {0};
	__u32 map_info_len = sizeof(map_info);
	int err;

	err = bpf_obj_get_info_by_fd(attr_map_fd, &map_info, &map_info_len);
	if (err)
		return false;
	return (map_info.key_size == sizeof(struct perf_event_attr)) &&
	       (map_info.value_size == sizeof(struct perf_event_attr_map_entry));
}

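/*
 * Open (creating and pinning it first if necessary) the perf_event_attr
 * map and take an exclusive flock() on it, so that leader lookup and
 * creation are serialized across perf-stat sessions. Returns the locked
 * map fd, or -1 on error.
 */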
static int bperf_lock_attr_map(struct target *target)
{
	char path[PATH_MAX];
	int map_fd, err;

	if (target->attr_map) {
		scnprintf(path, PATH_MAX, "%s", target->attr_map);
	} else {
		scnprintf(path, PATH_MAX, "%s/fs/bpf/%s", sysfs__mountpoint(),
			  BPF_PERF_DEFAULT_ATTR_MAP_PATH);
	}

	if (access(path, F_OK)) {
		map_fd = bpf_create_map(BPF_MAP_TYPE_HASH,
					sizeof(struct perf_event_attr),
					sizeof(struct perf_event_attr_map_entry),
					ATTR_MAP_SIZE, 0);
		if (map_fd < 0)
			return -1;

		err = bpf_obj_pin(map_fd, path);
		if (err) {
			/* someone else may have pinned the map in parallel */
			close(map_fd);
			map_fd = bpf_obj_get(path);
			if (map_fd < 0)
				return -1;
		}
	} else {
		map_fd = bpf_obj_get(path);
		if (map_fd < 0)
			return -1;
	}

	if (!bperf_attr_map_compatible(map_fd)) {
		close(map_fd);
		return -1;
	}
	err = flock(map_fd, LOCK_EX);
	if (err) {
		close(map_fd);
		return -1;
	}
	return map_fd;
}

static int bperf_check_target(struct evsel *evsel,
			      struct target *target,
			      enum bperf_filter_type *filter_type,
			      __u32 *filter_entry_cnt)
{
	if (evsel->core.leader->nr_members > 1) {
		pr_err("bpf managed perf events do not yet support groups.\n");
		return -1;
	}

	/* determine filter type based on target */
	if (target->system_wide) {
		*filter_type = BPERF_FILTER_GLOBAL;
		*filter_entry_cnt = 1;
	} else if (target->cpu_list) {
		*filter_type = BPERF_FILTER_CPU;
		*filter_entry_cnt = perf_cpu_map__nr(evsel__cpus(evsel));
	} else if (target->tid) {
		*filter_type = BPERF_FILTER_PID;
		*filter_entry_cnt = perf_thread_map__nr(evsel->core.threads);
	} else if (target->pid || evsel->evlist->workload.pid != -1) {
		*filter_type = BPERF_FILTER_TGID;
		*filter_entry_cnt = perf_thread_map__nr(evsel->core.threads);
	} else {
		pr_err("bpf managed perf events do not yet support these targets.\n");
		return -1;
	}

	return 0;
}

static struct perf_cpu_map *all_cpu_map;

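/*
 * Called when the leader recorded in the attr map is gone (its link id
 * can no longer be opened): load a fresh leader skeleton, attach its
 * on_switch program, and publish the new link and diff-map ids in the
 * attr map for other sessions to reuse.
 */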
static int bperf_reload_leader_program(struct evsel *evsel, int attr_map_fd,
				       struct perf_event_attr_map_entry *entry)
{
	struct bperf_leader_bpf *skel = bperf_leader_bpf__open();
	int link_fd, diff_map_fd, err;
	struct bpf_link *link = NULL;

	if (!skel) {
		pr_err("Failed to open leader skeleton\n");
		return -1;
	}

	bpf_map__resize(skel->maps.events, libbpf_num_possible_cpus());
	err = bperf_leader_bpf__load(skel);
	if (err) {
		pr_err("Failed to load leader skeleton\n");
		goto out;
	}

	link = bpf_program__attach(skel->progs.on_switch);
	if (IS_ERR(link)) {
		pr_err("Failed to attach leader program\n");
		err = PTR_ERR(link);
		goto out;
	}

	link_fd = bpf_link__fd(link);
	diff_map_fd = bpf_map__fd(skel->maps.diff_readings);
	entry->link_id = bpf_link_get_id(link_fd);
	entry->diff_map_id = bpf_map_get_id(diff_map_fd);
	err = bpf_map_update_elem(attr_map_fd, &evsel->core.attr, entry, BPF_ANY);
	assert(err == 0);

	evsel->bperf_leader_link_fd = bpf_link_get_fd_by_id(entry->link_id);
	assert(evsel->bperf_leader_link_fd >= 0);

	/*
	 * save leader_skel for install_pe, which is called within the
	 * following evsel__open_per_cpu call
	 */
	evsel->leader_skel = skel;
	evsel__open_per_cpu(evsel, all_cpu_map, -1);

out:
	bperf_leader_bpf__destroy(skel);
	bpf_link__destroy(link);
	return err;
}

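/*
 * bperf__load() works in two steps:
 *   1. With the attr map flock()'ed, look up (or create) the leader for
 *      this perf_event_attr and take fds on its link, program, and
 *      diff_readings map.
 *   2. Open the follower skeleton, attach its fexit program to the
 *      leader's on_switch program, and populate the filter map.
 */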
static int bperf__load(struct evsel *evsel, struct target *target)
{
	struct perf_event_attr_map_entry entry = {0xffffffff, 0xffffffff};
	int attr_map_fd, diff_map_fd = -1, err;
	enum bperf_filter_type filter_type;
	__u32 filter_entry_cnt, i;

	if (bperf_check_target(evsel, target, &filter_type, &filter_entry_cnt))
		return -1;

	if (!all_cpu_map) {
		all_cpu_map = perf_cpu_map__new(NULL);
		if (!all_cpu_map)
			return -1;
	}

	evsel->bperf_leader_prog_fd = -1;
	evsel->bperf_leader_link_fd = -1;

	/*
	 * Step 1: hold a fd on the leader program and the bpf_link; if
	 * the program is already gone, reload it. Use flock() on the
	 * perf_event_attr map to ensure exclusive access during this
	 * step.
	 */
	attr_map_fd = bperf_lock_attr_map(target);
	if (attr_map_fd < 0) {
		pr_err("Failed to lock perf_event_attr map\n");
		return -1;
	}

	err = bpf_map_lookup_elem(attr_map_fd, &evsel->core.attr, &entry);
	if (err) {
		err = bpf_map_update_elem(attr_map_fd, &evsel->core.attr, &entry, BPF_ANY);
		if (err)
			goto out;
	}

	evsel->bperf_leader_link_fd = bpf_link_get_fd_by_id(entry.link_id);
	if (evsel->bperf_leader_link_fd < 0 &&
	    bperf_reload_leader_program(evsel, attr_map_fd, &entry)) {
		err = -1;
		goto out;
	}

	/*
	 * The bpf_link holds a reference to the leader program, and the
	 * leader program holds references to its maps. With a valid link
	 * fd, the program fd and the diff map fd below are valid too.
	 */
	evsel->bperf_leader_prog_fd = bpf_prog_get_fd_by_id(
		bpf_link_get_prog_id(evsel->bperf_leader_link_fd));
	assert(evsel->bperf_leader_prog_fd >= 0);

	diff_map_fd = bpf_map_get_fd_by_id(entry.diff_map_id);
	assert(diff_map_fd >= 0);

	/*
	 * bperf uses BPF_PROG_TEST_RUN to trigger accurate readings;
	 * check whether the running kernel supports it.
	 */
	err = bperf_trigger_reading(evsel->bperf_leader_prog_fd, 0);
	if (err) {
		pr_err("The kernel does not support test_run for raw_tp BPF programs.\n"
		       "Therefore, --use-bpf might show inaccurate readings\n");
		goto out;
	}

	/* Step 2: load the follower skeleton */
	evsel->follower_skel = bperf_follower_bpf__open();
	if (!evsel->follower_skel) {
		err = -1;
		pr_err("Failed to open follower skeleton\n");
		goto out;
	}

	/* attach fexit program to the leader program */
	bpf_program__set_attach_target(evsel->follower_skel->progs.fexit_XXX,
				       evsel->bperf_leader_prog_fd, "on_switch");

	/* connect to the leader's diff_readings map */
	bpf_map__reuse_fd(evsel->follower_skel->maps.diff_readings, diff_map_fd);

	/* set up reading map */
	bpf_map__set_max_entries(evsel->follower_skel->maps.accum_readings,
				 filter_entry_cnt);
	/* set up follower filter based on target */
	bpf_map__set_max_entries(evsel->follower_skel->maps.filter,
				 filter_entry_cnt);
	err = bperf_follower_bpf__load(evsel->follower_skel);
	if (err) {
		pr_err("Failed to load follower skeleton\n");
		bperf_follower_bpf__destroy(evsel->follower_skel);
		evsel->follower_skel = NULL;
		goto out;
	}

	/* inject filter entries: map key -> index into accum_readings */
	for (i = 0; i < filter_entry_cnt; i++) {
		int filter_map_fd;
		__u32 key;

		if (filter_type == BPERF_FILTER_PID ||
		    filter_type == BPERF_FILTER_TGID)
			key = evsel->core.threads->map[i].pid;
		else if (filter_type == BPERF_FILTER_CPU)
			key = evsel->core.cpus->map[i];
		else
			break;

		filter_map_fd = bpf_map__fd(evsel->follower_skel->maps.filter);
		bpf_map_update_elem(filter_map_fd, &key, &i, BPF_ANY);
	}

	evsel->follower_skel->bss->type = filter_type;

	err = bperf_follower_bpf__attach(evsel->follower_skel);

out:
	if (err && evsel->bperf_leader_link_fd >= 0)
		close(evsel->bperf_leader_link_fd);
	if (err && evsel->bperf_leader_prog_fd >= 0)
		close(evsel->bperf_leader_prog_fd);
	if (diff_map_fd >= 0)
		close(diff_map_fd);

	flock(attr_map_fd, LOCK_UN);
	close(attr_map_fd);

	return err;
}

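/* Store the per-cpu perf_event fd in the leader's events map. */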
static int bperf__install_pe(struct evsel *evsel, int cpu, int fd)
{
	struct bperf_leader_bpf *skel = evsel->leader_skel;

	return bpf_map_update_elem(bpf_map__fd(skel->maps.events),
				   &cpu, &fd, BPF_ANY);
}

/*
 * Trigger the leader program on each cpu, so the accum_readings map
 * gets updated before we read it.
 */
static int bperf_sync_counters(struct evsel *evsel)
{
	int num_cpu, i, cpu;

	num_cpu = all_cpu_map->nr;
	for (i = 0; i < num_cpu; i++) {
		cpu = all_cpu_map->map[i];
		bperf_trigger_reading(evsel->bperf_leader_prog_fd, cpu);
	}
	return 0;
}

static int bperf__enable(struct evsel *evsel)
{
	evsel->follower_skel->bss->enabled = 1;
	return 0;
}

static int bperf__disable(struct evsel *evsel)
{
	evsel->follower_skel->bss->enabled = 0;
	return 0;
}

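/*
 * Pull accumulated counts out of the follower's accum_readings map. How
 * they land in evsel->counts depends on the filter type: per-cpu slots
 * for GLOBAL/CPU filters, one slot per task for PID/TGID filters.
 */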
static int bperf__read(struct evsel *evsel)
{
	struct bperf_follower_bpf *skel = evsel->follower_skel;
	__u32 num_cpu_bpf = cpu__max_cpu();
	struct bpf_perf_event_value values[num_cpu_bpf];
	int reading_map_fd, err = 0;
	__u32 i, j, num_cpu;

	bperf_sync_counters(evsel);
	reading_map_fd = bpf_map__fd(skel->maps.accum_readings);

	for (i = 0; i < bpf_map__max_entries(skel->maps.accum_readings); i++) {
		__u32 cpu;

		err = bpf_map_lookup_elem(reading_map_fd, &i, values);
		if (err)
			goto out;
		switch (evsel->follower_skel->bss->type) {
		case BPERF_FILTER_GLOBAL:
			assert(i == 0);

			num_cpu = all_cpu_map->nr;
			for (j = 0; j < num_cpu; j++) {
				cpu = all_cpu_map->map[j];
				perf_counts(evsel->counts, cpu, 0)->val = values[cpu].counter;
				perf_counts(evsel->counts, cpu, 0)->ena = values[cpu].enabled;
				perf_counts(evsel->counts, cpu, 0)->run = values[cpu].running;
			}
			break;
		case BPERF_FILTER_CPU:
			cpu = evsel->core.cpus->map[i];
			perf_counts(evsel->counts, i, 0)->val = values[cpu].counter;
			perf_counts(evsel->counts, i, 0)->ena = values[cpu].enabled;
			perf_counts(evsel->counts, i, 0)->run = values[cpu].running;
			break;
		case BPERF_FILTER_PID:
		case BPERF_FILTER_TGID:
			perf_counts(evsel->counts, 0, i)->val = 0;
			perf_counts(evsel->counts, 0, i)->ena = 0;
			perf_counts(evsel->counts, 0, i)->run = 0;

			for (cpu = 0; cpu < num_cpu_bpf; cpu++) {
				perf_counts(evsel->counts, 0, i)->val += values[cpu].counter;
				perf_counts(evsel->counts, 0, i)->ena += values[cpu].enabled;
				perf_counts(evsel->counts, 0, i)->run += values[cpu].running;
			}
			break;
		default:
			break;
		}
	}
out:
	return err;
}

static int bperf__destroy(struct evsel *evsel)
{
	bperf_follower_bpf__destroy(evsel->follower_skel);
	close(evsel->bperf_leader_prog_fd);
	close(evsel->bperf_leader_link_fd);
	return 0;
}

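/*
 * bperf: sharing hardware PMCs with BPF
 *
 * One leader BPF program per perf_event_attr, shared through the pinned
 * attr map, snapshots the PMC into diff_readings on every context
 * switch. Per-session follower programs attach to the leader with fexit
 * and accumulate those deltas into accum_readings, filtered by cpu, pid,
 * tgid, or globally. Many perf-stat sessions counting the same event can
 * therefore share a single hardware counter.
 */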
struct bpf_counter_ops bperf_ops = {
	.load       = bperf__load,
	.enable     = bperf__enable,
	.disable    = bperf__disable,
	.read       = bperf__read,
	.install_pe = bperf__install_pe,
	.destroy    = bperf__destroy,
};

extern struct bpf_counter_ops bperf_cgrp_ops;

static inline bool bpf_counter_skip(struct evsel *evsel)
{
	return list_empty(&evsel->bpf_counter_list) &&
	       evsel->follower_skel == NULL;
}

int bpf_counter__install_pe(struct evsel *evsel, int cpu, int fd)
{
	if (bpf_counter_skip(evsel))
		return 0;
	return evsel->bpf_counter_ops->install_pe(evsel, cpu, fd);
}

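/*
 * Select the bpf_counter implementation for this evsel: a bpf-prog
 * target string selects the program profiler; cgroup-expanded events
 * with a BPF target select bperf_cgrp_ops (defined in
 * bpf_counter_cgroup.c); otherwise bperf is used when the target, the
 * evsel, or a matching event-name pattern asks for it.
 */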
int bpf_counter__load(struct evsel *evsel, struct target *target)
{
	if (target->bpf_str)
		evsel->bpf_counter_ops = &bpf_program_profiler_ops;
	else if (cgrp_event_expanded && target->use_bpf)
		evsel->bpf_counter_ops = &bperf_cgrp_ops;
	else if (target->use_bpf || evsel->bpf_counter ||
		 evsel__match_bpf_counter_events(evsel->name))
		evsel->bpf_counter_ops = &bperf_ops;

	if (evsel->bpf_counter_ops)
		return evsel->bpf_counter_ops->load(evsel, target);
	return 0;
}

int bpf_counter__enable(struct evsel *evsel)
{
	if (bpf_counter_skip(evsel))
		return 0;
	return evsel->bpf_counter_ops->enable(evsel);
}

int bpf_counter__disable(struct evsel *evsel)
{
	if (bpf_counter_skip(evsel))
		return 0;
	return evsel->bpf_counter_ops->disable(evsel);
}

int bpf_counter__read(struct evsel *evsel)
{
	if (bpf_counter_skip(evsel))
		return -EAGAIN;
	return evsel->bpf_counter_ops->read(evsel);
}

void bpf_counter__destroy(struct evsel *evsel)
{
	if (bpf_counter_skip(evsel))
		return;
	evsel->bpf_counter_ops->destroy(evsel);
	evsel->bpf_counter_ops = NULL;
}