// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

/*
 * Common eBPF ELF object loading operations.
 */
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <libgen.h>
#include <inttypes.h>
#include <limits.h>
#include <string.h>
#include <unistd.h>
#include <endian.h>
#include <fcntl.h>
#include <errno.h>
#include <ctype.h>
#include <asm/unistd.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/list.h>
#include <linux/limits.h>
#include <linux/perf_event.h>
#include <linux/ring_buffer.h>
#include <linux/version.h>
#include <sys/epoll.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/vfs.h>
#include <sys/utsname.h>
#include <sys/resource.h>
#include <libelf.h>
#include <gelf.h>
#include <zlib.h>

#include "libbpf.h"
#include "bpf.h"
#include "btf.h"
#include "str_error.h"
#include "libbpf_internal.h"
#include "hashmap.h"
#include "bpf_gen_internal.h"

#ifndef BPF_FS_MAGIC
#define BPF_FS_MAGIC 0xcafe4a11
#endif

#define BPF_INSN_SZ (sizeof(struct bpf_insn))

/* vsprintf() in __base_pr() uses non-literal format string. It may break
 * compilation if the user recompiles with -Wformat-nonliteral, so suppress
 * that warning for this file.
 */
#pragma GCC diagnostic ignored "-Wformat-nonliteral"

#define __printf(a, b) __attribute__((format(printf, a, b)))

static struct bpf_map *bpf_object__add_map(struct bpf_object *obj);
static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog);

static int __base_pr(enum libbpf_print_level level, const char *format,
		     va_list args)
{
	if (level == LIBBPF_DEBUG)
		return 0;

	return vfprintf(stderr, format, args);
}

static libbpf_print_fn_t __libbpf_pr = __base_pr;

libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn)
{
	libbpf_print_fn_t old_print_fn = __libbpf_pr;

	__libbpf_pr = fn;
	return old_print_fn;
}
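
/* Example (user code, not part of this file): install a custom print
 * callback that also forwards LIBBPF_DEBUG messages instead of dropping
 * them, as the default __base_pr() above does:
 *
 *	static int my_print(enum libbpf_print_level level,
 *			    const char *format, va_list args)
 *	{
 *		return vfprintf(stderr, format, args);
 *	}
 *	...
 *	libbpf_print_fn_t old_fn = libbpf_set_print(my_print);
 */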

__printf(2, 3)
void libbpf_print(enum libbpf_print_level level, const char *format, ...)
{
	va_list args;

	if (!__libbpf_pr)
		return;

	va_start(args, format);
	__libbpf_pr(level, format, args);
	va_end(args);
}

static void pr_perm_msg(int err)
{
	struct rlimit limit;
	char buf[100];

	if (err != -EPERM || geteuid() != 0)
		return;

	err = getrlimit(RLIMIT_MEMLOCK, &limit);
	if (err)
		return;

	if (limit.rlim_cur == RLIM_INFINITY)
		return;

	if (limit.rlim_cur < 1024)
		snprintf(buf, sizeof(buf), "%zu bytes", (size_t)limit.rlim_cur);
	else if (limit.rlim_cur < 1024*1024)
		snprintf(buf, sizeof(buf), "%.1f KiB", (double)limit.rlim_cur / 1024);
	else
		snprintf(buf, sizeof(buf), "%.1f MiB", (double)limit.rlim_cur / (1024*1024));

	pr_warn("permission error while running as root; try raising 'ulimit -l'? current value: %s\n",
		buf);
}

#define STRERR_BUFSIZE 128

/* Free the pointed-to memory / close the fd and reset the variable, so a
 * stale pointer or descriptor is never reused.
 */
#ifndef zfree
# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
#endif

#ifndef zclose
# define zclose(fd) ({			\
	int ___err = 0;			\
	if ((fd) >= 0)			\
		___err = close((fd));	\
	fd = -1;			\
	___err; })
#endif

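/* The bpf(2) syscall ABI passes user-space pointers in __u64 fields of
 * union bpf_attr. Casting through unsigned long first keeps the conversion
 * well-defined on 32-bit hosts, where a pointer is narrower than __u64.
 */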
static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64) (unsigned long) ptr;
}

/* this goes away in libbpf 1.0 */
enum libbpf_strict_mode libbpf_mode = LIBBPF_STRICT_NONE;

int libbpf_set_strict_mode(enum libbpf_strict_mode mode)
{
	libbpf_mode = mode;
	return 0;
}

__u32 libbpf_major_version(void)
{
	return LIBBPF_MAJOR_VERSION;
}

__u32 libbpf_minor_version(void)
{
	return LIBBPF_MINOR_VERSION;
}

const char *libbpf_version_string(void)
{
#define __S(X) #X
#define _S(X) __S(X)
	return "v" _S(LIBBPF_MAJOR_VERSION) "." _S(LIBBPF_MINOR_VERSION);
#undef _S
#undef __S
}

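/* Relocation kinds libbpf distinguishes while patching instructions:
 * ld_imm64 loads against maps, local subprogram calls, references into
 * global data sections, externs (variables and functions), subprogram
 * addresses, and CO-RE relocations handled by the BTF CO-RE logic.
 */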
enum reloc_type {
	RELO_LD64,
	RELO_CALL,
	RELO_DATA,
	RELO_EXTERN_VAR,
	RELO_EXTERN_FUNC,
	RELO_SUBPROG_ADDR,
	RELO_CORE,
};

struct reloc_desc {
	enum reloc_type type;
	int insn_idx;
	union {
		const struct btf_core_relo *core_relo; /* used when type == RELO_CORE */
		struct {
			int map_idx;
			int sym_off;
		};
	};
};

enum sec_def_flags {
	SEC_NONE = 0,
	/* expected_attach_type is optional, if kernel doesn't support it */
	SEC_EXP_ATTACH_OPT = 1,
	/* section maps to a well-defined attach type */
	SEC_ATTACHABLE = 2,
	SEC_ATTACHABLE_OPT = SEC_ATTACHABLE | SEC_EXP_ATTACH_OPT,
	/* attachment target is specified through BTF ID in either kernel or
	 * other BPF program's BTF object
	 */
	SEC_ATTACH_BTF = 4,
	/* BPF program type allows sleeping/blocking in kernel */
	SEC_SLEEPABLE = 8,
	/* section name match by prefix is allowed, instead of exact match */
	SEC_SLOPPY_PFX = 16,
	/* BPF program supports non-linear XDP buffers (frags) */
	SEC_XDP_FRAGS = 32,
	/* deprecated section definition, not supposed to be used anymore */
	SEC_DEPRECATED = 64,
};

struct bpf_sec_def {
	char *sec;
	enum bpf_prog_type prog_type;
	enum bpf_attach_type expected_attach_type;
	long cookie;
	int handler_id;

	libbpf_prog_setup_fn_t prog_setup_fn;
	libbpf_prog_prepare_load_fn_t prog_prepare_load_fn;
	libbpf_prog_attach_fn_t prog_attach_fn;
};

/*
 * bpf_prog should be a better name but it has been used in
 * linux/filter.h.
 */
struct bpf_program {
	const struct bpf_sec_def *sec_def;
	char *sec_name;
	size_t sec_idx;
	/* this program's instruction offset (in number of instructions)
	 * within its containing ELF section
	 */
	size_t sec_insn_off;
	/* number of original instructions in ELF section belonging to this
	 * program, not taking into account subprog instructions possibly
	 * appended later during relocation
	 */
	size_t sec_insn_cnt;
	/* Offset (in number of instructions) of the start of instruction
	 * belonging to this BPF program within its containing main BPF
	 * program. For the entry-point (main) BPF program, this is always
	 * zero. For a sub-program, this gets reset before each main BPF
	 * program is processed and relocated and is used to determine
	 * whether a sub-program was already appended to the main program,
	 * and if yes, at which instruction offset.
	 */
	size_t sub_insn_off;

	char *name;
	/* name with / replaced by _; makes recursive pinning
	 * in bpf_object__pin_programs easier
	 */
	char *pin_name;

	/* instructions that belong to BPF program; insns[0] is located at
	 * sec_insn_off instruction within its ELF section in ELF file, so
	 * when mapping ELF file instruction index to the local instruction,
	 * one needs to subtract sec_insn_off; and vice versa.
	 */
	struct bpf_insn *insns;
	/* actual number of instructions in this BPF program's image; for
	 * entry-point BPF programs this includes the size of main program
	 * itself plus all the used sub-programs, appended at the end
	 */
	size_t insns_cnt;

	struct reloc_desc *reloc_desc;
	int nr_reloc;

	/* BPF verifier log settings */
	char *log_buf;
	size_t log_size;
	__u32 log_level;

	struct {
		int nr;
		int *fds;
	} instances;
	bpf_program_prep_t preprocessor;

	struct bpf_object *obj;
	void *priv;
	bpf_program_clear_priv_t clear_priv;

	bool load;
	bool mark_btf_static;
	enum bpf_prog_type type;
	enum bpf_attach_type expected_attach_type;
	int prog_ifindex;
	__u32 attach_btf_obj_fd;
	__u32 attach_btf_id;
	__u32 attach_prog_fd;
	void *func_info;
	__u32 func_info_rec_size;
	__u32 func_info_cnt;

	void *line_info;
	__u32 line_info_rec_size;
	__u32 line_info_cnt;
	__u32 prog_flags;
};

struct bpf_struct_ops {
	const char *tname;
	const struct btf_type *type;
	struct bpf_program **progs;
	__u32 *kern_func_off;
	/* e.g. struct tcp_congestion_ops in bpf_prog's BTF format */
	void *data;
	/* e.g. struct bpf_struct_ops_tcp_congestion_ops in the kernel's
	 * btf_vmlinux format; bpf_map__init_kern_struct_ops() populates
	 * kern_vdata from data, member by member, using kernel BTF layout.
	 */
	void *kern_vdata;
	__u32 type_id;
};

#define DATA_SEC ".data"
#define BSS_SEC ".bss"
#define RODATA_SEC ".rodata"
#define KCONFIG_SEC ".kconfig"
#define KSYMS_SEC ".ksyms"
#define STRUCT_OPS_SEC ".struct_ops"

enum libbpf_map_type {
	LIBBPF_MAP_UNSPEC,
	LIBBPF_MAP_DATA,
	LIBBPF_MAP_BSS,
	LIBBPF_MAP_RODATA,
	LIBBPF_MAP_KCONFIG,
};

struct bpf_map {
	char *name;
	/* real_name is defined for special internal maps (.rodata*,
	 * .data*, .bss, .kconfig) and preserves the actual ELF section
	 * name; name is a sanitized, possibly object-name-prefixed
	 * version suitable for the kernel.
	 */
	char *real_name;
	int fd;
	int sec_idx;
	size_t sec_offset;
	int map_ifindex;
	int inner_map_fd;
	struct bpf_map_def def;
	__u32 numa_node;
	__u32 btf_var_idx;
	__u32 btf_key_type_id;
	__u32 btf_value_type_id;
	__u32 btf_vmlinux_value_type_id;
	void *priv;
	bpf_map_clear_priv_t clear_priv;
	enum libbpf_map_type libbpf_type;
	void *mmaped;
	struct bpf_struct_ops *st_ops;
	struct bpf_map *inner_map;
	void **init_slots;
	int init_slots_sz;
	char *pin_path;
	bool pinned;
	bool reused;
	bool skipped;
	__u64 map_extra;
};

enum extern_type {
	EXT_UNKNOWN,
	EXT_KCFG,
	EXT_KSYM,
};

enum kcfg_type {
	KCFG_UNKNOWN,
	KCFG_CHAR,
	KCFG_BOOL,
	KCFG_INT,
	KCFG_TRISTATE,
	KCFG_CHAR_ARR,
};

struct extern_desc {
	enum extern_type type;
	int sym_idx;
	int btf_id;
	int sec_btf_id;
	const char *name;
	bool is_set;
	bool is_weak;
	union {
		struct {
			enum kcfg_type type;
			int sz;
			int align;
			int data_off;
			bool is_signed;
		} kcfg;
		struct {
			unsigned long long addr;

			/* target btf_id of the corresponding kernel var */
			int kernel_btf_obj_fd;
			int kernel_btf_id;

			/* local btf_id of the ksym extern's type */
			__u32 type_id;
			/* BTF fd index to be patched in for insn->off, this is
			 * 0 for vmlinux BTF, index in obj->fd_array for module
			 * BTF
			 */
			__s16 btf_fd_idx;
		} ksym;
	};
};

static LIST_HEAD(bpf_objects_list);

struct module_btf {
	struct btf *btf;
	char *name;
	__u32 id;
	int fd;
	int fd_array_idx;
};

enum sec_type {
	SEC_UNUSED = 0,
	SEC_RELO,
	SEC_BSS,
	SEC_DATA,
	SEC_RODATA,
};

struct elf_sec_desc {
	enum sec_type sec_type;
	Elf64_Shdr *shdr;
	Elf_Data *data;
};

struct elf_state {
	int fd;
	const void *obj_buf;
	size_t obj_buf_sz;
	Elf *elf;
	Elf64_Ehdr *ehdr;
	Elf_Data *symbols;
	Elf_Data *st_ops_data;
	size_t shstrndx; /* section index for section name strings */
	size_t strtabidx;
	struct elf_sec_desc *secs;
	int sec_cnt;
	int maps_shndx;
	int btf_maps_shndx;
	__u32 btf_maps_sec_btf_id;
	int text_shndx;
	int symbols_shndx;
	int st_ops_shndx;
};

struct bpf_object {
	char name[BPF_OBJ_NAME_LEN];
	char license[64];
	__u32 kern_version;

	struct bpf_program *programs;
	size_t nr_programs;
	struct bpf_map *maps;
	size_t nr_maps;
	size_t maps_cap;

	char *kconfig;
	struct extern_desc *externs;
	int nr_extern;
	int kconfig_map_idx;

	bool loaded;
	bool has_subcalls;
	bool has_rodata;

	struct bpf_gen *gen_loader;

	/* Information when doing ELF related work. Only valid if efile.elf is not NULL */
	struct elf_state efile;
	/*
	 * All loaded bpf_object are linked in a list, which is
	 * hidden to caller. bpf_objects__<func> handlers deal with
	 * all objects.
	 */
	struct list_head list;

	struct btf *btf;
	struct btf_ext *btf_ext;

	/* Parsed and loaded vmlinux BTF, used when any of the programs in
	 * the object need it at load time.
	 */
	struct btf *btf_vmlinux;
	/* Path to the custom BTF to be used for BPF CO-RE relocations as an
	 * override for vmlinux BTF.
	 */
	char *btf_custom_path;
	/* vmlinux BTF override for CO-RE relocations */
	struct btf *btf_vmlinux_override;
	/* Lazily initialized kernel module BTFs */
	struct module_btf *btf_modules;
	bool btf_modules_loaded;
	size_t btf_module_cnt;
	size_t btf_module_cap;

	/* optional log settings passed to BPF_BTF_LOAD and BPF_PROG_LOAD commands */
	char *log_buf;
	size_t log_size;
	__u32 log_level;

	void *priv;
	bpf_object_clear_priv_t clear_priv;

	int *fd_array;
	size_t fd_array_cap;
	size_t fd_array_cnt;

	char path[];
};

static const char *elf_sym_str(const struct bpf_object *obj, size_t off);
static const char *elf_sec_str(const struct bpf_object *obj, size_t off);
static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx);
static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name);
static Elf64_Shdr *elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn);
static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn);
static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn);
static Elf64_Sym *elf_sym_by_idx(const struct bpf_object *obj, size_t idx);
static Elf64_Rel *elf_rel_by_idx(Elf_Data *data, size_t idx);

void bpf_program__unload(struct bpf_program *prog)
{
	int i;

	if (!prog)
		return;

	/*
	 * If the object was opened but the program was never loaded,
	 * instances.nr is still -1, which is a valid state here.
	 */
	if (prog->instances.nr > 0) {
		for (i = 0; i < prog->instances.nr; i++)
			zclose(prog->instances.fds[i]);
	} else if (prog->instances.nr != -1) {
		pr_warn("Internal error: instances.nr is %d\n",
			prog->instances.nr);
	}

	prog->instances.nr = -1;
	zfree(&prog->instances.fds);

	zfree(&prog->func_info);
	zfree(&prog->line_info);
}

static void bpf_program__exit(struct bpf_program *prog)
{
	if (!prog)
		return;

	if (prog->clear_priv)
		prog->clear_priv(prog, prog->priv);

	prog->priv = NULL;
	prog->clear_priv = NULL;

	bpf_program__unload(prog);
	zfree(&prog->name);
	zfree(&prog->sec_name);
	zfree(&prog->pin_name);
	zfree(&prog->insns);
	zfree(&prog->reloc_desc);

	prog->nr_reloc = 0;
	prog->insns_cnt = 0;
	prog->sec_idx = -1;
}

static char *__bpf_program__pin_name(struct bpf_program *prog)
{
	char *name, *p;

	if (libbpf_mode & LIBBPF_STRICT_SEC_NAME)
		name = strdup(prog->name);
	else
		name = strdup(prog->sec_name);

	if (!name)
		return NULL;

	p = name;

	while ((p = strchr(p, '/')))
		*p = '_';

	return name;
}

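/* A BPF-to-BPF subprogram call is encoded as a BPF_JMP|BPF_CALL instruction
 * whose src_reg is set to BPF_PSEUDO_CALL; its imm field carries the callee's
 * instruction offset relative to the call site, rather than a helper ID. The
 * predicates below classify instructions by exactly these encodings.
 */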
static bool insn_is_subprog_call(const struct bpf_insn *insn)
{
	return BPF_CLASS(insn->code) == BPF_JMP &&
	       BPF_OP(insn->code) == BPF_CALL &&
	       BPF_SRC(insn->code) == BPF_K &&
	       insn->src_reg == BPF_PSEUDO_CALL &&
	       insn->dst_reg == 0 &&
	       insn->off == 0;
}

static bool is_call_insn(const struct bpf_insn *insn)
{
	return insn->code == (BPF_JMP | BPF_CALL);
}

static bool insn_is_pseudo_func(struct bpf_insn *insn)
{
	return is_ldimm64_insn(insn) && insn->src_reg == BPF_PSEUDO_FUNC;
}

static int
bpf_object__init_prog(struct bpf_object *obj, struct bpf_program *prog,
		      const char *name, size_t sec_idx, const char *sec_name,
		      size_t sec_off, void *insn_data, size_t insn_data_sz)
{
	if (insn_data_sz == 0 || insn_data_sz % BPF_INSN_SZ || sec_off % BPF_INSN_SZ) {
		pr_warn("sec '%s': corrupted program '%s', offset %zu, size %zu\n",
			sec_name, name, sec_off, insn_data_sz);
		return -EINVAL;
	}

	memset(prog, 0, sizeof(*prog));
	prog->obj = obj;

	prog->sec_idx = sec_idx;
	prog->sec_insn_off = sec_off / BPF_INSN_SZ;
	prog->sec_insn_cnt = insn_data_sz / BPF_INSN_SZ;
	/* insns_cnt can later grow if used subprograms get appended */
	prog->insns_cnt = prog->sec_insn_cnt;

	prog->type = BPF_PROG_TYPE_UNSPEC;
	prog->load = true;

	prog->instances.fds = NULL;
	prog->instances.nr = -1;

	/* inherit object's log_level */
	prog->log_level = obj->log_level;

	prog->sec_name = strdup(sec_name);
	if (!prog->sec_name)
		goto errout;

	prog->name = strdup(name);
	if (!prog->name)
		goto errout;

	prog->pin_name = __bpf_program__pin_name(prog);
	if (!prog->pin_name)
		goto errout;

	prog->insns = malloc(insn_data_sz);
	if (!prog->insns)
		goto errout;
	memcpy(prog->insns, insn_data, insn_data_sz);

	return 0;
errout:
	pr_warn("sec '%s': failed to allocate memory for prog '%s'\n", sec_name, name);
	bpf_program__exit(prog);
	return -ENOMEM;
}

static int
bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
			 const char *sec_name, int sec_idx)
{
	Elf_Data *symbols = obj->efile.symbols;
	struct bpf_program *prog, *progs;
	void *data = sec_data->d_buf;
	size_t sec_sz = sec_data->d_size, sec_off, prog_sz, nr_syms;
	int nr_progs, err, i;
	const char *name;
	Elf64_Sym *sym;

	progs = obj->programs;
	nr_progs = obj->nr_programs;
	nr_syms = symbols->d_size / sizeof(Elf64_Sym);
	sec_off = 0;

	for (i = 0; i < nr_syms; i++) {
		sym = elf_sym_by_idx(obj, i);

		if (sym->st_shndx != sec_idx)
			continue;
		if (ELF64_ST_TYPE(sym->st_info) != STT_FUNC)
			continue;

		prog_sz = sym->st_size;
		sec_off = sym->st_value;

		name = elf_sym_str(obj, sym->st_name);
		if (!name) {
			pr_warn("sec '%s': failed to get symbol name for offset %zu\n",
				sec_name, sec_off);
			return -LIBBPF_ERRNO__FORMAT;
		}

		if (sec_off + prog_sz > sec_sz) {
			pr_warn("sec '%s': program at offset %zu crosses section boundary\n",
				sec_name, sec_off);
			return -LIBBPF_ERRNO__FORMAT;
		}

		if (sec_idx != obj->efile.text_shndx && ELF64_ST_BIND(sym->st_info) == STB_LOCAL) {
			pr_warn("sec '%s': program '%s' is static and not supported\n", sec_name, name);
			return -ENOTSUP;
		}

		pr_debug("sec '%s': found program '%s' at insn offset %zu (%zu bytes), code size %zu insns (%zu bytes)\n",
			 sec_name, name, sec_off / BPF_INSN_SZ, sec_off, prog_sz / BPF_INSN_SZ, prog_sz);

		progs = libbpf_reallocarray(progs, nr_progs + 1, sizeof(*progs));
		if (!progs) {
			/*
			 * In this case the original obj->programs is still
			 * valid, so no special treatment is needed in
			 * bpf_object__close().
			 */
			pr_warn("sec '%s': failed to alloc memory for new program '%s'\n",
				sec_name, name);
			return -ENOMEM;
		}
		obj->programs = progs;

		prog = &progs[nr_progs];

		err = bpf_object__init_prog(obj, prog, name, sec_idx, sec_name,
					    sec_off, data + sec_off, prog_sz);
		if (err)
			return err;

		/* if function is a global/weak symbol, but has restricted
		 * (STV_HIDDEN or STV_INTERNAL) visibility, mark its BTF FUNC
		 * as static to enable more permissive BPF verification mode
		 * with more outside context available to BPF verifier
		 */
		if (ELF64_ST_BIND(sym->st_info) != STB_LOCAL
		    && (ELF64_ST_VISIBILITY(sym->st_other) == STV_HIDDEN
			|| ELF64_ST_VISIBILITY(sym->st_other) == STV_INTERNAL))
			prog->mark_btf_static = true;

		nr_progs++;
		obj->nr_programs = nr_progs;
	}

	return 0;
}

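/* KERNEL_VERSION(a, b, c) from <linux/version.h> packs a version triple into
 * a single integer as (a << 16) + (b << 8) + c, so e.g.
 * KERNEL_VERSION(5, 4, 8) yields 0x050408. That packed form is what
 * get_kernel_version() returns below and what gets stored into
 * obj->kern_version.
 */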
__u32 get_kernel_version(void)
{
	/* On Ubuntu LINUX_VERSION_CODE doesn't correspond to info.release,
	 * but Ubuntu provides /proc/version_signature file, as described at
	 * https://wiki.ubuntu.com/Kernel/Faq, with an example contents of
	 * "Ubuntu 5.4.0-12.15-generic 5.4.8". In the above, the kernel
	 * version is 5.4.8.
	 */
	const char *ubuntu_kver_file = "/proc/version_signature";
	__u32 major, minor, patch;
	struct utsname info;

	if (access(ubuntu_kver_file, R_OK) == 0) {
		FILE *f;

		f = fopen(ubuntu_kver_file, "r");
		if (f) {
			if (fscanf(f, "%*s %*s %u.%u.%u\n", &major, &minor, &patch) == 3) {
				fclose(f);
				return KERNEL_VERSION(major, minor, patch);
			}
			fclose(f);
		}
		/* unable to fetch from version_signature, fall back to uname */
	}

	uname(&info);
	if (sscanf(info.release, "%u.%u.%u", &major, &minor, &patch) != 3)
		return 0;
	return KERNEL_VERSION(major, minor, patch);
}

static const struct btf_member *
find_member_by_offset(const struct btf_type *t, __u32 bit_offset)
{
	struct btf_member *m;
	int i;

	for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
		if (btf_member_bit_offset(t, i) == bit_offset)
			return m;
	}

	return NULL;
}

static const struct btf_member *
find_member_by_name(const struct btf *btf, const struct btf_type *t,
		    const char *name)
{
	struct btf_member *m;
	int i;

	for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
		if (!strcmp(btf__name_by_offset(btf, m->name_off), name))
			return m;
	}

	return NULL;
}

#define STRUCT_OPS_VALUE_PREFIX "bpf_struct_ops_"
static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
				   const char *name, __u32 kind);

static int
find_struct_ops_kern_types(const struct btf *btf, const char *tname,
			   const struct btf_type **type, __u32 *type_id,
			   const struct btf_type **vtype, __u32 *vtype_id,
			   const struct btf_member **data_member)
{
	const struct btf_type *kern_type, *kern_vtype;
	const struct btf_member *kern_data_member;
	__s32 kern_vtype_id, kern_type_id;
	__u32 i;

	kern_type_id = btf__find_by_name_kind(btf, tname, BTF_KIND_STRUCT);
	if (kern_type_id < 0) {
		pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n",
			tname);
		return kern_type_id;
	}
	kern_type = btf__type_by_id(btf, kern_type_id);

	/* Find the corresponding "map_value" type that will be used
	 * in map_update(BPF_MAP_TYPE_STRUCT_OPS). For example, find
	 * "struct bpf_struct_ops_tcp_congestion_ops" from the
	 * btf_vmlinux.
	 */
	kern_vtype_id = find_btf_by_prefix_kind(btf, STRUCT_OPS_VALUE_PREFIX,
						tname, BTF_KIND_STRUCT);
	if (kern_vtype_id < 0) {
		pr_warn("struct_ops init_kern: struct %s%s is not found in kernel BTF\n",
			STRUCT_OPS_VALUE_PREFIX, tname);
		return kern_vtype_id;
	}
	kern_vtype = btf__type_by_id(btf, kern_vtype_id);

	/* Find the "data" member within the kernel's value type, e.g.
	 * the "struct tcp_congestion_ops data" member of
	 * struct bpf_struct_ops_tcp_congestion_ops.
	 */
	kern_data_member = btf_members(kern_vtype);
	for (i = 0; i < btf_vlen(kern_vtype); i++, kern_data_member++) {
		if (kern_data_member->type == kern_type_id)
			break;
	}
	if (i == btf_vlen(kern_vtype)) {
		pr_warn("struct_ops init_kern: struct %s data is not found in struct %s%s\n",
			tname, STRUCT_OPS_VALUE_PREFIX, tname);
		return -EINVAL;
	}

	*type = kern_type;
	*type_id = kern_type_id;
	*vtype = kern_vtype;
	*vtype_id = kern_vtype_id;
	*data_member = kern_data_member;

	return 0;
}

static bool bpf_map__is_struct_ops(const struct bpf_map *map)
{
	return map->def.type == BPF_MAP_TYPE_STRUCT_OPS;
}

/* Init the map's fields that depend on kern_btf */
static int bpf_map__init_kern_struct_ops(struct bpf_map *map,
					 const struct btf *btf,
					 const struct btf *kern_btf)
{
	const struct btf_member *member, *kern_member, *kern_data_member;
	const struct btf_type *type, *kern_type, *kern_vtype;
	__u32 i, kern_type_id, kern_vtype_id, kern_data_off;
	struct bpf_struct_ops *st_ops;
	void *data, *kern_data;
	const char *tname;
	int err;

	st_ops = map->st_ops;
	type = st_ops->type;
	tname = st_ops->tname;
	err = find_struct_ops_kern_types(kern_btf, tname,
					 &kern_type, &kern_type_id,
					 &kern_vtype, &kern_vtype_id,
					 &kern_data_member);
	if (err)
		return err;

	pr_debug("struct_ops init_kern %s: type_id:%u kern_type_id:%u kern_vtype_id:%u\n",
		 map->name, st_ops->type_id, kern_type_id, kern_vtype_id);

	map->def.value_size = kern_vtype->size;
	map->btf_vmlinux_value_type_id = kern_vtype_id;

	st_ops->kern_vdata = calloc(1, kern_vtype->size);
	if (!st_ops->kern_vdata)
		return -ENOMEM;

	data = st_ops->data;
	kern_data_off = kern_data_member->offset / 8;
	kern_data = st_ops->kern_vdata + kern_data_off;

	member = btf_members(type);
	for (i = 0; i < btf_vlen(type); i++, member++) {
		const struct btf_type *mtype, *kern_mtype;
		__u32 mtype_id, kern_mtype_id;
		void *mdata, *kern_mdata;
		__s64 msize, kern_msize;
		__u32 moff, kern_moff;
		__u32 kern_member_idx;
		const char *mname;

		mname = btf__name_by_offset(btf, member->name_off);
		kern_member = find_member_by_name(kern_btf, kern_type, mname);
		if (!kern_member) {
			pr_warn("struct_ops init_kern %s: Cannot find member %s in kernel BTF\n",
				map->name, mname);
			return -ENOTSUP;
		}

		kern_member_idx = kern_member - btf_members(kern_type);
		if (btf_member_bitfield_size(type, i) ||
		    btf_member_bitfield_size(kern_type, kern_member_idx)) {
			pr_warn("struct_ops init_kern %s: bitfield %s is not supported\n",
				map->name, mname);
			return -ENOTSUP;
		}

		moff = member->offset / 8;
		kern_moff = kern_member->offset / 8;

		mdata = data + moff;
		kern_mdata = kern_data + kern_moff;

		mtype = skip_mods_and_typedefs(btf, member->type, &mtype_id);
		kern_mtype = skip_mods_and_typedefs(kern_btf, kern_member->type,
						    &kern_mtype_id);
		if (BTF_INFO_KIND(mtype->info) !=
		    BTF_INFO_KIND(kern_mtype->info)) {
			pr_warn("struct_ops init_kern %s: Unmatched member type %s %u != %u(kernel)\n",
				map->name, mname, BTF_INFO_KIND(mtype->info),
				BTF_INFO_KIND(kern_mtype->info));
			return -ENOTSUP;
		}

		if (btf_is_ptr(mtype)) {
			struct bpf_program *prog;

			prog = st_ops->progs[i];
			if (!prog)
				continue;

			kern_mtype = skip_mods_and_typedefs(kern_btf,
							    kern_mtype->type,
							    &kern_mtype_id);

			/* mtype->type is already known to be a func_proto;
			 * only kern_mtype needs checking here.
			 */
			if (!btf_is_func_proto(kern_mtype)) {
				pr_warn("struct_ops init_kern %s: kernel member %s is not a func ptr\n",
					map->name, mname);
				return -ENOTSUP;
			}

			prog->attach_btf_id = kern_type_id;
			prog->expected_attach_type = kern_member_idx;

			st_ops->kern_func_off[i] = kern_data_off + kern_moff;

			pr_debug("struct_ops init_kern %s: func ptr %s is set to prog %s from data(+%u) to kern_data(+%u)\n",
				 map->name, mname, prog->name, moff,
				 kern_moff);

			continue;
		}

		msize = btf__resolve_size(btf, mtype_id);
		kern_msize = btf__resolve_size(kern_btf, kern_mtype_id);
		if (msize < 0 || kern_msize < 0 || msize != kern_msize) {
			pr_warn("struct_ops init_kern %s: Error in size of member %s: %zd != %zd(kernel)\n",
				map->name, mname, (ssize_t)msize,
				(ssize_t)kern_msize);
			return -ENOTSUP;
		}

		pr_debug("struct_ops init_kern %s: copy %s %u bytes from data(+%u) to kern_data(+%u)\n",
			 map->name, mname, (unsigned int)msize,
			 moff, kern_moff);
		memcpy(kern_mdata, mdata, msize);
	}

	return 0;
}

static int bpf_object__init_kern_struct_ops_maps(struct bpf_object *obj)
{
	struct bpf_map *map;
	size_t i;
	int err;

	for (i = 0; i < obj->nr_maps; i++) {
		map = &obj->maps[i];

		if (!bpf_map__is_struct_ops(map))
			continue;

		err = bpf_map__init_kern_struct_ops(map, obj->btf,
						    obj->btf_vmlinux);
		if (err)
			return err;
	}

	return 0;
}

static int bpf_object__init_struct_ops_maps(struct bpf_object *obj)
{
	const struct btf_type *type, *datasec;
	const struct btf_var_secinfo *vsi;
	struct bpf_struct_ops *st_ops;
	const char *tname, *var_name;
	__s32 type_id, datasec_id;
	const struct btf *btf;
	struct bpf_map *map;
	__u32 i;

	if (obj->efile.st_ops_shndx == -1)
		return 0;

	btf = obj->btf;
	datasec_id = btf__find_by_name_kind(btf, STRUCT_OPS_SEC,
					    BTF_KIND_DATASEC);
	if (datasec_id < 0) {
		pr_warn("struct_ops init: DATASEC %s not found\n",
			STRUCT_OPS_SEC);
		return -EINVAL;
	}

	datasec = btf__type_by_id(btf, datasec_id);
	vsi = btf_var_secinfos(datasec);
	for (i = 0; i < btf_vlen(datasec); i++, vsi++) {
		type = btf__type_by_id(obj->btf, vsi->type);
		var_name = btf__name_by_offset(obj->btf, type->name_off);

		type_id = btf__resolve_type(obj->btf, vsi->type);
		if (type_id < 0) {
			pr_warn("struct_ops init: Cannot resolve var type_id %u in DATASEC %s\n",
				vsi->type, STRUCT_OPS_SEC);
			return -EINVAL;
		}

		type = btf__type_by_id(obj->btf, type_id);
		tname = btf__name_by_offset(obj->btf, type->name_off);
		if (!tname[0]) {
			pr_warn("struct_ops init: anonymous type is not supported\n");
			return -ENOTSUP;
		}
		if (!btf_is_struct(type)) {
			pr_warn("struct_ops init: %s is not a struct\n", tname);
			return -EINVAL;
		}

		map = bpf_object__add_map(obj);
		if (IS_ERR(map))
			return PTR_ERR(map);

		map->sec_idx = obj->efile.st_ops_shndx;
		map->sec_offset = vsi->offset;
		map->name = strdup(var_name);
		if (!map->name)
			return -ENOMEM;

		map->def.type = BPF_MAP_TYPE_STRUCT_OPS;
		map->def.key_size = sizeof(int);
		map->def.value_size = type->size;
		map->def.max_entries = 1;

		map->st_ops = calloc(1, sizeof(*map->st_ops));
		if (!map->st_ops)
			return -ENOMEM;
		st_ops = map->st_ops;
		st_ops->data = malloc(type->size);
		st_ops->progs = calloc(btf_vlen(type), sizeof(*st_ops->progs));
		st_ops->kern_func_off = malloc(btf_vlen(type) *
					       sizeof(*st_ops->kern_func_off));
		if (!st_ops->data || !st_ops->progs || !st_ops->kern_func_off)
			return -ENOMEM;

		if (vsi->offset + type->size > obj->efile.st_ops_data->d_size) {
			pr_warn("struct_ops init: var %s is beyond the end of DATASEC %s\n",
				var_name, STRUCT_OPS_SEC);
			return -EINVAL;
		}

		memcpy(st_ops->data,
		       obj->efile.st_ops_data->d_buf + vsi->offset,
		       type->size);
		st_ops->tname = tname;
		st_ops->type = type;
		st_ops->type_id = type_id;

		pr_debug("struct_ops init: struct %s(type_id=%u) %s found at offset %u\n",
			 tname, type_id, var_name, vsi->offset);
	}

	return 0;
}

static struct bpf_object *bpf_object__new(const char *path,
					  const void *obj_buf,
					  size_t obj_buf_sz,
					  const char *obj_name)
{
	bool strict = (libbpf_mode & LIBBPF_STRICT_NO_OBJECT_LIST);
	struct bpf_object *obj;
	char *end;

	obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
	if (!obj) {
		pr_warn("alloc memory failed for %s\n", path);
		return ERR_PTR(-ENOMEM);
	}

	strcpy(obj->path, path);
	if (obj_name) {
		libbpf_strlcpy(obj->name, obj_name, sizeof(obj->name));
	} else {
		/* Using basename() GNU version which doesn't modify arg. */
		libbpf_strlcpy(obj->name, basename((void *)path), sizeof(obj->name));
		end = strchr(obj->name, '.');
		if (end)
			*end = 0;
	}

	obj->efile.fd = -1;
	/*
	 * Caller of this function should also call
	 * bpf_object__elf_finish() after data collection to return
	 * obj_buf to user. If not, we should duplicate the buffer to
	 * avoid returning freed buffer to user.
	 */
	obj->efile.obj_buf = obj_buf;
	obj->efile.obj_buf_sz = obj_buf_sz;
	obj->efile.maps_shndx = -1;
	obj->efile.btf_maps_shndx = -1;
	obj->efile.st_ops_shndx = -1;
	obj->kconfig_map_idx = -1;

	obj->kern_version = get_kernel_version();
	obj->loaded = false;

	INIT_LIST_HEAD(&obj->list);
	if (!strict)
		list_add(&obj->list, &bpf_objects_list);
	return obj;
}

static void bpf_object__elf_finish(struct bpf_object *obj)
{
	if (!obj->efile.elf)
		return;

	elf_end(obj->efile.elf);
	obj->efile.elf = NULL;
	obj->efile.symbols = NULL;
	obj->efile.st_ops_data = NULL;

	zfree(&obj->efile.secs);
	obj->efile.sec_cnt = 0;
	zclose(obj->efile.fd);
	obj->efile.obj_buf = NULL;
	obj->efile.obj_buf_sz = 0;
}

static int bpf_object__elf_init(struct bpf_object *obj)
{
	Elf64_Ehdr *ehdr;
	int err = 0;
	Elf *elf;

	if (obj->efile.elf) {
		pr_warn("elf: init internal error\n");
		return -LIBBPF_ERRNO__LIBELF;
	}

	if (obj->efile.obj_buf_sz > 0) {
		/* obj_buf should have been validated by the caller that
		 * provided the in-memory object buffer.
		 */
		elf = elf_memory((char *)obj->efile.obj_buf, obj->efile.obj_buf_sz);
	} else {
		obj->efile.fd = open(obj->path, O_RDONLY | O_CLOEXEC);
		if (obj->efile.fd < 0) {
			char errmsg[STRERR_BUFSIZE], *cp;

			err = -errno;
			cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
			pr_warn("elf: failed to open %s: %s\n", obj->path, cp);
			return err;
		}

		elf = elf_begin(obj->efile.fd, ELF_C_READ_MMAP, NULL);
	}

	if (!elf) {
		pr_warn("elf: failed to open %s as ELF file: %s\n", obj->path, elf_errmsg(-1));
		err = -LIBBPF_ERRNO__LIBELF;
		goto errout;
	}

	obj->efile.elf = elf;

	if (elf_kind(elf) != ELF_K_ELF) {
		err = -LIBBPF_ERRNO__FORMAT;
		pr_warn("elf: '%s' is not a proper ELF object\n", obj->path);
		goto errout;
	}

	if (gelf_getclass(elf) != ELFCLASS64) {
		err = -LIBBPF_ERRNO__FORMAT;
		pr_warn("elf: '%s' is not a 64-bit ELF object\n", obj->path);
		goto errout;
	}

	obj->efile.ehdr = ehdr = elf64_getehdr(elf);
	if (!obj->efile.ehdr) {
		pr_warn("elf: failed to get ELF header from %s: %s\n", obj->path, elf_errmsg(-1));
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	if (elf_getshdrstrndx(elf, &obj->efile.shstrndx)) {
		pr_warn("elf: failed to get section names section index for %s: %s\n",
			obj->path, elf_errmsg(-1));
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	/* ELF is corrupted/truncated, avoid calling elf_strptr. */
	if (!elf_rawdata(elf_getscn(elf, obj->efile.shstrndx), NULL)) {
		pr_warn("elf: failed to get section names strings from %s: %s\n",
			obj->path, elf_errmsg(-1));
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	/* Old LLVM set e_machine to EM_NONE */
	if (ehdr->e_type != ET_REL || (ehdr->e_machine && ehdr->e_machine != EM_BPF)) {
		pr_warn("elf: %s is not a valid eBPF object file\n", obj->path);
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	return 0;
errout:
	bpf_object__elf_finish(obj);
	return err;
}

static int bpf_object__check_endianness(struct bpf_object *obj)
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	if (obj->efile.ehdr->e_ident[EI_DATA] == ELFDATA2LSB)
		return 0;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	if (obj->efile.ehdr->e_ident[EI_DATA] == ELFDATA2MSB)
		return 0;
#else
# error "Unrecognized __BYTE_ORDER__"
#endif
	pr_warn("elf: endianness mismatch in %s.\n", obj->path);
	return -LIBBPF_ERRNO__ENDIAN;
}

static int
bpf_object__init_license(struct bpf_object *obj, void *data, size_t size)
{
	/* libbpf_strlcpy() only copies first N - 1 bytes, so size + 1 won't
	 * go over allowed ELF data size limits, no need to check size
	 */
	libbpf_strlcpy(obj->license, data, min(size + 1, sizeof(obj->license)));
	pr_debug("license of %s is %s\n", obj->path, obj->license);
	return 0;
}

static int
bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size)
{
	__u32 kver;

	if (size != sizeof(kver)) {
		pr_warn("invalid kver section in %s\n", obj->path);
		return -LIBBPF_ERRNO__FORMAT;
	}
	memcpy(&kver, data, sizeof(kver));
	obj->kern_version = kver;
	pr_debug("kernel version of %s is %x\n", obj->path, obj->kern_version);
	return 0;
}

static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
{
	if (type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
	    type == BPF_MAP_TYPE_HASH_OF_MAPS)
		return true;
	return false;
}

static int find_elf_sec_sz(const struct bpf_object *obj, const char *name, __u32 *size)
{
	Elf_Data *data;
	Elf_Scn *scn;

	if (!name)
		return -EINVAL;

	scn = elf_sec_by_name(obj, name);
	data = elf_sec_data(obj, scn);
	if (data) {
		*size = data->d_size;
		return 0;
	}

	return -ENOENT;
}

static int find_elf_var_offset(const struct bpf_object *obj, const char *name, __u32 *off)
{
	Elf_Data *symbols = obj->efile.symbols;
	const char *sname;
	size_t si;

	if (!name || !off)
		return -EINVAL;

	for (si = 0; si < symbols->d_size / sizeof(Elf64_Sym); si++) {
		Elf64_Sym *sym = elf_sym_by_idx(obj, si);

		if (ELF64_ST_BIND(sym->st_info) != STB_GLOBAL ||
		    ELF64_ST_TYPE(sym->st_info) != STT_OBJECT)
			continue;

		sname = elf_sym_str(obj, sym->st_name);
		if (!sname) {
			pr_warn("failed to get sym name string for var %s\n", name);
			return -EIO;
		}
		if (strcmp(name, sname) == 0) {
			*off = sym->st_value;
			return 0;
		}
	}

	return -ENOENT;
}

static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
{
	struct bpf_map *new_maps;
	size_t new_cap;
	int i;

	if (obj->nr_maps < obj->maps_cap)
		return &obj->maps[obj->nr_maps++];

	new_cap = max((size_t)4, obj->maps_cap * 3 / 2);
	new_maps = libbpf_reallocarray(obj->maps, new_cap, sizeof(*obj->maps));
	if (!new_maps) {
		pr_warn("alloc maps for object failed\n");
		return ERR_PTR(-ENOMEM);
	}

	obj->maps_cap = new_cap;
	obj->maps = new_maps;

	/* zero out new maps */
	memset(obj->maps + obj->nr_maps, 0,
	       (obj->maps_cap - obj->nr_maps) * sizeof(*obj->maps));
	/*
	 * fill all fds with -1 so they aren't confused with a valid fd 0
	 * on failure paths (zclose won't close a negative fd)
	 */
	for (i = obj->nr_maps; i < obj->maps_cap; i++) {
		obj->maps[i].fd = -1;
		obj->maps[i].inner_map_fd = -1;
	}

	return &obj->maps[obj->nr_maps++];
}

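/* Size of the mmap()-able region backing a global-data map: value_size
 * rounded up to 8 bytes, times max_entries, rounded up to the page size.
 * E.g. on a 4KB-page host, a single-entry .rodata map with value_size 6000
 * gets an 8192-byte (two-page) mapping.
 */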
static size_t bpf_map_mmap_sz(const struct bpf_map *map)
{
	long page_sz = sysconf(_SC_PAGE_SIZE);
	size_t map_sz;

	map_sz = (size_t)roundup(map->def.value_size, 8) * map->def.max_entries;
	map_sz = roundup(map_sz, page_sz);
	return map_sz;
}

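/* Construct the kernel-visible name of an internal map by prefixing the ELF
 * section name with the object name, truncated to fit BPF_OBJ_NAME_LEN
 * (16 bytes including NUL). E.g. (hypothetical object name) object "my_obj"
 * with section ".rodata" yields "my_obj.rodata", while a section containing
 * an extra dot, such as ".rodata.str1.1", is treated as a custom dot map
 * and kept unprefixed.
 */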
static char *internal_map_name(struct bpf_object *obj, const char *real_name)
{
	char map_name[BPF_OBJ_NAME_LEN], *p;
	int pfx_len, sfx_len = max((size_t)7, strlen(real_name));

	/* reserve at least 7 characters for the section-name suffix (enough
	 * for ".rodata", ".bss", etc.) and spend whatever room is left on
	 * the object-name prefix
	 */
	if (sfx_len >= BPF_OBJ_NAME_LEN)
		sfx_len = BPF_OBJ_NAME_LEN - 1;

	/* if there are two or more dots in map name, it's a custom dot map */
	if (strchr(real_name + 1, '.') != NULL)
		pfx_len = 0;
	else
		pfx_len = min((size_t)BPF_OBJ_NAME_LEN - sfx_len - 1, strlen(obj->name));

	snprintf(map_name, sizeof(map_name), "%.*s%.*s", pfx_len, obj->name,
		 sfx_len, real_name);

	/* sanitise map name to characters allowed by kernel */
	for (p = map_name; *p && p < map_name + sizeof(map_name); p++)
		if (!isalnum(*p) && *p != '_' && *p != '.')
			*p = '_';

	return strdup(map_name);
}

static int
bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map);

static int
bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
			      const char *real_name, int sec_idx, void *data, size_t data_sz)
{
	struct bpf_map_def *def;
	struct bpf_map *map;
	int err;

	map = bpf_object__add_map(obj);
	if (IS_ERR(map))
		return PTR_ERR(map);

	map->libbpf_type = type;
	map->sec_idx = sec_idx;
	map->sec_offset = 0;
	map->real_name = strdup(real_name);
	map->name = internal_map_name(obj, real_name);
	if (!map->real_name || !map->name) {
		zfree(&map->real_name);
		zfree(&map->name);
		return -ENOMEM;
	}

	def = &map->def;
	def->type = BPF_MAP_TYPE_ARRAY;
	def->key_size = sizeof(int);
	def->value_size = data_sz;
	def->max_entries = 1;
	def->map_flags = type == LIBBPF_MAP_RODATA || type == LIBBPF_MAP_KCONFIG
			 ? BPF_F_RDONLY_PROG : 0;
	def->map_flags |= BPF_F_MMAPABLE;

	pr_debug("map '%s' (global data): at sec_idx %d, offset %zu, flags %x.\n",
		 map->name, map->sec_idx, map->sec_offset, def->map_flags);

	map->mmaped = mmap(NULL, bpf_map_mmap_sz(map), PROT_READ | PROT_WRITE,
			   MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	if (map->mmaped == MAP_FAILED) {
		err = -errno;
		map->mmaped = NULL;
		pr_warn("failed to alloc map '%s' content buffer: %d\n",
			map->name, err);
		zfree(&map->real_name);
		zfree(&map->name);
		return err;
	}

	/* failures are fine because of maps like .rodata.str1.1 */
	(void) bpf_map_find_btf_info(obj, map);

	if (data)
		memcpy(map->mmaped, data, data_sz);

	pr_debug("map %td is \"%s\"\n", map - obj->maps, map->name);
	return 0;
}

static int bpf_object__init_global_data_maps(struct bpf_object *obj)
{
	struct elf_sec_desc *sec_desc;
	const char *sec_name;
	int err = 0, sec_idx;

	/*
	 * Populate obj->maps with libbpf internal maps.
	 */
	for (sec_idx = 1; sec_idx < obj->efile.sec_cnt; sec_idx++) {
		sec_desc = &obj->efile.secs[sec_idx];

		switch (sec_desc->sec_type) {
		case SEC_DATA:
			sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
			err = bpf_object__init_internal_map(obj, LIBBPF_MAP_DATA,
							    sec_name, sec_idx,
							    sec_desc->data->d_buf,
							    sec_desc->data->d_size);
			break;
		case SEC_RODATA:
			obj->has_rodata = true;
			sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
			err = bpf_object__init_internal_map(obj, LIBBPF_MAP_RODATA,
							    sec_name, sec_idx,
							    sec_desc->data->d_buf,
							    sec_desc->data->d_size);
			break;
		case SEC_BSS:
			sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
			err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS,
							    sec_name, sec_idx,
							    NULL,
							    sec_desc->data->d_size);
			break;
		default:
			/* skip */
			break;
		}
		if (err)
			return err;
	}
	return 0;
}

static struct extern_desc *find_extern_by_name(const struct bpf_object *obj,
					       const void *name)
{
	int i;

	for (i = 0; i < obj->nr_extern; i++) {
		if (strcmp(obj->externs[i].name, name) == 0)
			return &obj->externs[i];
	}
	return NULL;
}

static int set_kcfg_value_tri(struct extern_desc *ext, void *ext_val,
			      char value)
{
	switch (ext->kcfg.type) {
	case KCFG_BOOL:
		if (value == 'm') {
			pr_warn("extern (kcfg) %s=%c should be tristate or char\n",
				ext->name, value);
			return -EINVAL;
		}
		*(bool *)ext_val = value == 'y' ? true : false;
		break;
	case KCFG_TRISTATE:
		if (value == 'y')
			*(enum libbpf_tristate *)ext_val = TRI_YES;
		else if (value == 'm')
			*(enum libbpf_tristate *)ext_val = TRI_MODULE;
		else
			*(enum libbpf_tristate *)ext_val = TRI_NO;
		break;
	case KCFG_CHAR:
		*(char *)ext_val = value;
		break;
	case KCFG_UNKNOWN:
	case KCFG_INT:
	case KCFG_CHAR_ARR:
	default:
		pr_warn("extern (kcfg) %s=%c should be bool, tristate, or char\n",
			ext->name, value);
		return -EINVAL;
	}
	ext->is_set = true;
	return 0;
}

static int set_kcfg_value_str(struct extern_desc *ext, char *ext_val,
			      const char *value)
{
	size_t len;

	if (ext->kcfg.type != KCFG_CHAR_ARR) {
		pr_warn("extern (kcfg) %s=%s should be char array\n", ext->name, value);
		return -EINVAL;
	}

	len = strlen(value);
	if (value[len - 1] != '"') {
		pr_warn("extern (kcfg) '%s': invalid string config '%s'\n",
			ext->name, value);
		return -EINVAL;
	}

	/* strip quotes */
	len -= 2;
	if (len >= ext->kcfg.sz) {
		pr_warn("extern (kcfg) '%s': long string config %s of (%zu bytes) truncated to %d bytes\n",
			ext->name, value, len, ext->kcfg.sz - 1);
		len = ext->kcfg.sz - 1;
	}
	memcpy(ext_val, value + 1, len);
	ext_val[len] = '\0';
	ext->is_set = true;
	return 0;
}

static int parse_u64(const char *value, __u64 *res)
{
	char *value_end;
	int err;

	errno = 0;
	*res = strtoull(value, &value_end, 0);
	if (errno) {
		err = -errno;
		pr_warn("failed to parse '%s' as integer: %d\n", value, err);
		return err;
	}
	if (*value_end) {
		pr_warn("failed to parse '%s' as integer completely\n", value);
		return -EINVAL;
	}
	return 0;
}

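/* Check that a u64 parsed from Kconfig fits the extern's declared integer
 * size without loss. For an unsigned target the top bits simply must be
 * zero. For a signed N-bit target the valid range is [-2^(N-1), 2^(N-1)-1];
 * adding 2^(N-1) to the two's-complement value shifts that range to
 * [0, 2^N - 1], making it a single unsigned comparison. E.g. for a signed
 * 1-byte extern, 127 passes (127 + 128 = 255 < 256) while 128 fails.
 */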
static bool is_kcfg_value_in_range(const struct extern_desc *ext, __u64 v)
{
	int bit_sz = ext->kcfg.sz * 8;

	if (ext->kcfg.sz == 8)
		return true;

	if (ext->kcfg.is_signed)
		return v + (1ULL << (bit_sz - 1)) < (1ULL << bit_sz);
	else
		return (v >> bit_sz) == 0;
}

static int set_kcfg_value_num(struct extern_desc *ext, void *ext_val,
			      __u64 value)
{
	if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR) {
		pr_warn("extern (kcfg) %s=%llu should be integer\n",
			ext->name, (unsigned long long)value);
		return -EINVAL;
	}
	if (!is_kcfg_value_in_range(ext, value)) {
		pr_warn("extern (kcfg) %s=%llu value doesn't fit in %d bytes\n",
			ext->name, (unsigned long long)value, ext->kcfg.sz);
		return -ERANGE;
	}
	switch (ext->kcfg.sz) {
	case 1: *(__u8 *)ext_val = value; break;
	case 2: *(__u16 *)ext_val = value; break;
	case 4: *(__u32 *)ext_val = value; break;
	case 8: *(__u64 *)ext_val = value; break;
	default:
		return -EINVAL;
	}
	ext->is_set = true;
	return 0;
}

static int bpf_object__process_kconfig_line(struct bpf_object *obj,
					    char *buf, void *data)
{
	struct extern_desc *ext;
	char *sep, *value;
	int len, err = 0;
	void *ext_val;
	__u64 num;

	if (!str_has_pfx(buf, "CONFIG_"))
		return 0;

	sep = strchr(buf, '=');
	if (!sep) {
		pr_warn("failed to parse '%s': no separator\n", buf);
		return -EINVAL;
	}

	/* Trim ending '\n' */
	len = strlen(buf);
	if (buf[len - 1] == '\n')
		buf[len - 1] = '\0';
	/* Split on '=' and ensure that a value is present. */
	*sep = '\0';
	if (!sep[1]) {
		*sep = '=';
		pr_warn("failed to parse '%s': no value\n", buf);
		return -EINVAL;
	}

	ext = find_extern_by_name(obj, buf);
	if (!ext || ext->is_set)
		return 0;

	ext_val = data + ext->kcfg.data_off;
	value = sep + 1;

	switch (*value) {
	case 'y': case 'n': case 'm':
		err = set_kcfg_value_tri(ext, ext_val, *value);
		break;
	case '"':
		err = set_kcfg_value_str(ext, ext_val, value);
		break;
	default:
		/* assume integer */
		err = parse_u64(value, &num);
		if (err) {
			pr_warn("extern (kcfg) %s=%s should be integer\n",
				ext->name, value);
			return err;
		}
		err = set_kcfg_value_num(ext, ext_val, num);
		break;
	}
	if (err)
		return err;
	pr_debug("extern (kcfg) %s=%s\n", ext->name, value);
	return 0;
}

static int bpf_object__read_kconfig_file(struct bpf_object *obj, void *data)
{
	char buf[PATH_MAX];
	struct utsname uts;
	int len, err = 0;
	gzFile file;

	uname(&uts);
	len = snprintf(buf, PATH_MAX, "/boot/config-%s", uts.release);
	if (len < 0)
		return -EINVAL;
	else if (len >= PATH_MAX)
		return -ENAMETOOLONG;

	/* gzopen also accepts uncompressed files. */
	file = gzopen(buf, "r");
	if (!file)
		file = gzopen("/proc/config.gz", "r");

	if (!file) {
		pr_warn("failed to open system Kconfig\n");
		return -ENOENT;
	}

	while (gzgets(file, buf, sizeof(buf))) {
		err = bpf_object__process_kconfig_line(obj, buf, data);
		if (err) {
			pr_warn("error parsing system Kconfig line '%s': %d\n",
				buf, err);
			goto out;
		}
	}

out:
	gzclose(file);
	return err;
}

static int bpf_object__read_kconfig_mem(struct bpf_object *obj,
					const char *config, void *data)
{
	char buf[PATH_MAX];
	int err = 0;
	FILE *file;

	file = fmemopen((void *)config, strlen(config), "r");
	if (!file) {
		err = -errno;
		pr_warn("failed to open in-memory Kconfig: %d\n", err);
		return err;
	}

	while (fgets(buf, sizeof(buf), file)) {
		err = bpf_object__process_kconfig_line(obj, buf, data);
		if (err) {
			pr_warn("error parsing in-memory Kconfig line '%s': %d\n",
				buf, err);
			break;
		}
	}

	fclose(file);
	return err;
}

static int bpf_object__init_kconfig_map(struct bpf_object *obj)
{
	struct extern_desc *last_ext = NULL, *ext;
	size_t map_sz;
	int i, err;

	for (i = 0; i < obj->nr_extern; i++) {
		ext = &obj->externs[i];
		if (ext->type == EXT_KCFG)
			last_ext = ext;
	}

	if (!last_ext)
		return 0;

	map_sz = last_ext->kcfg.data_off + last_ext->kcfg.sz;
	err = bpf_object__init_internal_map(obj, LIBBPF_MAP_KCONFIG,
					    ".kconfig", obj->efile.symbols_shndx,
					    NULL, map_sz);
	if (err)
		return err;

	obj->kconfig_map_idx = obj->nr_maps - 1;

	return 0;
}

static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
{
	Elf_Data *symbols = obj->efile.symbols;
	int i, map_def_sz = 0, nr_maps = 0, nr_syms;
	Elf_Data *data = NULL;
	Elf_Scn *scn;

	if (obj->efile.maps_shndx < 0)
		return 0;

	if (libbpf_mode & LIBBPF_STRICT_MAP_DEFINITIONS) {
		pr_warn("legacy map definitions in SEC(\"maps\") are not supported\n");
		return -EOPNOTSUPP;
	}

	if (!symbols)
		return -EINVAL;

	scn = elf_sec_by_idx(obj, obj->efile.maps_shndx);
	data = elf_sec_data(obj, scn);
	if (!scn || !data) {
		pr_warn("elf: failed to get legacy map definitions for %s\n",
			obj->path);
		return -EINVAL;
	}

	/*
	 * Count number of maps. Each map has a name.
	 * Array of maps is not supported: only the first element is
	 * considered.
	 *
	 * TODO: Detect array of map and report error.
	 */
	nr_syms = symbols->d_size / sizeof(Elf64_Sym);
	for (i = 0; i < nr_syms; i++) {
		Elf64_Sym *sym = elf_sym_by_idx(obj, i);

		if (sym->st_shndx != obj->efile.maps_shndx)
			continue;
		if (ELF64_ST_TYPE(sym->st_info) == STT_SECTION)
			continue;
		nr_maps++;
	}

	pr_debug("elf: found %d legacy map definitions (%zd bytes) in %s\n",
		 nr_maps, data->d_size, obj->path);

	if (!data->d_size || nr_maps == 0 || (data->d_size % nr_maps) != 0) {
		pr_warn("elf: unable to determine legacy map definition size in %s\n",
			obj->path);
		return -EINVAL;
	}
	map_def_sz = data->d_size / nr_maps;

	/* Fill obj->maps using data in "maps" section. */
	for (i = 0; i < nr_syms; i++) {
		Elf64_Sym *sym = elf_sym_by_idx(obj, i);
		const char *map_name;
		struct bpf_map_def *def;
		struct bpf_map *map;

		if (sym->st_shndx != obj->efile.maps_shndx)
			continue;
		if (ELF64_ST_TYPE(sym->st_info) == STT_SECTION)
			continue;

		map = bpf_object__add_map(obj);
		if (IS_ERR(map))
			return PTR_ERR(map);

		map_name = elf_sym_str(obj, sym->st_name);
		if (!map_name) {
			pr_warn("failed to get map #%d name sym string for obj %s\n",
				i, obj->path);
			return -LIBBPF_ERRNO__FORMAT;
		}

		pr_warn("map '%s' (legacy): legacy map definitions are deprecated, use BTF-defined maps instead\n", map_name);

		if (ELF64_ST_BIND(sym->st_info) == STB_LOCAL) {
			pr_warn("map '%s' (legacy): static maps are not supported\n", map_name);
			return -ENOTSUP;
		}

		map->libbpf_type = LIBBPF_MAP_UNSPEC;
		map->sec_idx = sym->st_shndx;
		map->sec_offset = sym->st_value;
		pr_debug("map '%s' (legacy): at sec_idx %d, offset %zu.\n",
			 map_name, map->sec_idx, map->sec_offset);
		if (sym->st_value + map_def_sz > data->d_size) {
			pr_warn("corrupted maps section in %s: last map \"%s\" too small\n",
				obj->path, map_name);
			return -EINVAL;
		}

		map->name = strdup(map_name);
		if (!map->name) {
			pr_warn("map '%s': failed to alloc map name\n", map_name);
			return -ENOMEM;
		}
		pr_debug("map %d is \"%s\"\n", i, map->name);
		def = (struct bpf_map_def *)(data->d_buf + sym->st_value);
		/*
		 * If the on-disk definition is no bigger than the
		 * struct bpf_map_def this libbpf was built with, copy it
		 * whole; any missing trailing fields were already zeroed
		 * by bpf_object__add_map().
		 */
		if (map_def_sz <= sizeof(struct bpf_map_def)) {
			memcpy(&map->def, def, map_def_sz);
		} else {
			/*
			 * Here the on-disk map definition is bigger than the
			 * known struct bpf_map_def. If the extra trailing
			 * bytes contain anything non-zero, their meaning is
			 * unknown to this libbpf, so refuse to load in
			 * strict mode.
			 */
			char *b;

			for (b = ((char *)def) + sizeof(struct bpf_map_def);
			     b < ((char *)def) + map_def_sz; b++) {
				if (*b != 0) {
					pr_warn("maps section in %s: \"%s\" has unrecognized, non-zero options\n",
						obj->path, map_name);
					if (strict)
						return -EINVAL;
				}
			}
			memcpy(&map->def, def, sizeof(struct bpf_map_def));
		}

		/* btf info may not exist but fill it in if it does exist */
		(void) bpf_map_find_btf_info(obj, map);
	}
	return 0;
}

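/* Walk a BTF type chain, skipping typedefs and type modifiers (const,
 * volatile, restrict) until a concrete type is reached. E.g. given
 * "typedef const int my_int", resolving "my_int" lands on "int", with
 * *res_id (if provided) set to the ID of that final type.
 */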
const struct btf_type *
skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id)
{
	const struct btf_type *t = btf__type_by_id(btf, id);

	if (res_id)
		*res_id = id;

	while (btf_is_mod(t) || btf_is_typedef(t)) {
		if (res_id)
			*res_id = t->type;
		t = btf__type_by_id(btf, t->type);
	}

	return t;
}

static const struct btf_type *
resolve_func_ptr(const struct btf *btf, __u32 id, __u32 *res_id)
{
	const struct btf_type *t;

	t = skip_mods_and_typedefs(btf, id, NULL);
	if (!btf_is_ptr(t))
		return NULL;

	t = skip_mods_and_typedefs(btf, t->type, res_id);

	return btf_is_func_proto(t) ? t : NULL;
}

static const char *__btf_kind_str(__u16 kind)
{
	switch (kind) {
	case BTF_KIND_UNKN: return "void";
	case BTF_KIND_INT: return "int";
	case BTF_KIND_PTR: return "ptr";
	case BTF_KIND_ARRAY: return "array";
	case BTF_KIND_STRUCT: return "struct";
	case BTF_KIND_UNION: return "union";
	case BTF_KIND_ENUM: return "enum";
	case BTF_KIND_FWD: return "fwd";
	case BTF_KIND_TYPEDEF: return "typedef";
	case BTF_KIND_VOLATILE: return "volatile";
	case BTF_KIND_CONST: return "const";
	case BTF_KIND_RESTRICT: return "restrict";
	case BTF_KIND_FUNC: return "func";
	case BTF_KIND_FUNC_PROTO: return "func_proto";
	case BTF_KIND_VAR: return "var";
	case BTF_KIND_DATASEC: return "datasec";
	case BTF_KIND_FLOAT: return "float";
	case BTF_KIND_DECL_TAG: return "decl_tag";
	case BTF_KIND_TYPE_TAG: return "type_tag";
	default: return "unknown";
	}
}

const char *btf_kind_str(const struct btf_type *t)
{
	return __btf_kind_str(btf_kind(t));
}

/*
 * Fetch an integer attribute of a BTF-defined map. Such attributes are
 * encoded using a pointer to an array type, whose dimensionality carries
 * the attribute's value, e.g. int (*type)[BPF_MAP_TYPE_ARRAY] encodes
 * "type = BPF_MAP_TYPE_ARRAY" purely at the type level, without taking up
 * any runtime storage in the map definition.
 */
static bool get_map_field_int(const char *map_name, const struct btf *btf,
			      const struct btf_member *m, __u32 *res)
{
	const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL);
	const char *name = btf__name_by_offset(btf, m->name_off);
	const struct btf_array *arr_info;
	const struct btf_type *arr_t;

	if (!btf_is_ptr(t)) {
		pr_warn("map '%s': attr '%s': expected PTR, got %s.\n",
			map_name, name, btf_kind_str(t));
		return false;
	}

	arr_t = btf__type_by_id(btf, t->type);
	if (!arr_t) {
		pr_warn("map '%s': attr '%s': type [%u] not found.\n",
			map_name, name, t->type);
		return false;
	}
	if (!btf_is_array(arr_t)) {
		pr_warn("map '%s': attr '%s': expected ARRAY, got %s.\n",
			map_name, name, btf_kind_str(arr_t));
		return false;
	}
	arr_info = btf_array(arr_t);
	*res = arr_info->nelems;
	return true;
}
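
/* This is exactly what the __uint() convenience macro from bpf_helpers.h
 * produces: __uint(max_entries, 1024) expands to the member declaration
 * "int (*max_entries)[1024]", whose array dimension is recovered above via
 * btf_array(arr_t)->nelems.
 */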

static int build_map_pin_path(struct bpf_map *map, const char *path)
{
	char buf[PATH_MAX];
	int len;

	if (!path)
		path = "/sys/fs/bpf";

	len = snprintf(buf, PATH_MAX, "%s/%s", path, bpf_map__name(map));
	if (len < 0)
		return -EINVAL;
	else if (len >= PATH_MAX)
		return -ENAMETOOLONG;

	return bpf_map__set_pin_path(map, buf);
}
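
/* E.g. a map named "my_map" with a NULL path argument gets the pin path
 * "/sys/fs/bpf/my_map" (assuming the default bpffs mount point).
 */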

int parse_btf_map_def(const char *map_name, struct btf *btf,
		      const struct btf_type *def_t, bool strict,
		      struct btf_map_def *map_def, struct btf_map_def *inner_def)
{
	const struct btf_type *t;
	const struct btf_member *m;
	bool is_inner = inner_def == NULL;
	int vlen, i;

	vlen = btf_vlen(def_t);
	m = btf_members(def_t);
	for (i = 0; i < vlen; i++, m++) {
		const char *name = btf__name_by_offset(btf, m->name_off);

		if (!name) {
			pr_warn("map '%s': invalid field #%d.\n", map_name, i);
			return -EINVAL;
		}
		if (strcmp(name, "type") == 0) {
			if (!get_map_field_int(map_name, btf, m, &map_def->map_type))
				return -EINVAL;
			map_def->parts |= MAP_DEF_MAP_TYPE;
		} else if (strcmp(name, "max_entries") == 0) {
			if (!get_map_field_int(map_name, btf, m, &map_def->max_entries))
				return -EINVAL;
			map_def->parts |= MAP_DEF_MAX_ENTRIES;
		} else if (strcmp(name, "map_flags") == 0) {
			if (!get_map_field_int(map_name, btf, m, &map_def->map_flags))
				return -EINVAL;
			map_def->parts |= MAP_DEF_MAP_FLAGS;
		} else if (strcmp(name, "numa_node") == 0) {
			if (!get_map_field_int(map_name, btf, m, &map_def->numa_node))
				return -EINVAL;
			map_def->parts |= MAP_DEF_NUMA_NODE;
		} else if (strcmp(name, "key_size") == 0) {
			__u32 sz;

			if (!get_map_field_int(map_name, btf, m, &sz))
				return -EINVAL;
			if (map_def->key_size && map_def->key_size != sz) {
				pr_warn("map '%s': conflicting key size %u != %u.\n",
					map_name, map_def->key_size, sz);
				return -EINVAL;
			}
			map_def->key_size = sz;
			map_def->parts |= MAP_DEF_KEY_SIZE;
		} else if (strcmp(name, "key") == 0) {
			__s64 sz;

			t = btf__type_by_id(btf, m->type);
			if (!t) {
				pr_warn("map '%s': key type [%d] not found.\n",
					map_name, m->type);
				return -EINVAL;
			}
			if (!btf_is_ptr(t)) {
				pr_warn("map '%s': key spec is not PTR: %s.\n",
					map_name, btf_kind_str(t));
				return -EINVAL;
			}
			sz = btf__resolve_size(btf, t->type);
			if (sz < 0) {
				pr_warn("map '%s': can't determine key size for type [%u]: %zd.\n",
					map_name, t->type, (ssize_t)sz);
				return sz;
			}
			if (map_def->key_size && map_def->key_size != sz) {
				pr_warn("map '%s': conflicting key size %u != %zd.\n",
					map_name, map_def->key_size, (ssize_t)sz);
				return -EINVAL;
			}
			map_def->key_size = sz;
			map_def->key_type_id = t->type;
			map_def->parts |= MAP_DEF_KEY_SIZE | MAP_DEF_KEY_TYPE;
		} else if (strcmp(name, "value_size") == 0) {
			__u32 sz;

			if (!get_map_field_int(map_name, btf, m, &sz))
				return -EINVAL;
			if (map_def->value_size && map_def->value_size != sz) {
				pr_warn("map '%s': conflicting value size %u != %u.\n",
					map_name, map_def->value_size, sz);
				return -EINVAL;
			}
			map_def->value_size = sz;
			map_def->parts |= MAP_DEF_VALUE_SIZE;
		} else if (strcmp(name, "value") == 0) {
			__s64 sz;

			t = btf__type_by_id(btf, m->type);
			if (!t) {
				pr_warn("map '%s': value type [%d] not found.\n",
					map_name, m->type);
				return -EINVAL;
			}
			if (!btf_is_ptr(t)) {
				pr_warn("map '%s': value spec is not PTR: %s.\n",
					map_name, btf_kind_str(t));
				return -EINVAL;
			}
			sz = btf__resolve_size(btf, t->type);
			if (sz < 0) {
				pr_warn("map '%s': can't determine value size for type [%u]: %zd.\n",
					map_name, t->type, (ssize_t)sz);
				return sz;
			}
			if (map_def->value_size && map_def->value_size != sz) {
				pr_warn("map '%s': conflicting value size %u != %zd.\n",
					map_name, map_def->value_size, (ssize_t)sz);
				return -EINVAL;
			}
			map_def->value_size = sz;
			map_def->value_type_id = t->type;
			map_def->parts |= MAP_DEF_VALUE_SIZE | MAP_DEF_VALUE_TYPE;
		} else if (strcmp(name, "values") == 0) {
			bool is_map_in_map = bpf_map_type__is_map_in_map(map_def->map_type);
			bool is_prog_array = map_def->map_type == BPF_MAP_TYPE_PROG_ARRAY;
			const char *desc = is_map_in_map ? "map-in-map inner" : "prog-array value";
			char inner_map_name[128];
			int err;

			if (is_inner) {
				pr_warn("map '%s': multi-level inner maps not supported.\n",
					map_name);
				return -ENOTSUP;
			}
			if (i != vlen - 1) {
				pr_warn("map '%s': '%s' member should be last.\n",
					map_name, name);
				return -EINVAL;
			}
			if (!is_map_in_map && !is_prog_array) {
				pr_warn("map '%s': should be map-in-map or prog-array.\n",
					map_name);
				return -ENOTSUP;
			}
			if (map_def->value_size && map_def->value_size != 4) {
				pr_warn("map '%s': conflicting value size %u != 4.\n",
					map_name, map_def->value_size);
				return -EINVAL;
			}
			map_def->value_size = 4;
			t = btf__type_by_id(btf, m->type);
			if (!t) {
				pr_warn("map '%s': %s type [%d] not found.\n",
					map_name, desc, m->type);
				return -EINVAL;
			}
			if (!btf_is_array(t) || btf_array(t)->nelems) {
				pr_warn("map '%s': %s spec is not a zero-sized array.\n",
					map_name, desc);
				return -EINVAL;
			}
			t = skip_mods_and_typedefs(btf, btf_array(t)->type, NULL);
			if (!btf_is_ptr(t)) {
				pr_warn("map '%s': %s def is of unexpected kind %s.\n",
					map_name, desc, btf_kind_str(t));
				return -EINVAL;
			}
			t = skip_mods_and_typedefs(btf, t->type, NULL);
			if (is_prog_array) {
				if (!btf_is_func_proto(t)) {
					pr_warn("map '%s': prog-array value def is of unexpected kind %s.\n",
						map_name, btf_kind_str(t));
					return -EINVAL;
				}
				continue;
			}
			if (!btf_is_struct(t)) {
				pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n",
					map_name, btf_kind_str(t));
				return -EINVAL;
			}

			snprintf(inner_map_name, sizeof(inner_map_name), "%s.inner", map_name);
			err = parse_btf_map_def(inner_map_name, btf, t, strict, inner_def, NULL);
			if (err)
				return err;

			map_def->parts |= MAP_DEF_INNER_MAP;
		} else if (strcmp(name, "pinning") == 0) {
			__u32 val;

			if (is_inner) {
				pr_warn("map '%s': inner def can't be pinned.\n", map_name);
				return -EINVAL;
			}
			if (!get_map_field_int(map_name, btf, m, &val))
				return -EINVAL;
			if (val != LIBBPF_PIN_NONE && val != LIBBPF_PIN_BY_NAME) {
				pr_warn("map '%s': invalid pinning value %u.\n",
					map_name, val);
				return -EINVAL;
			}
			map_def->pinning = val;
			map_def->parts |= MAP_DEF_PINNING;
		} else if (strcmp(name, "map_extra") == 0) {
			__u32 map_extra;

			if (!get_map_field_int(map_name, btf, m, &map_extra))
				return -EINVAL;
			map_def->map_extra = map_extra;
			map_def->parts |= MAP_DEF_MAP_EXTRA;
		} else {
			if (strict) {
				pr_warn("map '%s': unknown field '%s'.\n", map_name, name);
				return -ENOTSUP;
			}
			pr_debug("map '%s': ignoring unknown field '%s'.\n", map_name, name);
		}
	}

	if (map_def->map_type == BPF_MAP_TYPE_UNSPEC) {
		pr_warn("map '%s': map type isn't specified.\n", map_name);
		return -EINVAL;
	}

	return 0;
}
2398
2399static void fill_map_from_def(struct bpf_map *map, const struct btf_map_def *def)
2400{
2401 map->def.type = def->map_type;
2402 map->def.key_size = def->key_size;
2403 map->def.value_size = def->value_size;
2404 map->def.max_entries = def->max_entries;
2405 map->def.map_flags = def->map_flags;
2406 map->map_extra = def->map_extra;
2407
2408 map->numa_node = def->numa_node;
2409 map->btf_key_type_id = def->key_type_id;
2410 map->btf_value_type_id = def->value_type_id;
2411
2412 if (def->parts & MAP_DEF_MAP_TYPE)
2413 pr_debug("map '%s': found type = %u.\n", map->name, def->map_type);
2414
2415 if (def->parts & MAP_DEF_KEY_TYPE)
2416 pr_debug("map '%s': found key [%u], sz = %u.\n",
2417 map->name, def->key_type_id, def->key_size);
2418 else if (def->parts & MAP_DEF_KEY_SIZE)
2419 pr_debug("map '%s': found key_size = %u.\n", map->name, def->key_size);
2420
2421 if (def->parts & MAP_DEF_VALUE_TYPE)
2422 pr_debug("map '%s': found value [%u], sz = %u.\n",
2423 map->name, def->value_type_id, def->value_size);
2424 else if (def->parts & MAP_DEF_VALUE_SIZE)
2425 pr_debug("map '%s': found value_size = %u.\n", map->name, def->value_size);
2426
2427 if (def->parts & MAP_DEF_MAX_ENTRIES)
2428 pr_debug("map '%s': found max_entries = %u.\n", map->name, def->max_entries);
2429 if (def->parts & MAP_DEF_MAP_FLAGS)
2430 pr_debug("map '%s': found map_flags = 0x%x.\n", map->name, def->map_flags);
2431 if (def->parts & MAP_DEF_MAP_EXTRA)
2432 pr_debug("map '%s': found map_extra = 0x%llx.\n", map->name,
2433 (unsigned long long)def->map_extra);
2434 if (def->parts & MAP_DEF_PINNING)
2435 pr_debug("map '%s': found pinning = %u.\n", map->name, def->pinning);
2436 if (def->parts & MAP_DEF_NUMA_NODE)
2437 pr_debug("map '%s': found numa_node = %u.\n", map->name, def->numa_node);
2438
2439 if (def->parts & MAP_DEF_INNER_MAP)
2440 pr_debug("map '%s': found inner map definition.\n", map->name);
2441}
2442
2443static const char *btf_var_linkage_str(__u32 linkage)
2444{
2445 switch (linkage) {
2446 case BTF_VAR_STATIC: return "static";
2447 case BTF_VAR_GLOBAL_ALLOCATED: return "global";
2448 case BTF_VAR_GLOBAL_EXTERN: return "extern";
2449 default: return "unknown";
2450 }
2451}
2452
2453static int bpf_object__init_user_btf_map(struct bpf_object *obj,
2454 const struct btf_type *sec,
2455 int var_idx, int sec_idx,
2456 const Elf_Data *data, bool strict,
2457 const char *pin_root_path)
2458{
2459 struct btf_map_def map_def = {}, inner_def = {};
2460 const struct btf_type *var, *def;
2461 const struct btf_var_secinfo *vi;
2462 const struct btf_var *var_extra;
2463 const char *map_name;
2464 struct bpf_map *map;
2465 int err;
2466
2467 vi = btf_var_secinfos(sec) + var_idx;
2468 var = btf__type_by_id(obj->btf, vi->type);
2469 var_extra = btf_var(var);
2470 map_name = btf__name_by_offset(obj->btf, var->name_off);
2471
2472 if (map_name == NULL || map_name[0] == '\0') {
2473 pr_warn("map #%d: empty name.\n", var_idx);
2474 return -EINVAL;
2475 }
2476 if ((__u64)vi->offset + vi->size > data->d_size) {
2477 pr_warn("map '%s' BTF data is corrupted.\n", map_name);
2478 return -EINVAL;
2479 }
2480 if (!btf_is_var(var)) {
2481 pr_warn("map '%s': unexpected var kind %s.\n",
2482 map_name, btf_kind_str(var));
2483 return -EINVAL;
2484 }
2485 if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED) {
2486 pr_warn("map '%s': unsupported map linkage %s.\n",
2487 map_name, btf_var_linkage_str(var_extra->linkage));
2488 return -EOPNOTSUPP;
2489 }
2490
2491 def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
2492 if (!btf_is_struct(def)) {
2493 pr_warn("map '%s': unexpected def kind %s.\n",
			map_name, btf_kind_str(def));
2495 return -EINVAL;
2496 }
2497 if (def->size > vi->size) {
2498 pr_warn("map '%s': invalid def size.\n", map_name);
2499 return -EINVAL;
2500 }
2501
2502 map = bpf_object__add_map(obj);
2503 if (IS_ERR(map))
2504 return PTR_ERR(map);
2505 map->name = strdup(map_name);
2506 if (!map->name) {
2507 pr_warn("map '%s': failed to alloc map name.\n", map_name);
2508 return -ENOMEM;
2509 }
2510 map->libbpf_type = LIBBPF_MAP_UNSPEC;
2511 map->def.type = BPF_MAP_TYPE_UNSPEC;
2512 map->sec_idx = sec_idx;
2513 map->sec_offset = vi->offset;
2514 map->btf_var_idx = var_idx;
2515 pr_debug("map '%s': at sec_idx %d, offset %zu.\n",
2516 map_name, map->sec_idx, map->sec_offset);
2517
2518 err = parse_btf_map_def(map->name, obj->btf, def, strict, &map_def, &inner_def);
2519 if (err)
2520 return err;
2521
2522 fill_map_from_def(map, &map_def);
2523
2524 if (map_def.pinning == LIBBPF_PIN_BY_NAME) {
2525 err = build_map_pin_path(map, pin_root_path);
2526 if (err) {
2527 pr_warn("map '%s': couldn't build pin path.\n", map->name);
2528 return err;
2529 }
2530 }
2531
2532 if (map_def.parts & MAP_DEF_INNER_MAP) {
2533 map->inner_map = calloc(1, sizeof(*map->inner_map));
2534 if (!map->inner_map)
2535 return -ENOMEM;
2536 map->inner_map->fd = -1;
2537 map->inner_map->sec_idx = sec_idx;
2538 map->inner_map->name = malloc(strlen(map_name) + sizeof(".inner") + 1);
2539 if (!map->inner_map->name)
2540 return -ENOMEM;
2541 sprintf(map->inner_map->name, "%s.inner", map_name);
2542
2543 fill_map_from_def(map->inner_map, &inner_def);
2544 }
2545
2546 err = bpf_map_find_btf_info(obj, map);
2547 if (err)
2548 return err;
2549
2550 return 0;
2551}
2552
2553static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
2554 const char *pin_root_path)
2555{
2556 const struct btf_type *sec = NULL;
2557 int nr_types, i, vlen, err;
2558 const struct btf_type *t;
2559 const char *name;
2560 Elf_Data *data;
2561 Elf_Scn *scn;
2562
2563 if (obj->efile.btf_maps_shndx < 0)
2564 return 0;
2565
2566 scn = elf_sec_by_idx(obj, obj->efile.btf_maps_shndx);
2567 data = elf_sec_data(obj, scn);
2568 if (!scn || !data) {
2569 pr_warn("elf: failed to get %s map definitions for %s\n",
2570 MAPS_ELF_SEC, obj->path);
2571 return -EINVAL;
2572 }
2573
2574 nr_types = btf__type_cnt(obj->btf);
2575 for (i = 1; i < nr_types; i++) {
2576 t = btf__type_by_id(obj->btf, i);
2577 if (!btf_is_datasec(t))
2578 continue;
2579 name = btf__name_by_offset(obj->btf, t->name_off);
2580 if (strcmp(name, MAPS_ELF_SEC) == 0) {
2581 sec = t;
2582 obj->efile.btf_maps_sec_btf_id = i;
2583 break;
2584 }
2585 }
2586
2587 if (!sec) {
2588 pr_warn("DATASEC '%s' not found.\n", MAPS_ELF_SEC);
2589 return -ENOENT;
2590 }
2591
2592 vlen = btf_vlen(sec);
2593 for (i = 0; i < vlen; i++) {
2594 err = bpf_object__init_user_btf_map(obj, sec, i,
2595 obj->efile.btf_maps_shndx,
2596 data, strict,
2597 pin_root_path);
2598 if (err)
2599 return err;
2600 }
2601
2602 return 0;
2603}
2604
2605static int bpf_object__init_maps(struct bpf_object *obj,
2606 const struct bpf_object_open_opts *opts)
2607{
2608 const char *pin_root_path;
2609 bool strict;
2610 int err;
2611
2612 strict = !OPTS_GET(opts, relaxed_maps, false);
2613 pin_root_path = OPTS_GET(opts, pin_root_path, NULL);
2614
2615 err = bpf_object__init_user_maps(obj, strict);
2616 err = err ?: bpf_object__init_user_btf_maps(obj, strict, pin_root_path);
2617 err = err ?: bpf_object__init_global_data_maps(obj);
2618 err = err ?: bpf_object__init_kconfig_map(obj);
2619 err = err ?: bpf_object__init_struct_ops_maps(obj);
2620
2621 return err;
2622}
2623
2624static bool section_have_execinstr(struct bpf_object *obj, int idx)
2625{
2626 Elf64_Shdr *sh;
2627
2628 sh = elf_sec_hdr(obj, elf_sec_by_idx(obj, idx));
2629 if (!sh)
2630 return false;
2631
2632 return sh->sh_flags & SHF_EXECINSTR;
2633}
2634
2635static bool btf_needs_sanitization(struct bpf_object *obj)
2636{
2637 bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC);
2638 bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC);
2639 bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT);
2640 bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
2641 bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG);
2642 bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG);
2643
2644 return !has_func || !has_datasec || !has_func_global || !has_float ||
2645 !has_decl_tag || !has_type_tag;
2646}
2647
2648static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
2649{
2650 bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC);
2651 bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC);
2652 bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT);
2653 bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
2654 bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG);
2655 bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG);
2656 struct btf_type *t;
2657 int i, j, vlen;
2658
2659 for (i = 1; i < btf__type_cnt(btf); i++) {
2660 t = (struct btf_type *)btf__type_by_id(btf, i);
2661
2662 if ((!has_datasec && btf_is_var(t)) || (!has_decl_tag && btf_is_decl_tag(t))) {
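			/* replace VAR/DECL_TAG with INT */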
2664 t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0);
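			/* using size = 1 is the safest choice, 4 could be too
			 * big and cause kernel BTF validation failure if
			 * the original variable took less than 4 bytes
			 */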
2670 t->size = 1;
2671 *(int *)(t + 1) = BTF_INT_ENC(0, 0, 8);
2672 } else if (!has_datasec && btf_is_datasec(t)) {
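			/* replace DATASEC with STRUCT */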
2674 const struct btf_var_secinfo *v = btf_var_secinfos(t);
2675 struct btf_member *m = btf_members(t);
2676 struct btf_type *vt;
2677 char *name;
2678
2679 name = (char *)btf__name_by_offset(btf, t->name_off);
2680 while (*name) {
2681 if (*name == '.')
2682 *name = '_';
2683 name++;
2684 }
2685
2686 vlen = btf_vlen(t);
2687 t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, vlen);
2688 for (j = 0; j < vlen; j++, v++, m++) {
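				/* order of field assignments is important:
				 * m and v point into the same memory
				 */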
2690 m->offset = v->offset * 8;
2691 m->type = v->type;
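				/* preserve variable name as member name */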
2693 vt = (void *)btf__type_by_id(btf, v->type);
2694 m->name_off = vt->name_off;
2695 }
2696 } else if (!has_func && btf_is_func_proto(t)) {
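			/* replace FUNC_PROTO with ENUM */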
2698 vlen = btf_vlen(t);
2699 t->info = BTF_INFO_ENC(BTF_KIND_ENUM, 0, vlen);
2700 t->size = sizeof(__u32);
2701 } else if (!has_func && btf_is_func(t)) {
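			/* replace FUNC with TYPEDEF */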
2703 t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0);
2704 } else if (!has_func_global && btf_is_func(t)) {
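			/* replace BTF_FUNC_GLOBAL with BTF_FUNC_STATIC */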
2706 t->info = BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0);
2707 } else if (!has_float && btf_is_float(t)) {
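			/* replace FLOAT with an equally-sized empty STRUCT;
			 * since C compilers do not accept e.g. "float" as a
			 * valid struct name, make it anonymous
			 */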
2712 t->name_off = 0;
2713 t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 0);
2714 } else if (!has_type_tag && btf_is_type_tag(t)) {
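			/* replace TYPE_TAG with a CONST */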
2716 t->name_off = 0;
2717 t->info = BTF_INFO_ENC(BTF_KIND_CONST, 0, 0);
2718 }
2719 }
2720}
2721
2722static bool libbpf_needs_btf(const struct bpf_object *obj)
2723{
2724 return obj->efile.btf_maps_shndx >= 0 ||
2725 obj->efile.st_ops_shndx >= 0 ||
2726 obj->nr_extern > 0;
2727}
2728
2729static bool kernel_needs_btf(const struct bpf_object *obj)
2730{
2731 return obj->efile.st_ops_shndx >= 0;
2732}
2733
2734static int bpf_object__init_btf(struct bpf_object *obj,
2735 Elf_Data *btf_data,
2736 Elf_Data *btf_ext_data)
2737{
2738 int err = -ENOENT;
2739
2740 if (btf_data) {
2741 obj->btf = btf__new(btf_data->d_buf, btf_data->d_size);
2742 err = libbpf_get_error(obj->btf);
2743 if (err) {
2744 obj->btf = NULL;
2745 pr_warn("Error loading ELF section %s: %d.\n", BTF_ELF_SEC, err);
2746 goto out;
2747 }
2748
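		/* enforce 8-byte pointers for BPF-targeted BTFs */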
2749 btf__set_pointer_size(obj->btf, 8);
2750 }
2751 if (btf_ext_data) {
2752 if (!obj->btf) {
2753 pr_debug("Ignore ELF section %s because its depending ELF section %s is not found.\n",
2754 BTF_EXT_ELF_SEC, BTF_ELF_SEC);
2755 goto out;
2756 }
2757 obj->btf_ext = btf_ext__new(btf_ext_data->d_buf, btf_ext_data->d_size);
2758 err = libbpf_get_error(obj->btf_ext);
2759 if (err) {
2760 pr_warn("Error loading ELF section %s: %d. Ignored and continue.\n",
2761 BTF_EXT_ELF_SEC, err);
2762 obj->btf_ext = NULL;
2763 goto out;
2764 }
2765 }
2766out:
2767 if (err && libbpf_needs_btf(obj)) {
2768 pr_warn("BTF is required, but is missing or corrupted.\n");
2769 return err;
2770 }
2771 return 0;
2772}
2773
2774static int compare_vsi_off(const void *_a, const void *_b)
2775{
2776 const struct btf_var_secinfo *a = _a;
2777 const struct btf_var_secinfo *b = _b;
2778
2779 return a->offset - b->offset;
2780}
2781
2782static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
2783 struct btf_type *t)
2784{
2785 __u32 size = 0, off = 0, i, vars = btf_vlen(t);
2786 const char *name = btf__name_by_offset(btf, t->name_off);
2787 const struct btf_type *t_var;
2788 struct btf_var_secinfo *vsi;
2789 const struct btf_var *var;
2790 int ret;
2791
2792 if (!name) {
2793 pr_debug("No name found in string section for DATASEC kind.\n");
2794 return -ENOENT;
2795 }
2796
2797
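	/* extern-backing datasec size and var offsets were set correctly
	 * during the extern collection step, so just skip straight to
	 * sorting variables
	 */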
2800 if (t->size)
2801 goto sort_vars;
2802
2803 ret = find_elf_sec_sz(obj, name, &size);
2804 if (ret || !size) {
2805 pr_debug("Invalid size for section %s: %u bytes\n", name, size);
2806 return -ENOENT;
2807 }
2808
2809 t->size = size;
2810
2811 for (i = 0, vsi = btf_var_secinfos(t); i < vars; i++, vsi++) {
2812 t_var = btf__type_by_id(btf, vsi->type);
2813 if (!t_var || !btf_is_var(t_var)) {
2814 pr_debug("Non-VAR type seen in section %s\n", name);
2815 return -EINVAL;
2816 }
2817
2818 var = btf_var(t_var);
2819 if (var->linkage == BTF_VAR_STATIC)
2820 continue;
2821
2822 name = btf__name_by_offset(btf, t_var->name_off);
2823 if (!name) {
2824 pr_debug("No name found in string section for VAR kind\n");
2825 return -ENOENT;
2826 }
2827
2828 ret = find_elf_var_offset(obj, name, &off);
2829 if (ret) {
2830 pr_debug("No offset found in symbol table for VAR %s\n",
2831 name);
2832 return -ENOENT;
2833 }
2834
2835 vsi->offset = off;
2836 }
2837
2838sort_vars:
2839 qsort(btf_var_secinfos(t), vars, sizeof(*vsi), compare_vsi_off);
2840 return 0;
2841}
2842
2843static int btf_finalize_data(struct bpf_object *obj, struct btf *btf)
2844{
2845 int err = 0;
2846 __u32 i, n = btf__type_cnt(btf);
2847
2848 for (i = 1; i < n; i++) {
2849 struct btf_type *t = btf_type_by_id(btf, i);
2850
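		/* Loader needs to fix up some of the things compiler
		 * couldn't get its hands on while emitting BTF. This
		 * is section size and global variable offset. We use
		 * the info from the ELF itself for this purpose.
		 */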
2856 if (btf_is_datasec(t)) {
2857 err = btf_fixup_datasec(obj, btf, t);
2858 if (err)
2859 break;
2860 }
2861 }
2862
2863 return libbpf_err(err);
2864}
2865
2866int btf__finalize_data(struct bpf_object *obj, struct btf *btf)
2867{
2868 return btf_finalize_data(obj, btf);
2869}
2870
2871static int bpf_object__finalize_btf(struct bpf_object *obj)
2872{
2873 int err;
2874
2875 if (!obj->btf)
2876 return 0;
2877
2878 err = btf_finalize_data(obj, obj->btf);
2879 if (err) {
2880 pr_warn("Error finalizing %s: %d.\n", BTF_ELF_SEC, err);
2881 return err;
2882 }
2883
2884 return 0;
2885}
2886
2887static bool prog_needs_vmlinux_btf(struct bpf_program *prog)
2888{
2889 if (prog->type == BPF_PROG_TYPE_STRUCT_OPS ||
2890 prog->type == BPF_PROG_TYPE_LSM)
2891 return true;
2892
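	/* BPF_PROG_TYPE_TRACING programs that don't attach to another BPF
	 * program (no attach_prog_fd) attach to vmlinux and need its BTF
	 */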
2896 if (prog->type == BPF_PROG_TYPE_TRACING && !prog->attach_prog_fd)
2897 return true;
2898
2899 return false;
2900}
2901
2902static bool obj_needs_vmlinux_btf(const struct bpf_object *obj)
2903{
2904 struct bpf_program *prog;
2905 int i;
2906
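	/* CO-RE relocations need kernel BTF, but only when btf_custom_path
	 * is not specified
	 */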
2910 if (obj->btf_ext && obj->btf_ext->core_relo_info.len && !obj->btf_custom_path)
2911 return true;
2912
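	/* Support for typed ksyms needs kernel BTF */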
2914 for (i = 0; i < obj->nr_extern; i++) {
2915 const struct extern_desc *ext;
2916
2917 ext = &obj->externs[i];
2918 if (ext->type == EXT_KSYM && ext->ksym.type_id)
2919 return true;
2920 }
2921
2922 bpf_object__for_each_program(prog, obj) {
2923 if (!prog->load)
2924 continue;
2925 if (prog_needs_vmlinux_btf(prog))
2926 return true;
2927 }
2928
2929 return false;
2930}
2931
2932static int bpf_object__load_vmlinux_btf(struct bpf_object *obj, bool force)
2933{
2934 int err;
2935
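	/* btf_vmlinux could be loaded earlier; gen_loader doesn't need it */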
2937 if (obj->btf_vmlinux || obj->gen_loader)
2938 return 0;
2939
2940 if (!force && !obj_needs_vmlinux_btf(obj))
2941 return 0;
2942
2943 obj->btf_vmlinux = btf__load_vmlinux_btf();
2944 err = libbpf_get_error(obj->btf_vmlinux);
2945 if (err) {
2946 pr_warn("Error loading vmlinux BTF: %d\n", err);
2947 obj->btf_vmlinux = NULL;
2948 return err;
2949 }
2950 return 0;
2951}
2952
2953static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
2954{
2955 struct btf *kern_btf = obj->btf;
2956 bool btf_mandatory, sanitize;
2957 int i, err = 0;
2958
2959 if (!obj->btf)
2960 return 0;
2961
2962 if (!kernel_supports(obj, FEAT_BTF)) {
2963 if (kernel_needs_btf(obj)) {
2964 err = -EOPNOTSUPP;
2965 goto report;
2966 }
2967 pr_debug("Kernel doesn't support BTF, skipping uploading it.\n");
2968 return 0;
2969 }
2970
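	/* Some subprograms are marked (prog->mark_btf_static) to be verified
	 * under the more permissive rules the BPF verifier applies to static
	 * functions. For those, downgrade the linkage of the matching BTF
	 * FUNC from global to static before loading BTF into the kernel.
	 */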
2979 for (i = 0; i < obj->nr_programs; i++) {
2980 struct bpf_program *prog = &obj->programs[i];
2981 struct btf_type *t;
2982 const char *name;
2983 int j, n;
2984
2985 if (!prog->mark_btf_static || !prog_is_subprog(obj, prog))
2986 continue;
2987
2988 n = btf__type_cnt(obj->btf);
2989 for (j = 1; j < n; j++) {
2990 t = btf_type_by_id(obj->btf, j);
2991 if (!btf_is_func(t) || btf_func_linkage(t) != BTF_FUNC_GLOBAL)
2992 continue;
2993
2994 name = btf__str_by_offset(obj->btf, t->name_off);
2995 if (strcmp(name, prog->name) != 0)
2996 continue;
2997
2998 t->info = btf_type_info(BTF_KIND_FUNC, BTF_FUNC_STATIC, 0);
2999 break;
3000 }
3001 }
3002
3003 sanitize = btf_needs_sanitization(obj);
3004 if (sanitize) {
3005 const void *raw_data;
3006 __u32 sz;
3008
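		/* clone BTF to sanitize a copy and leave the original intact */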
3009 raw_data = btf__raw_data(obj->btf, &sz);
3010 kern_btf = btf__new(raw_data, sz);
3011 err = libbpf_get_error(kern_btf);
3012 if (err)
3013 return err;
3015
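		/* enforce 8-byte pointers for BPF-targeted BTFs */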
3016 btf__set_pointer_size(obj->btf, 8);
3017 bpf_object__sanitize_btf(obj, kern_btf);
3018 }
3019
3020 if (obj->gen_loader) {
3021 __u32 raw_size = 0;
3022 const void *raw_data = btf__raw_data(kern_btf, &raw_size);
3023
3024 if (!raw_data)
3025 return -ENOMEM;
3026 bpf_gen__load_btf(obj->gen_loader, raw_data, raw_size);
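		/* Pretend to have a valid FD to pass various fd >= 0 checks.
		 * This fd == 0 will not be used with any syscall and will be
		 * reset to -1 eventually.
		 */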
3030 btf__set_fd(kern_btf, 0);
3031 } else {
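		/* currently BPF_BTF_LOAD only supports log_level 1 */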
3033 err = btf_load_into_kernel(kern_btf, obj->log_buf, obj->log_size,
3034 obj->log_level ? 1 : 0);
3035 }
3036 if (sanitize) {
3037 if (!err) {
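			/* move fd to libbpf's BTF */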
3039 btf__set_fd(obj->btf, btf__fd(kern_btf));
3040 btf__set_fd(kern_btf, -1);
3041 }
3042 btf__free(kern_btf);
3043 }
3044report:
3045 if (err) {
3046 btf_mandatory = kernel_needs_btf(obj);
3047 pr_warn("Error loading .BTF into kernel: %d. %s\n", err,
3048 btf_mandatory ? "BTF is mandatory, can't proceed."
3049 : "BTF is optional, ignoring.");
3050 if (!btf_mandatory)
3051 err = 0;
3052 }
3053 return err;
3054}
3055
3056static const char *elf_sym_str(const struct bpf_object *obj, size_t off)
3057{
3058 const char *name;
3059
3060 name = elf_strptr(obj->efile.elf, obj->efile.strtabidx, off);
3061 if (!name) {
3062 pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n",
3063 off, obj->path, elf_errmsg(-1));
3064 return NULL;
3065 }
3066
3067 return name;
3068}
3069
3070static const char *elf_sec_str(const struct bpf_object *obj, size_t off)
3071{
3072 const char *name;
3073
3074 name = elf_strptr(obj->efile.elf, obj->efile.shstrndx, off);
3075 if (!name) {
3076 pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n",
3077 off, obj->path, elf_errmsg(-1));
3078 return NULL;
3079 }
3080
3081 return name;
3082}
3083
3084static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx)
3085{
3086 Elf_Scn *scn;
3087
3088 scn = elf_getscn(obj->efile.elf, idx);
3089 if (!scn) {
3090 pr_warn("elf: failed to get section(%zu) from %s: %s\n",
3091 idx, obj->path, elf_errmsg(-1));
3092 return NULL;
3093 }
3094 return scn;
3095}
3096
3097static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name)
3098{
3099 Elf_Scn *scn = NULL;
3100 Elf *elf = obj->efile.elf;
3101 const char *sec_name;
3102
3103 while ((scn = elf_nextscn(elf, scn)) != NULL) {
3104 sec_name = elf_sec_name(obj, scn);
3105 if (!sec_name)
3106 return NULL;
3107
3108 if (strcmp(sec_name, name) != 0)
3109 continue;
3110
3111 return scn;
3112 }
3113 return NULL;
3114}
3115
3116static Elf64_Shdr *elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn)
3117{
3118 Elf64_Shdr *shdr;
3119
3120 if (!scn)
3121 return NULL;
3122
3123 shdr = elf64_getshdr(scn);
3124 if (!shdr) {
3125 pr_warn("elf: failed to get section(%zu) header from %s: %s\n",
3126 elf_ndxscn(scn), obj->path, elf_errmsg(-1));
3127 return NULL;
3128 }
3129
3130 return shdr;
3131}
3132
3133static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn)
3134{
3135 const char *name;
3136 Elf64_Shdr *sh;
3137
3138 if (!scn)
3139 return NULL;
3140
3141 sh = elf_sec_hdr(obj, scn);
3142 if (!sh)
3143 return NULL;
3144
3145 name = elf_sec_str(obj, sh->sh_name);
3146 if (!name) {
3147 pr_warn("elf: failed to get section(%zu) name from %s: %s\n",
3148 elf_ndxscn(scn), obj->path, elf_errmsg(-1));
3149 return NULL;
3150 }
3151
3152 return name;
3153}
3154
3155static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn)
3156{
3157 Elf_Data *data;
3158
3159 if (!scn)
3160 return NULL;
3161
3162 data = elf_getdata(scn, 0);
3163 if (!data) {
3164 pr_warn("elf: failed to get section(%zu) %s data from %s: %s\n",
3165 elf_ndxscn(scn), elf_sec_name(obj, scn) ?: "<?>",
3166 obj->path, elf_errmsg(-1));
3167 return NULL;
3168 }
3169
3170 return data;
3171}
3172
3173static Elf64_Sym *elf_sym_by_idx(const struct bpf_object *obj, size_t idx)
3174{
3175 if (idx >= obj->efile.symbols->d_size / sizeof(Elf64_Sym))
3176 return NULL;
3177
3178 return (Elf64_Sym *)obj->efile.symbols->d_buf + idx;
3179}
3180
3181static Elf64_Rel *elf_rel_by_idx(Elf_Data *data, size_t idx)
3182{
3183 if (idx >= data->d_size / sizeof(Elf64_Rel))
3184 return NULL;
3185
3186 return (Elf64_Rel *)data->d_buf + idx;
3187}
3188
3189static bool is_sec_name_dwarf(const char *name)
3190{
3192 return str_has_pfx(name, ".debug_");
3193}
3194
3195static bool ignore_elf_section(Elf64_Shdr *hdr, const char *name)
3196{
3198 if (hdr->sh_type == SHT_STRTAB)
3199 return true;
3200
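	/* ignore .llvm_addrsig section as well */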
3202 if (hdr->sh_type == SHT_LLVM_ADDRSIG)
3203 return true;
3204
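	/* no subprograms will lead to an empty .text section, ignore it */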
3206 if (hdr->sh_type == SHT_PROGBITS && hdr->sh_size == 0 &&
3207 strcmp(name, ".text") == 0)
3208 return true;
3209
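	/* ignore DWARF sections */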
3211 if (is_sec_name_dwarf(name))
3212 return true;
3213
3214 if (str_has_pfx(name, ".rel")) {
3215 name += sizeof(".rel") - 1;
3217 if (is_sec_name_dwarf(name))
3218 return true;
3219
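		/* .BTF and .BTF.ext don't need relocations */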
3221 if (strcmp(name, BTF_ELF_SEC) == 0 ||
3222 strcmp(name, BTF_EXT_ELF_SEC) == 0)
3223 return true;
3224 }
3225
3226 return false;
3227}
3228
3229static int cmp_progs(const void *_a, const void *_b)
3230{
3231 const struct bpf_program *a = _a;
3232 const struct bpf_program *b = _b;
3233
3234 if (a->sec_idx != b->sec_idx)
3235 return a->sec_idx < b->sec_idx ? -1 : 1;
3236
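	/* sec_insn_off can't be the same within the same section */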
3238 return a->sec_insn_off < b->sec_insn_off ? -1 : 1;
3239}
3240
3241static int bpf_object__elf_collect(struct bpf_object *obj)
3242{
3243 struct elf_sec_desc *sec_desc;
3244 Elf *elf = obj->efile.elf;
3245 Elf_Data *btf_ext_data = NULL;
3246 Elf_Data *btf_data = NULL;
3247 int idx = 0, err = 0;
3248 const char *name;
3249 Elf_Data *data;
3250 Elf_Scn *scn;
3251 Elf64_Shdr *sh;
3252
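	/* ELF section indices are 0-based, but sec #0 is a special "invalid"
	 * section. e_shnum does include sec #0, so e_shnum is the necessary
	 * size of an array to keep all the sections.
	 */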
3257 obj->efile.sec_cnt = obj->efile.ehdr->e_shnum;
3258 obj->efile.secs = calloc(obj->efile.sec_cnt, sizeof(*obj->efile.secs));
3259 if (!obj->efile.secs)
3260 return -ENOMEM;
3261
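	/* a bunch of ELF parsing functionality depends on processing symbols,
	 * so do the first pass and find the symbol table
	 */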
3265 scn = NULL;
3266 while ((scn = elf_nextscn(elf, scn)) != NULL) {
3267 sh = elf_sec_hdr(obj, scn);
3268 if (!sh)
3269 return -LIBBPF_ERRNO__FORMAT;
3270
3271 if (sh->sh_type == SHT_SYMTAB) {
3272 if (obj->efile.symbols) {
3273 pr_warn("elf: multiple symbol tables in %s\n", obj->path);
3274 return -LIBBPF_ERRNO__FORMAT;
3275 }
3276
3277 data = elf_sec_data(obj, scn);
3278 if (!data)
3279 return -LIBBPF_ERRNO__FORMAT;
3280
3281 idx = elf_ndxscn(scn);
3282
3283 obj->efile.symbols = data;
3284 obj->efile.symbols_shndx = idx;
3285 obj->efile.strtabidx = sh->sh_link;
3286 }
3287 }
3288
3289 if (!obj->efile.symbols) {
3290 pr_warn("elf: couldn't find symbol table in %s, stripped object file?\n",
3291 obj->path);
3292 return -ENOENT;
3293 }
3294
3295 scn = NULL;
3296 while ((scn = elf_nextscn(elf, scn)) != NULL) {
3297 idx = elf_ndxscn(scn);
3298 sec_desc = &obj->efile.secs[idx];
3299
3300 sh = elf_sec_hdr(obj, scn);
3301 if (!sh)
3302 return -LIBBPF_ERRNO__FORMAT;
3303
3304 name = elf_sec_str(obj, sh->sh_name);
3305 if (!name)
3306 return -LIBBPF_ERRNO__FORMAT;
3307
3308 if (ignore_elf_section(sh, name))
3309 continue;
3310
3311 data = elf_sec_data(obj, scn);
3312 if (!data)
3313 return -LIBBPF_ERRNO__FORMAT;
3314
3315 pr_debug("elf: section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
3316 idx, name, (unsigned long)data->d_size,
3317 (int)sh->sh_link, (unsigned long)sh->sh_flags,
3318 (int)sh->sh_type);
3319
3320 if (strcmp(name, "license") == 0) {
3321 err = bpf_object__init_license(obj, data->d_buf, data->d_size);
3322 if (err)
3323 return err;
3324 } else if (strcmp(name, "version") == 0) {
3325 err = bpf_object__init_kversion(obj, data->d_buf, data->d_size);
3326 if (err)
3327 return err;
3328 } else if (strcmp(name, "maps") == 0) {
3329 obj->efile.maps_shndx = idx;
3330 } else if (strcmp(name, MAPS_ELF_SEC) == 0) {
3331 obj->efile.btf_maps_shndx = idx;
3332 } else if (strcmp(name, BTF_ELF_SEC) == 0) {
3333 if (sh->sh_type != SHT_PROGBITS)
3334 return -LIBBPF_ERRNO__FORMAT;
3335 btf_data = data;
3336 } else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
3337 if (sh->sh_type != SHT_PROGBITS)
3338 return -LIBBPF_ERRNO__FORMAT;
3339 btf_ext_data = data;
3340 } else if (sh->sh_type == SHT_SYMTAB) {
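			/* already processed during the first pass above */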
3342 } else if (sh->sh_type == SHT_PROGBITS && data->d_size > 0) {
3343 if (sh->sh_flags & SHF_EXECINSTR) {
3344 if (strcmp(name, ".text") == 0)
3345 obj->efile.text_shndx = idx;
3346 err = bpf_object__add_programs(obj, data, name, idx);
3347 if (err)
3348 return err;
3349 } else if (strcmp(name, DATA_SEC) == 0 ||
3350 str_has_pfx(name, DATA_SEC ".")) {
3351 sec_desc->sec_type = SEC_DATA;
3352 sec_desc->shdr = sh;
3353 sec_desc->data = data;
3354 } else if (strcmp(name, RODATA_SEC) == 0 ||
3355 str_has_pfx(name, RODATA_SEC ".")) {
3356 sec_desc->sec_type = SEC_RODATA;
3357 sec_desc->shdr = sh;
3358 sec_desc->data = data;
3359 } else if (strcmp(name, STRUCT_OPS_SEC) == 0) {
3360 obj->efile.st_ops_data = data;
3361 obj->efile.st_ops_shndx = idx;
3362 } else {
3363 pr_info("elf: skipping unrecognized data section(%d) %s\n",
3364 idx, name);
3365 }
3366 } else if (sh->sh_type == SHT_REL) {
			int targ_sec_idx = sh->sh_info; /* points to other section */
3368
3369 if (sh->sh_entsize != sizeof(Elf64_Rel) ||
3370 targ_sec_idx >= obj->efile.sec_cnt)
3371 return -LIBBPF_ERRNO__FORMAT;
3372
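			/* Only do relo for sections with exec instructions */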
3374 if (!section_have_execinstr(obj, targ_sec_idx) &&
3375 strcmp(name, ".rel" STRUCT_OPS_SEC) &&
3376 strcmp(name, ".rel" MAPS_ELF_SEC)) {
3377 pr_info("elf: skipping relo section(%d) %s for section(%d) %s\n",
3378 idx, name, targ_sec_idx,
3379 elf_sec_name(obj, elf_sec_by_idx(obj, targ_sec_idx)) ?: "<?>");
3380 continue;
3381 }
3382
3383 sec_desc->sec_type = SEC_RELO;
3384 sec_desc->shdr = sh;
3385 sec_desc->data = data;
3386 } else if (sh->sh_type == SHT_NOBITS && strcmp(name, BSS_SEC) == 0) {
3387 sec_desc->sec_type = SEC_BSS;
3388 sec_desc->shdr = sh;
3389 sec_desc->data = data;
3390 } else {
3391 pr_info("elf: skipping section(%d) %s (size %zu)\n", idx, name,
3392 (size_t)sh->sh_size);
3393 }
3394 }
3395
3396 if (!obj->efile.strtabidx || obj->efile.strtabidx > idx) {
3397 pr_warn("elf: symbol strings section missing or invalid in %s\n", obj->path);
3398 return -LIBBPF_ERRNO__FORMAT;
3399 }
3400
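	/* sort BPF programs by section index and in-section instruction
	 * offset for faster search
	 */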
3403 if (obj->nr_programs)
3404 qsort(obj->programs, obj->nr_programs, sizeof(*obj->programs), cmp_progs);
3405
3406 return bpf_object__init_btf(obj, btf_data, btf_ext_data);
3407}
3408
3409static bool sym_is_extern(const Elf64_Sym *sym)
3410{
3411 int bind = ELF64_ST_BIND(sym->st_info);
3412
3413 return sym->st_shndx == SHN_UNDEF &&
3414 (bind == STB_GLOBAL || bind == STB_WEAK) &&
3415 ELF64_ST_TYPE(sym->st_info) == STT_NOTYPE;
3416}
3417
3418static bool sym_is_subprog(const Elf64_Sym *sym, int text_shndx)
3419{
3420 int bind = ELF64_ST_BIND(sym->st_info);
3421 int type = ELF64_ST_TYPE(sym->st_info);
3422
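	/* in .text section */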
3424 if (sym->st_shndx != text_shndx)
3425 return false;
3426
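	/* local function */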
3428 if (bind == STB_LOCAL && type == STT_SECTION)
3429 return true;
3430
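	/* global function */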
3432 return bind == STB_GLOBAL && type == STT_FUNC;
3433}
3434
3435static int find_extern_btf_id(const struct btf *btf, const char *ext_name)
3436{
3437 const struct btf_type *t;
3438 const char *tname;
3439 int i, n;
3440
3441 if (!btf)
3442 return -ESRCH;
3443
3444 n = btf__type_cnt(btf);
3445 for (i = 1; i < n; i++) {
3446 t = btf__type_by_id(btf, i);
3447
3448 if (!btf_is_var(t) && !btf_is_func(t))
3449 continue;
3450
3451 tname = btf__name_by_offset(btf, t->name_off);
3452 if (strcmp(tname, ext_name))
3453 continue;
3454
3455 if (btf_is_var(t) &&
3456 btf_var(t)->linkage != BTF_VAR_GLOBAL_EXTERN)
3457 return -EINVAL;
3458
3459 if (btf_is_func(t) && btf_func_linkage(t) != BTF_FUNC_EXTERN)
3460 return -EINVAL;
3461
3462 return i;
3463 }
3464
3465 return -ENOENT;
3466}
3467
static int find_extern_sec_btf_id(struct btf *btf, int ext_btf_id)
{
	const struct btf_var_secinfo *vs;
3470 const struct btf_type *t;
3471 int i, j, n;
3472
3473 if (!btf)
3474 return -ESRCH;
3475
3476 n = btf__type_cnt(btf);
3477 for (i = 1; i < n; i++) {
3478 t = btf__type_by_id(btf, i);
3479
3480 if (!btf_is_datasec(t))
3481 continue;
3482
3483 vs = btf_var_secinfos(t);
3484 for (j = 0; j < btf_vlen(t); j++, vs++) {
3485 if (vs->type == ext_btf_id)
3486 return i;
3487 }
3488 }
3489
3490 return -ENOENT;
3491}
3492
3493static enum kcfg_type find_kcfg_type(const struct btf *btf, int id,
3494 bool *is_signed)
3495{
3496 const struct btf_type *t;
3497 const char *name;
3498
3499 t = skip_mods_and_typedefs(btf, id, NULL);
3500 name = btf__name_by_offset(btf, t->name_off);
3501
3502 if (is_signed)
3503 *is_signed = false;
3504 switch (btf_kind(t)) {
3505 case BTF_KIND_INT: {
3506 int enc = btf_int_encoding(t);
3507
3508 if (enc & BTF_INT_BOOL)
3509 return t->size == 1 ? KCFG_BOOL : KCFG_UNKNOWN;
3510 if (is_signed)
3511 *is_signed = enc & BTF_INT_SIGNED;
3512 if (t->size == 1)
3513 return KCFG_CHAR;
3514 if (t->size < 1 || t->size > 8 || (t->size & (t->size - 1)))
3515 return KCFG_UNKNOWN;
3516 return KCFG_INT;
3517 }
3518 case BTF_KIND_ENUM:
3519 if (t->size != 4)
3520 return KCFG_UNKNOWN;
3521 if (strcmp(name, "libbpf_tristate"))
3522 return KCFG_UNKNOWN;
3523 return KCFG_TRISTATE;
3524 case BTF_KIND_ARRAY:
3525 if (btf_array(t)->nelems == 0)
3526 return KCFG_UNKNOWN;
3527 if (find_kcfg_type(btf, btf_array(t)->type, NULL) != KCFG_CHAR)
3528 return KCFG_UNKNOWN;
3529 return KCFG_CHAR_ARR;
3530 default:
3531 return KCFG_UNKNOWN;
3532 }
3533}
3534
3535static int cmp_externs(const void *_a, const void *_b)
3536{
3537 const struct extern_desc *a = _a;
3538 const struct extern_desc *b = _b;
3539
3540 if (a->type != b->type)
3541 return a->type < b->type ? -1 : 1;
3542
3543 if (a->type == EXT_KCFG) {
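		/* descending order by alignment requirements */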
3545 if (a->kcfg.align != b->kcfg.align)
3546 return a->kcfg.align > b->kcfg.align ? -1 : 1;
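		/* ascending order by size, within the same alignment */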
3548 if (a->kcfg.sz != b->kcfg.sz)
3549 return a->kcfg.sz < b->kcfg.sz ? -1 : 1;
3550 }
3551
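	/* resolve ties by name */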
3553 return strcmp(a->name, b->name);
3554}
3555
3556static int find_int_btf_id(const struct btf *btf)
3557{
3558 const struct btf_type *t;
3559 int i, n;
3560
3561 n = btf__type_cnt(btf);
3562 for (i = 1; i < n; i++) {
3563 t = btf__type_by_id(btf, i);
3564
3565 if (btf_is_int(t) && btf_int_bits(t) == 32)
3566 return i;
3567 }
3568
3569 return 0;
3570}
3571
3572static int add_dummy_ksym_var(struct btf *btf)
3573{
3574 int i, int_btf_id, sec_btf_id, dummy_var_btf_id;
3575 const struct btf_var_secinfo *vs;
3576 const struct btf_type *sec;
3577
3578 if (!btf)
3579 return 0;
3580
3581 sec_btf_id = btf__find_by_name_kind(btf, KSYMS_SEC,
3582 BTF_KIND_DATASEC);
3583 if (sec_btf_id < 0)
3584 return 0;
3585
3586 sec = btf__type_by_id(btf, sec_btf_id);
3587 vs = btf_var_secinfos(sec);
3588 for (i = 0; i < btf_vlen(sec); i++, vs++) {
3589 const struct btf_type *vt;
3590
3591 vt = btf__type_by_id(btf, vs->type);
3592 if (btf_is_func(vt))
3593 break;
3594 }
3595
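	/* no func in ksyms section, no need to add dummy var */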
3597 if (i == btf_vlen(sec))
3598 return 0;
3599
3600 int_btf_id = find_int_btf_id(btf);
3601 dummy_var_btf_id = btf__add_var(btf,
3602 "dummy_ksym",
3603 BTF_VAR_GLOBAL_ALLOCATED,
3604 int_btf_id);
3605 if (dummy_var_btf_id < 0)
3606 pr_warn("cannot create a dummy_ksym var\n");
3607
3608 return dummy_var_btf_id;
3609}
3610
3611static int bpf_object__collect_externs(struct bpf_object *obj)
3612{
3613 struct btf_type *sec, *kcfg_sec = NULL, *ksym_sec = NULL;
3614 const struct btf_type *t;
3615 struct extern_desc *ext;
3616 int i, n, off, dummy_var_btf_id;
3617 const char *ext_name, *sec_name;
3618 Elf_Scn *scn;
3619 Elf64_Shdr *sh;
3620
3621 if (!obj->efile.symbols)
3622 return 0;
3623
3624 scn = elf_sec_by_idx(obj, obj->efile.symbols_shndx);
3625 sh = elf_sec_hdr(obj, scn);
3626 if (!sh || sh->sh_entsize != sizeof(Elf64_Sym))
3627 return -LIBBPF_ERRNO__FORMAT;
3628
3629 dummy_var_btf_id = add_dummy_ksym_var(obj->btf);
3630 if (dummy_var_btf_id < 0)
3631 return dummy_var_btf_id;
3632
3633 n = sh->sh_size / sh->sh_entsize;
3634 pr_debug("looking for externs among %d symbols...\n", n);
3635
3636 for (i = 0; i < n; i++) {
3637 Elf64_Sym *sym = elf_sym_by_idx(obj, i);
3638
3639 if (!sym)
3640 return -LIBBPF_ERRNO__FORMAT;
3641 if (!sym_is_extern(sym))
3642 continue;
3643 ext_name = elf_sym_str(obj, sym->st_name);
3644 if (!ext_name || !ext_name[0])
3645 continue;
3646
3647 ext = obj->externs;
3648 ext = libbpf_reallocarray(ext, obj->nr_extern + 1, sizeof(*ext));
3649 if (!ext)
3650 return -ENOMEM;
3651 obj->externs = ext;
3652 ext = &ext[obj->nr_extern];
3653 memset(ext, 0, sizeof(*ext));
3654 obj->nr_extern++;
3655
3656 ext->btf_id = find_extern_btf_id(obj->btf, ext_name);
3657 if (ext->btf_id <= 0) {
3658 pr_warn("failed to find BTF for extern '%s': %d\n",
3659 ext_name, ext->btf_id);
3660 return ext->btf_id;
3661 }
3662 t = btf__type_by_id(obj->btf, ext->btf_id);
3663 ext->name = btf__name_by_offset(obj->btf, t->name_off);
3664 ext->sym_idx = i;
3665 ext->is_weak = ELF64_ST_BIND(sym->st_info) == STB_WEAK;
3666
3667 ext->sec_btf_id = find_extern_sec_btf_id(obj->btf, ext->btf_id);
3668 if (ext->sec_btf_id <= 0) {
3669 pr_warn("failed to find BTF for extern '%s' [%d] section: %d\n",
3670 ext_name, ext->btf_id, ext->sec_btf_id);
3671 return ext->sec_btf_id;
3672 }
3673 sec = (void *)btf__type_by_id(obj->btf, ext->sec_btf_id);
3674 sec_name = btf__name_by_offset(obj->btf, sec->name_off);
3675
3676 if (strcmp(sec_name, KCONFIG_SEC) == 0) {
3677 if (btf_is_func(t)) {
3678 pr_warn("extern function %s is unsupported under %s section\n",
3679 ext->name, KCONFIG_SEC);
3680 return -ENOTSUP;
3681 }
3682 kcfg_sec = sec;
3683 ext->type = EXT_KCFG;
3684 ext->kcfg.sz = btf__resolve_size(obj->btf, t->type);
3685 if (ext->kcfg.sz <= 0) {
3686 pr_warn("failed to resolve size of extern (kcfg) '%s': %d\n",
3687 ext_name, ext->kcfg.sz);
3688 return ext->kcfg.sz;
3689 }
3690 ext->kcfg.align = btf__align_of(obj->btf, t->type);
3691 if (ext->kcfg.align <= 0) {
3692 pr_warn("failed to determine alignment of extern (kcfg) '%s': %d\n",
3693 ext_name, ext->kcfg.align);
3694 return -EINVAL;
3695 }
3696 ext->kcfg.type = find_kcfg_type(obj->btf, t->type,
3697 &ext->kcfg.is_signed);
3698 if (ext->kcfg.type == KCFG_UNKNOWN) {
3699 pr_warn("extern (kcfg) '%s' type is unsupported\n", ext_name);
3700 return -ENOTSUP;
3701 }
3702 } else if (strcmp(sec_name, KSYMS_SEC) == 0) {
3703 ksym_sec = sec;
3704 ext->type = EXT_KSYM;
3705 skip_mods_and_typedefs(obj->btf, t->type,
3706 &ext->ksym.type_id);
3707 } else {
3708 pr_warn("unrecognized extern section '%s'\n", sec_name);
3709 return -ENOTSUP;
3710 }
3711 }
3712 pr_debug("collected %d externs total\n", obj->nr_extern);
3713
3714 if (!obj->nr_extern)
3715 return 0;
3716
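	/* sort externs by type, for kcfg ones also by (align, size, name) */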
3718 qsort(obj->externs, obj->nr_extern, sizeof(*ext), cmp_externs);
3719
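	/* for .ksyms section, we need to turn all externs into allocated
	 * variables in BTF to pass kernel verification; we do this by
	 * pretending that each extern is a 8-byte variable
	 */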
3724 if (ksym_sec) {
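		/* find an existing 4-byte integer type in BTF to use for fake
		 * extern variables in DATASEC
		 */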
3728 int int_btf_id = find_int_btf_id(obj->btf);
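		/* For an extern function, the dummy_var added earlier
		 * will be used to replace vs->type, and its name string
		 * will be used to refill any missing param names.
		 */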
3734 const struct btf_type *dummy_var;
3735
3736 dummy_var = btf__type_by_id(obj->btf, dummy_var_btf_id);
3737 for (i = 0; i < obj->nr_extern; i++) {
3738 ext = &obj->externs[i];
3739 if (ext->type != EXT_KSYM)
3740 continue;
3741 pr_debug("extern (ksym) #%d: symbol %d, name %s\n",
3742 i, ext->sym_idx, ext->name);
3743 }
3744
3745 sec = ksym_sec;
3746 n = btf_vlen(sec);
3747 for (i = 0, off = 0; i < n; i++, off += sizeof(int)) {
3748 struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;
3749 struct btf_type *vt;
3750
3751 vt = (void *)btf__type_by_id(obj->btf, vs->type);
3752 ext_name = btf__name_by_offset(obj->btf, vt->name_off);
3753 ext = find_extern_by_name(obj, ext_name);
3754 if (!ext) {
3755 pr_warn("failed to find extern definition for BTF %s '%s'\n",
3756 btf_kind_str(vt), ext_name);
3757 return -ESRCH;
3758 }
3759 if (btf_is_func(vt)) {
3760 const struct btf_type *func_proto;
3761 struct btf_param *param;
3762 int j;
3763
3764 func_proto = btf__type_by_id(obj->btf,
3765 vt->type);
3766 param = btf_params(func_proto);
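				/* Reuse the dummy_var name string if the
				 * func proto does not have a param name.
				 */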
3770 for (j = 0; j < btf_vlen(func_proto); j++)
3771 if (param[j].type && !param[j].name_off)
3772 param[j].name_off =
3773 dummy_var->name_off;
3774 vs->type = dummy_var_btf_id;
3775 vt->info &= ~0xffff;
3776 vt->info |= BTF_FUNC_GLOBAL;
3777 } else {
3778 btf_var(vt)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
3779 vt->type = int_btf_id;
3780 }
3781 vs->offset = off;
3782 vs->size = sizeof(int);
3783 }
3784 sec->size = off;
3785 }
3786
3787 if (kcfg_sec) {
3788 sec = kcfg_sec;
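		/* for kcfg externs calculate their offsets within a .kconfig map */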
3790 off = 0;
3791 for (i = 0; i < obj->nr_extern; i++) {
3792 ext = &obj->externs[i];
3793 if (ext->type != EXT_KCFG)
3794 continue;
3795
3796 ext->kcfg.data_off = roundup(off, ext->kcfg.align);
3797 off = ext->kcfg.data_off + ext->kcfg.sz;
3798 pr_debug("extern (kcfg) #%d: symbol %d, off %u, name %s\n",
3799 i, ext->sym_idx, ext->kcfg.data_off, ext->name);
3800 }
3801 sec->size = off;
3802 n = btf_vlen(sec);
3803 for (i = 0; i < n; i++) {
3804 struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;
3805
3806 t = btf__type_by_id(obj->btf, vs->type);
3807 ext_name = btf__name_by_offset(obj->btf, t->name_off);
3808 ext = find_extern_by_name(obj, ext_name);
3809 if (!ext) {
3810 pr_warn("failed to find extern definition for BTF var '%s'\n",
3811 ext_name);
3812 return -ESRCH;
3813 }
3814 btf_var(t)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
3815 vs->offset = ext->kcfg.data_off;
3816 }
3817 }
3818 return 0;
3819}
3820
3821struct bpf_program *
3822bpf_object__find_program_by_title(const struct bpf_object *obj,
3823 const char *title)
3824{
3825 struct bpf_program *pos;
3826
3827 bpf_object__for_each_program(pos, obj) {
3828 if (pos->sec_name && !strcmp(pos->sec_name, title))
3829 return pos;
3830 }
3831 return errno = ENOENT, NULL;
3832}
3833
3834static bool prog_is_subprog(const struct bpf_object *obj,
3835 const struct bpf_program *prog)
3836{
3853 if (libbpf_mode & LIBBPF_STRICT_SEC_NAME)
3854 return prog->sec_idx == obj->efile.text_shndx;
3855
3856 return prog->sec_idx == obj->efile.text_shndx && obj->nr_programs > 1;
3857}
3858
3859struct bpf_program *
3860bpf_object__find_program_by_name(const struct bpf_object *obj,
3861 const char *name)
3862{
3863 struct bpf_program *prog;
3864
3865 bpf_object__for_each_program(prog, obj) {
3866 if (prog_is_subprog(obj, prog))
3867 continue;
3868 if (!strcmp(prog->name, name))
3869 return prog;
3870 }
3871 return errno = ENOENT, NULL;
3872}
3873
3874static bool bpf_object__shndx_is_data(const struct bpf_object *obj,
3875 int shndx)
3876{
3877 switch (obj->efile.secs[shndx].sec_type) {
3878 case SEC_BSS:
3879 case SEC_DATA:
3880 case SEC_RODATA:
3881 return true;
3882 default:
3883 return false;
3884 }
3885}
3886
3887static bool bpf_object__shndx_is_maps(const struct bpf_object *obj,
3888 int shndx)
3889{
3890 return shndx == obj->efile.maps_shndx ||
3891 shndx == obj->efile.btf_maps_shndx;
3892}
3893
3894static enum libbpf_map_type
3895bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx)
3896{
3897 if (shndx == obj->efile.symbols_shndx)
3898 return LIBBPF_MAP_KCONFIG;
3899
3900 switch (obj->efile.secs[shndx].sec_type) {
3901 case SEC_BSS:
3902 return LIBBPF_MAP_BSS;
3903 case SEC_DATA:
3904 return LIBBPF_MAP_DATA;
3905 case SEC_RODATA:
3906 return LIBBPF_MAP_RODATA;
3907 default:
3908 return LIBBPF_MAP_UNSPEC;
3909 }
3910}
3911
3912static int bpf_program__record_reloc(struct bpf_program *prog,
3913 struct reloc_desc *reloc_desc,
3914 __u32 insn_idx, const char *sym_name,
3915 const Elf64_Sym *sym, const Elf64_Rel *rel)
3916{
3917 struct bpf_insn *insn = &prog->insns[insn_idx];
3918 size_t map_idx, nr_maps = prog->obj->nr_maps;
3919 struct bpf_object *obj = prog->obj;
3920 __u32 shdr_idx = sym->st_shndx;
3921 enum libbpf_map_type type;
3922 const char *sym_sec_name;
3923 struct bpf_map *map;
3924
3925 if (!is_call_insn(insn) && !is_ldimm64_insn(insn)) {
3926 pr_warn("prog '%s': invalid relo against '%s' for insns[%d].code 0x%x\n",
3927 prog->name, sym_name, insn_idx, insn->code);
3928 return -LIBBPF_ERRNO__RELOC;
3929 }
3930
3931 if (sym_is_extern(sym)) {
3932 int sym_idx = ELF64_R_SYM(rel->r_info);
3933 int i, n = obj->nr_extern;
3934 struct extern_desc *ext;
3935
3936 for (i = 0; i < n; i++) {
3937 ext = &obj->externs[i];
3938 if (ext->sym_idx == sym_idx)
3939 break;
3940 }
3941 if (i >= n) {
3942 pr_warn("prog '%s': extern relo failed to find extern for '%s' (%d)\n",
3943 prog->name, sym_name, sym_idx);
3944 return -LIBBPF_ERRNO__RELOC;
3945 }
3946 pr_debug("prog '%s': found extern #%d '%s' (sym %d) for insn #%u\n",
3947 prog->name, i, ext->name, ext->sym_idx, insn_idx);
3948 if (insn->code == (BPF_JMP | BPF_CALL))
3949 reloc_desc->type = RELO_EXTERN_FUNC;
3950 else
3951 reloc_desc->type = RELO_EXTERN_VAR;
3952 reloc_desc->insn_idx = insn_idx;
3953 reloc_desc->sym_off = i;
3954 return 0;
3955 }
3956
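	/* sub-program call relocation */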
3958 if (is_call_insn(insn)) {
3959 if (insn->src_reg != BPF_PSEUDO_CALL) {
3960 pr_warn("prog '%s': incorrect bpf_call opcode\n", prog->name);
3961 return -LIBBPF_ERRNO__RELOC;
3962 }
3963
3964 if (!shdr_idx || shdr_idx != obj->efile.text_shndx) {
3965 sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
3966 pr_warn("prog '%s': bad call relo against '%s' in section '%s'\n",
3967 prog->name, sym_name, sym_sec_name);
3968 return -LIBBPF_ERRNO__RELOC;
3969 }
3970 if (sym->st_value % BPF_INSN_SZ) {
3971 pr_warn("prog '%s': bad call relo against '%s' at offset %zu\n",
3972 prog->name, sym_name, (size_t)sym->st_value);
3973 return -LIBBPF_ERRNO__RELOC;
3974 }
3975 reloc_desc->type = RELO_CALL;
3976 reloc_desc->insn_idx = insn_idx;
3977 reloc_desc->sym_off = sym->st_value;
3978 return 0;
3979 }
3980
3981 if (!shdr_idx || shdr_idx >= SHN_LORESERVE) {
3982 pr_warn("prog '%s': invalid relo against '%s' in special section 0x%x; forgot to initialize global var?..\n",
3983 prog->name, sym_name, shdr_idx);
3984 return -LIBBPF_ERRNO__RELOC;
3985 }
3986
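	/* loading subprog addresses */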
3988 if (sym_is_subprog(sym, obj->efile.text_shndx)) {
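		/* global_func: sym->st_value = offset in the section, insn->imm = 0.
		 * local_func: sym->st_value = 0, insn->imm = offset in the section.
		 */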
3992 if ((sym->st_value % BPF_INSN_SZ) || (insn->imm % BPF_INSN_SZ)) {
3993 pr_warn("prog '%s': bad subprog addr relo against '%s' at offset %zu+%d\n",
3994 prog->name, sym_name, (size_t)sym->st_value, insn->imm);
3995 return -LIBBPF_ERRNO__RELOC;
3996 }
3997
3998 reloc_desc->type = RELO_SUBPROG_ADDR;
3999 reloc_desc->insn_idx = insn_idx;
4000 reloc_desc->sym_off = sym->st_value;
4001 return 0;
4002 }
4003
4004 type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx);
4005 sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
4006
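	/* generic map reference relocation */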
4008 if (type == LIBBPF_MAP_UNSPEC) {
4009 if (!bpf_object__shndx_is_maps(obj, shdr_idx)) {
4010 pr_warn("prog '%s': bad map relo against '%s' in section '%s'\n",
4011 prog->name, sym_name, sym_sec_name);
4012 return -LIBBPF_ERRNO__RELOC;
4013 }
4014 for (map_idx = 0; map_idx < nr_maps; map_idx++) {
4015 map = &obj->maps[map_idx];
4016 if (map->libbpf_type != type ||
4017 map->sec_idx != sym->st_shndx ||
4018 map->sec_offset != sym->st_value)
4019 continue;
4020 pr_debug("prog '%s': found map %zd (%s, sec %d, off %zu) for insn #%u\n",
4021 prog->name, map_idx, map->name, map->sec_idx,
4022 map->sec_offset, insn_idx);
4023 break;
4024 }
4025 if (map_idx >= nr_maps) {
4026 pr_warn("prog '%s': map relo failed to find map for section '%s', off %zu\n",
4027 prog->name, sym_sec_name, (size_t)sym->st_value);
4028 return -LIBBPF_ERRNO__RELOC;
4029 }
4030 reloc_desc->type = RELO_LD64;
4031 reloc_desc->insn_idx = insn_idx;
4032 reloc_desc->map_idx = map_idx;
4033 reloc_desc->sym_off = 0;
4034 return 0;
4035 }
4036
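	/* global data map relocation */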
4038 if (!bpf_object__shndx_is_data(obj, shdr_idx)) {
4039 pr_warn("prog '%s': bad data relo against section '%s'\n",
4040 prog->name, sym_sec_name);
4041 return -LIBBPF_ERRNO__RELOC;
4042 }
4043 for (map_idx = 0; map_idx < nr_maps; map_idx++) {
4044 map = &obj->maps[map_idx];
4045 if (map->libbpf_type != type || map->sec_idx != sym->st_shndx)
4046 continue;
4047 pr_debug("prog '%s': found data map %zd (%s, sec %d, off %zu) for insn %u\n",
4048 prog->name, map_idx, map->name, map->sec_idx,
4049 map->sec_offset, insn_idx);
4050 break;
4051 }
4052 if (map_idx >= nr_maps) {
4053 pr_warn("prog '%s': data relo failed to find map for section '%s'\n",
4054 prog->name, sym_sec_name);
4055 return -LIBBPF_ERRNO__RELOC;
4056 }
4057
4058 reloc_desc->type = RELO_DATA;
4059 reloc_desc->insn_idx = insn_idx;
4060 reloc_desc->map_idx = map_idx;
4061 reloc_desc->sym_off = sym->st_value;
4062 return 0;
4063}
4064
4065static bool prog_contains_insn(const struct bpf_program *prog, size_t insn_idx)
4066{
4067 return insn_idx >= prog->sec_insn_off &&
4068 insn_idx < prog->sec_insn_off + prog->sec_insn_cnt;
4069}
4070
4071static struct bpf_program *find_prog_by_sec_insn(const struct bpf_object *obj,
4072 size_t sec_idx, size_t insn_idx)
4073{
4074 int l = 0, r = obj->nr_programs - 1, m;
4075 struct bpf_program *prog;
4076
4077 while (l < r) {
4078 m = l + (r - l + 1) / 2;
4079 prog = &obj->programs[m];
4080
4081 if (prog->sec_idx < sec_idx ||
4082 (prog->sec_idx == sec_idx && prog->sec_insn_off <= insn_idx))
4083 l = m;
4084 else
4085 r = m - 1;
4086 }
4087
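	/* matching program could be at index l, but it still might be the
	 * wrong one, so we need to double-check conditions one last time
	 */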
4090 prog = &obj->programs[l];
4091 if (prog->sec_idx == sec_idx && prog_contains_insn(prog, insn_idx))
4092 return prog;
4093 return NULL;
4094}
4095
4096static int
4097bpf_object__collect_prog_relos(struct bpf_object *obj, Elf64_Shdr *shdr, Elf_Data *data)
4098{
4099 const char *relo_sec_name, *sec_name;
4100 size_t sec_idx = shdr->sh_info, sym_idx;
4101 struct bpf_program *prog;
4102 struct reloc_desc *relos;
4103 int err, i, nrels;
4104 const char *sym_name;
4105 __u32 insn_idx;
4106 Elf_Scn *scn;
4107 Elf_Data *scn_data;
4108 Elf64_Sym *sym;
4109 Elf64_Rel *rel;
4110
4111 if (sec_idx >= obj->efile.sec_cnt)
4112 return -EINVAL;
4113
4114 scn = elf_sec_by_idx(obj, sec_idx);
4115 scn_data = elf_sec_data(obj, scn);
4116
4117 relo_sec_name = elf_sec_str(obj, shdr->sh_name);
4118 sec_name = elf_sec_name(obj, scn);
4119 if (!relo_sec_name || !sec_name)
4120 return -EINVAL;
4121
4122 pr_debug("sec '%s': collecting relocation for section(%zu) '%s'\n",
4123 relo_sec_name, sec_idx, sec_name);
4124 nrels = shdr->sh_size / shdr->sh_entsize;
4125
4126 for (i = 0; i < nrels; i++) {
4127 rel = elf_rel_by_idx(data, i);
4128 if (!rel) {
4129 pr_warn("sec '%s': failed to get relo #%d\n", relo_sec_name, i);
4130 return -LIBBPF_ERRNO__FORMAT;
4131 }
4132
4133 sym_idx = ELF64_R_SYM(rel->r_info);
4134 sym = elf_sym_by_idx(obj, sym_idx);
4135 if (!sym) {
4136 pr_warn("sec '%s': symbol #%zu not found for relo #%d\n",
4137 relo_sec_name, sym_idx, i);
4138 return -LIBBPF_ERRNO__FORMAT;
4139 }
4140
4141 if (sym->st_shndx >= obj->efile.sec_cnt) {
4142 pr_warn("sec '%s': corrupted symbol #%zu pointing to invalid section #%zu for relo #%d\n",
4143 relo_sec_name, sym_idx, (size_t)sym->st_shndx, i);
4144 return -LIBBPF_ERRNO__FORMAT;
4145 }
4146
4147 if (rel->r_offset % BPF_INSN_SZ || rel->r_offset >= scn_data->d_size) {
4148 pr_warn("sec '%s': invalid offset 0x%zx for relo #%d\n",
4149 relo_sec_name, (size_t)rel->r_offset, i);
4150 return -LIBBPF_ERRNO__FORMAT;
4151 }
4152
4153 insn_idx = rel->r_offset / BPF_INSN_SZ;
4154
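		/* relocations against static functions are recorded as
		 * relocations against the section that contains the function;
		 * in such a case, the symbol will be STT_SECTION and
		 * sym->st_name will point to an empty string (0), so fetch
		 * the section name instead
		 */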
4160 if (ELF64_ST_TYPE(sym->st_info) == STT_SECTION && sym->st_name == 0)
4161 sym_name = elf_sec_name(obj, elf_sec_by_idx(obj, sym->st_shndx));
4162 else
4163 sym_name = elf_sym_str(obj, sym->st_name);
4164 sym_name = sym_name ?: "<?";
4165
4166 pr_debug("sec '%s': relo #%d: insn #%u against '%s'\n",
4167 relo_sec_name, i, insn_idx, sym_name);
4168
4169 prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
4170 if (!prog) {
4171 pr_debug("sec '%s': relo #%d: couldn't find program in section '%s' for insn #%u, probably overridden weak function, skipping...\n",
4172 relo_sec_name, i, sec_name, insn_idx);
4173 continue;
4174 }
4175
4176 relos = libbpf_reallocarray(prog->reloc_desc,
4177 prog->nr_reloc + 1, sizeof(*relos));
4178 if (!relos)
4179 return -ENOMEM;
4180 prog->reloc_desc = relos;
4181
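		/* adjust insn_idx to local BPF program frame of reference */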
4183 insn_idx -= prog->sec_insn_off;
4184 err = bpf_program__record_reloc(prog, &relos[prog->nr_reloc],
4185 insn_idx, sym_name, sym, rel);
4186 if (err)
4187 return err;
4188
4189 prog->nr_reloc++;
4190 }
4191 return 0;
4192}
4193
4194static int bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map)
4195{
4196 struct bpf_map_def *def = &map->def;
4197 __u32 key_type_id = 0, value_type_id = 0;
4198 int ret;
4199
4200 if (!obj->btf)
4201 return -ENOENT;
4202
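	/* if it's a BTF-defined map, we don't need to search for type IDs.
	 * For a struct_ops map, btf_key_type_id and btf_value_type_id are
	 * not needed either.
	 */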
4207 if (map->sec_idx == obj->efile.btf_maps_shndx ||
4208 bpf_map__is_struct_ops(map))
4209 return 0;
4210
4211 if (!bpf_map__is_internal(map)) {
4212 pr_warn("Use of BPF_ANNOTATE_KV_PAIR is deprecated, use BTF-defined maps in .maps section instead\n");
4213#pragma GCC diagnostic push
4214#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
4215 ret = btf__get_map_kv_tids(obj->btf, map->name, def->key_size,
4216 def->value_size, &key_type_id,
4217 &value_type_id);
4218#pragma GCC diagnostic pop
4219 } else {
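		/* LLVM annotates global data differently in BTF, that is,
		 * only as '.data', '.bss' or '.rodata'.
		 */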
4224 ret = btf__find_by_name(obj->btf, map->real_name);
4225 }
4226 if (ret < 0)
4227 return ret;
4228
4229 map->btf_key_type_id = key_type_id;
4230 map->btf_value_type_id = bpf_map__is_internal(map) ?
4231 ret : value_type_id;
4232 return 0;
4233}
4234
4235static int bpf_get_map_info_from_fdinfo(int fd, struct bpf_map_info *info)
4236{
4237 char file[PATH_MAX], buff[4096];
4238 FILE *fp;
4239 __u32 val;
4240 int err;
4241
4242 snprintf(file, sizeof(file), "/proc/%d/fdinfo/%d", getpid(), fd);
4243 memset(info, 0, sizeof(*info));
4244
4245 fp = fopen(file, "r");
4246 if (!fp) {
4247 err = -errno;
4248 pr_warn("failed to open %s: %d. No procfs support?\n", file,
4249 err);
4250 return err;
4251 }
4252
4253 while (fgets(buff, sizeof(buff), fp)) {
4254 if (sscanf(buff, "map_type:\t%u", &val) == 1)
4255 info->type = val;
4256 else if (sscanf(buff, "key_size:\t%u", &val) == 1)
4257 info->key_size = val;
4258 else if (sscanf(buff, "value_size:\t%u", &val) == 1)
4259 info->value_size = val;
4260 else if (sscanf(buff, "max_entries:\t%u", &val) == 1)
4261 info->max_entries = val;
4262 else if (sscanf(buff, "map_flags:\t%i", &val) == 1)
4263 info->map_flags = val;
4264 }
4265
4266 fclose(fp);
4267
4268 return 0;
4269}
4270
4271int bpf_map__reuse_fd(struct bpf_map *map, int fd)
4272{
4273 struct bpf_map_info info = {};
4274 __u32 len = sizeof(info);
4275 int new_fd, err;
4276 char *new_name;
4277
4278 err = bpf_obj_get_info_by_fd(fd, &info, &len);
4279 if (err && errno == EINVAL)
4280 err = bpf_get_map_info_from_fdinfo(fd, &info);
4281 if (err)
4282 return libbpf_err(err);
4283
4284 new_name = strdup(info.name);
4285 if (!new_name)
4286 return libbpf_err(-errno);
4287
4288 new_fd = open("/", O_RDONLY | O_CLOEXEC);
4289 if (new_fd < 0) {
4290 err = -errno;
4291 goto err_free_new_name;
4292 }
4293
4294 new_fd = dup3(fd, new_fd, O_CLOEXEC);
4295 if (new_fd < 0) {
4296 err = -errno;
4297 goto err_close_new_fd;
4298 }
4299
4300 err = zclose(map->fd);
4301 if (err) {
4302 err = -errno;
4303 goto err_close_new_fd;
4304 }
4305 free(map->name);
4306
4307 map->fd = new_fd;
4308 map->name = new_name;
4309 map->def.type = info.type;
4310 map->def.key_size = info.key_size;
4311 map->def.value_size = info.value_size;
4312 map->def.max_entries = info.max_entries;
4313 map->def.map_flags = info.map_flags;
4314 map->btf_key_type_id = info.btf_key_type_id;
4315 map->btf_value_type_id = info.btf_value_type_id;
4316 map->reused = true;
4317 map->map_extra = info.map_extra;
4318
4319 return 0;
4320
4321err_close_new_fd:
4322 close(new_fd);
4323err_free_new_name:
4324 free(new_name);
4325 return libbpf_err(err);
4326}
4327
4328__u32 bpf_map__max_entries(const struct bpf_map *map)
4329{
4330 return map->def.max_entries;
4331}
4332
4333struct bpf_map *bpf_map__inner_map(struct bpf_map *map)
4334{
4335 if (!bpf_map_type__is_map_in_map(map->def.type))
4336 return errno = EINVAL, NULL;
4337
4338 return map->inner_map;
4339}
4340
4341int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries)
4342{
4343 if (map->fd >= 0)
4344 return libbpf_err(-EBUSY);
4345 map->def.max_entries = max_entries;
4346 return 0;
4347}
4348
4349int bpf_map__resize(struct bpf_map *map, __u32 max_entries)
4350{
4351 if (!map || !max_entries)
4352 return libbpf_err(-EINVAL);
4353
4354 return bpf_map__set_max_entries(map, max_entries);
4355}
4356
4357static int
4358bpf_object__probe_loading(struct bpf_object *obj)
4359{
4360 char *cp, errmsg[STRERR_BUFSIZE];
4361 struct bpf_insn insns[] = {
4362 BPF_MOV64_IMM(BPF_REG_0, 0),
4363 BPF_EXIT_INSN(),
4364 };
4365 int ret, insn_cnt = ARRAY_SIZE(insns);
4366
4367 if (obj->gen_loader)
4368 return 0;
4369
4370 ret = bump_rlimit_memlock();
4371 if (ret)
4372 pr_warn("Failed to bump RLIMIT_MEMLOCK (err = %d), you might need to do it explicitly!\n", ret);
4373
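	/* make sure basic loading works */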
4375 ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, NULL);
4376 if (ret < 0)
4377 ret = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, NULL);
4378 if (ret < 0) {
4379 ret = errno;
4380 cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
4381 pr_warn("Error in %s():%s(%d). Couldn't load trivial BPF "
4382 "program. Make sure your kernel supports BPF "
4383 "(CONFIG_BPF_SYSCALL=y) and/or that RLIMIT_MEMLOCK is "
4384 "set to big enough value.\n", __func__, cp, ret);
4385 return -ret;
4386 }
4387 close(ret);
4388
4389 return 0;
4390}
4391
4392static int probe_fd(int fd)
4393{
4394 if (fd >= 0)
4395 close(fd);
4396 return fd >= 0;
4397}
4398
4399static int probe_kern_prog_name(void)
4400{
4401 struct bpf_insn insns[] = {
4402 BPF_MOV64_IMM(BPF_REG_0, 0),
4403 BPF_EXIT_INSN(),
4404 };
4405 int ret, insn_cnt = ARRAY_SIZE(insns);
4406
	/* make sure loading with name works */
4408 ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "test", "GPL", insns, insn_cnt, NULL);
4409 return probe_fd(ret);
4410}
4411
4412static int probe_kern_global_data(void)
4413{
4414 char *cp, errmsg[STRERR_BUFSIZE];
4415 struct bpf_insn insns[] = {
4416 BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16),
4417 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
4418 BPF_MOV64_IMM(BPF_REG_0, 0),
4419 BPF_EXIT_INSN(),
4420 };
4421 int ret, map, insn_cnt = ARRAY_SIZE(insns);
4422
4423 map = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(int), 32, 1, NULL);
4424 if (map < 0) {
4425 ret = -errno;
4426 cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
4427 pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
4428 __func__, cp, -ret);
4429 return ret;
4430 }
4431
4432 insns[0].imm = map;
4433
4434 ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, NULL);
4435 close(map);
4436 return probe_fd(ret);
4437}
4438
4439static int probe_kern_btf(void)
4440{
4441 static const char strs[] = "\0int";
4442 __u32 types[] = {
		/* int */
4444 BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
4445 };
4446
4447 return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
4448 strs, sizeof(strs)));
4449}
4450
4451static int probe_kern_btf_func(void)
4452{
4453 static const char strs[] = "\0int\0x\0a";
4454
4455 __u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
		/* FUNC_PROTO */
		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
		BTF_PARAM_ENC(7, 1),
		/* FUNC x */
		BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2),
4463 };
4464
4465 return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
4466 strs, sizeof(strs)));
4467}
4468
4469static int probe_kern_btf_func_global(void)
4470{
4471 static const char strs[] = "\0int\0x\0a";
4472
4473 __u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
		/* FUNC_PROTO */
		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
		BTF_PARAM_ENC(7, 1),
		/* FUNC x, marked BTF_FUNC_GLOBAL */
		BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 2),
4481 };
4482
4483 return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
4484 strs, sizeof(strs)));
4485}
4486
4487static int probe_kern_btf_datasec(void)
4488{
4489 static const char strs[] = "\0x\0.data";
4490
4491 __u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
		/* VAR x */
		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
		BTF_VAR_STATIC,
		/* DATASEC .data */
		BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
		BTF_VAR_SECINFO_ENC(2, 0, 4),
4500 };
4501
4502 return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
4503 strs, sizeof(strs)));
4504}
4505
4506static int probe_kern_btf_float(void)
4507{
4508 static const char strs[] = "\0float";
4509 __u32 types[] = {
		/* float */
4511 BTF_TYPE_FLOAT_ENC(1, 4),
4512 };
4513
4514 return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
4515 strs, sizeof(strs)));
4516}
4517
4518static int probe_kern_btf_decl_tag(void)
4519{
4520 static const char strs[] = "\0tag";
4521 __u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
		/* VAR x */
		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
		BTF_VAR_STATIC,
		/* DECL_TAG attached to the VAR */
		BTF_TYPE_DECL_TAG_ENC(1, 2, -1),
4529 };
4530
4531 return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
4532 strs, sizeof(strs)));
4533}
4534
4535static int probe_kern_btf_type_tag(void)
4536{
4537 static const char strs[] = "\0tag";
4538 __u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
		/* TYPE_TAG on the int */
		BTF_TYPE_TYPE_TAG_ENC(1, 1),
		/* PTR to the tagged type */
		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2),
4545 };
4546
4547 return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
4548 strs, sizeof(strs)));
4549}
4550
4551static int probe_kern_array_mmap(void)
4552{
4553 LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_MMAPABLE);
4554 int fd;
4555
4556 fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(int), sizeof(int), 1, &opts);
4557 return probe_fd(fd);
4558}
4559
4560static int probe_kern_exp_attach_type(void)
4561{
4562 LIBBPF_OPTS(bpf_prog_load_opts, opts, .expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE);
4563 struct bpf_insn insns[] = {
4564 BPF_MOV64_IMM(BPF_REG_0, 0),
4565 BPF_EXIT_INSN(),
4566 };
4567 int fd, insn_cnt = ARRAY_SIZE(insns);
4568
	/* use any valid combination of program type and (optional) non-zero
	 * expected attach type (i.e., not a BPF_CGROUP_INET_INGRESS) to see
	 * if kernel supports expected_attach_type field for BPF_PROG_LOAD
	 * command
	 */
4574 fd = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SOCK, NULL, "GPL", insns, insn_cnt, &opts);
4575 return probe_fd(fd);
4576}
4577
4578static int probe_kern_probe_read_kernel(void)
4579{
4580 struct bpf_insn insns[] = {
4581 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4582 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
4583 BPF_MOV64_IMM(BPF_REG_2, 8),
4584 BPF_MOV64_IMM(BPF_REG_3, 0),
4585 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_probe_read_kernel),
4586 BPF_EXIT_INSN(),
4587 };
4588 int fd, insn_cnt = ARRAY_SIZE(insns);
4589
4590 fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, NULL, "GPL", insns, insn_cnt, NULL);
4591 return probe_fd(fd);
4592}
4593
4594static int probe_prog_bind_map(void)
4595{
4596 char *cp, errmsg[STRERR_BUFSIZE];
4597 struct bpf_insn insns[] = {
4598 BPF_MOV64_IMM(BPF_REG_0, 0),
4599 BPF_EXIT_INSN(),
4600 };
4601 int ret, map, prog, insn_cnt = ARRAY_SIZE(insns);
4602
4603 map = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(int), 32, 1, NULL);
4604 if (map < 0) {
4605 ret = -errno;
4606 cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
4607 pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
4608 __func__, cp, -ret);
4609 return ret;
4610 }
4611
4612 prog = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, NULL);
4613 if (prog < 0) {
4614 close(map);
4615 return 0;
4616 }
4617
4618 ret = bpf_prog_bind_map(prog, map, NULL);
4619
4620 close(map);
4621 close(prog);
4622
4623 return ret >= 0;
4624}
4625
4626static int probe_module_btf(void)
4627{
4628 static const char strs[] = "\0int";
4629 __u32 types[] = {
		/* int */
4631 BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
4632 };
4633 struct bpf_btf_info info;
4634 __u32 len = sizeof(info);
4635 char name[16];
4636 int fd, err;
4637
4638 fd = libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs));
4639 if (fd < 0)
4640 return 0;
4641
4642 memset(&info, 0, sizeof(info));
4643 info.name = ptr_to_u64(name);
4644 info.name_len = sizeof(name);
4645
	/* check that BPF_OBJ_GET_INFO_BY_FD supports specifying name pointer;
	 * kernel's module BTF support coincides with support for
	 * name/name_len fields in struct bpf_btf_info
	 */
4650 err = bpf_obj_get_info_by_fd(fd, &info, &len);
4651 close(fd);
4652 return !err;
4653}
4654
4655static int probe_perf_link(void)
4656{
4657 struct bpf_insn insns[] = {
4658 BPF_MOV64_IMM(BPF_REG_0, 0),
4659 BPF_EXIT_INSN(),
4660 };
4661 int prog_fd, link_fd, err;
4662
4663 prog_fd = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL",
4664 insns, ARRAY_SIZE(insns), NULL);
4665 if (prog_fd < 0)
4666 return -errno;
4667
	/* use an invalid perf_event FD: if BPF_PERF_EVENT link type is
	 * supported, kernel will return -EBADF; otherwise -EINVAL
	 */
4671 link_fd = bpf_link_create(prog_fd, -1, BPF_PERF_EVENT, NULL);
4672 err = -errno;
4673
4674 if (link_fd >= 0)
4675 close(link_fd);
4676 close(prog_fd);
4677
4678 return link_fd < 0 && err == -EBADF;
4679}
4680
4681enum kern_feature_result {
4682 FEAT_UNKNOWN = 0,
4683 FEAT_SUPPORTED = 1,
4684 FEAT_MISSING = 2,
4685};
4686
4687typedef int (*feature_probe_fn)(void);
4688
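/* kernel feature probes run lazily, at most once per feature; results are
 * cached in feature_probes[] and queried via kernel_supports()
 */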
4689static struct kern_feature_desc {
4690 const char *desc;
4691 feature_probe_fn probe;
4692 enum kern_feature_result res;
4693} feature_probes[__FEAT_CNT] = {
4694 [FEAT_PROG_NAME] = {
4695 "BPF program name", probe_kern_prog_name,
4696 },
4697 [FEAT_GLOBAL_DATA] = {
4698 "global variables", probe_kern_global_data,
4699 },
4700 [FEAT_BTF] = {
4701 "minimal BTF", probe_kern_btf,
4702 },
4703 [FEAT_BTF_FUNC] = {
4704 "BTF functions", probe_kern_btf_func,
4705 },
4706 [FEAT_BTF_GLOBAL_FUNC] = {
4707 "BTF global function", probe_kern_btf_func_global,
4708 },
4709 [FEAT_BTF_DATASEC] = {
4710 "BTF data section and variable", probe_kern_btf_datasec,
4711 },
4712 [FEAT_ARRAY_MMAP] = {
4713 "ARRAY map mmap()", probe_kern_array_mmap,
4714 },
4715 [FEAT_EXP_ATTACH_TYPE] = {
4716 "BPF_PROG_LOAD expected_attach_type attribute",
4717 probe_kern_exp_attach_type,
4718 },
4719 [FEAT_PROBE_READ_KERN] = {
4720 "bpf_probe_read_kernel() helper", probe_kern_probe_read_kernel,
4721 },
4722 [FEAT_PROG_BIND_MAP] = {
4723 "BPF_PROG_BIND_MAP support", probe_prog_bind_map,
4724 },
4725 [FEAT_MODULE_BTF] = {
4726 "module BTF support", probe_module_btf,
4727 },
4728 [FEAT_BTF_FLOAT] = {
4729 "BTF_KIND_FLOAT support", probe_kern_btf_float,
4730 },
4731 [FEAT_PERF_LINK] = {
4732 "BPF perf link support", probe_perf_link,
4733 },
4734 [FEAT_BTF_DECL_TAG] = {
4735 "BTF_KIND_DECL_TAG support", probe_kern_btf_decl_tag,
4736 },
4737 [FEAT_BTF_TYPE_TAG] = {
4738 "BTF_KIND_TYPE_TAG support", probe_kern_btf_type_tag,
4739 },
4740 [FEAT_MEMCG_ACCOUNT] = {
4741 "memcg-based memory accounting", probe_memcg_account,
4742 },
4743};
4744
4745bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id)
4746{
4747 struct kern_feature_desc *feat = &feature_probes[feat_id];
4748 int ret;
4749
	if (obj && obj->gen_loader)
		/* To generate loader program assume the latest kernel
		 * to avoid doing extra prog_load, map_create syscalls.
		 */
		return true;
4755
4756 if (READ_ONCE(feat->res) == FEAT_UNKNOWN) {
4757 ret = feat->probe();
4758 if (ret > 0) {
4759 WRITE_ONCE(feat->res, FEAT_SUPPORTED);
4760 } else if (ret == 0) {
4761 WRITE_ONCE(feat->res, FEAT_MISSING);
4762 } else {
4763 pr_warn("Detection of kernel %s support failed: %d\n", feat->desc, ret);
4764 WRITE_ONCE(feat->res, FEAT_MISSING);
4765 }
4766 }
4767
4768 return READ_ONCE(feat->res) == FEAT_SUPPORTED;
4769}
4770
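/* a pinned map can only be reused if its type, key/value sizes, max_entries,
 * map_flags and map_extra all match libbpf's view of the map definition
 */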
4771static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
4772{
4773 struct bpf_map_info map_info = {};
4774 char msg[STRERR_BUFSIZE];
4775 __u32 map_info_len;
4776 int err;
4777
4778 map_info_len = sizeof(map_info);
4779
4780 err = bpf_obj_get_info_by_fd(map_fd, &map_info, &map_info_len);
4781 if (err && errno == EINVAL)
4782 err = bpf_get_map_info_from_fdinfo(map_fd, &map_info);
4783 if (err) {
4784 pr_warn("failed to get map info for map FD %d: %s\n", map_fd,
4785 libbpf_strerror_r(errno, msg, sizeof(msg)));
4786 return false;
4787 }
4788
4789 return (map_info.type == map->def.type &&
4790 map_info.key_size == map->def.key_size &&
4791 map_info.value_size == map->def.value_size &&
4792 map_info.max_entries == map->def.max_entries &&
4793 map_info.map_flags == map->def.map_flags &&
4794 map_info.map_extra == map->map_extra);
4795}
4796
4797static int
4798bpf_object__reuse_map(struct bpf_map *map)
4799{
4800 char *cp, errmsg[STRERR_BUFSIZE];
4801 int err, pin_fd;
4802
4803 pin_fd = bpf_obj_get(map->pin_path);
4804 if (pin_fd < 0) {
4805 err = -errno;
4806 if (err == -ENOENT) {
4807 pr_debug("found no pinned map to reuse at '%s'\n",
4808 map->pin_path);
4809 return 0;
4810 }
4811
4812 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
4813 pr_warn("couldn't retrieve pinned map '%s': %s\n",
4814 map->pin_path, cp);
4815 return err;
4816 }
4817
4818 if (!map_is_reuse_compat(map, pin_fd)) {
4819 pr_warn("couldn't reuse pinned map at '%s': parameter mismatch\n",
4820 map->pin_path);
4821 close(pin_fd);
4822 return -EINVAL;
4823 }
4824
4825 err = bpf_map__reuse_fd(map, pin_fd);
4826 close(pin_fd);
4827 if (err) {
4828 return err;
4829 }
4830 map->pinned = true;
4831 pr_debug("reused pinned map at '%s'\n", map->pin_path);
4832
4833 return 0;
4834}
4835
4836static int
4837bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
4838{
4839 enum libbpf_map_type map_type = map->libbpf_type;
4840 char *cp, errmsg[STRERR_BUFSIZE];
4841 int err, zero = 0;
4842
4843 if (obj->gen_loader) {
4844 bpf_gen__map_update_elem(obj->gen_loader, map - obj->maps,
4845 map->mmaped, map->def.value_size);
4846 if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG)
4847 bpf_gen__map_freeze(obj->gen_loader, map - obj->maps);
4848 return 0;
4849 }
4850 err = bpf_map_update_elem(map->fd, &zero, map->mmaped, 0);
4851 if (err) {
4852 err = -errno;
4853 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
4854 pr_warn("Error setting initial map(%s) contents: %s\n",
4855 map->name, cp);
4856 return err;
4857 }
4858
	/* Freeze .rodata and .kconfig maps as read-only from syscall side. */
4860 if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG) {
4861 err = bpf_map_freeze(map->fd);
4862 if (err) {
4863 err = -errno;
4864 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
4865 pr_warn("Error freezing map(%s) as read-only: %s\n",
4866 map->name, cp);
4867 return err;
4868 }
4869 }
4870 return 0;
4871}
4872
4873static void bpf_map__destroy(struct bpf_map *map);
4874
4875static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, bool is_inner)
4876{
4877 LIBBPF_OPTS(bpf_map_create_opts, create_attr);
4878 struct bpf_map_def *def = &map->def;
4879 const char *map_name = NULL;
4880 int err = 0;
4881
4882 if (kernel_supports(obj, FEAT_PROG_NAME))
4883 map_name = map->name;
4884 create_attr.map_ifindex = map->map_ifindex;
4885 create_attr.map_flags = def->map_flags;
4886 create_attr.numa_node = map->numa_node;
4887 create_attr.map_extra = map->map_extra;
4888
4889 if (bpf_map__is_struct_ops(map))
4890 create_attr.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
4891
4892 if (obj->btf && btf__fd(obj->btf) >= 0) {
4893 create_attr.btf_fd = btf__fd(obj->btf);
4894 create_attr.btf_key_type_id = map->btf_key_type_id;
4895 create_attr.btf_value_type_id = map->btf_value_type_id;
4896 }
4897
4898 if (bpf_map_type__is_map_in_map(def->type)) {
4899 if (map->inner_map) {
4900 err = bpf_object__create_map(obj, map->inner_map, true);
4901 if (err) {
4902 pr_warn("map '%s': failed to create inner map: %d\n",
4903 map->name, err);
4904 return err;
4905 }
4906 map->inner_map_fd = bpf_map__fd(map->inner_map);
4907 }
4908 if (map->inner_map_fd >= 0)
4909 create_attr.inner_map_fd = map->inner_map_fd;
4910 }
4911
4912 switch (def->type) {
4913 case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
4914 case BPF_MAP_TYPE_CGROUP_ARRAY:
4915 case BPF_MAP_TYPE_STACK_TRACE:
4916 case BPF_MAP_TYPE_ARRAY_OF_MAPS:
4917 case BPF_MAP_TYPE_HASH_OF_MAPS:
4918 case BPF_MAP_TYPE_DEVMAP:
4919 case BPF_MAP_TYPE_DEVMAP_HASH:
4920 case BPF_MAP_TYPE_CPUMAP:
4921 case BPF_MAP_TYPE_XSKMAP:
4922 case BPF_MAP_TYPE_SOCKMAP:
4923 case BPF_MAP_TYPE_SOCKHASH:
4924 case BPF_MAP_TYPE_QUEUE:
4925 case BPF_MAP_TYPE_STACK:
4926 case BPF_MAP_TYPE_RINGBUF:
4927 create_attr.btf_fd = 0;
4928 create_attr.btf_key_type_id = 0;
4929 create_attr.btf_value_type_id = 0;
4930 map->btf_key_type_id = 0;
4931 map->btf_value_type_id = 0;
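		/* fallthrough */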
4932 default:
4933 break;
4934 }
4935
4936 if (obj->gen_loader) {
4937 bpf_gen__map_create(obj->gen_loader, def->type, map_name,
4938 def->key_size, def->value_size, def->max_entries,
4939 &create_attr, is_inner ? -1 : map - obj->maps);
		/* pretend to have a valid FD to pass various fd >= 0 checks;
		 * this fd == 0 will not be used with any syscall and will be
		 * reset to -1 eventually
		 */
4943 map->fd = 0;
4944 } else {
4945 map->fd = bpf_map_create(def->type, map_name,
4946 def->key_size, def->value_size,
4947 def->max_entries, &create_attr);
4948 }
4949 if (map->fd < 0 && (create_attr.btf_key_type_id ||
4950 create_attr.btf_value_type_id)) {
4951 char *cp, errmsg[STRERR_BUFSIZE];
4952
4953 err = -errno;
4954 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
4955 pr_warn("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
4956 map->name, cp, err);
4957 create_attr.btf_fd = 0;
4958 create_attr.btf_key_type_id = 0;
4959 create_attr.btf_value_type_id = 0;
4960 map->btf_key_type_id = 0;
4961 map->btf_value_type_id = 0;
4962 map->fd = bpf_map_create(def->type, map_name,
4963 def->key_size, def->value_size,
4964 def->max_entries, &create_attr);
4965 }
4966
4967 err = map->fd < 0 ? -errno : 0;
4968
4969 if (bpf_map_type__is_map_in_map(def->type) && map->inner_map) {
4970 if (obj->gen_loader)
4971 map->inner_map->fd = -1;
4972 bpf_map__destroy(map->inner_map);
4973 zfree(&map->inner_map);
4974 }
4975
4976 return err;
4977}
4978
4979static int init_map_in_map_slots(struct bpf_object *obj, struct bpf_map *map)
4980{
4981 const struct bpf_map *targ_map;
4982 unsigned int i;
4983 int fd, err = 0;
4984
4985 for (i = 0; i < map->init_slots_sz; i++) {
4986 if (!map->init_slots[i])
4987 continue;
4988
4989 targ_map = map->init_slots[i];
4990 fd = bpf_map__fd(targ_map);
4991
4992 if (obj->gen_loader) {
4993 bpf_gen__populate_outer_map(obj->gen_loader,
4994 map - obj->maps, i,
4995 targ_map - obj->maps);
4996 } else {
4997 err = bpf_map_update_elem(map->fd, &i, &fd, 0);
4998 }
4999 if (err) {
5000 err = -errno;
5001 pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %d\n",
5002 map->name, i, targ_map->name, fd, err);
5003 return err;
5004 }
5005 pr_debug("map '%s': slot [%d] set to map '%s' fd=%d\n",
5006 map->name, i, targ_map->name, fd);
5007 }
5008
5009 zfree(&map->init_slots);
5010 map->init_slots_sz = 0;
5011
5012 return 0;
5013}
5014
5015static int init_prog_array_slots(struct bpf_object *obj, struct bpf_map *map)
5016{
5017 const struct bpf_program *targ_prog;
5018 unsigned int i;
5019 int fd, err;
5020
5021 if (obj->gen_loader)
5022 return -ENOTSUP;
5023
5024 for (i = 0; i < map->init_slots_sz; i++) {
5025 if (!map->init_slots[i])
5026 continue;
5027
5028 targ_prog = map->init_slots[i];
5029 fd = bpf_program__fd(targ_prog);
5030
5031 err = bpf_map_update_elem(map->fd, &i, &fd, 0);
5032 if (err) {
5033 err = -errno;
5034 pr_warn("map '%s': failed to initialize slot [%d] to prog '%s' fd=%d: %d\n",
5035 map->name, i, targ_prog->name, fd, err);
5036 return err;
5037 }
5038 pr_debug("map '%s': slot [%d] set to prog '%s' fd=%d\n",
5039 map->name, i, targ_prog->name, fd);
5040 }
5041
5042 zfree(&map->init_slots);
5043 map->init_slots_sz = 0;
5044
5045 return 0;
5046}
5047
5048static int bpf_object_init_prog_arrays(struct bpf_object *obj)
5049{
5050 struct bpf_map *map;
5051 int i, err;
5052
5053 for (i = 0; i < obj->nr_maps; i++) {
5054 map = &obj->maps[i];
5055
5056 if (!map->init_slots_sz || map->def.type != BPF_MAP_TYPE_PROG_ARRAY)
5057 continue;
5058
5059 err = init_prog_array_slots(obj, map);
5060 if (err < 0) {
5061 zclose(map->fd);
5062 return err;
5063 }
5064 }
5065 return 0;
5066}
5067
5068static int map_set_def_max_entries(struct bpf_map *map)
5069{
5070 if (map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !map->def.max_entries) {
5071 int nr_cpus;
5072
5073 nr_cpus = libbpf_num_possible_cpus();
5074 if (nr_cpus < 0) {
5075 pr_warn("map '%s': failed to determine number of system CPUs: %d\n",
5076 map->name, nr_cpus);
5077 return nr_cpus;
5078 }
5079 pr_debug("map '%s': setting size to %d\n", map->name, nr_cpus);
5080 map->def.max_entries = nr_cpus;
5081 }
5082
5083 return 0;
5084}
5085
5086static int
5087bpf_object__create_maps(struct bpf_object *obj)
5088{
5089 struct bpf_map *map;
5090 char *cp, errmsg[STRERR_BUFSIZE];
5091 unsigned int i, j;
5092 int err;
5093 bool retried;
5094
5095 for (i = 0; i < obj->nr_maps; i++) {
5096 map = &obj->maps[i];
5097
		/* To support old kernels, we skip creating global data maps
		 * (.rodata, .data, .kconfig, etc.); later, during relocation,
		 * any data relocation against a skipped map is reported as an
		 * error (see RELO_DATA handling in bpf_object__relocate_data())
		 */
5111
5112 if (bpf_map__is_internal(map) &&
5113 !kernel_supports(obj, FEAT_GLOBAL_DATA)) {
5114 map->skipped = true;
5115 continue;
5116 }
5117
5118 err = map_set_def_max_entries(map);
5119 if (err)
5120 goto err_out;
5121
5122 retried = false;
5123retry:
5124 if (map->pin_path) {
5125 err = bpf_object__reuse_map(map);
5126 if (err) {
5127 pr_warn("map '%s': error reusing pinned map\n",
5128 map->name);
5129 goto err_out;
5130 }
5131 if (retried && map->fd < 0) {
5132 pr_warn("map '%s': cannot find pinned map\n",
5133 map->name);
5134 err = -ENOENT;
5135 goto err_out;
5136 }
5137 }
5138
5139 if (map->fd >= 0) {
5140 pr_debug("map '%s': skipping creation (preset fd=%d)\n",
5141 map->name, map->fd);
5142 } else {
5143 err = bpf_object__create_map(obj, map, false);
5144 if (err)
5145 goto err_out;
5146
5147 pr_debug("map '%s': created successfully, fd=%d\n",
5148 map->name, map->fd);
5149
5150 if (bpf_map__is_internal(map)) {
5151 err = bpf_object__populate_internal_map(obj, map);
5152 if (err < 0) {
5153 zclose(map->fd);
5154 goto err_out;
5155 }
5156 }
5157
5158 if (map->init_slots_sz && map->def.type != BPF_MAP_TYPE_PROG_ARRAY) {
5159 err = init_map_in_map_slots(obj, map);
5160 if (err < 0) {
5161 zclose(map->fd);
5162 goto err_out;
5163 }
5164 }
5165 }
5166
5167 if (map->pin_path && !map->pinned) {
5168 err = bpf_map__pin(map, NULL);
5169 if (err) {
5170 zclose(map->fd);
5171 if (!retried && err == -EEXIST) {
5172 retried = true;
5173 goto retry;
5174 }
5175 pr_warn("map '%s': failed to auto-pin at '%s': %d\n",
5176 map->name, map->pin_path, err);
5177 goto err_out;
5178 }
5179 }
5180 }
5181
5182 return 0;
5183
5184err_out:
5185 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
5186 pr_warn("map '%s': failed to create: %s(%d)\n", map->name, cp, err);
5187 pr_perm_msg(err);
5188 for (j = 0; j < i; j++)
5189 zclose(obj->maps[j].fd);
5190 return err;
5191}
5192
5193static bool bpf_core_is_flavor_sep(const char *s)
5194{
	/* check X___Y name pattern, where X and Y are not underscores */
5196 return s[0] != '_' &&
5197 s[1] == '_' && s[2] == '_' && s[3] == '_' &&
5198 s[4] != '_';
5199}
5200
/* Given 'some_struct_name___with_flavor' return the length of a name prefix
 * before the last triple underscore; struct name part after the last triple
 * underscore is ignored by BPF CO-RE relocation during candidate matching
 */
5205size_t bpf_core_essential_name_len(const char *name)
5206{
5207 size_t n = strlen(name);
5208 int i;
5209
5210 for (i = n - 5; i >= 0; i--) {
5211 if (bpf_core_is_flavor_sep(name + i))
5212 return i + 1;
5213 }
5214 return n;
5215}
5216
5217void bpf_core_free_cands(struct bpf_core_cand_list *cands)
5218{
5219 if (!cands)
5220 return;
5221
5222 free(cands->cands);
5223 free(cands);
5224}
5225
5226int bpf_core_add_cands(struct bpf_core_cand *local_cand,
5227 size_t local_essent_len,
5228 const struct btf *targ_btf,
5229 const char *targ_btf_name,
5230 int targ_start_id,
5231 struct bpf_core_cand_list *cands)
5232{
5233 struct bpf_core_cand *new_cands, *cand;
5234 const struct btf_type *t, *local_t;
5235 const char *targ_name, *local_name;
5236 size_t targ_essent_len;
5237 int n, i;
5238
5239 local_t = btf__type_by_id(local_cand->btf, local_cand->id);
5240 local_name = btf__str_by_offset(local_cand->btf, local_t->name_off);
5241
5242 n = btf__type_cnt(targ_btf);
5243 for (i = targ_start_id; i < n; i++) {
5244 t = btf__type_by_id(targ_btf, i);
5245 if (btf_kind(t) != btf_kind(local_t))
5246 continue;
5247
5248 targ_name = btf__name_by_offset(targ_btf, t->name_off);
5249 if (str_is_empty(targ_name))
5250 continue;
5251
5252 targ_essent_len = bpf_core_essential_name_len(targ_name);
5253 if (targ_essent_len != local_essent_len)
5254 continue;
5255
5256 if (strncmp(local_name, targ_name, local_essent_len) != 0)
5257 continue;
5258
5259 pr_debug("CO-RE relocating [%d] %s %s: found target candidate [%d] %s %s in [%s]\n",
5260 local_cand->id, btf_kind_str(local_t),
5261 local_name, i, btf_kind_str(t), targ_name,
5262 targ_btf_name);
5263 new_cands = libbpf_reallocarray(cands->cands, cands->len + 1,
5264 sizeof(*cands->cands));
5265 if (!new_cands)
5266 return -ENOMEM;
5267
5268 cand = &new_cands[cands->len];
5269 cand->btf = targ_btf;
5270 cand->id = i;
5271
5272 cands->cands = new_cands;
5273 cands->len++;
5274 }
5275 return 0;
5276}
5277
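/* iterate over all loaded kernel BTF objects, caching module BTFs (vmlinux
 * and non-kernel BTFs are skipped) for later CO-RE candidate lookups
 */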
5278static int load_module_btfs(struct bpf_object *obj)
5279{
5280 struct bpf_btf_info info;
5281 struct module_btf *mod_btf;
5282 struct btf *btf;
5283 char name[64];
5284 __u32 id = 0, len;
5285 int err, fd;
5286
5287 if (obj->btf_modules_loaded)
5288 return 0;
5289
5290 if (obj->gen_loader)
5291 return 0;
5292
	/* don't do this again, even if we fail this time */
5294 obj->btf_modules_loaded = true;
5295
	/* kernel too old to support module BTFs */
5297 if (!kernel_supports(obj, FEAT_MODULE_BTF))
5298 return 0;
5299
5300 while (true) {
5301 err = bpf_btf_get_next_id(id, &id);
5302 if (err && errno == ENOENT)
5303 return 0;
5304 if (err) {
5305 err = -errno;
5306 pr_warn("failed to iterate BTF objects: %d\n", err);
5307 return err;
5308 }
5309
5310 fd = bpf_btf_get_fd_by_id(id);
5311 if (fd < 0) {
5312 if (errno == ENOENT)
5313 continue;
5314 err = -errno;
5315 pr_warn("failed to get BTF object #%d FD: %d\n", id, err);
5316 return err;
5317 }
5318
5319 len = sizeof(info);
5320 memset(&info, 0, sizeof(info));
5321 info.name = ptr_to_u64(name);
5322 info.name_len = sizeof(name);
5323
5324 err = bpf_obj_get_info_by_fd(fd, &info, &len);
5325 if (err) {
5326 err = -errno;
5327 pr_warn("failed to get BTF object #%d info: %d\n", id, err);
5328 goto err_out;
5329 }
5330
		/* ignore non-module BTFs */
5332 if (!info.kernel_btf || strcmp(name, "vmlinux") == 0) {
5333 close(fd);
5334 continue;
5335 }
5336
5337 btf = btf_get_from_fd(fd, obj->btf_vmlinux);
5338 err = libbpf_get_error(btf);
5339 if (err) {
5340 pr_warn("failed to load module [%s]'s BTF object #%d: %d\n",
5341 name, id, err);
5342 goto err_out;
5343 }
5344
5345 err = libbpf_ensure_mem((void **)&obj->btf_modules, &obj->btf_module_cap,
5346 sizeof(*obj->btf_modules), obj->btf_module_cnt + 1);
5347 if (err)
5348 goto err_out;
5349
5350 mod_btf = &obj->btf_modules[obj->btf_module_cnt++];
5351
5352 mod_btf->btf = btf;
5353 mod_btf->id = id;
5354 mod_btf->fd = fd;
5355 mod_btf->name = strdup(name);
5356 if (!mod_btf->name) {
5357 err = -ENOMEM;
5358 goto err_out;
5359 }
5360 continue;
5361
5362err_out:
5363 close(fd);
5364 return err;
5365 }
5366
5367 return 0;
5368}
5369
5370static struct bpf_core_cand_list *
5371bpf_core_find_cands(struct bpf_object *obj, const struct btf *local_btf, __u32 local_type_id)
5372{
5373 struct bpf_core_cand local_cand = {};
5374 struct bpf_core_cand_list *cands;
5375 const struct btf *main_btf;
5376 const struct btf_type *local_t;
5377 const char *local_name;
5378 size_t local_essent_len;
5379 int err, i;
5380
5381 local_cand.btf = local_btf;
5382 local_cand.id = local_type_id;
5383 local_t = btf__type_by_id(local_btf, local_type_id);
5384 if (!local_t)
5385 return ERR_PTR(-EINVAL);
5386
5387 local_name = btf__name_by_offset(local_btf, local_t->name_off);
5388 if (str_is_empty(local_name))
5389 return ERR_PTR(-EINVAL);
5390 local_essent_len = bpf_core_essential_name_len(local_name);
5391
5392 cands = calloc(1, sizeof(*cands));
5393 if (!cands)
5394 return ERR_PTR(-ENOMEM);
5395
	/* attempt to find target candidates in vmlinux BTF first */
5397 main_btf = obj->btf_vmlinux_override ?: obj->btf_vmlinux;
5398 err = bpf_core_add_cands(&local_cand, local_essent_len, main_btf, "vmlinux", 1, cands);
5399 if (err)
5400 goto err_out;
5401
	/* if vmlinux BTF has any candidate, don't bother with module BTFs */
5403 if (cands->len)
5404 return cands;
5405
	/* if vmlinux BTF was overridden, don't attempt to load module BTFs */
5407 if (obj->btf_vmlinux_override)
5408 return cands;
5409
	/* now look through module BTFs, trying to still find candidates */
5411 err = load_module_btfs(obj);
5412 if (err)
5413 goto err_out;
5414
5415 for (i = 0; i < obj->btf_module_cnt; i++) {
5416 err = bpf_core_add_cands(&local_cand, local_essent_len,
5417 obj->btf_modules[i].btf,
5418 obj->btf_modules[i].name,
5419 btf__type_cnt(obj->btf_vmlinux),
5420 cands);
5421 if (err)
5422 goto err_out;
5423 }
5424
5425 return cands;
5426err_out:
5427 bpf_core_free_cands(cands);
5428 return ERR_PTR(err);
5429}
5430
/* Check local and target types for compatibility. This check is used for
 * type-based CO-RE relocations and follows slightly different rules than
 * field-based relocations. This function assumes that root types were already
 * checked for name match. Beyond that initial root-level name check, names
 * are completely ignored. Compatibility rules are as follows:
 *   - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but
 *     kind should match for local and target types (i.e., STRUCT is not
 *     compatible with UNION);
 *   - for ENUMs, the size is ignored;
 *   - for INT, size and signedness are ignored;
 *   - for ARRAY, dimensionality is ignored, element types are checked for
 *     compatibility recursively;
 *   - CONST/VOLATILE/RESTRICT modifiers are ignored;
 *   - TYPEDEFs/PTRs are compatible if the types they point to are compatible;
 *   - FUNC_PROTOs are compatible if they have compatible signatures: same
 *     number of input args and compatible return and argument types.
 */
5450int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
5451 const struct btf *targ_btf, __u32 targ_id)
5452{
5453 const struct btf_type *local_type, *targ_type;
5454 int depth = 32;
5455
	/* caller made sure that names match (ignoring flavor suffix) */
5457 local_type = btf__type_by_id(local_btf, local_id);
5458 targ_type = btf__type_by_id(targ_btf, targ_id);
5459 if (btf_kind(local_type) != btf_kind(targ_type))
5460 return 0;
5461
5462recur:
5463 depth--;
5464 if (depth < 0)
5465 return -EINVAL;
5466
5467 local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
5468 targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
5469 if (!local_type || !targ_type)
5470 return -EINVAL;
5471
5472 if (btf_kind(local_type) != btf_kind(targ_type))
5473 return 0;
5474
5475 switch (btf_kind(local_type)) {
5476 case BTF_KIND_UNKN:
5477 case BTF_KIND_STRUCT:
5478 case BTF_KIND_UNION:
5479 case BTF_KIND_ENUM:
5480 case BTF_KIND_FWD:
5481 return 1;
5482 case BTF_KIND_INT:
		/* just reject deprecated bitfield-like integers; all other
		 * integers are by default compatible between each other
		 */
5486 return btf_int_offset(local_type) == 0 && btf_int_offset(targ_type) == 0;
5487 case BTF_KIND_PTR:
5488 local_id = local_type->type;
5489 targ_id = targ_type->type;
5490 goto recur;
5491 case BTF_KIND_ARRAY:
5492 local_id = btf_array(local_type)->type;
5493 targ_id = btf_array(targ_type)->type;
5494 goto recur;
5495 case BTF_KIND_FUNC_PROTO: {
5496 struct btf_param *local_p = btf_params(local_type);
5497 struct btf_param *targ_p = btf_params(targ_type);
5498 __u16 local_vlen = btf_vlen(local_type);
5499 __u16 targ_vlen = btf_vlen(targ_type);
5500 int i, err;
5501
5502 if (local_vlen != targ_vlen)
5503 return 0;
5504
5505 for (i = 0; i < local_vlen; i++, local_p++, targ_p++) {
5506 skip_mods_and_typedefs(local_btf, local_p->type, &local_id);
5507 skip_mods_and_typedefs(targ_btf, targ_p->type, &targ_id);
5508 err = bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id);
5509 if (err <= 0)
5510 return err;
5511 }
5512
		/* tail recurse for return type check */
5514 skip_mods_and_typedefs(local_btf, local_type->type, &local_id);
5515 skip_mods_and_typedefs(targ_btf, targ_type->type, &targ_id);
5516 goto recur;
5517 }
5518 default:
5519 pr_warn("unexpected kind %s relocated, local [%d], target [%d]\n",
5520 btf_kind_str(local_type), local_id, targ_id);
5521 return 0;
5522 }
5523}
5524
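/* CO-RE candidate cache is a hashmap keyed directly by local BTF type ID */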
5525static size_t bpf_core_hash_fn(const void *key, void *ctx)
5526{
5527 return (size_t)key;
5528}
5529
5530static bool bpf_core_equal_fn(const void *k1, const void *k2, void *ctx)
5531{
5532 return k1 == k2;
5533}
5534
5535static void *u32_as_hash_key(__u32 x)
5536{
5537 return (void *)(uintptr_t)x;
5538}
5539
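/* in gen_loader mode, CO-RE relocations aren't patched in place; they are
 * recorded as RELO_CORE descriptors to be emitted into the loader program
 */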
5540static int record_relo_core(struct bpf_program *prog,
5541 const struct bpf_core_relo *core_relo, int insn_idx)
5542{
5543 struct reloc_desc *relos, *relo;
5544
5545 relos = libbpf_reallocarray(prog->reloc_desc,
5546 prog->nr_reloc + 1, sizeof(*relos));
5547 if (!relos)
5548 return -ENOMEM;
5549 relo = &relos[prog->nr_reloc];
5550 relo->type = RELO_CORE;
5551 relo->insn_idx = insn_idx;
5552 relo->core_relo = core_relo;
5553 prog->reloc_desc = relos;
5554 prog->nr_reloc++;
5555 return 0;
5556}
5557
5558static int bpf_core_resolve_relo(struct bpf_program *prog,
5559 const struct bpf_core_relo *relo,
5560 int relo_idx,
5561 const struct btf *local_btf,
5562 struct hashmap *cand_cache,
5563 struct bpf_core_relo_res *targ_res)
5564{
5565 struct bpf_core_spec specs_scratch[3] = {};
5566 const void *type_key = u32_as_hash_key(relo->type_id);
5567 struct bpf_core_cand_list *cands = NULL;
5568 const char *prog_name = prog->name;
5569 const struct btf_type *local_type;
5570 const char *local_name;
5571 __u32 local_id = relo->type_id;
5572 int err;
5573
5574 local_type = btf__type_by_id(local_btf, local_id);
5575 if (!local_type)
5576 return -EINVAL;
5577
5578 local_name = btf__name_by_offset(local_btf, local_type->name_off);
5579 if (!local_name)
5580 return -EINVAL;
5581
5582 if (relo->kind != BPF_CORE_TYPE_ID_LOCAL &&
5583 !hashmap__find(cand_cache, type_key, (void **)&cands)) {
5584 cands = bpf_core_find_cands(prog->obj, local_btf, local_id);
5585 if (IS_ERR(cands)) {
5586 pr_warn("prog '%s': relo #%d: target candidate search failed for [%d] %s %s: %ld\n",
5587 prog_name, relo_idx, local_id, btf_kind_str(local_type),
5588 local_name, PTR_ERR(cands));
5589 return PTR_ERR(cands);
5590 }
5591 err = hashmap__set(cand_cache, type_key, cands, NULL, NULL);
5592 if (err) {
5593 bpf_core_free_cands(cands);
5594 return err;
5595 }
5596 }
5597
5598 return bpf_core_calc_relo_insn(prog_name, relo, relo_idx, local_btf, cands, specs_scratch,
5599 targ_res);
5600}
5601
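/* apply all CO-RE relocations recorded in .BTF.ext, resolving them against
 * either the running kernel's (and modules') BTF or, if targ_btf_path is
 * given, a caller-supplied target BTF
 */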
5602static int
5603bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
5604{
5605 const struct btf_ext_info_sec *sec;
5606 struct bpf_core_relo_res targ_res;
5607 const struct bpf_core_relo *rec;
5608 const struct btf_ext_info *seg;
5609 struct hashmap_entry *entry;
5610 struct hashmap *cand_cache = NULL;
5611 struct bpf_program *prog;
5612 struct bpf_insn *insn;
5613 const char *sec_name;
5614 int i, err = 0, insn_idx, sec_idx;
5615
5616 if (obj->btf_ext->core_relo_info.len == 0)
5617 return 0;
5618
5619 if (targ_btf_path) {
5620 obj->btf_vmlinux_override = btf__parse(targ_btf_path, NULL);
5621 err = libbpf_get_error(obj->btf_vmlinux_override);
5622 if (err) {
5623 pr_warn("failed to parse target BTF: %d\n", err);
5624 return err;
5625 }
5626 }
5627
5628 cand_cache = hashmap__new(bpf_core_hash_fn, bpf_core_equal_fn, NULL);
5629 if (IS_ERR(cand_cache)) {
5630 err = PTR_ERR(cand_cache);
5631 goto out;
5632 }
5633
5634 seg = &obj->btf_ext->core_relo_info;
5635 for_each_btf_ext_sec(seg, sec) {
5636 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
5637 if (str_is_empty(sec_name)) {
5638 err = -EINVAL;
5639 goto out;
5640 }
5641
		/* bpf_object's ELF is gone by now so it's not easy to find
		 * section index by section name, but we can find *any*
		 * bpf_program within desired section name and use its
		 * prog->sec_idx to do a proper search by section index and
		 * instruction offset
		 */
5647 prog = NULL;
5648 for (i = 0; i < obj->nr_programs; i++) {
5649 prog = &obj->programs[i];
5650 if (strcmp(prog->sec_name, sec_name) == 0)
5651 break;
5652 }
5653 if (!prog) {
5654 pr_warn("sec '%s': failed to find a BPF program\n", sec_name);
5655 return -ENOENT;
5656 }
5657 sec_idx = prog->sec_idx;
5658
5659 pr_debug("sec '%s': found %d CO-RE relocations\n",
5660 sec_name, sec->num_info);
5661
5662 for_each_btf_ext_rec(seg, sec, i, rec) {
5663 if (rec->insn_off % BPF_INSN_SZ)
5664 return -EINVAL;
5665 insn_idx = rec->insn_off / BPF_INSN_SZ;
5666 prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
5667 if (!prog) {
5668 pr_warn("sec '%s': failed to find program at insn #%d for CO-RE offset relocation #%d\n",
5669 sec_name, insn_idx, i);
5670 err = -EINVAL;
5671 goto out;
5672 }
5673
			/* no need to apply CO-RE relocation if the program is
			 * not going to be loaded
			 */
5676 if (!prog->load)
5677 continue;
5678
			/* adjust insn_idx from section frame of reference to
			 * the local program's frame of reference; (sub-)program
			 * code is not yet relocated, so it's enough to just
			 * subtract in-section offset
			 */
5683 insn_idx = insn_idx - prog->sec_insn_off;
5684 if (insn_idx >= prog->insns_cnt)
5685 return -EINVAL;
5686 insn = &prog->insns[insn_idx];
5687
5688 if (prog->obj->gen_loader) {
5689 err = record_relo_core(prog, rec, insn_idx);
5690 if (err) {
5691 pr_warn("prog '%s': relo #%d: failed to record relocation: %d\n",
5692 prog->name, i, err);
5693 goto out;
5694 }
5695 continue;
5696 }
5697
5698 err = bpf_core_resolve_relo(prog, rec, i, obj->btf, cand_cache, &targ_res);
5699 if (err) {
5700 pr_warn("prog '%s': relo #%d: failed to relocate: %d\n",
5701 prog->name, i, err);
5702 goto out;
5703 }
5704
5705 err = bpf_core_patch_insn(prog->name, insn, insn_idx, rec, i, &targ_res);
5706 if (err) {
5707 pr_warn("prog '%s': relo #%d: failed to patch insn #%u: %d\n",
5708 prog->name, i, insn_idx, err);
5709 goto out;
5710 }
5711 }
5712 }
5713
5714out:
	/* obj->btf_vmlinux and module BTFs are freed after object load */
5716 btf__free(obj->btf_vmlinux_override);
5717 obj->btf_vmlinux_override = NULL;
5718
5719 if (!IS_ERR_OR_NULL(cand_cache)) {
5720 hashmap__for_each_entry(cand_cache, entry, i) {
5721 bpf_core_free_cands(entry->value);
5722 }
5723 hashmap__free(cand_cache);
5724 }
5725 return err;
5726}
5727
/* Relocate data references within program code:
 *  - map references;
 *  - global variable references;
 *  - extern references.
 */
5733static int
5734bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog)
5735{
5736 int i;
5737
5738 for (i = 0; i < prog->nr_reloc; i++) {
5739 struct reloc_desc *relo = &prog->reloc_desc[i];
5740 struct bpf_insn *insn = &prog->insns[relo->insn_idx];
5741 struct extern_desc *ext;
5742
5743 switch (relo->type) {
5744 case RELO_LD64:
5745 if (obj->gen_loader) {
5746 insn[0].src_reg = BPF_PSEUDO_MAP_IDX;
5747 insn[0].imm = relo->map_idx;
5748 } else {
5749 insn[0].src_reg = BPF_PSEUDO_MAP_FD;
5750 insn[0].imm = obj->maps[relo->map_idx].fd;
5751 }
5752 break;
5753 case RELO_DATA:
5754 insn[1].imm = insn[0].imm + relo->sym_off;
5755 if (obj->gen_loader) {
5756 insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE;
5757 insn[0].imm = relo->map_idx;
5758 } else {
5759 const struct bpf_map *map = &obj->maps[relo->map_idx];
5760
5761 if (map->skipped) {
5762 pr_warn("prog '%s': relo #%d: kernel doesn't support global data\n",
5763 prog->name, i);
5764 return -ENOTSUP;
5765 }
5766 insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
5767 insn[0].imm = obj->maps[relo->map_idx].fd;
5768 }
5769 break;
5770 case RELO_EXTERN_VAR:
5771 ext = &obj->externs[relo->sym_off];
5772 if (ext->type == EXT_KCFG) {
5773 if (obj->gen_loader) {
5774 insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE;
5775 insn[0].imm = obj->kconfig_map_idx;
5776 } else {
5777 insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
5778 insn[0].imm = obj->maps[obj->kconfig_map_idx].fd;
5779 }
5780 insn[1].imm = ext->kcfg.data_off;
5781 } else {
5782 if (ext->ksym.type_id && ext->is_set) {
5783 insn[0].src_reg = BPF_PSEUDO_BTF_ID;
5784 insn[0].imm = ext->ksym.kernel_btf_id;
5785 insn[1].imm = ext->ksym.kernel_btf_obj_fd;
5786 } else {
5787 insn[0].imm = (__u32)ext->ksym.addr;
5788 insn[1].imm = ext->ksym.addr >> 32;
5789 }
5790 }
5791 break;
5792 case RELO_EXTERN_FUNC:
5793 ext = &obj->externs[relo->sym_off];
5794 insn[0].src_reg = BPF_PSEUDO_KFUNC_CALL;
5795 if (ext->is_set) {
5796 insn[0].imm = ext->ksym.kernel_btf_id;
5797 insn[0].off = ext->ksym.btf_fd_idx;
5798 } else {
5799 insn[0].imm = 0;
5800 insn[0].off = 0;
5801 }
5802 break;
5803 case RELO_SUBPROG_ADDR:
5804 if (insn[0].src_reg != BPF_PSEUDO_FUNC) {
5805 pr_warn("prog '%s': relo #%d: bad insn\n",
5806 prog->name, i);
5807 return -EINVAL;
5808 }
			/* handled already */
5810 break;
5811 case RELO_CALL:
			/* handled as part of subprog call relocation, see bpf_object__reloc_code() */
5813 break;
5814 case RELO_CORE:
			/* CO-RE relocations were already applied earlier (or recorded for gen_loader) */
5816 break;
5817 default:
5818 pr_warn("prog '%s': relo #%d: bad relo type %d\n",
5819 prog->name, i, relo->type);
5820 return -EINVAL;
5821 }
5822 }
5823
5824 return 0;
5825}
5826
5827static int adjust_prog_btf_ext_info(const struct bpf_object *obj,
5828 const struct bpf_program *prog,
5829 const struct btf_ext_info *ext_info,
5830 void **prog_info, __u32 *prog_rec_cnt,
5831 __u32 *prog_rec_sz)
5832{
5833 void *copy_start = NULL, *copy_end = NULL;
5834 void *rec, *rec_end, *new_prog_info;
5835 const struct btf_ext_info_sec *sec;
5836 size_t old_sz, new_sz;
5837 const char *sec_name;
5838 int i, off_adj;
5839
5840 for_each_btf_ext_sec(ext_info, sec) {
5841 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
5842 if (!sec_name)
5843 return -EINVAL;
5844 if (strcmp(sec_name, prog->sec_name) != 0)
5845 continue;
5846
5847 for_each_btf_ext_rec(ext_info, sec, i, rec) {
5848 __u32 insn_off = *(__u32 *)rec / BPF_INSN_SZ;
5849
5850 if (insn_off < prog->sec_insn_off)
5851 continue;
5852 if (insn_off >= prog->sec_insn_off + prog->sec_insn_cnt)
5853 break;
5854
5855 if (!copy_start)
5856 copy_start = rec;
5857 copy_end = rec + ext_info->rec_size;
5858 }
5859
5860 if (!copy_start)
5861 return -ENOENT;
5862
		/* append func/line info of a given (sub-)program to the main
		 * program func/line info
		 */
5866 old_sz = (size_t)(*prog_rec_cnt) * ext_info->rec_size;
5867 new_sz = old_sz + (copy_end - copy_start);
5868 new_prog_info = realloc(*prog_info, new_sz);
5869 if (!new_prog_info)
5870 return -ENOMEM;
5871 *prog_info = new_prog_info;
5872 *prog_rec_cnt = new_sz / ext_info->rec_size;
5873 memcpy(new_prog_info + old_sz, copy_start, copy_end - copy_start);
5874
		/* kernel instruction offsets are in units of 8-byte
		 * instructions, while .BTF.ext instruction offsets generated
		 * by Clang are in units of bytes, so convert Clang offsets
		 * into kernel offsets and adjust them according to the
		 * program's relocated position
		 */
5881 off_adj = prog->sub_insn_off - prog->sec_insn_off;
5882 rec = new_prog_info + old_sz;
5883 rec_end = new_prog_info + new_sz;
5884 for (; rec < rec_end; rec += ext_info->rec_size) {
5885 __u32 *insn_off = rec;
5886
5887 *insn_off = *insn_off / BPF_INSN_SZ + off_adj;
5888 }
5889 *prog_rec_sz = ext_info->rec_size;
5890 return 0;
5891 }
5892
5893 return -ENOENT;
5894}
5895
5896static int
5897reloc_prog_func_and_line_info(const struct bpf_object *obj,
5898 struct bpf_program *main_prog,
5899 const struct bpf_program *prog)
5900{
5901 int err;
5902
	/* no .BTF.ext relocation if .BTF.ext is missing or kernel doesn't
	 * support func/line info
	 */
5906 if (!obj->btf_ext || !kernel_supports(obj, FEAT_BTF_FUNC))
5907 return 0;
5908
	/* only attempt func info relocation if main program's func_info
	 * relocation was successful
	 */
5912 if (main_prog != prog && !main_prog->func_info)
5913 goto line_info;
5914
5915 err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->func_info,
5916 &main_prog->func_info,
5917 &main_prog->func_info_cnt,
5918 &main_prog->func_info_rec_size);
5919 if (err) {
5920 if (err != -ENOENT) {
5921 pr_warn("prog '%s': error relocating .BTF.ext function info: %d\n",
5922 prog->name, err);
5923 return err;
5924 }
5925 if (main_prog->func_info) {
			/* func_info was relocated for earlier sub-programs but
			 * is missing for this one; such inconsistency must be
			 * reported as an error
			 */
5930 pr_warn("prog '%s': missing .BTF.ext function info.\n", prog->name);
5931 return err;
5932 }
5933
5934 pr_warn("prog '%s': missing .BTF.ext function info for the main program, skipping all of .BTF.ext func info.\n",
5935 prog->name);
5936 }
5937
5938line_info:
	/* only attempt line info relocation if main program's line_info
	 * relocation was successful
	 */
5940 if (main_prog != prog && !main_prog->line_info)
5941 return 0;
5942
5943 err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->line_info,
5944 &main_prog->line_info,
5945 &main_prog->line_info_cnt,
5946 &main_prog->line_info_rec_size);
5947 if (err) {
5948 if (err != -ENOENT) {
5949 pr_warn("prog '%s': error relocating .BTF.ext line info: %d\n",
5950 prog->name, err);
5951 return err;
5952 }
5953 if (main_prog->line_info) {
			/* line_info was relocated for earlier sub-programs but
			 * is missing for this one; such inconsistency must be
			 * reported as an error
			 */
5958 pr_warn("prog '%s': missing .BTF.ext line info.\n", prog->name);
5959 return err;
5960 }
5961
5962 pr_warn("prog '%s': missing .BTF.ext line info for the main program, skipping all of .BTF.ext line info.\n",
5963 prog->name);
5964 }
5965 return 0;
5966}
5967
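/* prog->reloc_desc is kept sorted by insn_idx, so relocations for a given
 * instruction can be looked up with a binary search
 */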
5968static int cmp_relo_by_insn_idx(const void *key, const void *elem)
5969{
5970 size_t insn_idx = *(const size_t *)key;
5971 const struct reloc_desc *relo = elem;
5972
5973 if (insn_idx == relo->insn_idx)
5974 return 0;
5975 return insn_idx < relo->insn_idx ? -1 : 1;
5976}
5977
5978static struct reloc_desc *find_prog_insn_relo(const struct bpf_program *prog, size_t insn_idx)
5979{
5980 if (!prog->nr_reloc)
5981 return NULL;
5982 return bsearch(&insn_idx, prog->reloc_desc, prog->nr_reloc,
5983 sizeof(*prog->reloc_desc), cmp_relo_by_insn_idx);
5984}
5985
5986static int append_subprog_relos(struct bpf_program *main_prog, struct bpf_program *subprog)
5987{
5988 int new_cnt = main_prog->nr_reloc + subprog->nr_reloc;
5989 struct reloc_desc *relos;
5990 int i;
5991
5992 if (main_prog == subprog)
5993 return 0;
5994 relos = libbpf_reallocarray(main_prog->reloc_desc, new_cnt, sizeof(*relos));
5995 if (!relos)
5996 return -ENOMEM;
5997 if (subprog->nr_reloc)
5998 memcpy(relos + main_prog->nr_reloc, subprog->reloc_desc,
5999 sizeof(*relos) * subprog->nr_reloc);
6000
6001 for (i = main_prog->nr_reloc; i < new_cnt; i++)
6002 relos[i].insn_idx += subprog->sub_insn_off;
6003
	/* appended relos' insn_idx is shifted by the subprog's position
	 * within the main program; subprog's own relo array stays intact
	 */
6006 main_prog->reloc_desc = relos;
6007 main_prog->nr_reloc = new_cnt;
6008 return 0;
6009}
6010
6011static int
6012bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog,
6013 struct bpf_program *prog)
6014{
6015 size_t sub_insn_idx, insn_idx, new_cnt;
6016 struct bpf_program *subprog;
6017 struct bpf_insn *insns, *insn;
6018 struct reloc_desc *relo;
6019 int err;
6020
6021 err = reloc_prog_func_and_line_info(obj, main_prog, prog);
6022 if (err)
6023 return err;
6024
6025 for (insn_idx = 0; insn_idx < prog->sec_insn_cnt; insn_idx++) {
6026 insn = &main_prog->insns[prog->sub_insn_off + insn_idx];
6027 if (!insn_is_subprog_call(insn) && !insn_is_pseudo_func(insn))
6028 continue;
6029
6030 relo = find_prog_insn_relo(prog, insn_idx);
6031 if (relo && relo->type == RELO_EXTERN_FUNC)
			/* kfunc relocations will be handled later
			 * in bpf_object__relocate_data()
			 */
6035 continue;
6036 if (relo && relo->type != RELO_CALL && relo->type != RELO_SUBPROG_ADDR) {
6037 pr_warn("prog '%s': unexpected relo for insn #%zu, type %d\n",
6038 prog->name, insn_idx, relo->type);
6039 return -LIBBPF_ERRNO__RELOC;
6040 }
6041 if (relo) {
			/* sub-program instruction index is a combination of an
			 * offset of a symbol pointed to by relocation and
			 * call instruction's imm field; for global functions,
			 * call always has imm = -1, but for static functions
			 * relocation is against STT_SECTION and insn->imm
			 * points to the start of the static function; for
			 * subprog addr relocation, relo->sym_off + insn->imm
			 * is the byte offset within the section
			 */
6052 if (relo->type == RELO_CALL)
6053 sub_insn_idx = relo->sym_off / BPF_INSN_SZ + insn->imm + 1;
6054 else
6055 sub_insn_idx = (relo->sym_off + insn->imm) / BPF_INSN_SZ;
6056 } else if (insn_is_pseudo_func(insn)) {
			/* RELO_SUBPROG_ADDR relo is always emitted for ld_imm64
			 * with BPF_PSEUDO_FUNC, even within the same section,
			 * so a missing relocation here is an internal error
			 */
6061 pr_warn("prog '%s': missing subprog addr relo for insn #%zu\n",
6062 prog->name, insn_idx);
6063 return -LIBBPF_ERRNO__RELOC;
6064 } else {
			/* if subprogram call is to a static function within
			 * the same ELF section, there won't be any relocation
			 * emitted, but it also means there is no additional
			 * offset necessary: insn->imm is relative to the
			 * instruction's original position within the section
			 */
6071 sub_insn_idx = prog->sec_insn_off + insn_idx + insn->imm + 1;
6072 }
6073
		/* we enforce that sub-programs should be in .text section */
6075 subprog = find_prog_by_sec_insn(obj, obj->efile.text_shndx, sub_insn_idx);
6076 if (!subprog) {
6077 pr_warn("prog '%s': no .text section found yet sub-program call exists\n",
6078 prog->name);
6079 return -LIBBPF_ERRNO__RELOC;
6080 }
6081
		/* if it's the first call instruction calling into this
		 * subprogram (meaning this subprog hasn't been processed yet
		 * within the context of current main program):
		 *   - append subprog's code at the end of main program's
		 *     instructions and adjust its func/line info and relos;
		 *   - process appended code recursively, in case it calls
		 *     other not-yet-appended subprograms
		 */
6092 if (subprog->sub_insn_off == 0) {
6093 subprog->sub_insn_off = main_prog->insns_cnt;
6094
6095 new_cnt = main_prog->insns_cnt + subprog->insns_cnt;
6096 insns = libbpf_reallocarray(main_prog->insns, new_cnt, sizeof(*insns));
6097 if (!insns) {
6098 pr_warn("prog '%s': failed to realloc prog code\n", main_prog->name);
6099 return -ENOMEM;
6100 }
6101 main_prog->insns = insns;
6102 main_prog->insns_cnt = new_cnt;
6103
6104 memcpy(main_prog->insns + subprog->sub_insn_off, subprog->insns,
6105 subprog->insns_cnt * sizeof(*insns));
6106
6107 pr_debug("prog '%s': added %zu insns from sub-prog '%s'\n",
6108 main_prog->name, subprog->insns_cnt, subprog->name);
6109
			/* subprog's insns are appended; append its relos to main prog too */
6111 err = append_subprog_relos(main_prog, subprog);
6112 if (err)
6113 return err;
6114 err = bpf_object__reloc_code(obj, main_prog, subprog);
6115 if (err)
6116 return err;
6117 }
6118
		/* main_prog->insns memory could have been re-allocated, so
		 * recalculate the instruction pointer
		 */
6122 insn = &main_prog->insns[prog->sub_insn_off + insn_idx];
6123
		/* calculate the correct instruction position within current
		 * main prog; each main prog can have a different set of
		 * subprograms appended (potentially in different order as
		 * well), so position of any subprog can differ between main
		 * programs
		 */
6128 insn->imm = subprog->sub_insn_off - (prog->sub_insn_off + insn_idx) - 1;
6129
6130 pr_debug("prog '%s': insn #%zu relocated, imm %d points to subprog '%s' (now at %zu offset)\n",
6131 prog->name, insn_idx, insn->imm, subprog->name, subprog->sub_insn_off);
6132 }
6133
6134 return 0;
6135}
6136
/*
 * Relocate sub-program calls and ld_imm64 references to sub-programs.
 *
 * Each entry-point BPF program is processed separately. First, sub_insn_off
 * of every subprog is reset to zero, marking it as "not yet appended to the
 * current main program". Then the main program's instructions are scanned for
 * calls into subprogs and for ld_imm64 instructions carrying subprog
 * addresses. When such an instruction is found and the target subprog hasn't
 * been appended yet, the subprog's code is copied to the end of the main
 * program (recording its position in sub_insn_off), its func/line info and
 * relocations are adjusted, and the appended code is processed recursively in
 * the same manner. Finally, the call/ld_imm64 instruction is patched with the
 * instruction delta from the call site to the subprog's appended position.
 *
 * Because every main program gets its own private copy of each used subprog,
 * the same subprog can live at different offsets in different main programs.
 */
6218static int
6219bpf_object__relocate_calls(struct bpf_object *obj, struct bpf_program *prog)
6220{
6221 struct bpf_program *subprog;
6222 int i, err;
6223
	/* mark all subprogs as not relocated (yet) within the context of
	 * current main program
	 */
6227 for (i = 0; i < obj->nr_programs; i++) {
6228 subprog = &obj->programs[i];
6229 if (!prog_is_subprog(obj, subprog))
6230 continue;
6231
6232 subprog->sub_insn_off = 0;
6233 }
6234
6235 err = bpf_object__reloc_code(obj, prog, prog);
6236 if (err)
6237 return err;
6238
6239
6240 return 0;
6241}
6242
6243static void
6244bpf_object__free_relocs(struct bpf_object *obj)
6245{
6246 struct bpf_program *prog;
6247 int i;
6248
	/* free up relocation descriptors */
6250 for (i = 0; i < obj->nr_programs; i++) {
6251 prog = &obj->programs[i];
6252 zfree(&prog->reloc_desc);
6253 prog->nr_reloc = 0;
6254 }
6255}
6256
6257static int cmp_relocs(const void *_a, const void *_b)
6258{
6259 const struct reloc_desc *a = _a;
6260 const struct reloc_desc *b = _b;
6261
6262 if (a->insn_idx != b->insn_idx)
6263 return a->insn_idx < b->insn_idx ? -1 : 1;
6264
	/* tie-break relocations with equal insn_idx by type, for determinism */
6266 if (a->type != b->type)
6267 return a->type < b->type ? -1 : 1;
6268
6269 return 0;
6270}
6271
6272static void bpf_object__sort_relos(struct bpf_object *obj)
6273{
6274 int i;
6275
6276 for (i = 0; i < obj->nr_programs; i++) {
6277 struct bpf_program *p = &obj->programs[i];
6278
6279 if (!p->nr_reloc)
6280 continue;
6281
6282 qsort(p->reloc_desc, p->nr_reloc, sizeof(*p->reloc_desc), cmp_relocs);
6283 }
6284}
6285
6286static int
6287bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
6288{
6289 struct bpf_program *prog;
6290 size_t i, j;
6291 int err;
6292
6293 if (obj->btf_ext) {
6294 err = bpf_object__relocate_core(obj, targ_btf_path);
6295 if (err) {
6296 pr_warn("failed to perform CO-RE relocations: %d\n",
6297 err);
6298 return err;
6299 }
6300 if (obj->gen_loader)
6301 bpf_object__sort_relos(obj);
6302 }
6303
	/* Before relocating calls, pre-process relocations and mark ld_imm64
	 * instructions that point to subprogs; otherwise
	 * bpf_object__reloc_code() would have to consider all ld_imm64 insns
	 * as relocation candidates, which would hurt performance: the number
	 * of relocations can be big, while subprog address loads are usually
	 * few
	 */
6311 for (i = 0; i < obj->nr_programs; i++) {
6312 prog = &obj->programs[i];
6313 for (j = 0; j < prog->nr_reloc; j++) {
6314 struct reloc_desc *relo = &prog->reloc_desc[j];
6315 struct bpf_insn *insn = &prog->insns[relo->insn_idx];
6316
			/* mark the insn, so it's recognized by insn_is_pseudo_func() */
6318 if (relo->type == RELO_SUBPROG_ADDR)
6319 insn[0].src_reg = BPF_PSEUDO_FUNC;
6320 }
6321 }
6322
	/* relocate subprogram calls and append used subprograms to main
	 * programs; each copy of subprogram code needs to be relocated
	 * differently for each main program, because its code location might
	 * have changed; subprog relos are appended to the main program's so
	 * that data relocations can be processed after text is completely
	 * relocated
	 */
6330 for (i = 0; i < obj->nr_programs; i++) {
6331 prog = &obj->programs[i];
6332
		/* sub-program's sub-calls are relocated within the context of
		 * its main program only
		 */
6335 if (prog_is_subprog(obj, prog))
6336 continue;
6337 if (!prog->load)
6338 continue;
6339
6340 err = bpf_object__relocate_calls(obj, prog);
6341 if (err) {
6342 pr_warn("prog '%s': failed to relocate calls: %d\n",
6343 prog->name, err);
6344 return err;
6345 }
6346 }
6347
6348 for (i = 0; i < obj->nr_programs; i++) {
6349 prog = &obj->programs[i];
6350 if (prog_is_subprog(obj, prog))
6351 continue;
6352 if (!prog->load)
6353 continue;
6354 err = bpf_object__relocate_data(obj, prog);
6355 if (err) {
6356 pr_warn("prog '%s': failed to relocate data references: %d\n",
6357 prog->name, err);
6358 return err;
6359 }
6360 }
6361 if (!obj->gen_loader)
6362 bpf_object__free_relocs(obj);
6363 return 0;
6364}
6365
6366static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
6367 Elf64_Shdr *shdr, Elf_Data *data);
6368
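/* Collect .maps section relocations: each one points a slot of an outer
 * map-in-map's or PROG_ARRAY's "values" array at a target map or program;
 * targets are recorded in map->init_slots and resolved to FDs later, in
 * init_map_in_map_slots() and init_prog_array_slots().
 */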
6369static int bpf_object__collect_map_relos(struct bpf_object *obj,
6370 Elf64_Shdr *shdr, Elf_Data *data)
6371{
6372 const int bpf_ptr_sz = 8, host_ptr_sz = sizeof(void *);
6373 int i, j, nrels, new_sz;
6374 const struct btf_var_secinfo *vi = NULL;
6375 const struct btf_type *sec, *var, *def;
6376 struct bpf_map *map = NULL, *targ_map = NULL;
6377 struct bpf_program *targ_prog = NULL;
6378 bool is_prog_array, is_map_in_map;
6379 const struct btf_member *member;
6380 const char *name, *mname, *type;
6381 unsigned int moff;
6382 Elf64_Sym *sym;
6383 Elf64_Rel *rel;
6384 void *tmp;
6385
6386 if (!obj->efile.btf_maps_sec_btf_id || !obj->btf)
6387 return -EINVAL;
6388 sec = btf__type_by_id(obj->btf, obj->efile.btf_maps_sec_btf_id);
6389 if (!sec)
6390 return -EINVAL;
6391
6392 nrels = shdr->sh_size / shdr->sh_entsize;
6393 for (i = 0; i < nrels; i++) {
6394 rel = elf_rel_by_idx(data, i);
6395 if (!rel) {
6396 pr_warn(".maps relo #%d: failed to get ELF relo\n", i);
6397 return -LIBBPF_ERRNO__FORMAT;
6398 }
6399
6400 sym = elf_sym_by_idx(obj, ELF64_R_SYM(rel->r_info));
6401 if (!sym) {
6402 pr_warn(".maps relo #%d: symbol %zx not found\n",
6403 i, (size_t)ELF64_R_SYM(rel->r_info));
6404 return -LIBBPF_ERRNO__FORMAT;
6405 }
6406 name = elf_sym_str(obj, sym->st_name) ?: "<?>";
6407
6408 pr_debug(".maps relo #%d: for %zd value %zd rel->r_offset %zu name %d ('%s')\n",
6409 i, (ssize_t)(rel->r_info >> 32), (size_t)sym->st_value,
6410 (size_t)rel->r_offset, sym->st_name, name);
6411
6412 for (j = 0; j < obj->nr_maps; j++) {
6413 map = &obj->maps[j];
6414 if (map->sec_idx != obj->efile.btf_maps_shndx)
6415 continue;
6416
6417 vi = btf_var_secinfos(sec) + map->btf_var_idx;
6418 if (vi->offset <= rel->r_offset &&
6419 rel->r_offset + bpf_ptr_sz <= vi->offset + vi->size)
6420 break;
6421 }
6422 if (j == obj->nr_maps) {
6423 pr_warn(".maps relo #%d: cannot find map '%s' at rel->r_offset %zu\n",
6424 i, name, (size_t)rel->r_offset);
6425 return -EINVAL;
6426 }
6427
6428 is_map_in_map = bpf_map_type__is_map_in_map(map->def.type);
6429 is_prog_array = map->def.type == BPF_MAP_TYPE_PROG_ARRAY;
6430 type = is_map_in_map ? "map" : "prog";
6431 if (is_map_in_map) {
6432 if (sym->st_shndx != obj->efile.btf_maps_shndx) {
6433 pr_warn(".maps relo #%d: '%s' isn't a BTF-defined map\n",
6434 i, name);
6435 return -LIBBPF_ERRNO__RELOC;
6436 }
6437 if (map->def.type == BPF_MAP_TYPE_HASH_OF_MAPS &&
6438 map->def.key_size != sizeof(int)) {
6439 pr_warn(".maps relo #%d: hash-of-maps '%s' should have key size %zu.\n",
6440 i, map->name, sizeof(int));
6441 return -EINVAL;
6442 }
6443 targ_map = bpf_object__find_map_by_name(obj, name);
6444 if (!targ_map) {
6445 pr_warn(".maps relo #%d: '%s' isn't a valid map reference\n",
6446 i, name);
6447 return -ESRCH;
6448 }
6449 } else if (is_prog_array) {
6450 targ_prog = bpf_object__find_program_by_name(obj, name);
6451 if (!targ_prog) {
6452 pr_warn(".maps relo #%d: '%s' isn't a valid program reference\n",
6453 i, name);
6454 return -ESRCH;
6455 }
6456 if (targ_prog->sec_idx != sym->st_shndx ||
6457 targ_prog->sec_insn_off * 8 != sym->st_value ||
6458 prog_is_subprog(obj, targ_prog)) {
6459 pr_warn(".maps relo #%d: '%s' isn't an entry-point program\n",
6460 i, name);
6461 return -LIBBPF_ERRNO__RELOC;
6462 }
6463 } else {
6464 return -EINVAL;
6465 }
6466
6467 var = btf__type_by_id(obj->btf, vi->type);
6468 def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
6469 if (btf_vlen(def) == 0)
6470 return -EINVAL;
6471 member = btf_members(def) + btf_vlen(def) - 1;
6472 mname = btf__name_by_offset(obj->btf, member->name_off);
6473 if (strcmp(mname, "values"))
6474 return -EINVAL;
6475
6476 moff = btf_member_bit_offset(def, btf_vlen(def) - 1) / 8;
6477 if (rel->r_offset - vi->offset < moff)
6478 return -EINVAL;
6479
6480 moff = rel->r_offset - vi->offset - moff;
		/* here we use BPF pointer size, which is always 64 bit, as we
		 * are parsing ELF that was built for BPF target
		 */
6484 if (moff % bpf_ptr_sz)
6485 return -EINVAL;
6486 moff /= bpf_ptr_sz;
6487 if (moff >= map->init_slots_sz) {
6488 new_sz = moff + 1;
6489 tmp = libbpf_reallocarray(map->init_slots, new_sz, host_ptr_sz);
6490 if (!tmp)
6491 return -ENOMEM;
6492 map->init_slots = tmp;
6493 memset(map->init_slots + map->init_slots_sz, 0,
6494 (new_sz - map->init_slots_sz) * host_ptr_sz);
6495 map->init_slots_sz = new_sz;
6496 }
6497 map->init_slots[moff] = is_map_in_map ? (void *)targ_map : (void *)targ_prog;
6498
6499 pr_debug(".maps relo #%d: map '%s' slot [%d] points to %s '%s'\n",
6500 i, map->name, moff, type, name);
6501 }
6502
6503 return 0;
6504}
6505
6506static int bpf_object__collect_relos(struct bpf_object *obj)
6507{
6508 int i, err;
6509
6510 for (i = 0; i < obj->efile.sec_cnt; i++) {
6511 struct elf_sec_desc *sec_desc = &obj->efile.secs[i];
6512 Elf64_Shdr *shdr;
6513 Elf_Data *data;
6514 int idx;
6515
6516 if (sec_desc->sec_type != SEC_RELO)
6517 continue;
6518
6519 shdr = sec_desc->shdr;
6520 data = sec_desc->data;
6521 idx = shdr->sh_info;
6522
6523 if (shdr->sh_type != SHT_REL) {
6524 pr_warn("internal error at %d\n", __LINE__);
6525 return -LIBBPF_ERRNO__INTERNAL;
6526 }
6527
6528 if (idx == obj->efile.st_ops_shndx)
6529 err = bpf_object__collect_st_ops_relos(obj, shdr, data);
6530 else if (idx == obj->efile.btf_maps_shndx)
6531 err = bpf_object__collect_map_relos(obj, shdr, data);
6532 else
6533 err = bpf_object__collect_prog_relos(obj, shdr, data);
6534 if (err)
6535 return err;
6536 }
6537
6538 bpf_object__sort_relos(obj);
6539 return 0;
6540}
6541
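/* a helper call is a BPF_JMP|BPF_CALL instruction with BPF_K source, zero
 * src_reg/dst_reg, and the helper's ID in the imm field
 */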
6542static bool insn_is_helper_call(struct bpf_insn *insn, enum bpf_func_id *func_id)
6543{
6544 if (BPF_CLASS(insn->code) == BPF_JMP &&
6545 BPF_OP(insn->code) == BPF_CALL &&
6546 BPF_SRC(insn->code) == BPF_K &&
6547 insn->src_reg == 0 &&
6548 insn->dst_reg == 0) {
6549 *func_id = insn->imm;
6550 return true;
6551 }
6552 return false;
6553}
6554
6555static int bpf_object__sanitize_prog(struct bpf_object *obj, struct bpf_program *prog)
6556{
6557 struct bpf_insn *insn = prog->insns;
6558 enum bpf_func_id func_id;
6559 int i;
6560
6561 if (obj->gen_loader)
6562 return 0;
6563
6564 for (i = 0; i < prog->insns_cnt; i++, insn++) {
6565 if (!insn_is_helper_call(insn, &func_id))
6566 continue;
6567
		/* on kernels that don't yet support
		 * bpf_probe_read_{kernel,user}[_str] helpers, fall back
		 * to bpf_probe_read() helper
		 */
6572 switch (func_id) {
6573 case BPF_FUNC_probe_read_kernel:
6574 case BPF_FUNC_probe_read_user:
6575 if (!kernel_supports(obj, FEAT_PROBE_READ_KERN))
6576 insn->imm = BPF_FUNC_probe_read;
6577 break;
6578 case BPF_FUNC_probe_read_kernel_str:
6579 case BPF_FUNC_probe_read_user_str:
6580 if (!kernel_supports(obj, FEAT_PROBE_READ_KERN))
6581 insn->imm = BPF_FUNC_probe_read_str;
6582 break;
6583 default:
6584 break;
6585 }
6586 }
6587 return 0;
6588}
6589
6590static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attach_name,
6591 int *btf_obj_fd, int *btf_type_id);
6592
/* called as prog->sec_def->prog_prepare_load_fn for libbpf-defined SEC()
 * handlers to adjust load attributes based on section definition flags
 */
6594static int libbpf_prepare_prog_load(struct bpf_program *prog,
6595 struct bpf_prog_load_opts *opts, long cookie)
6596{
6597 enum sec_def_flags def = cookie;
6598
	/* old kernels might not support specifying expected_attach_type */
6600 if ((def & SEC_EXP_ATTACH_OPT) && !kernel_supports(prog->obj, FEAT_EXP_ATTACH_TYPE))
6601 opts->expected_attach_type = 0;
6602
6603 if (def & SEC_SLEEPABLE)
6604 opts->prog_flags |= BPF_F_SLEEPABLE;
6605
6606 if (prog->type == BPF_PROG_TYPE_XDP && (def & SEC_XDP_FRAGS))
6607 opts->prog_flags |= BPF_F_XDP_HAS_FRAGS;
6608
6609 if (def & SEC_DEPRECATED)
6610 pr_warn("SEC(\"%s\") is deprecated, please see https://github.com/libbpf/libbpf/wiki/Libbpf-1.0-migration-guide#bpf-program-sec-annotation-deprecations for details\n",
6611 prog->sec_name);
6612
6613 if ((prog->type == BPF_PROG_TYPE_TRACING ||
6614 prog->type == BPF_PROG_TYPE_LSM ||
6615 prog->type == BPF_PROG_TYPE_EXT) && !prog->attach_btf_id) {
6616 int btf_obj_fd = 0, btf_type_id = 0, err;
6617 const char *attach_name;
6618
6619 attach_name = strchr(prog->sec_name, '/') + 1;
6620 err = libbpf_find_attach_btf_id(prog, attach_name, &btf_obj_fd, &btf_type_id);
6621 if (err)
6622 return err;
6623
		/* cache resolved BTF FD and BTF type ID in the prog */
6625 prog->attach_btf_obj_fd = btf_obj_fd;
6626 prog->attach_btf_id = btf_type_id;
6627
		/* by this point opts were already populated by libbpf's common
		 * logic, so updating prog->attach_btf_obj_fd/attach_btf_id
		 * alone wouldn't be picked up; opts have to be updated
		 * explicitly here as well
		 */
6633 opts->attach_btf_obj_fd = btf_obj_fd;
6634 opts->attach_btf_id = btf_type_id;
6635 }
6636 return 0;
6637}
6638
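/* load a single program instance into the kernel; on failure, the load is
 * retried with verifier logging enabled so a useful log can be reported
 */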
6639static int bpf_object_load_prog_instance(struct bpf_object *obj, struct bpf_program *prog,
6640 struct bpf_insn *insns, int insns_cnt,
6641 const char *license, __u32 kern_version,
6642 int *prog_fd)
6643{
6644 LIBBPF_OPTS(bpf_prog_load_opts, load_attr);
6645 const char *prog_name = NULL;
6646 char *cp, errmsg[STRERR_BUFSIZE];
6647 size_t log_buf_size = 0;
6648 char *log_buf = NULL, *tmp;
6649 int btf_fd, ret, err;
6650 bool own_log_buf = true;
6651 __u32 log_level = prog->log_level;
6652
6653 if (prog->type == BPF_PROG_TYPE_UNSPEC) {
		/* The program type must be set. Most likely we couldn't
		 * find a proper section definition at load time, and thus
		 * we didn't infer the type.
		 */
6658 pr_warn("prog '%s': missing BPF prog type, check ELF section name '%s'\n",
6659 prog->name, prog->sec_name);
6660 return -EINVAL;
6661 }
6662
6663 if (!insns || !insns_cnt)
6664 return -EINVAL;
6665
6666 load_attr.expected_attach_type = prog->expected_attach_type;
6667 if (kernel_supports(obj, FEAT_PROG_NAME))
6668 prog_name = prog->name;
6669 load_attr.attach_prog_fd = prog->attach_prog_fd;
6670 load_attr.attach_btf_obj_fd = prog->attach_btf_obj_fd;
6671 load_attr.attach_btf_id = prog->attach_btf_id;
6672 load_attr.kern_version = kern_version;
6673 load_attr.prog_ifindex = prog->prog_ifindex;

	/* specify func_info/line_info only if kernel supports them */
6676 btf_fd = bpf_object__btf_fd(obj);
6677 if (btf_fd >= 0 && kernel_supports(obj, FEAT_BTF_FUNC)) {
6678 load_attr.prog_btf_fd = btf_fd;
6679 load_attr.func_info = prog->func_info;
6680 load_attr.func_info_rec_size = prog->func_info_rec_size;
6681 load_attr.func_info_cnt = prog->func_info_cnt;
6682 load_attr.line_info = prog->line_info;
6683 load_attr.line_info_rec_size = prog->line_info_rec_size;
6684 load_attr.line_info_cnt = prog->line_info_cnt;
6685 }
6686 load_attr.log_level = log_level;
6687 load_attr.prog_flags = prog->prog_flags;
6688 load_attr.fd_array = obj->fd_array;

	/* let the section handler adjust load attributes, if it wants to */
6691 if (prog->sec_def && prog->sec_def->prog_prepare_load_fn) {
6692 err = prog->sec_def->prog_prepare_load_fn(prog, &load_attr, prog->sec_def->cookie);
6693 if (err < 0) {
6694 pr_warn("prog '%s': failed to prepare load attributes: %d\n",
6695 prog->name, err);
6696 return err;
6697 }
6698 }
6699
6700 if (obj->gen_loader) {
6701 bpf_gen__prog_load(obj->gen_loader, prog->type, prog->name,
6702 license, insns, insns_cnt, &load_attr,
6703 prog - obj->programs);
6704 *prog_fd = -1;
6705 return 0;
6706 }
6707
6708retry_load:
	/* if log_level is 0, we won't request logs initially even if
	 * custom log_buf is specified; if the program load fails, then we'll
	 * bump log_level to 1 and use either custom log_buf or we'll allocate
	 * our own and retry the load to get details on what failed
	 */
6714 if (log_level) {
6715 if (prog->log_buf) {
6716 log_buf = prog->log_buf;
6717 log_buf_size = prog->log_size;
6718 own_log_buf = false;
6719 } else if (obj->log_buf) {
6720 log_buf = obj->log_buf;
6721 log_buf_size = obj->log_size;
6722 own_log_buf = false;
6723 } else {
6724 log_buf_size = max((size_t)BPF_LOG_BUF_SIZE, log_buf_size * 2);
6725 tmp = realloc(log_buf, log_buf_size);
6726 if (!tmp) {
6727 ret = -ENOMEM;
6728 goto out;
6729 }
6730 log_buf = tmp;
6731 log_buf[0] = '\0';
6732 own_log_buf = true;
6733 }
6734 }
6735
6736 load_attr.log_buf = log_buf;
6737 load_attr.log_size = log_buf_size;
6738 load_attr.log_level = log_level;
6739
6740 ret = bpf_prog_load(prog->type, prog_name, license, insns, insns_cnt, &load_attr);
6741 if (ret >= 0) {
6742 if (log_level && own_log_buf) {
6743 pr_debug("prog '%s': -- BEGIN PROG LOAD LOG --\n%s-- END PROG LOAD LOG --\n",
6744 prog->name, log_buf);
6745 }
6746
6747 if (obj->has_rodata && kernel_supports(obj, FEAT_PROG_BIND_MAP)) {
6748 struct bpf_map *map;
6749 int i;
6750
6751 for (i = 0; i < obj->nr_maps; i++) {
				map = &obj->maps[i];
6753 if (map->libbpf_type != LIBBPF_MAP_RODATA)
6754 continue;
6755
6756 if (bpf_prog_bind_map(ret, bpf_map__fd(map), NULL)) {
6757 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
6758 pr_warn("prog '%s': failed to bind map '%s': %s\n",
6759 prog->name, map->real_name, cp);
					/* Don't fail hard if can't bind rodata. */
6761 }
6762 }
6763 }
6764
6765 *prog_fd = ret;
6766 ret = 0;
6767 goto out;
6768 }
6769
6770 if (log_level == 0) {
6771 log_level = 1;
6772 goto retry_load;
6773 }
6774
	/* On ENOSPC, increase log buffer size and retry, unless custom
	 * log_buf is specified.
	 * Be careful to not overflow u32, though. Kernel's log buf size limit
	 * isn't part of UAPI so it can always be bumped to full 4GB. So don't
	 * multiply by 2 unless we are sure we'll fit within 32-bit bounds.
	 */
6781 if (own_log_buf && errno == ENOSPC && log_buf_size <= UINT_MAX / 2)
6782 goto retry_load;
6783
6784 ret = -errno;
6785 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
6786 pr_warn("prog '%s': BPF program load failed: %s\n", prog->name, cp);
6787 pr_perm_msg(ret);
6788
6789 if (own_log_buf && log_buf && log_buf[0] != '\0') {
6790 pr_warn("prog '%s': -- BEGIN PROG LOAD LOG --\n%s-- END PROG LOAD LOG --\n",
6791 prog->name, log_buf);
6792 }
6793 if (insns_cnt >= BPF_MAXINSNS) {
6794 pr_warn("prog '%s': program too large (%d insns), at most %d insns\n",
6795 prog->name, insns_cnt, BPF_MAXINSNS);
6796 }
6797
6798out:
6799 if (own_log_buf)
6800 free(log_buf);
6801 return ret;
6802}
6803
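/* In gen_loader mode, record this program's extern and CO-RE relocations so
 * that the generated loader program can resolve them at load time.
 */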
6804static int bpf_program_record_relos(struct bpf_program *prog)
6805{
6806 struct bpf_object *obj = prog->obj;
6807 int i;
6808
6809 for (i = 0; i < prog->nr_reloc; i++) {
6810 struct reloc_desc *relo = &prog->reloc_desc[i];
6811 struct extern_desc *ext = &obj->externs[relo->sym_off];
6812
6813 switch (relo->type) {
6814 case RELO_EXTERN_VAR:
6815 if (ext->type != EXT_KSYM)
6816 continue;
6817 bpf_gen__record_extern(obj->gen_loader, ext->name,
6818 ext->is_weak, !ext->ksym.type_id,
6819 BTF_KIND_VAR, relo->insn_idx);
6820 break;
6821 case RELO_EXTERN_FUNC:
6822 bpf_gen__record_extern(obj->gen_loader, ext->name,
6823 ext->is_weak, false, BTF_KIND_FUNC,
6824 relo->insn_idx);
6825 break;
6826 case RELO_CORE: {
6827 struct bpf_core_relo cr = {
6828 .insn_off = relo->insn_idx * 8,
6829 .type_id = relo->core_relo->type_id,
6830 .access_str_off = relo->core_relo->access_str_off,
6831 .kind = relo->core_relo->kind,
6832 };
6833
6834 bpf_gen__record_relo_core(obj->gen_loader, &cr);
6835 break;
6836 }
6837 default:
6838 continue;
6839 }
6840 }
6841 return 0;
6842}
6843
6844static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog,
6845 const char *license, __u32 kern_ver)
6846{
6847 int err = 0, fd, i;
6848
6849 if (obj->loaded) {
6850 pr_warn("prog '%s': can't load after object was loaded\n", prog->name);
6851 return libbpf_err(-EINVAL);
6852 }
6853
6854 if (prog->instances.nr < 0 || !prog->instances.fds) {
6855 if (prog->preprocessor) {
6856 pr_warn("Internal error: can't load program '%s'\n",
6857 prog->name);
6858 return libbpf_err(-LIBBPF_ERRNO__INTERNAL);
6859 }
6860
6861 prog->instances.fds = malloc(sizeof(int));
6862 if (!prog->instances.fds) {
6863 pr_warn("Not enough memory for BPF fds\n");
6864 return libbpf_err(-ENOMEM);
6865 }
6866 prog->instances.nr = 1;
6867 prog->instances.fds[0] = -1;
6868 }
6869
6870 if (!prog->preprocessor) {
6871 if (prog->instances.nr != 1) {
6872 pr_warn("prog '%s': inconsistent nr(%d) != 1\n",
6873 prog->name, prog->instances.nr);
6874 }
6875 if (obj->gen_loader)
6876 bpf_program_record_relos(prog);
6877 err = bpf_object_load_prog_instance(obj, prog,
6878 prog->insns, prog->insns_cnt,
6879 license, kern_ver, &fd);
6880 if (!err)
6881 prog->instances.fds[0] = fd;
6882 goto out;
6883 }
6884
6885 for (i = 0; i < prog->instances.nr; i++) {
6886 struct bpf_prog_prep_result result;
6887 bpf_program_prep_t preprocessor = prog->preprocessor;
6888
6889 memset(&result, 0, sizeof(result));
6890 err = preprocessor(prog, i, prog->insns,
6891 prog->insns_cnt, &result);
6892 if (err) {
6893 pr_warn("Preprocessing the %dth instance of program '%s' failed\n",
6894 i, prog->name);
6895 goto out;
6896 }
6897
6898 if (!result.new_insn_ptr || !result.new_insn_cnt) {
6899 pr_debug("Skip loading the %dth instance of program '%s'\n",
6900 i, prog->name);
6901 prog->instances.fds[i] = -1;
6902 if (result.pfd)
6903 *result.pfd = -1;
6904 continue;
6905 }
6906
6907 err = bpf_object_load_prog_instance(obj, prog,
6908 result.new_insn_ptr, result.new_insn_cnt,
6909 license, kern_ver, &fd);
6910 if (err) {
6911 pr_warn("Loading the %dth instance of program '%s' failed\n",
6912 i, prog->name);
6913 goto out;
6914 }
6915
6916 if (result.pfd)
6917 *result.pfd = fd;
6918 prog->instances.fds[i] = fd;
6919 }
6920out:
6921 if (err)
6922 pr_warn("failed to load program '%s'\n", prog->name);
6923 return libbpf_err(err);
6924}
6925
6926int bpf_program__load(struct bpf_program *prog, const char *license, __u32 kern_ver)
6927{
6928 return bpf_object_load_prog(prog->obj, prog, license, kern_ver);
6929}
6930
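/* Sanitize every program, then load all non-subprog programs that are still
 * marked for autoload.
 */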
6931static int
6932bpf_object__load_progs(struct bpf_object *obj, int log_level)
6933{
6934 struct bpf_program *prog;
6935 size_t i;
6936 int err;
6937
6938 for (i = 0; i < obj->nr_programs; i++) {
6939 prog = &obj->programs[i];
6940 err = bpf_object__sanitize_prog(obj, prog);
6941 if (err)
6942 return err;
6943 }
6944
6945 for (i = 0; i < obj->nr_programs; i++) {
6946 prog = &obj->programs[i];
6947 if (prog_is_subprog(obj, prog))
6948 continue;
6949 if (!prog->load) {
6950 pr_debug("prog '%s': skipped loading\n", prog->name);
6951 continue;
6952 }
6953 prog->log_level |= log_level;
6954 err = bpf_object_load_prog(obj, prog, obj->license, obj->kern_version);
6955 if (err)
6956 return err;
6957 }
6958 if (obj->gen_loader)
6959 bpf_object__free_relocs(obj);
6960 return 0;
6961}
6962
6963static const struct bpf_sec_def *find_sec_def(const char *sec_name);
6964
6965static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object_open_opts *opts)
6966{
6967 struct bpf_program *prog;
6968 int err;
6969
6970 bpf_object__for_each_program(prog, obj) {
6971 prog->sec_def = find_sec_def(prog->sec_name);
6972 if (!prog->sec_def) {
			/* couldn't guess, but user might manually specify */
6974 pr_debug("prog '%s': unrecognized ELF section name '%s'\n",
6975 prog->name, prog->sec_name);
6976 continue;
6977 }
6978
6979 bpf_program__set_type(prog, prog->sec_def->prog_type);
6980 bpf_program__set_expected_attach_type(prog, prog->sec_def->expected_attach_type);
6981
6982#pragma GCC diagnostic push
6983#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
6984 if (prog->sec_def->prog_type == BPF_PROG_TYPE_TRACING ||
6985 prog->sec_def->prog_type == BPF_PROG_TYPE_EXT)
6986 prog->attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0);
6987#pragma GCC diagnostic pop
6988
		/* sec_def can have custom callback which should be called
		 * after bpf_program is initialized to adjust its properties
		 */
6992 if (prog->sec_def->prog_setup_fn) {
6993 err = prog->sec_def->prog_setup_fn(prog, prog->sec_def->cookie);
6994 if (err < 0) {
6995 pr_warn("prog '%s': failed to initialize: %d\n",
6996 prog->name, err);
6997 return err;
6998 }
6999 }
7000 }
7001
7002 return 0;
7003}
7004
7005static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf, size_t obj_buf_sz,
7006 const struct bpf_object_open_opts *opts)
7007{
7008 const char *obj_name, *kconfig, *btf_tmp_path;
7009 struct bpf_object *obj;
7010 char tmp_name[64];
7011 int err;
7012 char *log_buf;
7013 size_t log_size;
7014 __u32 log_level;
7015
7016 if (elf_version(EV_CURRENT) == EV_NONE) {
7017 pr_warn("failed to init libelf for %s\n",
7018 path ? : "(mem buf)");
7019 return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
7020 }
7021
7022 if (!OPTS_VALID(opts, bpf_object_open_opts))
7023 return ERR_PTR(-EINVAL);
7024
7025 obj_name = OPTS_GET(opts, object_name, NULL);
7026 if (obj_buf) {
7027 if (!obj_name) {
7028 snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
7029 (unsigned long)obj_buf,
7030 (unsigned long)obj_buf_sz);
7031 obj_name = tmp_name;
7032 }
7033 path = obj_name;
7034 pr_debug("loading object '%s' from buffer\n", obj_name);
7035 }
7036
7037 log_buf = OPTS_GET(opts, kernel_log_buf, NULL);
7038 log_size = OPTS_GET(opts, kernel_log_size, 0);
7039 log_level = OPTS_GET(opts, kernel_log_level, 0);
7040 if (log_size > UINT_MAX)
7041 return ERR_PTR(-EINVAL);
7042 if (log_size && !log_buf)
7043 return ERR_PTR(-EINVAL);
7044
7045 obj = bpf_object__new(path, obj_buf, obj_buf_sz, obj_name);
7046 if (IS_ERR(obj))
7047 return obj;
7048
7049 obj->log_buf = log_buf;
7050 obj->log_size = log_size;
7051 obj->log_level = log_level;
7052
7053 btf_tmp_path = OPTS_GET(opts, btf_custom_path, NULL);
7054 if (btf_tmp_path) {
7055 if (strlen(btf_tmp_path) >= PATH_MAX) {
7056 err = -ENAMETOOLONG;
7057 goto out;
7058 }
7059 obj->btf_custom_path = strdup(btf_tmp_path);
7060 if (!obj->btf_custom_path) {
7061 err = -ENOMEM;
7062 goto out;
7063 }
7064 }
7065
7066 kconfig = OPTS_GET(opts, kconfig, NULL);
7067 if (kconfig) {
7068 obj->kconfig = strdup(kconfig);
7069 if (!obj->kconfig) {
7070 err = -ENOMEM;
7071 goto out;
7072 }
7073 }
7074
7075 err = bpf_object__elf_init(obj);
7076 err = err ? : bpf_object__check_endianness(obj);
7077 err = err ? : bpf_object__elf_collect(obj);
7078 err = err ? : bpf_object__collect_externs(obj);
7079 err = err ? : bpf_object__finalize_btf(obj);
7080 err = err ? : bpf_object__init_maps(obj, opts);
7081 err = err ? : bpf_object_init_progs(obj, opts);
7082 err = err ? : bpf_object__collect_relos(obj);
7083 if (err)
7084 goto out;
7085
7086 bpf_object__elf_finish(obj);
7087
7088 return obj;
7089out:
7090 bpf_object__close(obj);
7091 return ERR_PTR(err);
7092}
7093
7094static struct bpf_object *
7095__bpf_object__open_xattr(struct bpf_object_open_attr *attr, int flags)
7096{
7097 DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
7098 .relaxed_maps = flags & MAPS_RELAX_COMPAT,
7099 );
7100
	/* param validation */
7102 if (!attr->file)
7103 return NULL;
7104
7105 pr_debug("loading %s\n", attr->file);
7106 return bpf_object_open(attr->file, NULL, 0, &opts);
7107}
7108
7109struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
7110{
7111 return libbpf_ptr(__bpf_object__open_xattr(attr, 0));
7112}
7113
7114struct bpf_object *bpf_object__open(const char *path)
7115{
7116 struct bpf_object_open_attr attr = {
7117 .file = path,
7118 .prog_type = BPF_PROG_TYPE_UNSPEC,
7119 };
7120
7121 return libbpf_ptr(__bpf_object__open_xattr(&attr, 0));
7122}
7123
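/* Typical usage (sketch; error handling abbreviated, object/section names
 * are illustrative):
 *
 *	struct bpf_object *obj;
 *
 *	obj = bpf_object__open_file("prog.bpf.o", NULL);
 *	if (libbpf_get_error(obj))
 *		return -1;
 *	if (bpf_object__load(obj))
 *		goto cleanup;
 */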
7124struct bpf_object *
7125bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts)
7126{
7127 if (!path)
7128 return libbpf_err_ptr(-EINVAL);
7129
7130 pr_debug("loading %s\n", path);
7131
7132 return libbpf_ptr(bpf_object_open(path, NULL, 0, opts));
7133}
7134
7135struct bpf_object *
7136bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
7137 const struct bpf_object_open_opts *opts)
7138{
7139 if (!obj_buf || obj_buf_sz == 0)
7140 return libbpf_err_ptr(-EINVAL);
7141
7142 return libbpf_ptr(bpf_object_open(NULL, obj_buf, obj_buf_sz, opts));
7143}
7144
7145struct bpf_object *
7146bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz,
7147 const char *name)
7148{
7149 DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
7150 .object_name = name,
		/* wrong default, but backwards-compatible */
7152 .relaxed_maps = true,
7153 );
7154
	/* returning NULL is wrong, but backwards-compatible */
7156 if (!obj_buf || obj_buf_sz == 0)
7157 return errno = EINVAL, NULL;
7158
7159 return libbpf_ptr(bpf_object_open(NULL, obj_buf, obj_buf_sz, &opts));
7160}
7161
7162static int bpf_object_unload(struct bpf_object *obj)
7163{
7164 size_t i;
7165
7166 if (!obj)
7167 return libbpf_err(-EINVAL);
7168
7169 for (i = 0; i < obj->nr_maps; i++) {
7170 zclose(obj->maps[i].fd);
7171 if (obj->maps[i].st_ops)
7172 zfree(&obj->maps[i].st_ops->kern_vdata);
7173 }
7174
7175 for (i = 0; i < obj->nr_programs; i++)
7176 bpf_program__unload(&obj->programs[i]);
7177
7178 return 0;
7179}
7180
7181int bpf_object__unload(struct bpf_object *obj) __attribute__((alias("bpf_object_unload")));
7182
7183static int bpf_object__sanitize_maps(struct bpf_object *obj)
7184{
7185 struct bpf_map *m;
7186
7187 bpf_object__for_each_map(m, obj) {
7188 if (!bpf_map__is_internal(m))
7189 continue;
7190 if (!kernel_supports(obj, FEAT_ARRAY_MMAP))
7191 m->def.map_flags ^= BPF_F_MMAPABLE;
7192 }
7193
7194 return 0;
7195}
7196
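/* Parse /proc/kallsyms, invoking 'cb' once per symbol. Iteration stops at
 * the first non-zero callback return value, which is then returned to the
 * caller.
 *
 * Example callback (sketch; print_sym is a hypothetical name):
 *
 *	static int print_sym(unsigned long long addr, char type,
 *			     const char *name, void *ctx)
 *	{
 *		printf("%llx %c %s\n", addr, type, name);
 *		return 0;
 *	}
 */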
7197int libbpf_kallsyms_parse(kallsyms_cb_t cb, void *ctx)
7198{
7199 char sym_type, sym_name[500];
7200 unsigned long long sym_addr;
7201 int ret, err = 0;
7202 FILE *f;
7203
7204 f = fopen("/proc/kallsyms", "r");
7205 if (!f) {
7206 err = -errno;
7207 pr_warn("failed to open /proc/kallsyms: %d\n", err);
7208 return err;
7209 }
7210
7211 while (true) {
7212 ret = fscanf(f, "%llx %c %499s%*[^\n]\n",
7213 &sym_addr, &sym_type, sym_name);
7214 if (ret == EOF && feof(f))
7215 break;
7216 if (ret != 3) {
7217 pr_warn("failed to read kallsyms entry: %d\n", ret);
7218 err = -EINVAL;
7219 break;
7220 }
7221
7222 err = cb(sym_addr, sym_type, sym_name, ctx);
7223 if (err)
7224 break;
7225 }
7226
7227 fclose(f);
7228 return err;
7229}
7230
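/* kallsyms callback: resolve EXT_KSYM variable externs to their kallsyms
 * addresses; conflicting duplicate addresses for the same symbol are an
 * error.
 */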
7231static int kallsyms_cb(unsigned long long sym_addr, char sym_type,
7232 const char *sym_name, void *ctx)
7233{
7234 struct bpf_object *obj = ctx;
7235 const struct btf_type *t;
7236 struct extern_desc *ext;
7237
7238 ext = find_extern_by_name(obj, sym_name);
7239 if (!ext || ext->type != EXT_KSYM)
7240 return 0;
7241
7242 t = btf__type_by_id(obj->btf, ext->btf_id);
7243 if (!btf_is_var(t))
7244 return 0;
7245
7246 if (ext->is_set && ext->ksym.addr != sym_addr) {
7247 pr_warn("extern (ksym) '%s' resolution is ambiguous: 0x%llx or 0x%llx\n",
7248 sym_name, ext->ksym.addr, sym_addr);
7249 return -EINVAL;
7250 }
7251 if (!ext->is_set) {
7252 ext->is_set = true;
7253 ext->ksym.addr = sym_addr;
7254 pr_debug("extern (ksym) %s=0x%llx\n", sym_name, sym_addr);
7255 }
7256 return 0;
7257}
7258
7259static int bpf_object__read_kallsyms_file(struct bpf_object *obj)
7260{
7261 return libbpf_kallsyms_parse(kallsyms_cb, obj);
7262}
7263
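/* Look up a ksym's BTF type ID, searching vmlinux BTF first and then each
 * kernel module's BTF. On success, returns the type ID and sets *res_btf
 * and (for modules) *res_mod_btf.
 */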
7264static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name,
7265 __u16 kind, struct btf **res_btf,
7266 struct module_btf **res_mod_btf)
7267{
7268 struct module_btf *mod_btf;
7269 struct btf *btf;
7270 int i, id, err;
7271
7272 btf = obj->btf_vmlinux;
7273 mod_btf = NULL;
7274 id = btf__find_by_name_kind(btf, ksym_name, kind);
7275
7276 if (id == -ENOENT) {
7277 err = load_module_btfs(obj);
7278 if (err)
7279 return err;
7280
7281 for (i = 0; i < obj->btf_module_cnt; i++) {
			/* fall back to searching kernel module BTFs */
7283 mod_btf = &obj->btf_modules[i];
7284 btf = mod_btf->btf;
7285 id = btf__find_by_name_kind_own(btf, ksym_name, kind);
7286 if (id != -ENOENT)
7287 break;
7288 }
7289 }
7290 if (id <= 0)
7291 return -ESRCH;
7292
7293 *res_btf = btf;
7294 *res_mod_btf = mod_btf;
7295 return id;
7296}
7297
7298static int bpf_object__resolve_ksym_var_btf_id(struct bpf_object *obj,
7299 struct extern_desc *ext)
7300{
7301 const struct btf_type *targ_var, *targ_type;
7302 __u32 targ_type_id, local_type_id;
7303 struct module_btf *mod_btf = NULL;
7304 const char *targ_var_name;
7305 struct btf *btf = NULL;
7306 int id, err;
7307
7308 id = find_ksym_btf_id(obj, ext->name, BTF_KIND_VAR, &btf, &mod_btf);
7309 if (id < 0) {
7310 if (id == -ESRCH && ext->is_weak)
7311 return 0;
7312 pr_warn("extern (var ksym) '%s': not found in kernel BTF\n",
7313 ext->name);
7314 return id;
7315 }
7316
	/* find local type_id */
7318 local_type_id = ext->ksym.type_id;
7319
	/* find target type_id */
7321 targ_var = btf__type_by_id(btf, id);
7322 targ_var_name = btf__name_by_offset(btf, targ_var->name_off);
7323 targ_type = skip_mods_and_typedefs(btf, targ_var->type, &targ_type_id);
7324
7325 err = bpf_core_types_are_compat(obj->btf, local_type_id,
7326 btf, targ_type_id);
7327 if (err <= 0) {
7328 const struct btf_type *local_type;
7329 const char *targ_name, *local_name;
7330
7331 local_type = btf__type_by_id(obj->btf, local_type_id);
7332 local_name = btf__name_by_offset(obj->btf, local_type->name_off);
7333 targ_name = btf__name_by_offset(btf, targ_type->name_off);
7334
7335 pr_warn("extern (var ksym) '%s': incompatible types, expected [%d] %s %s, but kernel has [%d] %s %s\n",
7336 ext->name, local_type_id,
7337 btf_kind_str(local_type), local_name, targ_type_id,
7338 btf_kind_str(targ_type), targ_name);
7339 return -EINVAL;
7340 }
7341
7342 ext->is_set = true;
7343 ext->ksym.kernel_btf_obj_fd = mod_btf ? mod_btf->fd : 0;
7344 ext->ksym.kernel_btf_id = id;
7345 pr_debug("extern (var ksym) '%s': resolved to [%d] %s %s\n",
7346 ext->name, id, btf_kind_str(targ_var), targ_var_name);
7347
7348 return 0;
7349}
7350
7351static int bpf_object__resolve_ksym_func_btf_id(struct bpf_object *obj,
7352 struct extern_desc *ext)
7353{
7354 int local_func_proto_id, kfunc_proto_id, kfunc_id;
7355 struct module_btf *mod_btf = NULL;
7356 const struct btf_type *kern_func;
7357 struct btf *kern_btf = NULL;
7358 int ret;
7359
7360 local_func_proto_id = ext->ksym.type_id;
7361
7362 kfunc_id = find_ksym_btf_id(obj, ext->name, BTF_KIND_FUNC, &kern_btf, &mod_btf);
7363 if (kfunc_id < 0) {
7364 if (kfunc_id == -ESRCH && ext->is_weak)
7365 return 0;
7366 pr_warn("extern (func ksym) '%s': not found in kernel or module BTFs\n",
7367 ext->name);
7368 return kfunc_id;
7369 }
7370
7371 kern_func = btf__type_by_id(kern_btf, kfunc_id);
7372 kfunc_proto_id = kern_func->type;
7373
7374 ret = bpf_core_types_are_compat(obj->btf, local_func_proto_id,
7375 kern_btf, kfunc_proto_id);
7376 if (ret <= 0) {
7377 pr_warn("extern (func ksym) '%s': func_proto [%d] incompatible with kernel [%d]\n",
7378 ext->name, local_func_proto_id, kfunc_proto_id);
7379 return -EINVAL;
7380 }
7381
7382
7383 if (mod_btf && !mod_btf->fd_array_idx) {
		/* insn->off is s16 */
7385 if (obj->fd_array_cnt == INT16_MAX) {
7386 pr_warn("extern (func ksym) '%s': module BTF fd index %d too big to fit in bpf_insn offset\n",
7387 ext->name, mod_btf->fd_array_idx);
7388 return -E2BIG;
7389 }
		/* Cannot use index 0 for module BTF fd */
7391 if (!obj->fd_array_cnt)
7392 obj->fd_array_cnt = 1;
7393
7394 ret = libbpf_ensure_mem((void **)&obj->fd_array, &obj->fd_array_cap, sizeof(int),
7395 obj->fd_array_cnt + 1);
7396 if (ret)
7397 return ret;
7398 mod_btf->fd_array_idx = obj->fd_array_cnt;
7399
7400 obj->fd_array[obj->fd_array_cnt++] = mod_btf->fd;
7401 }
7402
7403 ext->is_set = true;
7404 ext->ksym.kernel_btf_id = kfunc_id;
7405 ext->ksym.btf_fd_idx = mod_btf ? mod_btf->fd_array_idx : 0;
7406 pr_debug("extern (func ksym) '%s': resolved to kernel [%d]\n",
7407 ext->name, kfunc_id);
7408
7409 return 0;
7410}
7411
7412static int bpf_object__resolve_ksyms_btf_id(struct bpf_object *obj)
7413{
7414 const struct btf_type *t;
7415 struct extern_desc *ext;
7416 int i, err;
7417
7418 for (i = 0; i < obj->nr_extern; i++) {
7419 ext = &obj->externs[i];
7420 if (ext->type != EXT_KSYM || !ext->ksym.type_id)
7421 continue;
7422
7423 if (obj->gen_loader) {
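			/* defer resolution to the generated loader program */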
7424 ext->is_set = true;
7425 ext->ksym.kernel_btf_obj_fd = 0;
7426 ext->ksym.kernel_btf_id = 0;
7427 continue;
7428 }
7429 t = btf__type_by_id(obj->btf, ext->btf_id);
7430 if (btf_is_var(t))
7431 err = bpf_object__resolve_ksym_var_btf_id(obj, ext);
7432 else
7433 err = bpf_object__resolve_ksym_func_btf_id(obj, ext);
7434 if (err)
7435 return err;
7436 }
7437 return 0;
7438}
7439
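/* Resolve all extern symbols in the object: kcfg externs from the kernel
 * config (plus any caller-provided overrides), ksym externs via kallsyms or
 * vmlinux/module BTF. Unresolved strong externs fail the load; unresolved
 * weak externs default to zero.
 */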
7440static int bpf_object__resolve_externs(struct bpf_object *obj,
7441 const char *extra_kconfig)
7442{
7443 bool need_config = false, need_kallsyms = false;
7444 bool need_vmlinux_btf = false;
7445 struct extern_desc *ext;
7446 void *kcfg_data = NULL;
7447 int err, i;
7448
7449 if (obj->nr_extern == 0)
7450 return 0;
7451
7452 if (obj->kconfig_map_idx >= 0)
7453 kcfg_data = obj->maps[obj->kconfig_map_idx].mmaped;
7454
7455 for (i = 0; i < obj->nr_extern; i++) {
7456 ext = &obj->externs[i];
7457
7458 if (ext->type == EXT_KCFG &&
7459 strcmp(ext->name, "LINUX_KERNEL_VERSION") == 0) {
7460 void *ext_val = kcfg_data + ext->kcfg.data_off;
7461 __u32 kver = get_kernel_version();
7462
7463 if (!kver) {
7464 pr_warn("failed to get kernel version\n");
7465 return -EINVAL;
7466 }
7467 err = set_kcfg_value_num(ext, ext_val, kver);
7468 if (err)
7469 return err;
7470 pr_debug("extern (kcfg) %s=0x%x\n", ext->name, kver);
7471 } else if (ext->type == EXT_KCFG && str_has_pfx(ext->name, "CONFIG_")) {
7472 need_config = true;
7473 } else if (ext->type == EXT_KSYM) {
7474 if (ext->ksym.type_id)
7475 need_vmlinux_btf = true;
7476 else
7477 need_kallsyms = true;
7478 } else {
7479 pr_warn("unrecognized extern '%s'\n", ext->name);
7480 return -EINVAL;
7481 }
7482 }
7483 if (need_config && extra_kconfig) {
7484 err = bpf_object__read_kconfig_mem(obj, extra_kconfig, kcfg_data);
7485 if (err)
7486 return -EINVAL;
7487 need_config = false;
7488 for (i = 0; i < obj->nr_extern; i++) {
7489 ext = &obj->externs[i];
7490 if (ext->type == EXT_KCFG && !ext->is_set) {
7491 need_config = true;
7492 break;
7493 }
7494 }
7495 }
7496 if (need_config) {
7497 err = bpf_object__read_kconfig_file(obj, kcfg_data);
7498 if (err)
7499 return -EINVAL;
7500 }
7501 if (need_kallsyms) {
7502 err = bpf_object__read_kallsyms_file(obj);
7503 if (err)
7504 return -EINVAL;
7505 }
7506 if (need_vmlinux_btf) {
7507 err = bpf_object__resolve_ksyms_btf_id(obj);
7508 if (err)
7509 return -EINVAL;
7510 }
7511 for (i = 0; i < obj->nr_extern; i++) {
7512 ext = &obj->externs[i];
7513
7514 if (!ext->is_set && !ext->is_weak) {
7515 pr_warn("extern %s (strong) not resolved\n", ext->name);
7516 return -ESRCH;
7517 } else if (!ext->is_set) {
7518 pr_debug("extern %s (weak) not resolved, defaulting to zero\n",
7519 ext->name);
7520 }
7521 }
7522
7523 return 0;
7524}
7525
7526static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const char *target_btf_path)
7527{
7528 int err, i;
7529
7530 if (!obj)
7531 return libbpf_err(-EINVAL);
7532
7533 if (obj->loaded) {
7534 pr_warn("object '%s': load can't be attempted twice\n", obj->name);
7535 return libbpf_err(-EINVAL);
7536 }
7537
7538 if (obj->gen_loader)
7539 bpf_gen__init(obj->gen_loader, extra_log_level, obj->nr_programs, obj->nr_maps);
7540
7541 err = bpf_object__probe_loading(obj);
7542 err = err ? : bpf_object__load_vmlinux_btf(obj, false);
7543 err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
7544 err = err ? : bpf_object__sanitize_and_load_btf(obj);
7545 err = err ? : bpf_object__sanitize_maps(obj);
7546 err = err ? : bpf_object__init_kern_struct_ops_maps(obj);
7547 err = err ? : bpf_object__create_maps(obj);
7548 err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : target_btf_path);
7549 err = err ? : bpf_object__load_progs(obj, extra_log_level);
7550 err = err ? : bpf_object_init_prog_arrays(obj);
7551
7552 if (obj->gen_loader) {
		/* reset FDs */
7554 if (obj->btf)
7555 btf__set_fd(obj->btf, -1);
7556 for (i = 0; i < obj->nr_maps; i++)
7557 obj->maps[i].fd = -1;
7558 if (!err)
7559 err = bpf_gen__finish(obj->gen_loader, obj->nr_programs, obj->nr_maps);
7560 }
7561
	/* clean up fd_array */
7563 zfree(&obj->fd_array);
7564
	/* clean up module BTFs */
7566 for (i = 0; i < obj->btf_module_cnt; i++) {
7567 close(obj->btf_modules[i].fd);
7568 btf__free(obj->btf_modules[i].btf);
7569 free(obj->btf_modules[i].name);
7570 }
7571 free(obj->btf_modules);
7572
	/* clean up vmlinux BTF */
7574 btf__free(obj->btf_vmlinux);
7575 obj->btf_vmlinux = NULL;
7576
	obj->loaded = true; /* doesn't matter if successfully or not */
7578
7579 if (err)
7580 goto out;
7581
7582 return 0;
7583out:
	/* unpin any maps that were auto-pinned during load */
7585 for (i = 0; i < obj->nr_maps; i++)
7586 if (obj->maps[i].pinned && !obj->maps[i].reused)
7587 bpf_map__unpin(&obj->maps[i], NULL);
7588
7589 bpf_object_unload(obj);
7590 pr_warn("failed to load object '%s'\n", obj->path);
7591 return libbpf_err(err);
7592}
7593
7594int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
7595{
7596 return bpf_object_load(attr->obj, attr->log_level, attr->target_btf_path);
7597}
7598
7599int bpf_object__load(struct bpf_object *obj)
7600{
7601 return bpf_object_load(obj, 0, NULL);
7602}
7603
7604static int make_parent_dir(const char *path)
7605{
7606 char *cp, errmsg[STRERR_BUFSIZE];
7607 char *dname, *dir;
7608 int err = 0;
7609
7610 dname = strdup(path);
7611 if (dname == NULL)
7612 return -ENOMEM;
7613
7614 dir = dirname(dname);
7615 if (mkdir(dir, 0700) && errno != EEXIST)
7616 err = -errno;
7617
7618 free(dname);
7619 if (err) {
7620 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
7621 pr_warn("failed to mkdir %s: %s\n", path, cp);
7622 }
7623 return err;
7624}
7625
7626static int check_path(const char *path)
7627{
7628 char *cp, errmsg[STRERR_BUFSIZE];
7629 struct statfs st_fs;
7630 char *dname, *dir;
7631 int err = 0;
7632
7633 if (path == NULL)
7634 return -EINVAL;
7635
7636 dname = strdup(path);
7637 if (dname == NULL)
7638 return -ENOMEM;
7639
7640 dir = dirname(dname);
7641 if (statfs(dir, &st_fs)) {
7642 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
7643 pr_warn("failed to statfs %s: %s\n", dir, cp);
7644 err = -errno;
7645 }
7646 free(dname);
7647
7648 if (!err && st_fs.f_type != BPF_FS_MAGIC) {
7649 pr_warn("specified path %s is not on BPF FS\n", path);
7650 err = -EINVAL;
7651 }
7652
7653 return err;
7654}
7655
7656static int bpf_program_pin_instance(struct bpf_program *prog, const char *path, int instance)
7657{
7658 char *cp, errmsg[STRERR_BUFSIZE];
7659 int err;
7660
7661 err = make_parent_dir(path);
7662 if (err)
7663 return libbpf_err(err);
7664
7665 err = check_path(path);
7666 if (err)
7667 return libbpf_err(err);
7668
7669 if (prog == NULL) {
7670 pr_warn("invalid program pointer\n");
7671 return libbpf_err(-EINVAL);
7672 }
7673
7674 if (instance < 0 || instance >= prog->instances.nr) {
7675 pr_warn("invalid prog instance %d of prog %s (max %d)\n",
7676 instance, prog->name, prog->instances.nr);
7677 return libbpf_err(-EINVAL);
7678 }
7679
7680 if (bpf_obj_pin(prog->instances.fds[instance], path)) {
7681 err = -errno;
7682 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
7683 pr_warn("failed to pin program: %s\n", cp);
7684 return libbpf_err(err);
7685 }
7686 pr_debug("pinned program '%s'\n", path);
7687
7688 return 0;
7689}
7690
7691static int bpf_program_unpin_instance(struct bpf_program *prog, const char *path, int instance)
7692{
7693 int err;
7694
7695 err = check_path(path);
7696 if (err)
7697 return libbpf_err(err);
7698
7699 if (prog == NULL) {
7700 pr_warn("invalid program pointer\n");
7701 return libbpf_err(-EINVAL);
7702 }
7703
7704 if (instance < 0 || instance >= prog->instances.nr) {
7705 pr_warn("invalid prog instance %d of prog %s (max %d)\n",
7706 instance, prog->name, prog->instances.nr);
7707 return libbpf_err(-EINVAL);
7708 }
7709
7710 err = unlink(path);
7711 if (err != 0)
7712 return libbpf_err(-errno);
7713
7714 pr_debug("unpinned program '%s'\n", path);
7715
7716 return 0;
7717}
7718
7719__attribute__((alias("bpf_program_pin_instance")))
int bpf_program__pin_instance(struct bpf_program *prog, const char *path, int instance);
7721
7722__attribute__((alias("bpf_program_unpin_instance")))
7723int bpf_program__unpin_instance(struct bpf_program *prog, const char *path, int instance);
7724
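/* Pin all instances of 'prog' under 'path'. In the common single-instance
 * case, the program is pinned at 'path' itself; with multiple instances,
 * each instance N is pinned at "path/N".
 *
 * Example (sketch; the pin path is illustrative):
 *
 *	err = bpf_program__pin(prog, "/sys/fs/bpf/my_prog");
 */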
7725int bpf_program__pin(struct bpf_program *prog, const char *path)
7726{
7727 int i, err;
7728
7729 err = make_parent_dir(path);
7730 if (err)
7731 return libbpf_err(err);
7732
7733 err = check_path(path);
7734 if (err)
7735 return libbpf_err(err);
7736
7737 if (prog == NULL) {
7738 pr_warn("invalid program pointer\n");
7739 return libbpf_err(-EINVAL);
7740 }
7741
7742 if (prog->instances.nr <= 0) {
7743 pr_warn("no instances of prog %s to pin\n", prog->name);
7744 return libbpf_err(-EINVAL);
7745 }
7746
7747 if (prog->instances.nr == 1) {
		/* don't create subdirs when pinning single instance */
7749 return bpf_program_pin_instance(prog, path, 0);
7750 }
7751
7752 for (i = 0; i < prog->instances.nr; i++) {
7753 char buf[PATH_MAX];
7754 int len;
7755
7756 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
7757 if (len < 0) {
7758 err = -EINVAL;
7759 goto err_unpin;
7760 } else if (len >= PATH_MAX) {
7761 err = -ENAMETOOLONG;
7762 goto err_unpin;
7763 }
7764
7765 err = bpf_program_pin_instance(prog, buf, i);
7766 if (err)
7767 goto err_unpin;
7768 }
7769
7770 return 0;
7771
7772err_unpin:
7773 for (i = i - 1; i >= 0; i--) {
7774 char buf[PATH_MAX];
7775 int len;
7776
7777 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
7778 if (len < 0)
7779 continue;
7780 else if (len >= PATH_MAX)
7781 continue;
7782
7783 bpf_program_unpin_instance(prog, buf, i);
7784 }
7785
7786 rmdir(path);
7787
7788 return libbpf_err(err);
7789}
7790
7791int bpf_program__unpin(struct bpf_program *prog, const char *path)
7792{
7793 int i, err;
7794
7795 err = check_path(path);
7796 if (err)
7797 return libbpf_err(err);
7798
7799 if (prog == NULL) {
7800 pr_warn("invalid program pointer\n");
7801 return libbpf_err(-EINVAL);
7802 }
7803
7804 if (prog->instances.nr <= 0) {
		pr_warn("no instances of prog %s to unpin\n", prog->name);
7806 return libbpf_err(-EINVAL);
7807 }
7808
7809 if (prog->instances.nr == 1) {
		/* don't create subdirs when unpinning single instance */
7811 return bpf_program_unpin_instance(prog, path, 0);
7812 }
7813
7814 for (i = 0; i < prog->instances.nr; i++) {
7815 char buf[PATH_MAX];
7816 int len;
7817
7818 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
7819 if (len < 0)
7820 return libbpf_err(-EINVAL);
7821 else if (len >= PATH_MAX)
7822 return libbpf_err(-ENAMETOOLONG);
7823
7824 err = bpf_program_unpin_instance(prog, buf, i);
7825 if (err)
7826 return err;
7827 }
7828
7829 err = rmdir(path);
7830 if (err)
7831 return libbpf_err(-errno);
7832
7833 return 0;
7834}
7835
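/* Pin 'map' at 'path', or at its previously configured pin_path if 'path'
 * is NULL. Re-pinning an already-pinned map at the same path is a no-op.
 *
 * Example (sketch; the pin path is illustrative):
 *
 *	bpf_map__set_pin_path(map, "/sys/fs/bpf/my_map");
 *	err = bpf_map__pin(map, NULL);
 */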
7836int bpf_map__pin(struct bpf_map *map, const char *path)
7837{
7838 char *cp, errmsg[STRERR_BUFSIZE];
7839 int err;
7840
7841 if (map == NULL) {
7842 pr_warn("invalid map pointer\n");
7843 return libbpf_err(-EINVAL);
7844 }
7845
7846 if (map->pin_path) {
7847 if (path && strcmp(path, map->pin_path)) {
7848 pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
7849 bpf_map__name(map), map->pin_path, path);
7850 return libbpf_err(-EINVAL);
7851 } else if (map->pinned) {
7852 pr_debug("map '%s' already pinned at '%s'; not re-pinning\n",
7853 bpf_map__name(map), map->pin_path);
7854 return 0;
7855 }
7856 } else {
7857 if (!path) {
7858 pr_warn("missing a path to pin map '%s' at\n",
7859 bpf_map__name(map));
7860 return libbpf_err(-EINVAL);
7861 } else if (map->pinned) {
7862 pr_warn("map '%s' already pinned\n", bpf_map__name(map));
7863 return libbpf_err(-EEXIST);
7864 }
7865
7866 map->pin_path = strdup(path);
7867 if (!map->pin_path) {
7868 err = -errno;
7869 goto out_err;
7870 }
7871 }
7872
7873 err = make_parent_dir(map->pin_path);
7874 if (err)
7875 return libbpf_err(err);
7876
7877 err = check_path(map->pin_path);
7878 if (err)
7879 return libbpf_err(err);
7880
7881 if (bpf_obj_pin(map->fd, map->pin_path)) {
7882 err = -errno;
7883 goto out_err;
7884 }
7885
7886 map->pinned = true;
7887 pr_debug("pinned map '%s'\n", map->pin_path);
7888
7889 return 0;
7890
7891out_err:
7892 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
7893 pr_warn("failed to pin map: %s\n", cp);
7894 return libbpf_err(err);
7895}
7896
7897int bpf_map__unpin(struct bpf_map *map, const char *path)
7898{
7899 int err;
7900
7901 if (map == NULL) {
7902 pr_warn("invalid map pointer\n");
7903 return libbpf_err(-EINVAL);
7904 }
7905
7906 if (map->pin_path) {
7907 if (path && strcmp(path, map->pin_path)) {
7908 pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
7909 bpf_map__name(map), map->pin_path, path);
7910 return libbpf_err(-EINVAL);
7911 }
7912 path = map->pin_path;
7913 } else if (!path) {
7914 pr_warn("no path to unpin map '%s' from\n",
7915 bpf_map__name(map));
7916 return libbpf_err(-EINVAL);
7917 }
7918
7919 err = check_path(path);
7920 if (err)
7921 return libbpf_err(err);
7922
7923 err = unlink(path);
7924 if (err != 0)
7925 return libbpf_err(-errno);
7926
7927 map->pinned = false;
7928 pr_debug("unpinned map '%s' from '%s'\n", bpf_map__name(map), path);
7929
7930 return 0;
7931}
7932
7933int bpf_map__set_pin_path(struct bpf_map *map, const char *path)
7934{
7935 char *new = NULL;
7936
7937 if (path) {
7938 new = strdup(path);
7939 if (!new)
7940 return libbpf_err(-errno);
7941 }
7942
7943 free(map->pin_path);
7944 map->pin_path = new;
7945 return 0;
7946}
7947
7948__alias(bpf_map__pin_path)
7949const char *bpf_map__get_pin_path(const struct bpf_map *map);
7950
7951const char *bpf_map__pin_path(const struct bpf_map *map)
7952{
7953 return map->pin_path;
7954}
7955
7956bool bpf_map__is_pinned(const struct bpf_map *map)
7957{
7958 return map->pinned;
7959}
7960
7961static void sanitize_pin_path(char *s)
7962{
	/* bpffs disallows periods in path names */
7964 while (*s) {
7965 if (*s == '.')
7966 *s = '_';
7967 s++;
7968 }
7969}
7970
7971int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
7972{
7973 struct bpf_map *map;
7974 int err;
7975
7976 if (!obj)
7977 return libbpf_err(-ENOENT);
7978
7979 if (!obj->loaded) {
7980 pr_warn("object not yet loaded; load it first\n");
7981 return libbpf_err(-ENOENT);
7982 }
7983
7984 bpf_object__for_each_map(map, obj) {
7985 char *pin_path = NULL;
7986 char buf[PATH_MAX];
7987
7988 if (map->skipped)
7989 continue;
7990
7991 if (path) {
7992 int len;
7993
7994 len = snprintf(buf, PATH_MAX, "%s/%s", path,
7995 bpf_map__name(map));
7996 if (len < 0) {
7997 err = -EINVAL;
7998 goto err_unpin_maps;
7999 } else if (len >= PATH_MAX) {
8000 err = -ENAMETOOLONG;
8001 goto err_unpin_maps;
8002 }
8003 sanitize_pin_path(buf);
8004 pin_path = buf;
8005 } else if (!map->pin_path) {
8006 continue;
8007 }
8008
8009 err = bpf_map__pin(map, pin_path);
8010 if (err)
8011 goto err_unpin_maps;
8012 }
8013
8014 return 0;
8015
8016err_unpin_maps:
8017 while ((map = bpf_object__prev_map(obj, map))) {
8018 if (!map->pin_path)
8019 continue;
8020
8021 bpf_map__unpin(map, NULL);
8022 }
8023
8024 return libbpf_err(err);
8025}
8026
8027int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
8028{
8029 struct bpf_map *map;
8030 int err;
8031
8032 if (!obj)
8033 return libbpf_err(-ENOENT);
8034
8035 bpf_object__for_each_map(map, obj) {
8036 char *pin_path = NULL;
8037 char buf[PATH_MAX];
8038
8039 if (path) {
8040 int len;
8041
8042 len = snprintf(buf, PATH_MAX, "%s/%s", path,
8043 bpf_map__name(map));
8044 if (len < 0)
8045 return libbpf_err(-EINVAL);
8046 else if (len >= PATH_MAX)
8047 return libbpf_err(-ENAMETOOLONG);
8048 sanitize_pin_path(buf);
8049 pin_path = buf;
8050 } else if (!map->pin_path) {
8051 continue;
8052 }
8053
8054 err = bpf_map__unpin(map, pin_path);
8055 if (err)
8056 return libbpf_err(err);
8057 }
8058
8059 return 0;
8060}
8061
8062int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
8063{
8064 struct bpf_program *prog;
8065 int err;
8066
8067 if (!obj)
8068 return libbpf_err(-ENOENT);
8069
8070 if (!obj->loaded) {
8071 pr_warn("object not yet loaded; load it first\n");
8072 return libbpf_err(-ENOENT);
8073 }
8074
8075 bpf_object__for_each_program(prog, obj) {
8076 char buf[PATH_MAX];
8077 int len;
8078
8079 len = snprintf(buf, PATH_MAX, "%s/%s", path,
8080 prog->pin_name);
8081 if (len < 0) {
8082 err = -EINVAL;
8083 goto err_unpin_programs;
8084 } else if (len >= PATH_MAX) {
8085 err = -ENAMETOOLONG;
8086 goto err_unpin_programs;
8087 }
8088
8089 err = bpf_program__pin(prog, buf);
8090 if (err)
8091 goto err_unpin_programs;
8092 }
8093
8094 return 0;
8095
8096err_unpin_programs:
8097 while ((prog = bpf_object__prev_program(obj, prog))) {
8098 char buf[PATH_MAX];
8099 int len;
8100
8101 len = snprintf(buf, PATH_MAX, "%s/%s", path,
8102 prog->pin_name);
8103 if (len < 0)
8104 continue;
8105 else if (len >= PATH_MAX)
8106 continue;
8107
8108 bpf_program__unpin(prog, buf);
8109 }
8110
8111 return libbpf_err(err);
8112}
8113
8114int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
8115{
8116 struct bpf_program *prog;
8117 int err;
8118
8119 if (!obj)
8120 return libbpf_err(-ENOENT);
8121
8122 bpf_object__for_each_program(prog, obj) {
8123 char buf[PATH_MAX];
8124 int len;
8125
8126 len = snprintf(buf, PATH_MAX, "%s/%s", path,
8127 prog->pin_name);
8128 if (len < 0)
8129 return libbpf_err(-EINVAL);
8130 else if (len >= PATH_MAX)
8131 return libbpf_err(-ENAMETOOLONG);
8132
8133 err = bpf_program__unpin(prog, buf);
8134 if (err)
8135 return libbpf_err(err);
8136 }
8137
8138 return 0;
8139}
8140
8141int bpf_object__pin(struct bpf_object *obj, const char *path)
8142{
8143 int err;
8144
8145 err = bpf_object__pin_maps(obj, path);
8146 if (err)
8147 return libbpf_err(err);
8148
8149 err = bpf_object__pin_programs(obj, path);
8150 if (err) {
8151 bpf_object__unpin_maps(obj, path);
8152 return libbpf_err(err);
8153 }
8154
8155 return 0;
8156}
8157
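/* Release all resources owned by the map: inner map, mmap'ed data,
 * struct_ops state, name strings, and the map FD itself.
 */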
8158static void bpf_map__destroy(struct bpf_map *map)
8159{
8160 if (map->clear_priv)
8161 map->clear_priv(map, map->priv);
8162 map->priv = NULL;
8163 map->clear_priv = NULL;
8164
8165 if (map->inner_map) {
8166 bpf_map__destroy(map->inner_map);
8167 zfree(&map->inner_map);
8168 }
8169
8170 zfree(&map->init_slots);
8171 map->init_slots_sz = 0;
8172
8173 if (map->mmaped) {
8174 munmap(map->mmaped, bpf_map_mmap_sz(map));
8175 map->mmaped = NULL;
8176 }
8177
8178 if (map->st_ops) {
8179 zfree(&map->st_ops->data);
8180 zfree(&map->st_ops->progs);
8181 zfree(&map->st_ops->kern_func_off);
8182 zfree(&map->st_ops);
8183 }
8184
8185 zfree(&map->name);
8186 zfree(&map->real_name);
8187 zfree(&map->pin_path);
8188
8189 if (map->fd >= 0)
8190 zclose(map->fd);
8191}
8192
8193void bpf_object__close(struct bpf_object *obj)
8194{
8195 size_t i;
8196
8197 if (IS_ERR_OR_NULL(obj))
8198 return;
8199
8200 if (obj->clear_priv)
8201 obj->clear_priv(obj, obj->priv);
8202
8203 bpf_gen__free(obj->gen_loader);
8204 bpf_object__elf_finish(obj);
8205 bpf_object_unload(obj);
8206 btf__free(obj->btf);
8207 btf_ext__free(obj->btf_ext);
8208
8209 for (i = 0; i < obj->nr_maps; i++)
8210 bpf_map__destroy(&obj->maps[i]);
8211
8212 zfree(&obj->btf_custom_path);
8213 zfree(&obj->kconfig);
8214 zfree(&obj->externs);
8215 obj->nr_extern = 0;
8216
8217 zfree(&obj->maps);
8218 obj->nr_maps = 0;
8219
8220 if (obj->programs && obj->nr_programs) {
8221 for (i = 0; i < obj->nr_programs; i++)
8222 bpf_program__exit(&obj->programs[i]);
8223 }
8224 zfree(&obj->programs);
8225
8226 list_del(&obj->list);
8227 free(obj);
8228}
8229
8230struct bpf_object *
8231bpf_object__next(struct bpf_object *prev)
8232{
8233 struct bpf_object *next;
8234 bool strict = (libbpf_mode & LIBBPF_STRICT_NO_OBJECT_LIST);
8235
8236 if (strict)
8237 return NULL;
8238
8239 if (!prev)
8240 next = list_first_entry(&bpf_objects_list,
8241 struct bpf_object,
8242 list);
8243 else
8244 next = list_next_entry(prev, list);
8245
	/* Empty list is noticed here so don't need checking on entry. */
8247 if (&next->list == &bpf_objects_list)
8248 return NULL;
8249
8250 return next;
8251}
8252
8253const char *bpf_object__name(const struct bpf_object *obj)
8254{
8255 return obj ? obj->name : libbpf_err_ptr(-EINVAL);
8256}
8257
8258unsigned int bpf_object__kversion(const struct bpf_object *obj)
8259{
8260 return obj ? obj->kern_version : 0;
8261}
8262
8263struct btf *bpf_object__btf(const struct bpf_object *obj)
8264{
8265 return obj ? obj->btf : NULL;
8266}
8267
8268int bpf_object__btf_fd(const struct bpf_object *obj)
8269{
8270 return obj->btf ? btf__fd(obj->btf) : -1;
8271}
8272
8273int bpf_object__set_kversion(struct bpf_object *obj, __u32 kern_version)
8274{
8275 if (obj->loaded)
8276 return libbpf_err(-EINVAL);
8277
8278 obj->kern_version = kern_version;
8279
8280 return 0;
8281}
8282
8283int bpf_object__set_priv(struct bpf_object *obj, void *priv,
8284 bpf_object_clear_priv_t clear_priv)
8285{
8286 if (obj->priv && obj->clear_priv)
8287 obj->clear_priv(obj, obj->priv);
8288
8289 obj->priv = priv;
8290 obj->clear_priv = clear_priv;
8291 return 0;
8292}
8293
8294void *bpf_object__priv(const struct bpf_object *obj)
8295{
8296 return obj ? obj->priv : libbpf_err_ptr(-EINVAL);
8297}
8298
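/* Switch the object into loader program generation mode (light skeleton):
 * subsequent load steps are recorded for a generated loader program instead
 * of being executed against the kernel directly.
 */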
8299int bpf_object__gen_loader(struct bpf_object *obj, struct gen_loader_opts *opts)
8300{
8301 struct bpf_gen *gen;
8302
8303 if (!opts)
8304 return -EFAULT;
8305 if (!OPTS_VALID(opts, gen_loader_opts))
8306 return -EINVAL;
8307 gen = calloc(sizeof(*gen), 1);
8308 if (!gen)
8309 return -ENOMEM;
8310 gen->opts = opts;
8311 obj->gen_loader = gen;
8312 return 0;
8313}
8314
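/* Walk obj->programs forward or backward; p == NULL starts the iteration
 * from the first (or last) program.
 */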
8315static struct bpf_program *
8316__bpf_program__iter(const struct bpf_program *p, const struct bpf_object *obj,
8317 bool forward)
8318{
8319 size_t nr_programs = obj->nr_programs;
8320 ssize_t idx;
8321
8322 if (!nr_programs)
8323 return NULL;
8324
8325 if (!p)
		/* Iter from the beginning */
8327 return forward ? &obj->programs[0] :
8328 &obj->programs[nr_programs - 1];
8329
8330 if (p->obj != obj) {
8331 pr_warn("error: program handler doesn't match object\n");
8332 return errno = EINVAL, NULL;
8333 }
8334
8335 idx = (p - obj->programs) + (forward ? 1 : -1);
8336 if (idx >= obj->nr_programs || idx < 0)
8337 return NULL;
8338 return &obj->programs[idx];
8339}
8340
8341struct bpf_program *
8342bpf_program__next(struct bpf_program *prev, const struct bpf_object *obj)
8343{
8344 return bpf_object__next_program(obj, prev);
8345}
8346
8347struct bpf_program *
8348bpf_object__next_program(const struct bpf_object *obj, struct bpf_program *prev)
8349{
8350 struct bpf_program *prog = prev;
8351
8352 do {
8353 prog = __bpf_program__iter(prog, obj, true);
8354 } while (prog && prog_is_subprog(obj, prog));
8355
8356 return prog;
8357}
8358
8359struct bpf_program *
8360bpf_program__prev(struct bpf_program *next, const struct bpf_object *obj)
8361{
8362 return bpf_object__prev_program(obj, next);
8363}
8364
8365struct bpf_program *
8366bpf_object__prev_program(const struct bpf_object *obj, struct bpf_program *next)
8367{
8368 struct bpf_program *prog = next;
8369
8370 do {
8371 prog = __bpf_program__iter(prog, obj, false);
8372 } while (prog && prog_is_subprog(obj, prog));
8373
8374 return prog;
8375}
8376
8377int bpf_program__set_priv(struct bpf_program *prog, void *priv,
8378 bpf_program_clear_priv_t clear_priv)
8379{
8380 if (prog->priv && prog->clear_priv)
8381 prog->clear_priv(prog, prog->priv);
8382
8383 prog->priv = priv;
8384 prog->clear_priv = clear_priv;
8385 return 0;
8386}
8387
8388void *bpf_program__priv(const struct bpf_program *prog)
8389{
8390 return prog ? prog->priv : libbpf_err_ptr(-EINVAL);
8391}
8392
8393void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
8394{
8395 prog->prog_ifindex = ifindex;
8396}
8397
8398const char *bpf_program__name(const struct bpf_program *prog)
8399{
8400 return prog->name;
8401}
8402
8403const char *bpf_program__section_name(const struct bpf_program *prog)
8404{
8405 return prog->sec_name;
8406}
8407
8408const char *bpf_program__title(const struct bpf_program *prog, bool needs_copy)
8409{
8410 const char *title;
8411
8412 title = prog->sec_name;
8413 if (needs_copy) {
8414 title = strdup(title);
8415 if (!title) {
8416 pr_warn("failed to strdup program title\n");
8417 return libbpf_err_ptr(-ENOMEM);
8418 }
8419 }
8420
8421 return title;
8422}
8423
8424bool bpf_program__autoload(const struct bpf_program *prog)
8425{
8426 return prog->load;
8427}
8428
8429int bpf_program__set_autoload(struct bpf_program *prog, bool autoload)
8430{
8431 if (prog->obj->loaded)
8432 return libbpf_err(-EINVAL);
8433
8434 prog->load = autoload;
8435 return 0;
8436}
8437
8438static int bpf_program_nth_fd(const struct bpf_program *prog, int n);
8439
8440int bpf_program__fd(const struct bpf_program *prog)
8441{
8442 return bpf_program_nth_fd(prog, 0);
8443}
8444
8445size_t bpf_program__size(const struct bpf_program *prog)
8446{
8447 return prog->insns_cnt * BPF_INSN_SZ;
8448}
8449
8450const struct bpf_insn *bpf_program__insns(const struct bpf_program *prog)
8451{
8452 return prog->insns;
8453}
8454
8455size_t bpf_program__insn_cnt(const struct bpf_program *prog)
8456{
8457 return prog->insns_cnt;
8458}
8459
8460int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
8461 bpf_program_prep_t prep)
8462{
8463 int *instances_fds;
8464
8465 if (nr_instances <= 0 || !prep)
8466 return libbpf_err(-EINVAL);
8467
8468 if (prog->instances.nr > 0 || prog->instances.fds) {
8469 pr_warn("Can't set pre-processor after loading\n");
8470 return libbpf_err(-EINVAL);
8471 }
8472
8473 instances_fds = malloc(sizeof(int) * nr_instances);
8474 if (!instances_fds) {
		pr_warn("failed to allocate memory for instance fds\n");
8476 return libbpf_err(-ENOMEM);
8477 }
8478
	/* fill all fds with -1 */
8480 memset(instances_fds, -1, sizeof(int) * nr_instances);
8481
8482 prog->instances.nr = nr_instances;
8483 prog->instances.fds = instances_fds;
8484 prog->preprocessor = prep;
8485 return 0;
8486}
8487
8488__attribute__((alias("bpf_program_nth_fd")))
8489int bpf_program__nth_fd(const struct bpf_program *prog, int n);
8490
8491static int bpf_program_nth_fd(const struct bpf_program *prog, int n)
8492{
8493 int fd;
8494
8495 if (!prog)
8496 return libbpf_err(-EINVAL);
8497
8498 if (n >= prog->instances.nr || n < 0) {
8499 pr_warn("Can't get the %dth fd from program %s: only %d instances\n",
8500 n, prog->name, prog->instances.nr);
8501 return libbpf_err(-EINVAL);
8502 }
8503
8504 fd = prog->instances.fds[n];
8505 if (fd < 0) {
8506 pr_warn("%dth instance of program '%s' is invalid\n",
8507 n, prog->name);
8508 return libbpf_err(-ENOENT);
8509 }
8510
8511 return fd;
8512}
8513
8514__alias(bpf_program__type)
8515enum bpf_prog_type bpf_program__get_type(const struct bpf_program *prog);
8516
8517enum bpf_prog_type bpf_program__type(const struct bpf_program *prog)
8518{
8519 return prog->type;
8520}
8521
8522void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
8523{
8524 prog->type = type;
8525}
8526
8527static bool bpf_program__is_type(const struct bpf_program *prog,
8528 enum bpf_prog_type type)
8529{
8530 return prog ? (prog->type == type) : false;
8531}
8532
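/* Generate the deprecated bpf_program__set_<type>() and
 * bpf_program__is_<type>() accessor pairs for each program type below.
 */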
8533#define BPF_PROG_TYPE_FNS(NAME, TYPE) \
8534int bpf_program__set_##NAME(struct bpf_program *prog) \
8535{ \
8536 if (!prog) \
8537 return libbpf_err(-EINVAL); \
8538 bpf_program__set_type(prog, TYPE); \
8539 return 0; \
8540} \
8541 \
8542bool bpf_program__is_##NAME(const struct bpf_program *prog) \
8543{ \
8544 return bpf_program__is_type(prog, TYPE); \
8545} \
8546
8547BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
8548BPF_PROG_TYPE_FNS(lsm, BPF_PROG_TYPE_LSM);
8549BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
8550BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS);
8551BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT);
8552BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
8553BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
8554BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
8555BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
8556BPF_PROG_TYPE_FNS(tracing, BPF_PROG_TYPE_TRACING);
8557BPF_PROG_TYPE_FNS(struct_ops, BPF_PROG_TYPE_STRUCT_OPS);
8558BPF_PROG_TYPE_FNS(extension, BPF_PROG_TYPE_EXT);
8559BPF_PROG_TYPE_FNS(sk_lookup, BPF_PROG_TYPE_SK_LOOKUP);
8560
8561__alias(bpf_program__expected_attach_type)
8562enum bpf_attach_type bpf_program__get_expected_attach_type(const struct bpf_program *prog);
8563
8564enum bpf_attach_type bpf_program__expected_attach_type(const struct bpf_program *prog)
8565{
8566 return prog->expected_attach_type;
8567}
8568
8569void bpf_program__set_expected_attach_type(struct bpf_program *prog,
8570 enum bpf_attach_type type)
8571{
8572 prog->expected_attach_type = type;
8573}
8574
8575__u32 bpf_program__flags(const struct bpf_program *prog)
8576{
8577 return prog->prog_flags;
8578}
8579
8580int bpf_program__set_flags(struct bpf_program *prog, __u32 flags)
8581{
8582 if (prog->obj->loaded)
8583 return libbpf_err(-EBUSY);
8584
8585 prog->prog_flags = flags;
8586 return 0;
8587}
8588
8589__u32 bpf_program__log_level(const struct bpf_program *prog)
8590{
8591 return prog->log_level;
8592}
8593
8594int bpf_program__set_log_level(struct bpf_program *prog, __u32 log_level)
8595{
8596 if (prog->obj->loaded)
8597 return libbpf_err(-EBUSY);
8598
8599 prog->log_level = log_level;
8600 return 0;
8601}
8602
8603const char *bpf_program__log_buf(const struct bpf_program *prog, size_t *log_size)
8604{
8605 *log_size = prog->log_size;
8606 return prog->log_buf;
8607}
8608
8609int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log_size)
8610{
8611 if (log_size && !log_buf)
8612 return -EINVAL;
	if (log_size > UINT_MAX)
8614 return -EINVAL;
8615 if (prog->obj->loaded)
8616 return -EBUSY;
8617
8618 prog->log_buf = log_buf;
8619 prog->log_size = log_size;
8620 return 0;
8621}
8622
8623#define SEC_DEF(sec_pfx, ptype, atype, flags, ...) { \
8624 .sec = (char *)sec_pfx, \
8625 .prog_type = BPF_PROG_TYPE_##ptype, \
8626 .expected_attach_type = atype, \
8627 .cookie = (long)(flags), \
8628 .prog_prepare_load_fn = libbpf_prepare_prog_load, \
8629 __VA_ARGS__ \
8630}
8631
8632static int attach_kprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link);
8633static int attach_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link);
8634static int attach_raw_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link);
8635static int attach_trace(const struct bpf_program *prog, long cookie, struct bpf_link **link);
8636static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link);
8637static int attach_lsm(const struct bpf_program *prog, long cookie, struct bpf_link **link);
8638static int attach_iter(const struct bpf_program *prog, long cookie, struct bpf_link **link);
8639
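/* Table mapping ELF section name prefixes to program type, expected attach
 * type, and handling flags. A name ending in '/' must be followed by an
 * attach target (e.g. SEC("kprobe/do_unlinkat")); one ending in '+' may be
 * used bare or with a '/'-separated suffix; SEC_SLOPPY_PFX additionally
 * tolerates legacy sloppy prefix matches.
 */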
8640static const struct bpf_sec_def section_defs[] = {
8641 SEC_DEF("socket", SOCKET_FILTER, 0, SEC_NONE | SEC_SLOPPY_PFX),
8642 SEC_DEF("sk_reuseport/migrate", SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT_OR_MIGRATE, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
8643 SEC_DEF("sk_reuseport", SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
8644 SEC_DEF("kprobe/", KPROBE, 0, SEC_NONE, attach_kprobe),
8645 SEC_DEF("uprobe/", KPROBE, 0, SEC_NONE),
8646 SEC_DEF("kretprobe/", KPROBE, 0, SEC_NONE, attach_kprobe),
8647 SEC_DEF("uretprobe/", KPROBE, 0, SEC_NONE),
8648 SEC_DEF("kprobe.multi/", KPROBE, BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi),
8649 SEC_DEF("kretprobe.multi/", KPROBE, BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi),
8650 SEC_DEF("tc", SCHED_CLS, 0, SEC_NONE),
8651 SEC_DEF("classifier", SCHED_CLS, 0, SEC_NONE | SEC_SLOPPY_PFX | SEC_DEPRECATED),
8652 SEC_DEF("action", SCHED_ACT, 0, SEC_NONE | SEC_SLOPPY_PFX),
8653 SEC_DEF("tracepoint/", TRACEPOINT, 0, SEC_NONE, attach_tp),
8654 SEC_DEF("tp/", TRACEPOINT, 0, SEC_NONE, attach_tp),
8655 SEC_DEF("raw_tracepoint/", RAW_TRACEPOINT, 0, SEC_NONE, attach_raw_tp),
8656 SEC_DEF("raw_tp/", RAW_TRACEPOINT, 0, SEC_NONE, attach_raw_tp),
	SEC_DEF("raw_tracepoint.w/", RAW_TRACEPOINT_WRITABLE, 0, SEC_NONE, attach_raw_tp),
	SEC_DEF("raw_tp.w/", RAW_TRACEPOINT_WRITABLE, 0, SEC_NONE, attach_raw_tp),
	SEC_DEF("tp_btf/", TRACING, BPF_TRACE_RAW_TP, SEC_ATTACH_BTF, attach_trace),
	SEC_DEF("fentry/", TRACING, BPF_TRACE_FENTRY, SEC_ATTACH_BTF, attach_trace),
	SEC_DEF("fmod_ret/", TRACING, BPF_MODIFY_RETURN, SEC_ATTACH_BTF, attach_trace),
	SEC_DEF("fexit/", TRACING, BPF_TRACE_FEXIT, SEC_ATTACH_BTF, attach_trace),
	SEC_DEF("fentry.s/", TRACING, BPF_TRACE_FENTRY, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
	SEC_DEF("fmod_ret.s/", TRACING, BPF_MODIFY_RETURN, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
	SEC_DEF("fexit.s/", TRACING, BPF_TRACE_FEXIT, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
	SEC_DEF("freplace/", EXT, 0, SEC_ATTACH_BTF, attach_trace),
	SEC_DEF("lsm/", LSM, BPF_LSM_MAC, SEC_ATTACH_BTF, attach_lsm),
	SEC_DEF("lsm.s/", LSM, BPF_LSM_MAC, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_lsm),
	SEC_DEF("iter/", TRACING, BPF_TRACE_ITER, SEC_ATTACH_BTF, attach_iter),
	SEC_DEF("iter.s/", TRACING, BPF_TRACE_ITER, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_iter),
	SEC_DEF("syscall", SYSCALL, 0, SEC_SLEEPABLE),
	SEC_DEF("xdp.frags/devmap", XDP, BPF_XDP_DEVMAP, SEC_XDP_FRAGS),
	SEC_DEF("xdp/devmap", XDP, BPF_XDP_DEVMAP, SEC_ATTACHABLE),
	SEC_DEF("xdp_devmap/", XDP, BPF_XDP_DEVMAP, SEC_ATTACHABLE | SEC_DEPRECATED),
	SEC_DEF("xdp.frags/cpumap", XDP, BPF_XDP_CPUMAP, SEC_XDP_FRAGS),
	SEC_DEF("xdp/cpumap", XDP, BPF_XDP_CPUMAP, SEC_ATTACHABLE),
	SEC_DEF("xdp_cpumap/", XDP, BPF_XDP_CPUMAP, SEC_ATTACHABLE | SEC_DEPRECATED),
	SEC_DEF("xdp.frags", XDP, BPF_XDP, SEC_XDP_FRAGS),
	SEC_DEF("xdp", XDP, BPF_XDP, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
	SEC_DEF("perf_event", PERF_EVENT, 0, SEC_NONE | SEC_SLOPPY_PFX),
	SEC_DEF("lwt_in", LWT_IN, 0, SEC_NONE | SEC_SLOPPY_PFX),
	SEC_DEF("lwt_out", LWT_OUT, 0, SEC_NONE | SEC_SLOPPY_PFX),
	SEC_DEF("lwt_xmit", LWT_XMIT, 0, SEC_NONE | SEC_SLOPPY_PFX),
	SEC_DEF("lwt_seg6local", LWT_SEG6LOCAL, 0, SEC_NONE | SEC_SLOPPY_PFX),
	SEC_DEF("cgroup_skb/ingress", CGROUP_SKB, BPF_CGROUP_INET_INGRESS, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
	SEC_DEF("cgroup_skb/egress", CGROUP_SKB, BPF_CGROUP_INET_EGRESS, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
	SEC_DEF("cgroup/skb", CGROUP_SKB, 0, SEC_NONE | SEC_SLOPPY_PFX),
	SEC_DEF("cgroup/sock_create", CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
	SEC_DEF("cgroup/sock_release", CGROUP_SOCK, BPF_CGROUP_INET_SOCK_RELEASE, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
	SEC_DEF("cgroup/sock", CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
	SEC_DEF("cgroup/post_bind4", CGROUP_SOCK, BPF_CGROUP_INET4_POST_BIND, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
	SEC_DEF("cgroup/post_bind6", CGROUP_SOCK, BPF_CGROUP_INET6_POST_BIND, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
	SEC_DEF("cgroup/dev", CGROUP_DEVICE, BPF_CGROUP_DEVICE, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
	SEC_DEF("sockops", SOCK_OPS, BPF_CGROUP_SOCK_OPS, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
	SEC_DEF("sk_skb/stream_parser", SK_SKB, BPF_SK_SKB_STREAM_PARSER, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
	SEC_DEF("sk_skb/stream_verdict", SK_SKB, BPF_SK_SKB_STREAM_VERDICT, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
	SEC_DEF("sk_skb", SK_SKB, 0, SEC_NONE | SEC_SLOPPY_PFX),
	SEC_DEF("sk_msg", SK_MSG, BPF_SK_MSG_VERDICT, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
	SEC_DEF("lirc_mode2", LIRC_MODE2, BPF_LIRC_MODE2, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
	SEC_DEF("flow_dissector", FLOW_DISSECTOR, BPF_FLOW_DISSECTOR, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
	SEC_DEF("cgroup/bind4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_BIND, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
	SEC_DEF("cgroup/bind6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_BIND, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
	SEC_DEF("cgroup/connect4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_CONNECT, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
	SEC_DEF("cgroup/connect6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_CONNECT, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
	SEC_DEF("cgroup/sendmsg4", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_SENDMSG, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
	SEC_DEF("cgroup/sendmsg6", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_SENDMSG, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
	SEC_DEF("cgroup/recvmsg4", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_RECVMSG, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
	SEC_DEF("cgroup/recvmsg6", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_RECVMSG, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
	SEC_DEF("cgroup/getpeername4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_GETPEERNAME, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
	SEC_DEF("cgroup/getpeername6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_GETPEERNAME, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
	SEC_DEF("cgroup/getsockname4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_GETSOCKNAME, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
	SEC_DEF("cgroup/getsockname6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_GETSOCKNAME, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
	SEC_DEF("cgroup/sysctl", CGROUP_SYSCTL, BPF_CGROUP_SYSCTL, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
	SEC_DEF("cgroup/getsockopt", CGROUP_SOCKOPT, BPF_CGROUP_GETSOCKOPT, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
	SEC_DEF("cgroup/setsockopt", CGROUP_SOCKOPT, BPF_CGROUP_SETSOCKOPT, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
	SEC_DEF("struct_ops+", STRUCT_OPS, 0, SEC_NONE),
	SEC_DEF("sk_lookup", SK_LOOKUP, BPF_SK_LOOKUP, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
};

static size_t custom_sec_def_cnt;
static struct bpf_sec_def *custom_sec_defs;
static struct bpf_sec_def custom_fallback_def;
static bool has_custom_fallback_def;

static int last_custom_sec_def_handler_id;

int libbpf_register_prog_handler(const char *sec,
				 enum bpf_prog_type prog_type,
				 enum bpf_attach_type exp_attach_type,
				 const struct libbpf_prog_handler_opts *opts)
{
	struct bpf_sec_def *sec_def;

	if (!OPTS_VALID(opts, libbpf_prog_handler_opts))
		return libbpf_err(-EINVAL);

	if (last_custom_sec_def_handler_id == INT_MAX) /* prevent overflow */
		return libbpf_err(-E2BIG);

	if (sec) {
		sec_def = libbpf_reallocarray(custom_sec_defs, custom_sec_def_cnt + 1,
					      sizeof(*sec_def));
		if (!sec_def)
			return libbpf_err(-ENOMEM);

		custom_sec_defs = sec_def;
		sec_def = &custom_sec_defs[custom_sec_def_cnt];
	} else {
		if (has_custom_fallback_def)
			return libbpf_err(-EBUSY);

		sec_def = &custom_fallback_def;
	}

	sec_def->sec = sec ? strdup(sec) : NULL;
	if (sec && !sec_def->sec)
		return libbpf_err(-ENOMEM);

	sec_def->prog_type = prog_type;
	sec_def->expected_attach_type = exp_attach_type;
	sec_def->cookie = OPTS_GET(opts, cookie, 0);

	sec_def->prog_setup_fn = OPTS_GET(opts, prog_setup_fn, NULL);
	sec_def->prog_prepare_load_fn = OPTS_GET(opts, prog_prepare_load_fn, NULL);
	sec_def->prog_attach_fn = OPTS_GET(opts, prog_attach_fn, NULL);

	sec_def->handler_id = ++last_custom_sec_def_handler_id;

	if (sec)
		custom_sec_def_cnt++;
	else
		has_custom_fallback_def = true;

	return sec_def->handler_id;
}
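
/*
 * Usage sketch (illustrative, not part of libbpf itself): registering a
 * custom handler for a hypothetical SEC("mysec/...") prefix. The callback
 * name "my_attach_fn" is an assumption for the example:
 *
 *	static int my_attach_fn(const struct bpf_program *prog, long cookie,
 *				struct bpf_link **link);
 *
 *	LIBBPF_OPTS(libbpf_prog_handler_opts, opts, .prog_attach_fn = my_attach_fn);
 *	int id = libbpf_register_prog_handler("mysec/", BPF_PROG_TYPE_KPROBE, 0, &opts);
 *
 *	if (id > 0)
 *		...open/load objects containing SEC("mysec/...") programs...
 *	libbpf_unregister_prog_handler(id);
 */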

int libbpf_unregister_prog_handler(int handler_id)
{
	struct bpf_sec_def *sec_defs;
	int i;

	if (handler_id <= 0)
		return libbpf_err(-EINVAL);

	if (has_custom_fallback_def && custom_fallback_def.handler_id == handler_id) {
		memset(&custom_fallback_def, 0, sizeof(custom_fallback_def));
		has_custom_fallback_def = false;
		return 0;
	}

	for (i = 0; i < custom_sec_def_cnt; i++) {
		if (custom_sec_defs[i].handler_id == handler_id)
			break;
	}

	if (i == custom_sec_def_cnt)
		return libbpf_err(-ENOENT);

	free(custom_sec_defs[i].sec);
	for (i = i + 1; i < custom_sec_def_cnt; i++)
		custom_sec_defs[i - 1] = custom_sec_defs[i];
	custom_sec_def_cnt--;

	/* try to shrink the array, but it's ok if we couldn't */
	sec_defs = libbpf_reallocarray(custom_sec_defs, custom_sec_def_cnt, sizeof(*sec_defs));
	if (sec_defs)
		custom_sec_defs = sec_defs;

	return 0;
}

static bool sec_def_matches(const struct bpf_sec_def *sec_def, const char *sec_name,
			    bool allow_sloppy)
{
	size_t len = strlen(sec_def->sec);

	/* "type/" always has to have proper SEC("type/extras") form */
	if (sec_def->sec[len - 1] == '/') {
		if (str_has_pfx(sec_name, sec_def->sec))
			return true;
		return false;
	}

	/* "type+" means it can be either exact SEC("type") or
	 * well-formed SEC("type/extras") with proper '/' separator
	 */
	if (sec_def->sec[len - 1] == '+') {
		len--;
		/* not even a prefix */
		if (strncmp(sec_name, sec_def->sec, len) != 0)
			return false;
		/* exact match or has '/' separator */
		if (sec_name[len] == '\0' || sec_name[len] == '/')
			return true;
		return false;
	}

	/* SEC_SLOPPY_PFX definitions are allowed to be just prefix
	 * matches, unless strict section name mode
	 * (LIBBPF_STRICT_SEC_NAME) is enabled, in which case the
	 * match has to be exact.
	 */
	if (allow_sloppy && str_has_pfx(sec_name, sec_def->sec))
		return true;

	/* Definitions not marked SEC_SLOPPY_PFX (e.g., SEC("syscall"))
	 * require an exact match in both modes.
	 */
	return strcmp(sec_name, sec_def->sec) == 0;
}

static const struct bpf_sec_def *find_sec_def(const char *sec_name)
{
	const struct bpf_sec_def *sec_def;
	int i, n;
	bool strict = libbpf_mode & LIBBPF_STRICT_SEC_NAME, allow_sloppy;

	n = custom_sec_def_cnt;
	for (i = 0; i < n; i++) {
		sec_def = &custom_sec_defs[i];
		if (sec_def_matches(sec_def, sec_name, false))
			return sec_def;
	}

	n = ARRAY_SIZE(section_defs);
	for (i = 0; i < n; i++) {
		sec_def = &section_defs[i];
		allow_sloppy = (sec_def->cookie & SEC_SLOPPY_PFX) && !strict;
		if (sec_def_matches(sec_def, sec_name, allow_sloppy))
			return sec_def;
	}

	if (has_custom_fallback_def)
		return &custom_fallback_def;

	return NULL;
}

#define MAX_TYPE_NAME_SIZE 32

static char *libbpf_get_type_names(bool attach_type)
{
	int i, len = ARRAY_SIZE(section_defs) * MAX_TYPE_NAME_SIZE;
	char *buf;

	buf = malloc(len);
	if (!buf)
		return NULL;

	buf[0] = '\0';
	/* Forge string buf with all available names */
	for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
		const struct bpf_sec_def *sec_def = &section_defs[i];

		if (attach_type) {
			if (sec_def->prog_prepare_load_fn != libbpf_prepare_prog_load)
				continue;

			if (!(sec_def->cookie & SEC_ATTACHABLE))
				continue;
		}

		if (strlen(buf) + strlen(section_defs[i].sec) + 2 > len) {
			free(buf);
			return NULL;
		}
		strcat(buf, " ");
		strcat(buf, section_defs[i].sec);
	}

	return buf;
}

int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
			     enum bpf_attach_type *expected_attach_type)
{
	const struct bpf_sec_def *sec_def;
	char *type_names;

	if (!name)
		return libbpf_err(-EINVAL);

	sec_def = find_sec_def(name);
	if (sec_def) {
		*prog_type = sec_def->prog_type;
		*expected_attach_type = sec_def->expected_attach_type;
		return 0;
	}

	pr_debug("failed to guess program type from ELF section '%s'\n", name);
	type_names = libbpf_get_type_names(false);
	if (type_names != NULL) {
		pr_debug("supported section(type) names are:%s\n", type_names);
		free(type_names);
	}

	return libbpf_err(-ESRCH);
}
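
/*
 * Usage sketch (illustrative): mapping a section name to program/attach
 * types, e.g. from a hand-rolled loader:
 *
 *	enum bpf_prog_type prog_type;
 *	enum bpf_attach_type attach_type;
 *
 *	if (!libbpf_prog_type_by_name("cgroup/connect4", &prog_type, &attach_type))
 *		...prog_type == BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
 *		   attach_type == BPF_CGROUP_INET4_CONNECT, per the table above...
 */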

static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj,
						     size_t offset)
{
	struct bpf_map *map;
	size_t i;

	for (i = 0; i < obj->nr_maps; i++) {
		map = &obj->maps[i];
		if (!bpf_map__is_struct_ops(map))
			continue;
		if (map->sec_offset <= offset &&
		    offset - map->sec_offset < map->def.value_size)
			return map;
	}

	return NULL;
}

/* Collect the reloc from ELF and populate the st_ops->progs[] */
static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
					    Elf64_Shdr *shdr, Elf_Data *data)
{
	const struct btf_member *member;
	struct bpf_struct_ops *st_ops;
	struct bpf_program *prog;
	unsigned int shdr_idx;
	const struct btf *btf;
	struct bpf_map *map;
	unsigned int moff, insn_idx;
	const char *name;
	__u32 member_idx;
	Elf64_Sym *sym;
	Elf64_Rel *rel;
	int i, nrels;

	btf = obj->btf;
	nrels = shdr->sh_size / shdr->sh_entsize;
	for (i = 0; i < nrels; i++) {
		rel = elf_rel_by_idx(data, i);
		if (!rel) {
			pr_warn("struct_ops reloc: failed to get %d reloc\n", i);
			return -LIBBPF_ERRNO__FORMAT;
		}

		sym = elf_sym_by_idx(obj, ELF64_R_SYM(rel->r_info));
		if (!sym) {
			pr_warn("struct_ops reloc: symbol %zx not found\n",
				(size_t)ELF64_R_SYM(rel->r_info));
			return -LIBBPF_ERRNO__FORMAT;
		}

		name = elf_sym_str(obj, sym->st_name) ?: "<?>";
		map = find_struct_ops_map_by_offset(obj, rel->r_offset);
		if (!map) {
			pr_warn("struct_ops reloc: cannot find map at rel->r_offset %zu\n",
				(size_t)rel->r_offset);
			return -EINVAL;
		}

		moff = rel->r_offset - map->sec_offset;
		shdr_idx = sym->st_shndx;
		st_ops = map->st_ops;
		pr_debug("struct_ops reloc %s: for %lld value %lld shdr_idx %u rel->r_offset %zu map->sec_offset %zu name %d (\'%s\')\n",
			 map->name,
			 (long long)(rel->r_info >> 32),
			 (long long)sym->st_value,
			 shdr_idx, (size_t)rel->r_offset,
			 map->sec_offset, sym->st_name, name);

		if (shdr_idx >= SHN_LORESERVE) {
			pr_warn("struct_ops reloc %s: rel->r_offset %zu shdr_idx %u unsupported non-static function\n",
				map->name, (size_t)rel->r_offset, shdr_idx);
			return -LIBBPF_ERRNO__RELOC;
		}
		if (sym->st_value % BPF_INSN_SZ) {
			pr_warn("struct_ops reloc %s: invalid target program offset %llu\n",
				map->name, (unsigned long long)sym->st_value);
			return -LIBBPF_ERRNO__FORMAT;
		}
		insn_idx = sym->st_value / BPF_INSN_SZ;

		member = find_member_by_offset(st_ops->type, moff * 8);
		if (!member) {
			pr_warn("struct_ops reloc %s: cannot find member at moff %u\n",
				map->name, moff);
			return -EINVAL;
		}
		member_idx = member - btf_members(st_ops->type);
		name = btf__name_by_offset(btf, member->name_off);

		if (!resolve_func_ptr(btf, member->type, NULL)) {
			pr_warn("struct_ops reloc %s: cannot relocate non func ptr %s\n",
				map->name, name);
			return -EINVAL;
		}

		prog = find_prog_by_sec_insn(obj, shdr_idx, insn_idx);
		if (!prog) {
			pr_warn("struct_ops reloc %s: cannot find prog at shdr_idx %u to relocate func ptr %s\n",
				map->name, shdr_idx, name);
			return -EINVAL;
		}

		/* prevent the use of BPF prog with invalid type */
		if (prog->type != BPF_PROG_TYPE_STRUCT_OPS) {
			pr_warn("struct_ops reloc %s: prog %s is not struct_ops BPF program\n",
				map->name, prog->name);
			return -EINVAL;
		}

		/* if we haven't yet processed this BPF program, record proper
		 * attach_btf_id and member_idx
		 */
		if (!prog->attach_btf_id) {
			prog->attach_btf_id = st_ops->type_id;
			prog->expected_attach_type = member_idx;
		}

		/* struct_ops BPF prog can be re-used between multiple
		 * .struct_ops as long as it's the same struct_ops struct
		 * definition and the same function pointer field
		 */
		if (prog->attach_btf_id != st_ops->type_id ||
		    prog->expected_attach_type != member_idx) {
			pr_warn("struct_ops reloc %s: cannot use prog %s in sec %s with type %u attach_btf_id %u expected_attach_type %u for func ptr %s\n",
				map->name, prog->name, prog->sec_name, prog->type,
				prog->attach_btf_id, prog->expected_attach_type, name);
			return -EINVAL;
		}

		st_ops->progs[member_idx] = prog;
	}

	return 0;
}

#define BTF_TRACE_PREFIX "btf_trace_"
#define BTF_LSM_PREFIX "bpf_lsm_"
#define BTF_ITER_PREFIX "bpf_iter_"
#define BTF_MAX_NAME_SIZE 128

void btf_get_kernel_prefix_kind(enum bpf_attach_type attach_type,
				const char **prefix, int *kind)
{
	switch (attach_type) {
	case BPF_TRACE_RAW_TP:
		*prefix = BTF_TRACE_PREFIX;
		*kind = BTF_KIND_TYPEDEF;
		break;
	case BPF_LSM_MAC:
		*prefix = BTF_LSM_PREFIX;
		*kind = BTF_KIND_FUNC;
		break;
	case BPF_TRACE_ITER:
		*prefix = BTF_ITER_PREFIX;
		*kind = BTF_KIND_FUNC;
		break;
	default:
		*prefix = "";
		*kind = BTF_KIND_FUNC;
	}
}
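
/*
 * Examples of the resulting lookups (derived from the mapping above): for
 * BPF_TRACE_RAW_TP and target "sched_switch", the BTF is searched for a
 * BTF_KIND_TYPEDEF named "btf_trace_sched_switch"; for BPF_LSM_MAC and
 * target "file_open", for a BTF_KIND_FUNC named "bpf_lsm_file_open".
 */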

static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
				   const char *name, __u32 kind)
{
	char btf_type_name[BTF_MAX_NAME_SIZE];
	int ret;

	ret = snprintf(btf_type_name, sizeof(btf_type_name),
		       "%s%s", prefix, name);
	/* snprintf returns the number of characters written excluding the
	 * terminating null. So, if >= BTF_MAX_NAME_SIZE are written, it
	 * means truncation happened and the name can't possibly be found.
	 */
	if (ret < 0 || ret >= sizeof(btf_type_name))
		return -ENAMETOOLONG;
	return btf__find_by_name_kind(btf, btf_type_name, kind);
}

static inline int find_attach_btf_id(struct btf *btf, const char *name,
				     enum bpf_attach_type attach_type)
{
	const char *prefix;
	int kind;

	btf_get_kernel_prefix_kind(attach_type, &prefix, &kind);
	return find_btf_by_prefix_kind(btf, prefix, name, kind);
}

int libbpf_find_vmlinux_btf_id(const char *name,
			       enum bpf_attach_type attach_type)
{
	struct btf *btf;
	int err;

	btf = btf__load_vmlinux_btf();
	err = libbpf_get_error(btf);
	if (err) {
		pr_warn("vmlinux BTF is not found\n");
		return libbpf_err(err);
	}

	err = find_attach_btf_id(btf, name, attach_type);
	if (err <= 0)
		pr_warn("%s is not found in vmlinux BTF\n", name);

	btf__free(btf);
	return libbpf_err(err);
}
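
/*
 * Usage sketch (illustrative): looking up the BTF ID needed to attach an
 * fentry program to a kernel function by hand:
 *
 *	int btf_id = libbpf_find_vmlinux_btf_id("tcp_v4_connect", BPF_TRACE_FENTRY);
 *
 *	if (btf_id > 0)
 *		...use btf_id as the program's attach_btf_id before load...
 */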

static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd)
{
	struct bpf_prog_info info = {};
	__u32 info_len = sizeof(info);
	struct btf *btf;
	int err;

	err = bpf_obj_get_info_by_fd(attach_prog_fd, &info, &info_len);
	if (err) {
		pr_warn("failed bpf_obj_get_info_by_fd for FD %d: %d\n",
			attach_prog_fd, err);
		return err;
	}

	err = -EINVAL;
	if (!info.btf_id) {
		pr_warn("The target program doesn't have BTF\n");
		goto out;
	}
	btf = btf__load_from_kernel_by_id(info.btf_id);
	err = libbpf_get_error(btf);
	if (err) {
		pr_warn("Failed to get BTF %d of the program: %d\n", info.btf_id, err);
		goto out;
	}
	err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
	btf__free(btf);
	if (err <= 0) {
		pr_warn("%s is not found in prog's BTF\n", name);
		goto out;
	}
out:
	return err;
}

static int find_kernel_btf_id(struct bpf_object *obj, const char *attach_name,
			      enum bpf_attach_type attach_type,
			      int *btf_obj_fd, int *btf_type_id)
{
	int ret, i;

	ret = find_attach_btf_id(obj->btf_vmlinux, attach_name, attach_type);
	if (ret > 0) {
		*btf_obj_fd = 0; /* vmlinux BTF */
		*btf_type_id = ret;
		return 0;
	}
	if (ret != -ENOENT)
		return ret;

	ret = load_module_btfs(obj);
	if (ret)
		return ret;

	for (i = 0; i < obj->btf_module_cnt; i++) {
		const struct module_btf *mod = &obj->btf_modules[i];

		ret = find_attach_btf_id(mod->btf, attach_name, attach_type);
		if (ret > 0) {
			*btf_obj_fd = mod->fd;
			*btf_type_id = ret;
			return 0;
		}
		if (ret == -ENOENT)
			continue;

		return ret;
	}

	return -ESRCH;
}

static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attach_name,
				     int *btf_obj_fd, int *btf_type_id)
{
	enum bpf_attach_type attach_type = prog->expected_attach_type;
	__u32 attach_prog_fd = prog->attach_prog_fd;
	int err = 0;

	/* BPF program's BTF ID */
	if (attach_prog_fd) {
		err = libbpf_find_prog_btf_id(attach_name, attach_prog_fd);
		if (err < 0) {
			pr_warn("failed to find BPF program (FD %d) BTF ID for '%s': %d\n",
				attach_prog_fd, attach_name, err);
			return err;
		}
		*btf_obj_fd = 0;
		*btf_type_id = err;
		return 0;
	}

	/* kernel/module BTF ID */
	if (prog->obj->gen_loader) {
		bpf_gen__record_attach_target(prog->obj->gen_loader, attach_name, attach_type);
		*btf_obj_fd = 0;
		*btf_type_id = 1;
	} else {
		err = find_kernel_btf_id(prog->obj, attach_name, attach_type, btf_obj_fd, btf_type_id);
	}
	if (err) {
		pr_warn("failed to find kernel BTF type ID of '%s': %d\n", attach_name, err);
		return err;
	}
	return 0;
}

int libbpf_attach_type_by_name(const char *name,
			       enum bpf_attach_type *attach_type)
{
	char *type_names;
	const struct bpf_sec_def *sec_def;

	if (!name)
		return libbpf_err(-EINVAL);

	sec_def = find_sec_def(name);
	if (!sec_def) {
		pr_debug("failed to guess attach type based on ELF section name '%s'\n", name);
		type_names = libbpf_get_type_names(true);
		if (type_names != NULL) {
			pr_debug("attachable section(type) names are:%s\n", type_names);
			free(type_names);
		}

		return libbpf_err(-EINVAL);
	}

	if (sec_def->prog_prepare_load_fn != libbpf_prepare_prog_load)
		return libbpf_err(-EINVAL);
	if (!(sec_def->cookie & SEC_ATTACHABLE))
		return libbpf_err(-EINVAL);

	*attach_type = sec_def->expected_attach_type;
	return 0;
}
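
/*
 * Usage sketch (illustrative): deriving the attach type for a cgroup
 * program from its section name before calling bpf_prog_attach():
 *
 *	enum bpf_attach_type atype;
 *
 *	if (!libbpf_attach_type_by_name("cgroup/bind4", &atype))
 *		...atype == BPF_CGROUP_INET4_BIND, per the table above...
 */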

int bpf_map__fd(const struct bpf_map *map)
{
	return map ? map->fd : libbpf_err(-EINVAL);
}

const struct bpf_map_def *bpf_map__def(const struct bpf_map *map)
{
	return map ? &map->def : libbpf_err_ptr(-EINVAL);
}

static bool map_uses_real_name(const struct bpf_map *map)
{
	/* Since libbpf started to support custom .data.* and .rodata.* maps,
	 * such maps keep their full ELF section name in real_name, while the
	 * internal name is sanitized. Detect them by comparing real_name
	 * against the canonical .data/.rodata section names.
	 */
	if (map->libbpf_type == LIBBPF_MAP_DATA && strcmp(map->real_name, DATA_SEC) != 0)
		return true;
	if (map->libbpf_type == LIBBPF_MAP_RODATA && strcmp(map->real_name, RODATA_SEC) != 0)
		return true;
	return false;
}

const char *bpf_map__name(const struct bpf_map *map)
{
	if (!map)
		return NULL;

	if (map_uses_real_name(map))
		return map->real_name;

	return map->name;
}

enum bpf_map_type bpf_map__type(const struct bpf_map *map)
{
	return map->def.type;
}

int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type)
{
	if (map->fd >= 0)
		return libbpf_err(-EBUSY);
	map->def.type = type;
	return 0;
}

__u32 bpf_map__map_flags(const struct bpf_map *map)
{
	return map->def.map_flags;
}

int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags)
{
	if (map->fd >= 0)
		return libbpf_err(-EBUSY);
	map->def.map_flags = flags;
	return 0;
}

__u64 bpf_map__map_extra(const struct bpf_map *map)
{
	return map->map_extra;
}

int bpf_map__set_map_extra(struct bpf_map *map, __u64 map_extra)
{
	if (map->fd >= 0)
		return libbpf_err(-EBUSY);
	map->map_extra = map_extra;
	return 0;
}

__u32 bpf_map__numa_node(const struct bpf_map *map)
{
	return map->numa_node;
}

int bpf_map__set_numa_node(struct bpf_map *map, __u32 numa_node)
{
	if (map->fd >= 0)
		return libbpf_err(-EBUSY);
	map->numa_node = numa_node;
	return 0;
}

__u32 bpf_map__key_size(const struct bpf_map *map)
{
	return map->def.key_size;
}

int bpf_map__set_key_size(struct bpf_map *map, __u32 size)
{
	if (map->fd >= 0)
		return libbpf_err(-EBUSY);
	map->def.key_size = size;
	return 0;
}

__u32 bpf_map__value_size(const struct bpf_map *map)
{
	return map->def.value_size;
}

int bpf_map__set_value_size(struct bpf_map *map, __u32 size)
{
	if (map->fd >= 0)
		return libbpf_err(-EBUSY);
	map->def.value_size = size;
	return 0;
}

__u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
{
	return map ? map->btf_key_type_id : 0;
}

__u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
{
	return map ? map->btf_value_type_id : 0;
}

int bpf_map__set_priv(struct bpf_map *map, void *priv,
		      bpf_map_clear_priv_t clear_priv)
{
	if (!map)
		return libbpf_err(-EINVAL);

	if (map->priv && map->clear_priv)
		map->clear_priv(map, map->priv);

	map->priv = priv;
	map->clear_priv = clear_priv;
	return 0;
}

void *bpf_map__priv(const struct bpf_map *map)
{
	return map ? map->priv : libbpf_err_ptr(-EINVAL);
}

int bpf_map__set_initial_value(struct bpf_map *map,
			       const void *data, size_t size)
{
	if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG ||
	    size != map->def.value_size || map->fd >= 0)
		return libbpf_err(-EINVAL);

	memcpy(map->mmaped, data, size);
	return 0;
}
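
/*
 * Usage sketch (illustrative): overriding a global-data map's contents
 * after open but before load. "struct my_cfg" and the lookup by the
 * ".rodata" section name are assumptions for the example:
 *
 *	struct my_cfg cfg = { .verbose = true };
 *	struct bpf_map *map = bpf_object__find_map_by_name(obj, ".rodata");
 *
 *	if (map)
 *		bpf_map__set_initial_value(map, &cfg, sizeof(cfg));
 */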

const void *bpf_map__initial_value(struct bpf_map *map, size_t *psize)
{
	if (!map->mmaped)
		return NULL;
	*psize = map->def.value_size;
	return map->mmaped;
}

bool bpf_map__is_offload_neutral(const struct bpf_map *map)
{
	return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}

bool bpf_map__is_internal(const struct bpf_map *map)
{
	return map->libbpf_type != LIBBPF_MAP_UNSPEC;
}

__u32 bpf_map__ifindex(const struct bpf_map *map)
{
	return map->map_ifindex;
}

int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
{
	if (map->fd >= 0)
		return libbpf_err(-EBUSY);
	map->map_ifindex = ifindex;
	return 0;
}

int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
{
	if (!bpf_map_type__is_map_in_map(map->def.type)) {
		pr_warn("error: unsupported map type\n");
		return libbpf_err(-EINVAL);
	}
	if (map->inner_map_fd != -1) {
		pr_warn("error: inner_map_fd already specified\n");
		return libbpf_err(-EINVAL);
	}
	if (map->inner_map) {
		bpf_map__destroy(map->inner_map);
		zfree(&map->inner_map);
	}
	map->inner_map_fd = fd;
	return 0;
}

static struct bpf_map *
__bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
{
	ssize_t idx;
	struct bpf_map *s, *e;

	if (!obj || !obj->maps)
		return errno = EINVAL, NULL;

	s = obj->maps;
	e = obj->maps + obj->nr_maps;

	if ((m < s) || (m >= e)) {
		pr_warn("error in %s: map handler doesn't belong to object\n",
			__func__);
		return errno = EINVAL, NULL;
	}

	idx = (m - obj->maps) + i;
	if (idx >= obj->nr_maps || idx < 0)
		return NULL;
	return &obj->maps[idx];
}

struct bpf_map *
bpf_map__next(const struct bpf_map *prev, const struct bpf_object *obj)
{
	return bpf_object__next_map(obj, prev);
}

struct bpf_map *
bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev)
{
	if (prev == NULL)
		return obj->maps;

	return __bpf_map__iter(prev, obj, 1);
}

struct bpf_map *
bpf_map__prev(const struct bpf_map *next, const struct bpf_object *obj)
{
	return bpf_object__prev_map(obj, next);
}

struct bpf_map *
bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *next)
{
	if (next == NULL) {
		if (!obj->nr_maps)
			return NULL;
		return obj->maps + obj->nr_maps - 1;
	}

	return __bpf_map__iter(next, obj, -1);
}

struct bpf_map *
bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name)
{
	struct bpf_map *pos;

	bpf_object__for_each_map(pos, obj) {
		/* if it's a special internal map name (which always starts
		 * with dot) then check if that special name matches the
		 * real map name (ELF section name)
		 */
		if (name[0] == '.') {
			if (pos->real_name && strcmp(pos->real_name, name) == 0)
				return pos;
			continue;
		}
		/* otherwise map name has to be an exact match */
		if (map_uses_real_name(pos)) {
			if (strcmp(pos->real_name, name) == 0)
				return pos;
			continue;
		}
		if (strcmp(pos->name, name) == 0)
			return pos;
	}
	return errno = ENOENT, NULL;
}
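
/*
 * Usage sketch (illustrative): iterating over an object's maps and looking
 * one up by name ("my_map" is a hypothetical map name):
 *
 *	struct bpf_map *map;
 *
 *	bpf_object__for_each_map(map, obj)
 *		printf("map: %s\n", bpf_map__name(map));
 *
 *	map = bpf_object__find_map_by_name(obj, "my_map");
 *	if (map)
 *		...use bpf_map__fd(map) with bpf_map_lookup_elem() etc...
 */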

int
bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name)
{
	return bpf_map__fd(bpf_object__find_map_by_name(obj, name));
}

struct bpf_map *
bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
{
	return libbpf_err_ptr(-ENOTSUP);
}

long libbpf_get_error(const void *ptr)
{
	if (!IS_ERR_OR_NULL(ptr))
		return 0;

	if (IS_ERR(ptr))
		errno = -PTR_ERR(ptr);

	/* If ptr == NULL, then errno should be already set by the failing
	 * API, because libbpf never returns NULL on success and it now always
	 * sets errno on error. So no extra errno handling for ptr == NULL
	 * case.
	 */
	return -errno;
}

__attribute__((alias("bpf_prog_load_xattr2")))
int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
			struct bpf_object **pobj, int *prog_fd);

static int bpf_prog_load_xattr2(const struct bpf_prog_load_attr *attr,
				struct bpf_object **pobj, int *prog_fd)
{
	struct bpf_object_open_attr open_attr = {};
	struct bpf_program *prog, *first_prog = NULL;
	struct bpf_object *obj;
	struct bpf_map *map;
	int err;

	if (!attr)
		return libbpf_err(-EINVAL);
	if (!attr->file)
		return libbpf_err(-EINVAL);

	open_attr.file = attr->file;
	open_attr.prog_type = attr->prog_type;

	obj = __bpf_object__open_xattr(&open_attr, 0);
	err = libbpf_get_error(obj);
	if (err)
		return libbpf_err(-ENOENT);

	bpf_object__for_each_program(prog, obj) {
		enum bpf_attach_type attach_type = attr->expected_attach_type;
		/*
		 * to preserve backwards compatibility, bpf_prog_load treats
		 * attr->prog_type, if specified, as an override to whatever
		 * bpf_object__open guessed
		 */
		if (attr->prog_type != BPF_PROG_TYPE_UNSPEC) {
			bpf_program__set_type(prog, attr->prog_type);
			bpf_program__set_expected_attach_type(prog,
							      attach_type);
		}
		if (bpf_program__type(prog) == BPF_PROG_TYPE_UNSPEC) {
			/*
			 * we haven't guessed from section name and user
			 * didn't provide a fallback type, too bad...
			 */
			bpf_object__close(obj);
			return libbpf_err(-EINVAL);
		}

		prog->prog_ifindex = attr->ifindex;
		prog->log_level = attr->log_level;
		prog->prog_flags |= attr->prog_flags;
		if (!first_prog)
			first_prog = prog;
	}

	bpf_object__for_each_map(map, obj) {
		if (map->def.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
			map->map_ifindex = attr->ifindex;
	}

	if (!first_prog) {
		pr_warn("object file doesn't contain bpf program\n");
		bpf_object__close(obj);
		return libbpf_err(-ENOENT);
	}

	err = bpf_object__load(obj);
	if (err) {
		bpf_object__close(obj);
		return libbpf_err(err);
	}

	*pobj = obj;
	*prog_fd = bpf_program__fd(first_prog);
	return 0;
}

COMPAT_VERSION(bpf_prog_load_deprecated, bpf_prog_load, LIBBPF_0.0.1)
int bpf_prog_load_deprecated(const char *file, enum bpf_prog_type type,
			     struct bpf_object **pobj, int *prog_fd)
{
	struct bpf_prog_load_attr attr;

	memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
	attr.file = file;
	attr.prog_type = type;
	attr.expected_attach_type = 0;

	return bpf_prog_load_xattr2(&attr, pobj, prog_fd);
}

struct bpf_link {
	int (*detach)(struct bpf_link *link);
	void (*dealloc)(struct bpf_link *link);
	char *pin_path;		/* NULL, if not pinned */
	int fd;			/* hook FD, -1 if not applicable */
	bool disconnected;
};

/* Replace link's underlying BPF program with the new one */
int bpf_link__update_program(struct bpf_link *link, struct bpf_program *prog)
{
	int ret;

	ret = bpf_link_update(bpf_link__fd(link), bpf_program__fd(prog), NULL);
	return libbpf_err_errno(ret);
}

/* Release "ownership" of the underlying BPF resource (typically, a BPF
 * program attached to some BPF hook, e.g., tracepoint, kprobe, etc). A
 * disconnected link, when destructed through bpf_link__destroy(), won't
 * attempt to detach/unregister that BPF resource. This is useful when an
 * attached BPF program has to outlive the userspace program that attached
 * it. Depending on the type of BPF program, additional steps (like pinning
 * the BPF program in BPF FS) may be necessary to ensure that exit of the
 * userspace program doesn't trigger automatic detachment and clean up
 * inside the kernel.
 */
void bpf_link__disconnect(struct bpf_link *link)
{
	link->disconnected = true;
}

int bpf_link__destroy(struct bpf_link *link)
{
	int err = 0;

	if (IS_ERR_OR_NULL(link))
		return 0;

	if (!link->disconnected && link->detach)
		err = link->detach(link);
	free(link->pin_path);
	if (link->dealloc)
		link->dealloc(link);
	else
		free(link);

	return libbpf_err(err);
}

int bpf_link__fd(const struct bpf_link *link)
{
	return link->fd;
}

const char *bpf_link__pin_path(const struct bpf_link *link)
{
	return link->pin_path;
}

static int bpf_link__detach_fd(struct bpf_link *link)
{
	return libbpf_err_errno(close(link->fd));
}

struct bpf_link *bpf_link__open(const char *path)
{
	struct bpf_link *link;
	int fd;

	fd = bpf_obj_get(path);
	if (fd < 0) {
		fd = -errno;
		pr_warn("failed to open link at %s: %d\n", path, fd);
		return libbpf_err_ptr(fd);
	}

	link = calloc(1, sizeof(*link));
	if (!link) {
		close(fd);
		return libbpf_err_ptr(-ENOMEM);
	}
	link->detach = &bpf_link__detach_fd;
	link->fd = fd;

	link->pin_path = strdup(path);
	if (!link->pin_path) {
		bpf_link__destroy(link);
		return libbpf_err_ptr(-ENOMEM);
	}

	return link;
}

int bpf_link__detach(struct bpf_link *link)
{
	return bpf_link_detach(link->fd) ? -errno : 0;
}

int bpf_link__pin(struct bpf_link *link, const char *path)
{
	int err;

	if (link->pin_path)
		return libbpf_err(-EBUSY);
	err = make_parent_dir(path);
	if (err)
		return libbpf_err(err);
	err = check_path(path);
	if (err)
		return libbpf_err(err);

	link->pin_path = strdup(path);
	if (!link->pin_path)
		return libbpf_err(-ENOMEM);

	if (bpf_obj_pin(link->fd, link->pin_path)) {
		err = -errno;
		zfree(&link->pin_path);
		return libbpf_err(err);
	}

	pr_debug("link fd=%d: pinned at %s\n", link->fd, link->pin_path);
	return 0;
}

int bpf_link__unpin(struct bpf_link *link)
{
	int err;

	if (!link->pin_path)
		return libbpf_err(-EINVAL);

	err = unlink(link->pin_path);
	if (err != 0)
		return -errno;

	pr_debug("link fd=%d: unpinned from %s\n", link->fd, link->pin_path);
	zfree(&link->pin_path);
	return 0;
}
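
/*
 * Usage sketch (illustrative): keeping an attachment alive past process
 * exit by pinning the link in BPF FS. The pin path is an assumption:
 *
 *	struct bpf_link *link = bpf_program__attach(prog);
 *
 *	if (!libbpf_get_error(link) &&
 *	    !bpf_link__pin(link, "/sys/fs/bpf/my_link")) {
 *		bpf_link__disconnect(link);	// don't detach on destroy
 *		bpf_link__destroy(link);	// frees memory, pin stays
 *	}
 */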

struct bpf_link_perf {
	struct bpf_link link;
	int perf_event_fd;
	/* legacy kprobe/uprobe event accounting */
	char *legacy_probe_name;
	bool legacy_is_kprobe;
	bool legacy_is_retprobe;
};

static int remove_kprobe_event_legacy(const char *probe_name, bool retprobe);
static int remove_uprobe_event_legacy(const char *probe_name, bool retprobe);

static int bpf_link_perf_detach(struct bpf_link *link)
{
	struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
	int err = 0;

	if (ioctl(perf_link->perf_event_fd, PERF_EVENT_IOC_DISABLE, 0) < 0)
		err = -errno;

	if (perf_link->perf_event_fd != link->fd)
		close(perf_link->perf_event_fd);
	close(link->fd);

	/* legacy uprobe/kprobe needs to be removed after perf event fd closure */
	if (perf_link->legacy_probe_name) {
		if (perf_link->legacy_is_kprobe) {
			err = remove_kprobe_event_legacy(perf_link->legacy_probe_name,
							 perf_link->legacy_is_retprobe);
		} else {
			err = remove_uprobe_event_legacy(perf_link->legacy_probe_name,
							 perf_link->legacy_is_retprobe);
		}
	}

	return err;
}

static void bpf_link_perf_dealloc(struct bpf_link *link)
{
	struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);

	free(perf_link->legacy_probe_name);
	free(perf_link);
}

struct bpf_link *bpf_program__attach_perf_event_opts(const struct bpf_program *prog, int pfd,
						     const struct bpf_perf_event_opts *opts)
{
	char errmsg[STRERR_BUFSIZE];
	struct bpf_link_perf *link;
	int prog_fd, link_fd = -1, err;

	if (!OPTS_VALID(opts, bpf_perf_event_opts))
		return libbpf_err_ptr(-EINVAL);

	if (pfd < 0) {
		pr_warn("prog '%s': invalid perf event FD %d\n",
			prog->name, pfd);
		return libbpf_err_ptr(-EINVAL);
	}
	prog_fd = bpf_program__fd(prog);
	if (prog_fd < 0) {
		pr_warn("prog '%s': can't attach BPF program w/o FD (did you load it?)\n",
			prog->name);
		return libbpf_err_ptr(-EINVAL);
	}

	link = calloc(1, sizeof(*link));
	if (!link)
		return libbpf_err_ptr(-ENOMEM);
	link->link.detach = &bpf_link_perf_detach;
	link->link.dealloc = &bpf_link_perf_dealloc;
	link->perf_event_fd = pfd;

	if (kernel_supports(prog->obj, FEAT_PERF_LINK)) {
		DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_opts,
			.perf_event.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0));

		link_fd = bpf_link_create(prog_fd, pfd, BPF_PERF_EVENT, &link_opts);
		if (link_fd < 0) {
			err = -errno;
			pr_warn("prog '%s': failed to create BPF link for perf_event FD %d: %d (%s)\n",
				prog->name, pfd,
				err, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
			goto err_out;
		}
		link->link.fd = link_fd;
	} else {
		if (OPTS_GET(opts, bpf_cookie, 0)) {
			pr_warn("prog '%s': user context value is not supported\n", prog->name);
			err = -EOPNOTSUPP;
			goto err_out;
		}

		if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) {
			err = -errno;
			pr_warn("prog '%s': failed to attach to perf_event FD %d: %s\n",
				prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
			if (err == -EPROTO)
				pr_warn("prog '%s': try add PERF_SAMPLE_CALLCHAIN to or remove exclude_callchain_[kernel|user] from pfd %d\n",
					prog->name, pfd);
			goto err_out;
		}
		link->link.fd = pfd;
	}
	if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
		err = -errno;
		pr_warn("prog '%s': failed to enable perf_event FD %d: %s\n",
			prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
		goto err_out;
	}

	return &link->link;
err_out:
	if (link_fd >= 0)
		close(link_fd);
	free(link);
	return libbpf_err_ptr(err);
}

struct bpf_link *bpf_program__attach_perf_event(const struct bpf_program *prog, int pfd)
{
	return bpf_program__attach_perf_event_opts(prog, pfd, NULL);
}

/*
 * this function is expected to parse integer in the range of [0, 2^31-1] from
 * given file using scanf format string fmt. If actual parsed value is
 * negative, the result might be indistinguishable from error
 */
static int parse_uint_from_file(const char *file, const char *fmt)
{
	char buf[STRERR_BUFSIZE];
	int err, ret;
	FILE *f;

	f = fopen(file, "r");
	if (!f) {
		err = -errno;
		pr_debug("failed to open '%s': %s\n", file,
			 libbpf_strerror_r(err, buf, sizeof(buf)));
		return err;
	}
	err = fscanf(f, fmt, &ret);
	if (err != 1) {
		err = err == EOF ? -EIO : -errno;
		pr_debug("failed to parse '%s': %s\n", file,
			 libbpf_strerror_r(err, buf, sizeof(buf)));
		fclose(f);
		return err;
	}
	fclose(f);
	return ret;
}

static int determine_kprobe_perf_type(void)
{
	const char *file = "/sys/bus/event_source/devices/kprobe/type";

	return parse_uint_from_file(file, "%d\n");
}

static int determine_uprobe_perf_type(void)
{
	const char *file = "/sys/bus/event_source/devices/uprobe/type";

	return parse_uint_from_file(file, "%d\n");
}

static int determine_kprobe_retprobe_bit(void)
{
	const char *file = "/sys/bus/event_source/devices/kprobe/format/retprobe";

	return parse_uint_from_file(file, "config:%d\n");
}

static int determine_uprobe_retprobe_bit(void)
{
	const char *file = "/sys/bus/event_source/devices/uprobe/format/retprobe";

	return parse_uint_from_file(file, "config:%d\n");
}

#define PERF_UPROBE_REF_CTR_OFFSET_BITS 32
#define PERF_UPROBE_REF_CTR_OFFSET_SHIFT 32

static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name,
				 uint64_t offset, int pid, size_t ref_ctr_off)
{
	struct perf_event_attr attr = {};
	char errmsg[STRERR_BUFSIZE];
	int type, pfd, err;

	if (ref_ctr_off >= (1ULL << PERF_UPROBE_REF_CTR_OFFSET_BITS))
		return -EINVAL;

	type = uprobe ? determine_uprobe_perf_type()
		      : determine_kprobe_perf_type();
	if (type < 0) {
		pr_warn("failed to determine %s perf type: %s\n",
			uprobe ? "uprobe" : "kprobe",
			libbpf_strerror_r(type, errmsg, sizeof(errmsg)));
		return type;
	}
	if (retprobe) {
		int bit = uprobe ? determine_uprobe_retprobe_bit()
				 : determine_kprobe_retprobe_bit();

		if (bit < 0) {
			pr_warn("failed to determine %s retprobe bit: %s\n",
				uprobe ? "uprobe" : "kprobe",
				libbpf_strerror_r(bit, errmsg, sizeof(errmsg)));
			return bit;
		}
		attr.config |= 1 << bit;
	}
	attr.size = sizeof(attr);
	attr.type = type;
	attr.config |= (__u64)ref_ctr_off << PERF_UPROBE_REF_CTR_OFFSET_SHIFT;
	attr.config1 = ptr_to_u64(name); /* kprobe_func or uprobe_path */
	attr.config2 = offset;		 /* kprobe_addr or probe_offset */

	/* pid filter is meaningful only for uprobes */
	pfd = syscall(__NR_perf_event_open, &attr,
		      pid < 0 ? -1 : pid /* pid */,
		      pid == -1 ? 0 : -1 /* cpu */,
		      -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
	if (pfd < 0) {
		err = -errno;
		pr_warn("%s perf_event_open() failed: %s\n",
			uprobe ? "uprobe" : "kprobe",
			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
		return err;
	}
	return pfd;
}

static int append_to_file(const char *file, const char *fmt, ...)
{
	int fd, n, err = 0;
	va_list ap;

	fd = open(file, O_WRONLY | O_APPEND | O_CLOEXEC, 0);
	if (fd < 0)
		return -errno;

	va_start(ap, fmt);
	n = vdprintf(fd, fmt, ap);
	va_end(ap);

	if (n < 0)
		err = -errno;

	close(fd);
	return err;
}

static void gen_kprobe_legacy_event_name(char *buf, size_t buf_sz,
					 const char *kfunc_name, size_t offset)
{
	static int index = 0;

	snprintf(buf, buf_sz, "libbpf_%u_%s_0x%zx_%d", getpid(), kfunc_name, offset,
		 __sync_fetch_and_add(&index, 1));
}

static int add_kprobe_event_legacy(const char *probe_name, bool retprobe,
				   const char *kfunc_name, size_t offset)
{
	const char *file = "/sys/kernel/debug/tracing/kprobe_events";

	return append_to_file(file, "%c:%s/%s %s+0x%zx",
			      retprobe ? 'r' : 'p',
			      retprobe ? "kretprobes" : "kprobes",
			      probe_name, kfunc_name, offset);
}

static int remove_kprobe_event_legacy(const char *probe_name, bool retprobe)
{
	const char *file = "/sys/kernel/debug/tracing/kprobe_events";

	return append_to_file(file, "-:%s/%s", retprobe ? "kretprobes" : "kprobes", probe_name);
}

static int determine_kprobe_perf_type_legacy(const char *probe_name, bool retprobe)
{
	char file[256];

	snprintf(file, sizeof(file),
		 "/sys/kernel/debug/tracing/events/%s/%s/id",
		 retprobe ? "kretprobes" : "kprobes", probe_name);

	return parse_uint_from_file(file, "%d\n");
}
static int perf_event_kprobe_open_legacy(const char *probe_name, bool retprobe,
					 const char *kfunc_name, size_t offset, int pid)
{
	struct perf_event_attr attr = {};
	char errmsg[STRERR_BUFSIZE];
	int type, pfd, err;

	err = add_kprobe_event_legacy(probe_name, retprobe, kfunc_name, offset);
	if (err < 0) {
		pr_warn("failed to add legacy kprobe event for '%s+0x%zx': %s\n",
			kfunc_name, offset,
			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
		return err;
	}
	type = determine_kprobe_perf_type_legacy(probe_name, retprobe);
	if (type < 0) {
		pr_warn("failed to determine legacy kprobe event id for '%s+0x%zx': %s\n",
			kfunc_name, offset,
			libbpf_strerror_r(type, errmsg, sizeof(errmsg)));
		return type;
	}
	attr.size = sizeof(attr);
	attr.config = type;
	attr.type = PERF_TYPE_TRACEPOINT;

	pfd = syscall(__NR_perf_event_open, &attr,
		      pid < 0 ? -1 : pid, /* pid */
		      pid == -1 ? 0 : -1, /* cpu */
		      -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
	if (pfd < 0) {
		err = -errno;
		pr_warn("legacy kprobe perf_event_open() failed: %s\n",
			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
		return err;
	}
	return pfd;
}

struct bpf_link *
bpf_program__attach_kprobe_opts(const struct bpf_program *prog,
				const char *func_name,
				const struct bpf_kprobe_opts *opts)
{
	DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
	char errmsg[STRERR_BUFSIZE];
	char *legacy_probe = NULL;
	struct bpf_link *link;
	size_t offset;
	bool retprobe, legacy;
	int pfd, err;

	if (!OPTS_VALID(opts, bpf_kprobe_opts))
		return libbpf_err_ptr(-EINVAL);

	retprobe = OPTS_GET(opts, retprobe, false);
	offset = OPTS_GET(opts, offset, 0);
	pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);

	legacy = determine_kprobe_perf_type() < 0;
	if (!legacy) {
		pfd = perf_event_open_probe(false /* uprobe */, retprobe,
					    func_name, offset,
					    -1 /* pid */, 0 /* ref_ctr_off */);
	} else {
		char probe_name[256];

		gen_kprobe_legacy_event_name(probe_name, sizeof(probe_name),
					     func_name, offset);

		legacy_probe = strdup(probe_name);
		if (!legacy_probe)
			return libbpf_err_ptr(-ENOMEM);

		pfd = perf_event_kprobe_open_legacy(legacy_probe, retprobe, func_name,
						    offset, -1 /* pid */);
	}
	if (pfd < 0) {
		err = -errno;
		pr_warn("prog '%s': failed to create %s '%s+0x%zx' perf event: %s\n",
			prog->name, retprobe ? "kretprobe" : "kprobe",
			func_name, offset,
			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
		goto err_out;
	}
	link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts);
	err = libbpf_get_error(link);
	if (err) {
		close(pfd);
		pr_warn("prog '%s': failed to attach to %s '%s+0x%zx': %s\n",
			prog->name, retprobe ? "kretprobe" : "kprobe",
			func_name, offset,
			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
		goto err_out;
	}
	if (legacy) {
		struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);

		perf_link->legacy_probe_name = legacy_probe;
		perf_link->legacy_is_kprobe = true;
		perf_link->legacy_is_retprobe = retprobe;
	}

	return link;
err_out:
	free(legacy_probe);
	return libbpf_err_ptr(err);
}

struct bpf_link *bpf_program__attach_kprobe(const struct bpf_program *prog,
					    bool retprobe,
					    const char *func_name)
{
	DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts,
		.retprobe = retprobe,
	);

	return bpf_program__attach_kprobe_opts(prog, func_name, &opts);
}
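
/*
 * Usage sketch (illustrative): attaching a loaded kprobe program at an
 * offset within a kernel function, with a BPF cookie:
 *
 *	LIBBPF_OPTS(bpf_kprobe_opts, opts,
 *		    .offset = 0x10,
 *		    .bpf_cookie = 42);
 *	struct bpf_link *link =
 *		bpf_program__attach_kprobe_opts(prog, "tcp_v4_connect", &opts);
 *
 *	if (libbpf_get_error(link))
 *		...handle error (e.g., missing symbol or no perf support)...
 */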

/* Adapted from perf/util/string.c */
static bool glob_match(const char *str, const char *pat)
{
	while (*str && *pat && *pat != '*') {
		if (*pat == '?') {	/* Matches any single character */
			str++;
			pat++;
			continue;
		}
		if (*str != *pat)
			return false;
		str++;
		pat++;
	}
	/* Check wild card */
	if (*pat == '*') {
		while (*pat == '*')
			pat++;
		if (!*pat) /* Tail wild card matches all */
			return true;
		while (*str)
			if (glob_match(str++, pat))
				return true;
	}
	return !*str && !*pat;
}
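
/*
 * Illustrative behavior of the matcher above: glob_match("tcp_v4_connect",
 * "tcp_*") and glob_match("tcp_v4_connect", "tcp_v?_connect") both return
 * true, while glob_match("udp_sendmsg", "tcp_*") returns false.
 */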

struct kprobe_multi_resolve {
	const char *pattern;
	unsigned long *addrs;
	size_t cap;
	size_t cnt;
};

static int
resolve_kprobe_multi_cb(unsigned long long sym_addr, char sym_type,
			const char *sym_name, void *ctx)
{
	struct kprobe_multi_resolve *res = ctx;
	int err;

	if (!glob_match(sym_name, res->pattern))
		return 0;

	err = libbpf_ensure_mem((void **) &res->addrs, &res->cap, sizeof(unsigned long),
				res->cnt + 1);
	if (err)
		return err;

	res->addrs[res->cnt++] = (unsigned long) sym_addr;
	return 0;
}

struct bpf_link *
bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
				      const char *pattern,
				      const struct bpf_kprobe_multi_opts *opts)
{
	LIBBPF_OPTS(bpf_link_create_opts, lopts);
	struct kprobe_multi_resolve res = {
		.pattern = pattern,
	};
	struct bpf_link *link = NULL;
	char errmsg[STRERR_BUFSIZE];
	const unsigned long *addrs;
	int err, link_fd, prog_fd;
	const __u64 *cookies;
	const char **syms;
	bool retprobe;
	size_t cnt;

	if (!OPTS_VALID(opts, bpf_kprobe_multi_opts))
		return libbpf_err_ptr(-EINVAL);

	syms = OPTS_GET(opts, syms, false);
	addrs = OPTS_GET(opts, addrs, false);
	cnt = OPTS_GET(opts, cnt, false);
	cookies = OPTS_GET(opts, cookies, false);

	if (!pattern && !addrs && !syms)
		return libbpf_err_ptr(-EINVAL);
	if (pattern && (addrs || syms || cookies || cnt))
		return libbpf_err_ptr(-EINVAL);
	if (!pattern && !cnt)
		return libbpf_err_ptr(-EINVAL);
	if (addrs && syms)
		return libbpf_err_ptr(-EINVAL);

	if (pattern) {
		err = libbpf_kallsyms_parse(resolve_kprobe_multi_cb, &res);
		if (err)
			goto error;
		if (!res.cnt) {
			err = -ENOENT;
			goto error;
		}
		addrs = res.addrs;
		cnt = res.cnt;
	}

	retprobe = OPTS_GET(opts, retprobe, false);

	lopts.kprobe_multi.syms = syms;
	lopts.kprobe_multi.addrs = addrs;
	lopts.kprobe_multi.cookies = cookies;
	lopts.kprobe_multi.cnt = cnt;
	lopts.kprobe_multi.flags = retprobe ? BPF_F_KPROBE_MULTI_RETURN : 0;

	link = calloc(1, sizeof(*link));
	if (!link) {
		err = -ENOMEM;
		goto error;
	}
	link->detach = &bpf_link__detach_fd;

	prog_fd = bpf_program__fd(prog);
	link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, &lopts);
	if (link_fd < 0) {
		err = -errno;
		pr_warn("prog '%s': failed to attach: %s\n",
			prog->name, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
		goto error;
	}
	link->fd = link_fd;
	free(res.addrs);
	return link;

error:
	free(link);
	free(res.addrs);
	return libbpf_err_ptr(err);
}
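
/*
 * Usage sketch (illustrative): attaching one kprobe.multi program to every
 * kernel symbol matching a glob pattern:
 *
 *	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts, .retprobe = false);
 *	struct bpf_link *link =
 *		bpf_program__attach_kprobe_multi_opts(prog, "tcp_*", &opts);
 *
 * Alternatively, pass explicit .syms (or .addrs) plus .cnt in opts and a
 * NULL pattern; mixing a pattern with explicit syms/addrs is rejected by
 * the validation above.
 */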

static int attach_kprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link)
{
	DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts);
	unsigned long offset = 0;
	const char *func_name;
	char *func;
	int n;

	opts.retprobe = str_has_pfx(prog->sec_name, "kretprobe/");
	if (opts.retprobe)
		func_name = prog->sec_name + sizeof("kretprobe/") - 1;
	else
		func_name = prog->sec_name + sizeof("kprobe/") - 1;

	n = sscanf(func_name, "%m[a-zA-Z0-9_.]+%li", &func, &offset);
	if (n < 1) {
		pr_warn("kprobe name is invalid: %s\n", func_name);
		return -EINVAL;
	}
	if (opts.retprobe && offset != 0) {
		free(func);
		pr_warn("kretprobes do not support offset specification\n");
		return -EINVAL;
	}

	opts.offset = offset;
	*link = bpf_program__attach_kprobe_opts(prog, func, &opts);
	free(func);
	return libbpf_get_error(*link);
}

static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link)
{
	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
	const char *spec;
	char *pattern;
	int n;

	opts.retprobe = str_has_pfx(prog->sec_name, "kretprobe.multi/");
	if (opts.retprobe)
		spec = prog->sec_name + sizeof("kretprobe.multi/") - 1;
	else
		spec = prog->sec_name + sizeof("kprobe.multi/") - 1;

	n = sscanf(spec, "%m[a-zA-Z0-9_.*?]", &pattern);
	if (n < 1) {
		pr_warn("kprobe multi pattern is invalid: %s\n", spec);
		return -EINVAL;
	}

	*link = bpf_program__attach_kprobe_multi_opts(prog, pattern, &opts);
	free(pattern);
	return libbpf_get_error(*link);
}

static void gen_uprobe_legacy_event_name(char *buf, size_t buf_sz,
					 const char *binary_path, uint64_t offset)
{
	int i;

	snprintf(buf, buf_sz, "libbpf_%u_%s_0x%zx", getpid(), binary_path, (size_t)offset);

	/* sanitize binary_path in the probe name */
	for (i = 0; buf[i]; i++) {
		if (!isalnum(buf[i]))
			buf[i] = '_';
	}
}

static inline int add_uprobe_event_legacy(const char *probe_name, bool retprobe,
					  const char *binary_path, size_t offset)
{
	const char *file = "/sys/kernel/debug/tracing/uprobe_events";

	return append_to_file(file, "%c:%s/%s %s:0x%zx",
			      retprobe ? 'r' : 'p',
			      retprobe ? "uretprobes" : "uprobes",
			      probe_name, binary_path, offset);
}

static inline int remove_uprobe_event_legacy(const char *probe_name, bool retprobe)
{
	const char *file = "/sys/kernel/debug/tracing/uprobe_events";

	return append_to_file(file, "-:%s/%s", retprobe ? "uretprobes" : "uprobes", probe_name);
}

static int determine_uprobe_perf_type_legacy(const char *probe_name, bool retprobe)
{
	char file[512];

	snprintf(file, sizeof(file),
		 "/sys/kernel/debug/tracing/events/%s/%s/id",
		 retprobe ? "uretprobes" : "uprobes", probe_name);

	return parse_uint_from_file(file, "%d\n");
}

static int perf_event_uprobe_open_legacy(const char *probe_name, bool retprobe,
					 const char *binary_path, size_t offset, int pid)
{
	struct perf_event_attr attr;
	int type, pfd, err;

	err = add_uprobe_event_legacy(probe_name, retprobe, binary_path, offset);
	if (err < 0) {
		pr_warn("failed to add legacy uprobe event for %s:0x%zx: %d\n",
			binary_path, (size_t)offset, err);
		return err;
	}
	type = determine_uprobe_perf_type_legacy(probe_name, retprobe);
	if (type < 0) {
		pr_warn("failed to determine legacy uprobe event id for %s:0x%zx: %d\n",
			binary_path, offset, type);
		return type;
	}

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.config = type;
	attr.type = PERF_TYPE_TRACEPOINT;

	pfd = syscall(__NR_perf_event_open, &attr,
		      pid < 0 ? -1 : pid, /* pid */
		      pid == -1 ? 0 : -1, /* cpu */
		      -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
	if (pfd < 0) {
		err = -errno;
		pr_warn("legacy uprobe perf_event_open() failed: %d\n", err);
		return err;
	}
	return pfd;
}

LIBBPF_API struct bpf_link *
bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
				const char *binary_path, size_t func_offset,
				const struct bpf_uprobe_opts *opts)
{
	DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
	char errmsg[STRERR_BUFSIZE], *legacy_probe = NULL;
	struct bpf_link *link;
	size_t ref_ctr_off;
	int pfd, err;
	bool retprobe, legacy;

	if (!OPTS_VALID(opts, bpf_uprobe_opts))
		return libbpf_err_ptr(-EINVAL);

	retprobe = OPTS_GET(opts, retprobe, false);
	ref_ctr_off = OPTS_GET(opts, ref_ctr_offset, 0);
	pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);

	legacy = determine_uprobe_perf_type() < 0;
	if (!legacy) {
		pfd = perf_event_open_probe(true /* uprobe */, retprobe, binary_path,
					    func_offset, pid, ref_ctr_off);
	} else {
		char probe_name[512];

		if (ref_ctr_off)
			return libbpf_err_ptr(-EINVAL);

		gen_uprobe_legacy_event_name(probe_name, sizeof(probe_name),
					     binary_path, func_offset);

		legacy_probe = strdup(probe_name);
		if (!legacy_probe)
			return libbpf_err_ptr(-ENOMEM);

		pfd = perf_event_uprobe_open_legacy(legacy_probe, retprobe,
						    binary_path, func_offset, pid);
	}
	if (pfd < 0) {
		err = -errno;
		pr_warn("prog '%s': failed to create %s '%s:0x%zx' perf event: %s\n",
			prog->name, retprobe ? "uretprobe" : "uprobe",
			binary_path, func_offset,
			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
		goto err_out;
	}

	link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts);
	err = libbpf_get_error(link);
	if (err) {
		close(pfd);
		pr_warn("prog '%s': failed to attach to %s '%s:0x%zx': %s\n",
			prog->name, retprobe ? "uretprobe" : "uprobe",
			binary_path, func_offset,
			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
		goto err_out;
	}
	if (legacy) {
		struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);

		perf_link->legacy_probe_name = legacy_probe;
		perf_link->legacy_is_kprobe = false;
		perf_link->legacy_is_retprobe = retprobe;
	}
	return link;
err_out:
	free(legacy_probe);
	return libbpf_err_ptr(err);
}

struct bpf_link *bpf_program__attach_uprobe(const struct bpf_program *prog,
					    bool retprobe, pid_t pid,
					    const char *binary_path,
					    size_t func_offset)
{
	DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts, .retprobe = retprobe);

	return bpf_program__attach_uprobe_opts(prog, pid, binary_path, func_offset, &opts);
}
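
/*
 * Usage sketch (illustrative): attaching a uprobe at a known file offset
 * within a binary; the path and offset below are assumptions:
 *
 *	LIBBPF_OPTS(bpf_uprobe_opts, opts, .retprobe = false);
 *	struct bpf_link *link =
 *		bpf_program__attach_uprobe_opts(prog, -1, "/usr/lib/libc.so.6",
 *						0x12345, &opts);
 *
 * pid -1 requests all processes; a positive pid filters to that process.
 */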

static int determine_tracepoint_id(const char *tp_category,
				   const char *tp_name)
{
	char file[PATH_MAX];
	int ret;

	ret = snprintf(file, sizeof(file),
		       "/sys/kernel/debug/tracing/events/%s/%s/id",
		       tp_category, tp_name);
	if (ret < 0)
		return -errno;
	if (ret >= sizeof(file)) {
		pr_debug("tracepoint %s/%s path is too long\n",
			 tp_category, tp_name);
		return -E2BIG;
	}
	return parse_uint_from_file(file, "%d\n");
}

static int perf_event_open_tracepoint(const char *tp_category,
				      const char *tp_name)
{
	struct perf_event_attr attr = {};
	char errmsg[STRERR_BUFSIZE];
	int tp_id, pfd, err;

	tp_id = determine_tracepoint_id(tp_category, tp_name);
	if (tp_id < 0) {
		pr_warn("failed to determine tracepoint '%s/%s' perf event ID: %s\n",
			tp_category, tp_name,
			libbpf_strerror_r(tp_id, errmsg, sizeof(errmsg)));
		return tp_id;
	}

	attr.type = PERF_TYPE_TRACEPOINT;
	attr.size = sizeof(attr);
	attr.config = tp_id;

	pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu */,
		      -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
	if (pfd < 0) {
		err = -errno;
		pr_warn("tracepoint '%s/%s' perf_event_open() failed: %s\n",
			tp_category, tp_name,
			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
		return err;
	}
	return pfd;
}

struct bpf_link *bpf_program__attach_tracepoint_opts(const struct bpf_program *prog,
						     const char *tp_category,
						     const char *tp_name,
						     const struct bpf_tracepoint_opts *opts)
{
	DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
	char errmsg[STRERR_BUFSIZE];
	struct bpf_link *link;
	int pfd, err;

	if (!OPTS_VALID(opts, bpf_tracepoint_opts))
		return libbpf_err_ptr(-EINVAL);

	pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);

	pfd = perf_event_open_tracepoint(tp_category, tp_name);
	if (pfd < 0) {
		pr_warn("prog '%s': failed to create tracepoint '%s/%s' perf event: %s\n",
			prog->name, tp_category, tp_name,
			libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
		return libbpf_err_ptr(pfd);
	}
	link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts);
	err = libbpf_get_error(link);
	if (err) {
		close(pfd);
		pr_warn("prog '%s': failed to attach to tracepoint '%s/%s': %s\n",
			prog->name, tp_category, tp_name,
			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
		return libbpf_err_ptr(err);
	}
	return link;
}

struct bpf_link *bpf_program__attach_tracepoint(const struct bpf_program *prog,
						const char *tp_category,
						const char *tp_name)
{
	return bpf_program__attach_tracepoint_opts(prog, tp_category, tp_name, NULL);
}
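
/*
 * Usage sketch (illustrative): attaching to a syscall tracepoint by its
 * category and name as listed under /sys/kernel/debug/tracing/events:
 *
 *	struct bpf_link *link =
 *		bpf_program__attach_tracepoint(prog, "syscalls", "sys_enter_write");
 *
 *	if (libbpf_get_error(link))
 *		...handle error...
 */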

static int attach_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link)
{
	char *sec_name, *tp_cat, *tp_name;

	sec_name = strdup(prog->sec_name);
	if (!sec_name)
		return -ENOMEM;

	/* extract "tp/<category>/<name>" or "tracepoint/<category>/<name>" */
	if (str_has_pfx(prog->sec_name, "tp/"))
		tp_cat = sec_name + sizeof("tp/") - 1;
	else
		tp_cat = sec_name + sizeof("tracepoint/") - 1;
	tp_name = strchr(tp_cat, '/');
	if (!tp_name) {
		free(sec_name);
		return -EINVAL;
	}
	*tp_name = '\0';
	tp_name++;

	*link = bpf_program__attach_tracepoint(prog, tp_cat, tp_name);
	free(sec_name);
	return libbpf_get_error(*link);
}
10718
10719struct bpf_link *bpf_program__attach_raw_tracepoint(const struct bpf_program *prog,
10720 const char *tp_name)
10721{
10722 char errmsg[STRERR_BUFSIZE];
10723 struct bpf_link *link;
10724 int prog_fd, pfd;
10725
10726 prog_fd = bpf_program__fd(prog);
10727 if (prog_fd < 0) {
10728 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
10729 return libbpf_err_ptr(-EINVAL);
10730 }
10731
10732 link = calloc(1, sizeof(*link));
10733 if (!link)
10734 return libbpf_err_ptr(-ENOMEM);
10735 link->detach = &bpf_link__detach_fd;
10736
10737 pfd = bpf_raw_tracepoint_open(tp_name, prog_fd);
10738 if (pfd < 0) {
10739 pfd = -errno;
10740 free(link);
10741 pr_warn("prog '%s': failed to attach to raw tracepoint '%s': %s\n",
10742 prog->name, tp_name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
10743 return libbpf_err_ptr(pfd);
10744 }
10745 link->fd = pfd;
10746 return link;
10747}
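
/* Usage sketch (editor's illustration): attach to the "sys_enter" raw
 * tracepoint; the program is assumed to be of a raw-tracepoint type and
 * already loaded.
 *
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_raw_tracepoint(prog, "sys_enter");
 *	if (libbpf_get_error(link))
 *		return -1;
 */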
10748
10749static int attach_raw_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link)
10750{
10751 static const char *const prefixes[] = {
10752 "raw_tp/",
10753 "raw_tracepoint/",
10754 "raw_tp.w/",
10755 "raw_tracepoint.w/",
10756 };
10757 size_t i;
10758 const char *tp_name = NULL;
10759
10760 for (i = 0; i < ARRAY_SIZE(prefixes); i++) {
10761 if (str_has_pfx(prog->sec_name, prefixes[i])) {
10762 tp_name = prog->sec_name + strlen(prefixes[i]);
10763 break;
10764 }
10765 }
10766 if (!tp_name) {
10767 pr_warn("prog '%s': invalid section name '%s'\n",
10768 prog->name, prog->sec_name);
10769 return -EINVAL;
10770 }
10771
10772 *link = bpf_program__attach_raw_tracepoint(prog, tp_name);
	return libbpf_get_error(*link);
10774}
10775
/* Common logic for all BPF program types that attach to a btf_id */
10777static struct bpf_link *bpf_program__attach_btf_id(const struct bpf_program *prog)
10778{
10779 char errmsg[STRERR_BUFSIZE];
10780 struct bpf_link *link;
10781 int prog_fd, pfd;
10782
10783 prog_fd = bpf_program__fd(prog);
10784 if (prog_fd < 0) {
10785 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
10786 return libbpf_err_ptr(-EINVAL);
10787 }
10788
10789 link = calloc(1, sizeof(*link));
10790 if (!link)
10791 return libbpf_err_ptr(-ENOMEM);
10792 link->detach = &bpf_link__detach_fd;
10793
10794 pfd = bpf_raw_tracepoint_open(NULL, prog_fd);
10795 if (pfd < 0) {
10796 pfd = -errno;
10797 free(link);
10798 pr_warn("prog '%s': failed to attach: %s\n",
10799 prog->name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
10800 return libbpf_err_ptr(pfd);
10801 }
10802 link->fd = pfd;
	return link;
10804}
10805
10806struct bpf_link *bpf_program__attach_trace(const struct bpf_program *prog)
10807{
10808 return bpf_program__attach_btf_id(prog);
10809}
10810
10811struct bpf_link *bpf_program__attach_lsm(const struct bpf_program *prog)
10812{
10813 return bpf_program__attach_btf_id(prog);
10814}
10815
10816static int attach_trace(const struct bpf_program *prog, long cookie, struct bpf_link **link)
10817{
10818 *link = bpf_program__attach_trace(prog);
10819 return libbpf_get_error(*link);
10820}
10821
10822static int attach_lsm(const struct bpf_program *prog, long cookie, struct bpf_link **link)
10823{
10824 *link = bpf_program__attach_lsm(prog);
10825 return libbpf_get_error(*link);
10826}
10827
10828static struct bpf_link *
10829bpf_program__attach_fd(const struct bpf_program *prog, int target_fd, int btf_id,
10830 const char *target_name)
10831{
10832 DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts,
10833 .target_btf_id = btf_id);
10834 enum bpf_attach_type attach_type;
10835 char errmsg[STRERR_BUFSIZE];
10836 struct bpf_link *link;
10837 int prog_fd, link_fd;
10838
10839 prog_fd = bpf_program__fd(prog);
10840 if (prog_fd < 0) {
10841 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
10842 return libbpf_err_ptr(-EINVAL);
10843 }
10844
10845 link = calloc(1, sizeof(*link));
10846 if (!link)
10847 return libbpf_err_ptr(-ENOMEM);
10848 link->detach = &bpf_link__detach_fd;
10849
10850 attach_type = bpf_program__expected_attach_type(prog);
10851 link_fd = bpf_link_create(prog_fd, target_fd, attach_type, &opts);
10852 if (link_fd < 0) {
10853 link_fd = -errno;
10854 free(link);
10855 pr_warn("prog '%s': failed to attach to %s: %s\n",
10856 prog->name, target_name,
10857 libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
10858 return libbpf_err_ptr(link_fd);
10859 }
10860 link->fd = link_fd;
10861 return link;
10862}
10863
10864struct bpf_link *
10865bpf_program__attach_cgroup(const struct bpf_program *prog, int cgroup_fd)
10866{
10867 return bpf_program__attach_fd(prog, cgroup_fd, 0, "cgroup");
10868}
10869
10870struct bpf_link *
10871bpf_program__attach_netns(const struct bpf_program *prog, int netns_fd)
10872{
10873 return bpf_program__attach_fd(prog, netns_fd, 0, "netns");
10874}
10875
10876struct bpf_link *bpf_program__attach_xdp(const struct bpf_program *prog, int ifindex)
10877{
	/* target_fd/target_ifindex use the same field in LINK_CREATE */
10879 return bpf_program__attach_fd(prog, ifindex, 0, "xdp");
10880}
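
/* Usage sketch (editor's illustration): attach an XDP program to a network
 * interface resolved by name; if_nametoindex() is from <net/if.h> and the
 * interface name is an assumption.
 *
 *	int ifindex = if_nametoindex("eth0");
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_xdp(prog, ifindex);
 *	if (libbpf_get_error(link))
 *		return -1;
 */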
10881
10882struct bpf_link *bpf_program__attach_freplace(const struct bpf_program *prog,
10883 int target_fd,
10884 const char *attach_func_name)
10885{
10886 int btf_id;
10887
10888 if (!!target_fd != !!attach_func_name) {
10889 pr_warn("prog '%s': supply none or both of target_fd and attach_func_name\n",
10890 prog->name);
10891 return libbpf_err_ptr(-EINVAL);
10892 }
10893
10894 if (prog->type != BPF_PROG_TYPE_EXT) {
		pr_warn("prog '%s': only BPF_PROG_TYPE_EXT can attach as freplace\n",
10896 prog->name);
10897 return libbpf_err_ptr(-EINVAL);
10898 }
10899
10900 if (target_fd) {
10901 btf_id = libbpf_find_prog_btf_id(attach_func_name, target_fd);
10902 if (btf_id < 0)
10903 return libbpf_err_ptr(btf_id);
10904
10905 return bpf_program__attach_fd(prog, target_fd, btf_id, "freplace");
10906 } else {
		/* no target, so use raw_tracepoint_open for compatibility
		 * with old kernels
		 */
10910 return bpf_program__attach_trace(prog);
10911 }
10912}
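
/* Usage sketch (editor's illustration): replace "handle_packet" (a
 * hypothetical global function inside the target program) with this
 * BPF_PROG_TYPE_EXT program; target_prog_fd is the FD of an already-loaded
 * BPF program.
 *
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_freplace(prog, target_prog_fd,
 *					    "handle_packet");
 *	if (libbpf_get_error(link))
 *		return -1;
 */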
10913
10914struct bpf_link *
10915bpf_program__attach_iter(const struct bpf_program *prog,
10916 const struct bpf_iter_attach_opts *opts)
10917{
10918 DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_create_opts);
10919 char errmsg[STRERR_BUFSIZE];
10920 struct bpf_link *link;
10921 int prog_fd, link_fd;
10922 __u32 target_fd = 0;
10923
10924 if (!OPTS_VALID(opts, bpf_iter_attach_opts))
10925 return libbpf_err_ptr(-EINVAL);
10926
10927 link_create_opts.iter_info = OPTS_GET(opts, link_info, (void *)0);
10928 link_create_opts.iter_info_len = OPTS_GET(opts, link_info_len, 0);
10929
10930 prog_fd = bpf_program__fd(prog);
10931 if (prog_fd < 0) {
10932 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
10933 return libbpf_err_ptr(-EINVAL);
10934 }
10935
10936 link = calloc(1, sizeof(*link));
10937 if (!link)
10938 return libbpf_err_ptr(-ENOMEM);
10939 link->detach = &bpf_link__detach_fd;
10940
10941 link_fd = bpf_link_create(prog_fd, target_fd, BPF_TRACE_ITER,
10942 &link_create_opts);
10943 if (link_fd < 0) {
10944 link_fd = -errno;
10945 free(link);
10946 pr_warn("prog '%s': failed to attach to iterator: %s\n",
10947 prog->name, libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
10948 return libbpf_err_ptr(link_fd);
10949 }
10950 link->fd = link_fd;
10951 return link;
10952}
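
/* Usage sketch (editor's illustration): create a BPF iterator link and read
 * its output through an iterator instance FD; bpf_iter_create() is declared
 * in bpf.h.
 *
 *	struct bpf_link *link;
 *	char buf[4096];
 *	int iter_fd, n;
 *
 *	link = bpf_program__attach_iter(prog, NULL);
 *	if (libbpf_get_error(link))
 *		return -1;
 *	iter_fd = bpf_iter_create(bpf_link__fd(link));
 *	while ((n = read(iter_fd, buf, sizeof(buf))) > 0)
 *		...
 *	close(iter_fd);
 *	bpf_link__destroy(link);
 */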
10953
10954static int attach_iter(const struct bpf_program *prog, long cookie, struct bpf_link **link)
10955{
10956 *link = bpf_program__attach_iter(prog, NULL);
10957 return libbpf_get_error(*link);
10958}
10959
10960struct bpf_link *bpf_program__attach(const struct bpf_program *prog)
10961{
10962 struct bpf_link *link = NULL;
10963 int err;
10964
10965 if (!prog->sec_def || !prog->sec_def->prog_attach_fn)
10966 return libbpf_err_ptr(-EOPNOTSUPP);
10967
10968 err = prog->sec_def->prog_attach_fn(prog, prog->sec_def->cookie, &link);
10969 if (err)
10970 return libbpf_err_ptr(err);
10971
	/* When calling bpf_program__attach() explicitly, auto-attach support
	 * is expected to work, so NULL returned link is considered an error.
	 * This is different for skeleton's attach, see comment in
	 * bpf_object__attach_skeleton().
	 */
10977 if (!link)
10978 return libbpf_err_ptr(-EOPNOTSUPP);
10979
10980 return link;
10981}
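
/* Usage sketch (editor's illustration): generic auto-attach based on the
 * program's SEC() definition, e.g. SEC("tp/sched/sched_switch"); works for
 * any section type that has an auto-attach handler.
 *
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach(prog);
 *	if (libbpf_get_error(link))
 *		return -1;
 */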
10982
10983static int bpf_link__detach_struct_ops(struct bpf_link *link)
10984{
10985 __u32 zero = 0;
10986
10987 if (bpf_map_delete_elem(link->fd, &zero))
10988 return -errno;
10989
10990 return 0;
10991}
10992
10993struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map)
10994{
10995 struct bpf_struct_ops *st_ops;
10996 struct bpf_link *link;
10997 __u32 i, zero = 0;
10998 int err;
10999
11000 if (!bpf_map__is_struct_ops(map) || map->fd == -1)
11001 return libbpf_err_ptr(-EINVAL);
11002
11003 link = calloc(1, sizeof(*link));
11004 if (!link)
		return libbpf_err_ptr(-ENOMEM);
11006
11007 st_ops = map->st_ops;
11008 for (i = 0; i < btf_vlen(st_ops->type); i++) {
11009 struct bpf_program *prog = st_ops->progs[i];
11010 void *kern_data;
11011 int prog_fd;
11012
11013 if (!prog)
11014 continue;
11015
11016 prog_fd = bpf_program__fd(prog);
11017 kern_data = st_ops->kern_vdata + st_ops->kern_func_off[i];
11018 *(unsigned long *)kern_data = prog_fd;
11019 }
11020
11021 err = bpf_map_update_elem(map->fd, &zero, st_ops->kern_vdata, 0);
11022 if (err) {
11023 err = -errno;
11024 free(link);
11025 return libbpf_err_ptr(err);
11026 }
11027
11028 link->detach = bpf_link__detach_struct_ops;
11029 link->fd = map->fd;
11030
11031 return link;
11032}
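
/* Usage sketch (editor's illustration): register a struct_ops map (e.g. a
 * custom tcp_congestion_ops implementation) after the object is loaded;
 * "my_ops" is a hypothetical map name.
 *
 *	struct bpf_map *map = bpf_object__find_map_by_name(obj, "my_ops");
 *	struct bpf_link *link = bpf_map__attach_struct_ops(map);
 *
 *	if (libbpf_get_error(link))
 *		return -1;
 */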
11033
11034static enum bpf_perf_event_ret
11035perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
11036 void **copy_mem, size_t *copy_size,
11037 bpf_perf_event_print_t fn, void *private_data)
11038{
11039 struct perf_event_mmap_page *header = mmap_mem;
11040 __u64 data_head = ring_buffer_read_head(header);
11041 __u64 data_tail = header->data_tail;
11042 void *base = ((__u8 *)header) + page_size;
11043 int ret = LIBBPF_PERF_EVENT_CONT;
11044 struct perf_event_header *ehdr;
11045 size_t ehdr_size;
11046
11047 while (data_head != data_tail) {
11048 ehdr = base + (data_tail & (mmap_size - 1));
11049 ehdr_size = ehdr->size;
11050
11051 if (((void *)ehdr) + ehdr_size > base + mmap_size) {
11052 void *copy_start = ehdr;
11053 size_t len_first = base + mmap_size - copy_start;
11054 size_t len_secnd = ehdr_size - len_first;
11055
11056 if (*copy_size < ehdr_size) {
11057 free(*copy_mem);
11058 *copy_mem = malloc(ehdr_size);
11059 if (!*copy_mem) {
11060 *copy_size = 0;
11061 ret = LIBBPF_PERF_EVENT_ERROR;
11062 break;
11063 }
11064 *copy_size = ehdr_size;
11065 }
11066
11067 memcpy(*copy_mem, copy_start, len_first);
11068 memcpy(*copy_mem + len_first, base, len_secnd);
11069 ehdr = *copy_mem;
11070 }
11071
11072 ret = fn(ehdr, private_data);
11073 data_tail += ehdr_size;
11074 if (ret != LIBBPF_PERF_EVENT_CONT)
11075 break;
11076 }
11077
11078 ring_buffer_write_tail(header, data_tail);
11079 return libbpf_err(ret);
11080}
11081
11082__attribute__((alias("perf_event_read_simple")))
11083enum bpf_perf_event_ret
11084bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
11085 void **copy_mem, size_t *copy_size,
11086 bpf_perf_event_print_t fn, void *private_data);
11087
11088struct perf_buffer;
11089
11090struct perf_buffer_params {
11091 struct perf_event_attr *attr;
	/* if event_cb is specified, it takes precedence */
11093 perf_buffer_event_fn event_cb;
	/* sample_cb and lost_cb are higher-level common-case callbacks */
11095 perf_buffer_sample_fn sample_cb;
11096 perf_buffer_lost_fn lost_cb;
11097 void *ctx;
11098 int cpu_cnt;
11099 int *cpus;
11100 int *map_keys;
11101};
11102
11103struct perf_cpu_buf {
11104 struct perf_buffer *pb;
11105 void *base;
11106 void *buf;
11107 size_t buf_size;
11108 int fd;
11109 int cpu;
11110 int map_key;
11111};
11112
11113struct perf_buffer {
11114 perf_buffer_event_fn event_cb;
11115 perf_buffer_sample_fn sample_cb;
11116 perf_buffer_lost_fn lost_cb;
	void *ctx; /* passed into callbacks */
11118
11119 size_t page_size;
11120 size_t mmap_size;
11121 struct perf_cpu_buf **cpu_bufs;
11122 struct epoll_event *events;
	int cpu_cnt; /* number of allocated CPU buffers */
	int epoll_fd; /* perf event FD */
	int map_fd; /* BPF_MAP_TYPE_PERF_EVENT_ARRAY BPF map FD */
11126};
11127
11128static void perf_buffer__free_cpu_buf(struct perf_buffer *pb,
11129 struct perf_cpu_buf *cpu_buf)
11130{
11131 if (!cpu_buf)
11132 return;
11133 if (cpu_buf->base &&
11134 munmap(cpu_buf->base, pb->mmap_size + pb->page_size))
11135 pr_warn("failed to munmap cpu_buf #%d\n", cpu_buf->cpu);
11136 if (cpu_buf->fd >= 0) {
11137 ioctl(cpu_buf->fd, PERF_EVENT_IOC_DISABLE, 0);
11138 close(cpu_buf->fd);
11139 }
11140 free(cpu_buf->buf);
11141 free(cpu_buf);
11142}
11143
11144void perf_buffer__free(struct perf_buffer *pb)
11145{
11146 int i;
11147
11148 if (IS_ERR_OR_NULL(pb))
11149 return;
11150 if (pb->cpu_bufs) {
11151 for (i = 0; i < pb->cpu_cnt; i++) {
11152 struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];
11153
11154 if (!cpu_buf)
11155 continue;
11156
11157 bpf_map_delete_elem(pb->map_fd, &cpu_buf->map_key);
11158 perf_buffer__free_cpu_buf(pb, cpu_buf);
11159 }
11160 free(pb->cpu_bufs);
11161 }
11162 if (pb->epoll_fd >= 0)
11163 close(pb->epoll_fd);
11164 free(pb->events);
11165 free(pb);
11166}
11167
11168static struct perf_cpu_buf *
11169perf_buffer__open_cpu_buf(struct perf_buffer *pb, struct perf_event_attr *attr,
11170 int cpu, int map_key)
11171{
11172 struct perf_cpu_buf *cpu_buf;
11173 char msg[STRERR_BUFSIZE];
11174 int err;
11175
11176 cpu_buf = calloc(1, sizeof(*cpu_buf));
11177 if (!cpu_buf)
11178 return ERR_PTR(-ENOMEM);
11179
11180 cpu_buf->pb = pb;
11181 cpu_buf->cpu = cpu;
11182 cpu_buf->map_key = map_key;
11183
	cpu_buf->fd = syscall(__NR_perf_event_open, attr, -1 /* pid */, cpu,
11185 -1, PERF_FLAG_FD_CLOEXEC);
11186 if (cpu_buf->fd < 0) {
11187 err = -errno;
11188 pr_warn("failed to open perf buffer event on cpu #%d: %s\n",
11189 cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
11190 goto error;
11191 }
11192
11193 cpu_buf->base = mmap(NULL, pb->mmap_size + pb->page_size,
11194 PROT_READ | PROT_WRITE, MAP_SHARED,
11195 cpu_buf->fd, 0);
11196 if (cpu_buf->base == MAP_FAILED) {
11197 cpu_buf->base = NULL;
11198 err = -errno;
11199 pr_warn("failed to mmap perf buffer on cpu #%d: %s\n",
11200 cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
11201 goto error;
11202 }
11203
11204 if (ioctl(cpu_buf->fd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
11205 err = -errno;
11206 pr_warn("failed to enable perf buffer event on cpu #%d: %s\n",
11207 cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
11208 goto error;
11209 }
11210
11211 return cpu_buf;
11212
11213error:
11214 perf_buffer__free_cpu_buf(pb, cpu_buf);
11215 return (struct perf_cpu_buf *)ERR_PTR(err);
11216}
11217
11218static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
11219 struct perf_buffer_params *p);
11220
11221DEFAULT_VERSION(perf_buffer__new_v0_6_0, perf_buffer__new, LIBBPF_0.6.0)
11222struct perf_buffer *perf_buffer__new_v0_6_0(int map_fd, size_t page_cnt,
11223 perf_buffer_sample_fn sample_cb,
11224 perf_buffer_lost_fn lost_cb,
11225 void *ctx,
11226 const struct perf_buffer_opts *opts)
11227{
11228 struct perf_buffer_params p = {};
11229 struct perf_event_attr attr = {};
11230
11231 if (!OPTS_VALID(opts, perf_buffer_opts))
11232 return libbpf_err_ptr(-EINVAL);
11233
11234 attr.config = PERF_COUNT_SW_BPF_OUTPUT;
11235 attr.type = PERF_TYPE_SOFTWARE;
11236 attr.sample_type = PERF_SAMPLE_RAW;
11237 attr.sample_period = 1;
11238 attr.wakeup_events = 1;
11239
11240 p.attr = &attr;
11241 p.sample_cb = sample_cb;
11242 p.lost_cb = lost_cb;
11243 p.ctx = ctx;
11244
11245 return libbpf_ptr(__perf_buffer__new(map_fd, page_cnt, &p));
11246}
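
/* Usage sketch (editor's illustration): create a perf buffer over a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY map with 64 pages per CPU (page_cnt must be
 * a power of two). The callback and map FD names are assumptions.
 *
 *	static void on_sample(void *ctx, int cpu, void *data, __u32 size)
 *	{
 *		...
 *	}
 *
 *	struct perf_buffer *pb;
 *
 *	pb = perf_buffer__new(map_fd, 64, on_sample, NULL, NULL, NULL);
 *	if (libbpf_get_error(pb))
 *		return -1;
 */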
11247
11248COMPAT_VERSION(perf_buffer__new_deprecated, perf_buffer__new, LIBBPF_0.0.4)
11249struct perf_buffer *perf_buffer__new_deprecated(int map_fd, size_t page_cnt,
11250 const struct perf_buffer_opts *opts)
11251{
11252 return perf_buffer__new_v0_6_0(map_fd, page_cnt,
11253 opts ? opts->sample_cb : NULL,
11254 opts ? opts->lost_cb : NULL,
11255 opts ? opts->ctx : NULL,
11256 NULL);
11257}
11258
11259DEFAULT_VERSION(perf_buffer__new_raw_v0_6_0, perf_buffer__new_raw, LIBBPF_0.6.0)
11260struct perf_buffer *perf_buffer__new_raw_v0_6_0(int map_fd, size_t page_cnt,
11261 struct perf_event_attr *attr,
11262 perf_buffer_event_fn event_cb, void *ctx,
11263 const struct perf_buffer_raw_opts *opts)
11264{
11265 struct perf_buffer_params p = {};
11266
11267 if (!attr)
11268 return libbpf_err_ptr(-EINVAL);
11269
11270 if (!OPTS_VALID(opts, perf_buffer_raw_opts))
11271 return libbpf_err_ptr(-EINVAL);
11272
11273 p.attr = attr;
11274 p.event_cb = event_cb;
11275 p.ctx = ctx;
11276 p.cpu_cnt = OPTS_GET(opts, cpu_cnt, 0);
11277 p.cpus = OPTS_GET(opts, cpus, NULL);
11278 p.map_keys = OPTS_GET(opts, map_keys, NULL);
11279
11280 return libbpf_ptr(__perf_buffer__new(map_fd, page_cnt, &p));
11281}
11282
11283COMPAT_VERSION(perf_buffer__new_raw_deprecated, perf_buffer__new_raw, LIBBPF_0.0.4)
11284struct perf_buffer *perf_buffer__new_raw_deprecated(int map_fd, size_t page_cnt,
11285 const struct perf_buffer_raw_opts *opts)
11286{
11287 LIBBPF_OPTS(perf_buffer_raw_opts, inner_opts,
11288 .cpu_cnt = opts->cpu_cnt,
11289 .cpus = opts->cpus,
11290 .map_keys = opts->map_keys,
11291 );
11292
11293 return perf_buffer__new_raw_v0_6_0(map_fd, page_cnt, opts->attr,
11294 opts->event_cb, opts->ctx, &inner_opts);
11295}
11296
11297static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
11298 struct perf_buffer_params *p)
11299{
11300 const char *online_cpus_file = "/sys/devices/system/cpu/online";
11301 struct bpf_map_info map;
11302 char msg[STRERR_BUFSIZE];
11303 struct perf_buffer *pb;
11304 bool *online = NULL;
11305 __u32 map_info_len;
11306 int err, i, j, n;
11307
11308 if (page_cnt == 0 || (page_cnt & (page_cnt - 1))) {
11309 pr_warn("page count should be power of two, but is %zu\n",
11310 page_cnt);
11311 return ERR_PTR(-EINVAL);
11312 }
11313
	/* best-effort sanity checks */
11315 memset(&map, 0, sizeof(map));
11316 map_info_len = sizeof(map);
11317 err = bpf_obj_get_info_by_fd(map_fd, &map, &map_info_len);
11318 if (err) {
11319 err = -errno;
		/* if BPF_OBJ_GET_INFO_BY_FD is supported, will return
		 * -EBADFD, -EFAULT, or -E2BIG on real error
		 */
11323 if (err != -EINVAL) {
11324 pr_warn("failed to get map info for map FD %d: %s\n",
11325 map_fd, libbpf_strerror_r(err, msg, sizeof(msg)));
11326 return ERR_PTR(err);
11327 }
11328 pr_debug("failed to get map info for FD %d; API not supported? Ignoring...\n",
11329 map_fd);
11330 } else {
11331 if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
11332 pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
11333 map.name);
11334 return ERR_PTR(-EINVAL);
11335 }
11336 }
11337
11338 pb = calloc(1, sizeof(*pb));
11339 if (!pb)
11340 return ERR_PTR(-ENOMEM);
11341
11342 pb->event_cb = p->event_cb;
11343 pb->sample_cb = p->sample_cb;
11344 pb->lost_cb = p->lost_cb;
11345 pb->ctx = p->ctx;
11346
11347 pb->page_size = getpagesize();
11348 pb->mmap_size = pb->page_size * page_cnt;
11349 pb->map_fd = map_fd;
11350
11351 pb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
11352 if (pb->epoll_fd < 0) {
11353 err = -errno;
11354 pr_warn("failed to create epoll instance: %s\n",
11355 libbpf_strerror_r(err, msg, sizeof(msg)));
11356 goto error;
11357 }
11358
11359 if (p->cpu_cnt > 0) {
11360 pb->cpu_cnt = p->cpu_cnt;
11361 } else {
11362 pb->cpu_cnt = libbpf_num_possible_cpus();
11363 if (pb->cpu_cnt < 0) {
11364 err = pb->cpu_cnt;
11365 goto error;
11366 }
11367 if (map.max_entries && map.max_entries < pb->cpu_cnt)
11368 pb->cpu_cnt = map.max_entries;
11369 }
11370
11371 pb->events = calloc(pb->cpu_cnt, sizeof(*pb->events));
11372 if (!pb->events) {
11373 err = -ENOMEM;
11374 pr_warn("failed to allocate events: out of memory\n");
11375 goto error;
11376 }
11377 pb->cpu_bufs = calloc(pb->cpu_cnt, sizeof(*pb->cpu_bufs));
11378 if (!pb->cpu_bufs) {
11379 err = -ENOMEM;
11380 pr_warn("failed to allocate buffers: out of memory\n");
11381 goto error;
11382 }
11383
11384 err = parse_cpu_mask_file(online_cpus_file, &online, &n);
11385 if (err) {
11386 pr_warn("failed to get online CPU mask: %d\n", err);
11387 goto error;
11388 }
11389
11390 for (i = 0, j = 0; i < pb->cpu_cnt; i++) {
11391 struct perf_cpu_buf *cpu_buf;
11392 int cpu, map_key;
11393
11394 cpu = p->cpu_cnt > 0 ? p->cpus[i] : i;
11395 map_key = p->cpu_cnt > 0 ? p->map_keys[i] : i;
11396
		/* in case user didn't explicitly request particular CPUs to
		 * be attached to, skip offline/not present CPUs
		 */
11400 if (p->cpu_cnt <= 0 && (cpu >= n || !online[cpu]))
11401 continue;
11402
11403 cpu_buf = perf_buffer__open_cpu_buf(pb, p->attr, cpu, map_key);
11404 if (IS_ERR(cpu_buf)) {
11405 err = PTR_ERR(cpu_buf);
11406 goto error;
11407 }
11408
11409 pb->cpu_bufs[j] = cpu_buf;
11410
11411 err = bpf_map_update_elem(pb->map_fd, &map_key,
11412 &cpu_buf->fd, 0);
11413 if (err) {
11414 err = -errno;
11415 pr_warn("failed to set cpu #%d, key %d -> perf FD %d: %s\n",
11416 cpu, map_key, cpu_buf->fd,
11417 libbpf_strerror_r(err, msg, sizeof(msg)));
11418 goto error;
11419 }
11420
11421 pb->events[j].events = EPOLLIN;
11422 pb->events[j].data.ptr = cpu_buf;
11423 if (epoll_ctl(pb->epoll_fd, EPOLL_CTL_ADD, cpu_buf->fd,
11424 &pb->events[j]) < 0) {
11425 err = -errno;
11426 pr_warn("failed to epoll_ctl cpu #%d perf FD %d: %s\n",
11427 cpu, cpu_buf->fd,
11428 libbpf_strerror_r(err, msg, sizeof(msg)));
11429 goto error;
11430 }
11431 j++;
11432 }
11433 pb->cpu_cnt = j;
11434 free(online);
11435
11436 return pb;
11437
11438error:
11439 free(online);
11440 if (pb)
11441 perf_buffer__free(pb);
11442 return ERR_PTR(err);
11443}
11444
11445struct perf_sample_raw {
11446 struct perf_event_header header;
11447 uint32_t size;
11448 char data[];
11449};
11450
11451struct perf_sample_lost {
11452 struct perf_event_header header;
11453 uint64_t id;
11454 uint64_t lost;
11455 uint64_t sample_id;
11456};
11457
11458static enum bpf_perf_event_ret
11459perf_buffer__process_record(struct perf_event_header *e, void *ctx)
11460{
11461 struct perf_cpu_buf *cpu_buf = ctx;
11462 struct perf_buffer *pb = cpu_buf->pb;
11463 void *data = e;
11464
	/* user wants full control over parsing perf event */
11466 if (pb->event_cb)
11467 return pb->event_cb(pb->ctx, cpu_buf->cpu, e);
11468
11469 switch (e->type) {
11470 case PERF_RECORD_SAMPLE: {
11471 struct perf_sample_raw *s = data;
11472
11473 if (pb->sample_cb)
11474 pb->sample_cb(pb->ctx, cpu_buf->cpu, s->data, s->size);
11475 break;
11476 }
11477 case PERF_RECORD_LOST: {
11478 struct perf_sample_lost *s = data;
11479
11480 if (pb->lost_cb)
11481 pb->lost_cb(pb->ctx, cpu_buf->cpu, s->lost);
11482 break;
11483 }
11484 default:
11485 pr_warn("unknown perf sample type %d\n", e->type);
11486 return LIBBPF_PERF_EVENT_ERROR;
11487 }
11488 return LIBBPF_PERF_EVENT_CONT;
11489}
11490
11491static int perf_buffer__process_records(struct perf_buffer *pb,
11492 struct perf_cpu_buf *cpu_buf)
11493{
11494 enum bpf_perf_event_ret ret;
11495
11496 ret = perf_event_read_simple(cpu_buf->base, pb->mmap_size,
11497 pb->page_size, &cpu_buf->buf,
11498 &cpu_buf->buf_size,
11499 perf_buffer__process_record, cpu_buf);
11500 if (ret != LIBBPF_PERF_EVENT_CONT)
11501 return ret;
11502 return 0;
11503}
11504
11505int perf_buffer__epoll_fd(const struct perf_buffer *pb)
11506{
11507 return pb->epoll_fd;
11508}
11509
11510int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms)
11511{
11512 int i, cnt, err;
11513
11514 cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, timeout_ms);
11515 if (cnt < 0)
11516 return -errno;
11517
11518 for (i = 0; i < cnt; i++) {
11519 struct perf_cpu_buf *cpu_buf = pb->events[i].data.ptr;
11520
11521 err = perf_buffer__process_records(pb, cpu_buf);
11522 if (err) {
11523 pr_warn("error while processing records: %d\n", err);
11524 return libbpf_err(err);
11525 }
11526 }
11527 return cnt;
11528}
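
/* Usage sketch (editor's illustration): a typical consumption loop; poll
 * with a 100ms timeout and stop on any error other than EINTR.
 *
 *	int err;
 *
 *	for (;;) {
 *		err = perf_buffer__poll(pb, 100);
 *		if (err < 0 && err != -EINTR)
 *			break;
 *	}
 *	perf_buffer__free(pb);
 */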
11529
/* Return the number of per-CPU ring buffers managed by this perf_buffer
 * (i.e., the number of PERF_EVENT_ARRAY map slots that were set up).
 */
11533size_t perf_buffer__buffer_cnt(const struct perf_buffer *pb)
11534{
11535 return pb->cpu_cnt;
11536}
11537
/* Return the raw perf_event FD of the ring buffer at the given index; the
 * FD can be used, e.g., to integrate the buffer into a custom event loop.
 * Returns -EINVAL for an out-of-range index and -ENOENT for a slot that was
 * skipped (e.g., an offline CPU).
 */
11543int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_idx)
11544{
11545 struct perf_cpu_buf *cpu_buf;
11546
11547 if (buf_idx >= pb->cpu_cnt)
11548 return libbpf_err(-EINVAL);
11549
11550 cpu_buf = pb->cpu_bufs[buf_idx];
11551 if (!cpu_buf)
11552 return libbpf_err(-ENOENT);
11553
11554 return cpu_buf->fd;
11555}
11556
/* Consume all pending data from the ring buffer at the given index,
 * invoking the configured callbacks for each record, without waiting for
 * an epoll notification.
 */
11565int perf_buffer__consume_buffer(struct perf_buffer *pb, size_t buf_idx)
11566{
11567 struct perf_cpu_buf *cpu_buf;
11568
11569 if (buf_idx >= pb->cpu_cnt)
11570 return libbpf_err(-EINVAL);
11571
11572 cpu_buf = pb->cpu_bufs[buf_idx];
11573 if (!cpu_buf)
11574 return libbpf_err(-ENOENT);
11575
11576 return perf_buffer__process_records(pb, cpu_buf);
11577}
11578
11579int perf_buffer__consume(struct perf_buffer *pb)
11580{
11581 int i, err;
11582
11583 for (i = 0; i < pb->cpu_cnt; i++) {
11584 struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];
11585
11586 if (!cpu_buf)
11587 continue;
11588
11589 err = perf_buffer__process_records(pb, cpu_buf);
11590 if (err) {
11591 pr_warn("perf_buffer: failed to process records in buffer #%d: %d\n", i, err);
11592 return libbpf_err(err);
11593 }
11594 }
11595 return 0;
11596}
11597
11598struct bpf_prog_info_array_desc {
	int array_offset;	/* e.g. offset of jited_prog_insns */
	int count_offset;	/* e.g. offset of jited_prog_len */
	int size_offset;	/* > 0: offset of rec size,
				 * < 0: fixed size of -size_offset
				 */
11604};
11605
11606static struct bpf_prog_info_array_desc bpf_prog_info_array_desc[] = {
11607 [BPF_PROG_INFO_JITED_INSNS] = {
11608 offsetof(struct bpf_prog_info, jited_prog_insns),
11609 offsetof(struct bpf_prog_info, jited_prog_len),
11610 -1,
11611 },
11612 [BPF_PROG_INFO_XLATED_INSNS] = {
11613 offsetof(struct bpf_prog_info, xlated_prog_insns),
11614 offsetof(struct bpf_prog_info, xlated_prog_len),
11615 -1,
11616 },
11617 [BPF_PROG_INFO_MAP_IDS] = {
11618 offsetof(struct bpf_prog_info, map_ids),
11619 offsetof(struct bpf_prog_info, nr_map_ids),
11620 -(int)sizeof(__u32),
11621 },
11622 [BPF_PROG_INFO_JITED_KSYMS] = {
11623 offsetof(struct bpf_prog_info, jited_ksyms),
11624 offsetof(struct bpf_prog_info, nr_jited_ksyms),
11625 -(int)sizeof(__u64),
11626 },
11627 [BPF_PROG_INFO_JITED_FUNC_LENS] = {
11628 offsetof(struct bpf_prog_info, jited_func_lens),
11629 offsetof(struct bpf_prog_info, nr_jited_func_lens),
11630 -(int)sizeof(__u32),
11631 },
11632 [BPF_PROG_INFO_FUNC_INFO] = {
11633 offsetof(struct bpf_prog_info, func_info),
11634 offsetof(struct bpf_prog_info, nr_func_info),
11635 offsetof(struct bpf_prog_info, func_info_rec_size),
11636 },
11637 [BPF_PROG_INFO_LINE_INFO] = {
11638 offsetof(struct bpf_prog_info, line_info),
11639 offsetof(struct bpf_prog_info, nr_line_info),
11640 offsetof(struct bpf_prog_info, line_info_rec_size),
11641 },
11642 [BPF_PROG_INFO_JITED_LINE_INFO] = {
11643 offsetof(struct bpf_prog_info, jited_line_info),
11644 offsetof(struct bpf_prog_info, nr_jited_line_info),
11645 offsetof(struct bpf_prog_info, jited_line_info_rec_size),
11646 },
11647 [BPF_PROG_INFO_PROG_TAGS] = {
11648 offsetof(struct bpf_prog_info, prog_tags),
11649 offsetof(struct bpf_prog_info, nr_prog_tags),
11650 -(int)sizeof(__u8) * BPF_TAG_SIZE,
11651 },
11652
11653};
11654
11655static __u32 bpf_prog_info_read_offset_u32(struct bpf_prog_info *info,
11656 int offset)
11657{
11658 __u32 *array = (__u32 *)info;
11659
11660 if (offset >= 0)
11661 return array[offset / sizeof(__u32)];
11662 return -(int)offset;
11663}
11664
11665static __u64 bpf_prog_info_read_offset_u64(struct bpf_prog_info *info,
11666 int offset)
11667{
11668 __u64 *array = (__u64 *)info;
11669
11670 if (offset >= 0)
11671 return array[offset / sizeof(__u64)];
11672 return -(int)offset;
11673}
11674
11675static void bpf_prog_info_set_offset_u32(struct bpf_prog_info *info, int offset,
11676 __u32 val)
11677{
11678 __u32 *array = (__u32 *)info;
11679
11680 if (offset >= 0)
11681 array[offset / sizeof(__u32)] = val;
11682}
11683
11684static void bpf_prog_info_set_offset_u64(struct bpf_prog_info *info, int offset,
11685 __u64 val)
11686{
11687 __u64 *array = (__u64 *)info;
11688
11689 if (offset >= 0)
11690 array[offset / sizeof(__u64)] = val;
11691}
11692
11693struct bpf_prog_info_linear *
11694bpf_program__get_prog_info_linear(int fd, __u64 arrays)
11695{
11696 struct bpf_prog_info_linear *info_linear;
11697 struct bpf_prog_info info = {};
11698 __u32 info_len = sizeof(info);
11699 __u32 data_len = 0;
11700 int i, err;
11701 void *ptr;
11702
11703 if (arrays >> BPF_PROG_INFO_LAST_ARRAY)
11704 return libbpf_err_ptr(-EINVAL);
11705
	/* step 1: get array dimensions */
11707 err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
11708 if (err) {
		pr_debug("can't get prog info: %s\n", strerror(errno));
11710 return libbpf_err_ptr(-EFAULT);
11711 }
11712
	/* step 2: calculate total size of all arrays */
11714 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
11715 bool include_array = (arrays & (1UL << i)) > 0;
11716 struct bpf_prog_info_array_desc *desc;
11717 __u32 count, size;
11718
11719 desc = bpf_prog_info_array_desc + i;
11720
		/* kernel is too old to support this field */
11722 if (info_len < desc->array_offset + sizeof(__u32) ||
11723 info_len < desc->count_offset + sizeof(__u32) ||
11724 (desc->size_offset > 0 && info_len < desc->size_offset))
11725 include_array = false;
11726
11727 if (!include_array) {
11728 arrays &= ~(1UL << i);
11729 continue;
11730 }
11731
11732 count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
11733 size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
11734
11735 data_len += count * size;
11736 }
11737
	/* step 3: allocate continuous memory */
11739 data_len = roundup(data_len, sizeof(__u64));
11740 info_linear = malloc(sizeof(struct bpf_prog_info_linear) + data_len);
11741 if (!info_linear)
11742 return libbpf_err_ptr(-ENOMEM);
11743
	/* step 4: fill data to info_linear->info */
11745 info_linear->arrays = arrays;
11746 memset(&info_linear->info, 0, sizeof(info));
11747 ptr = info_linear->data;
11748
11749 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
11750 struct bpf_prog_info_array_desc *desc;
11751 __u32 count, size;
11752
11753 if ((arrays & (1UL << i)) == 0)
11754 continue;
11755
11756 desc = bpf_prog_info_array_desc + i;
11757 count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
11758 size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
11759 bpf_prog_info_set_offset_u32(&info_linear->info,
11760 desc->count_offset, count);
11761 bpf_prog_info_set_offset_u32(&info_linear->info,
11762 desc->size_offset, size);
11763 bpf_prog_info_set_offset_u64(&info_linear->info,
11764 desc->array_offset,
11765 ptr_to_u64(ptr));
11766 ptr += count * size;
11767 }
11768
	/* step 5: call syscall again to get required arrays */
11770 err = bpf_obj_get_info_by_fd(fd, &info_linear->info, &info_len);
11771 if (err) {
		pr_debug("can't get prog info: %s\n", strerror(errno));
11773 free(info_linear);
11774 return libbpf_err_ptr(-EFAULT);
11775 }
11776
	/* step 6: verify the data */
11778 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
11779 struct bpf_prog_info_array_desc *desc;
11780 __u32 v1, v2;
11781
11782 if ((arrays & (1UL << i)) == 0)
11783 continue;
11784
11785 desc = bpf_prog_info_array_desc + i;
11786 v1 = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
11787 v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
11788 desc->count_offset);
11789 if (v1 != v2)
11790 pr_warn("%s: mismatch in element count\n", __func__);
11791
11792 v1 = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
11793 v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
11794 desc->size_offset);
11795 if (v1 != v2)
11796 pr_warn("%s: mismatch in rec size\n", __func__);
11797 }
11798
	/* step 7: update info_len and data_len */
11800 info_linear->info_len = sizeof(struct bpf_prog_info);
11801 info_linear->data_len = data_len;
11802
11803 return info_linear;
11804}
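
/* Usage sketch (editor's illustration): fetch prog info together with the
 * IDs of all maps used by the program, in one self-contained allocation.
 *
 *	struct bpf_prog_info_linear *info_linear;
 *	__u32 *map_ids;
 *	__u32 i;
 *
 *	info_linear = bpf_program__get_prog_info_linear(prog_fd,
 *					1UL << BPF_PROG_INFO_MAP_IDS);
 *	if (libbpf_get_error(info_linear))
 *		return -1;
 *	map_ids = (__u32 *)(uintptr_t)info_linear->info.map_ids;
 *	for (i = 0; i < info_linear->info.nr_map_ids; i++)
 *		...
 *	free(info_linear);
 */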
11805
11806void bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear)
11807{
11808 int i;
11809
11810 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
11811 struct bpf_prog_info_array_desc *desc;
11812 __u64 addr, offs;
11813
11814 if ((info_linear->arrays & (1UL << i)) == 0)
11815 continue;
11816
11817 desc = bpf_prog_info_array_desc + i;
11818 addr = bpf_prog_info_read_offset_u64(&info_linear->info,
11819 desc->array_offset);
11820 offs = addr - ptr_to_u64(info_linear->data);
11821 bpf_prog_info_set_offset_u64(&info_linear->info,
11822 desc->array_offset, offs);
11823 }
11824}
11825
11826void bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear)
11827{
11828 int i;
11829
11830 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
11831 struct bpf_prog_info_array_desc *desc;
11832 __u64 addr, offs;
11833
11834 if ((info_linear->arrays & (1UL << i)) == 0)
11835 continue;
11836
11837 desc = bpf_prog_info_array_desc + i;
11838 offs = bpf_prog_info_read_offset_u64(&info_linear->info,
11839 desc->array_offset);
11840 addr = offs + ptr_to_u64(info_linear->data);
11841 bpf_prog_info_set_offset_u64(&info_linear->info,
11842 desc->array_offset, addr);
11843 }
11844}
11845
11846int bpf_program__set_attach_target(struct bpf_program *prog,
11847 int attach_prog_fd,
11848 const char *attach_func_name)
11849{
11850 int btf_obj_fd = 0, btf_id = 0, err;
11851
11852 if (!prog || attach_prog_fd < 0)
11853 return libbpf_err(-EINVAL);
11854
11855 if (prog->obj->loaded)
11856 return libbpf_err(-EINVAL);
11857
11858 if (attach_prog_fd && !attach_func_name) {
		/* remember attach_prog_fd and let bpf_program__load() find
		 * BTF ID during the program load
		 */
11862 prog->attach_prog_fd = attach_prog_fd;
11863 return 0;
11864 }
11865
11866 if (attach_prog_fd) {
11867 btf_id = libbpf_find_prog_btf_id(attach_func_name,
11868 attach_prog_fd);
11869 if (btf_id < 0)
11870 return libbpf_err(btf_id);
11871 } else {
11872 if (!attach_func_name)
11873 return libbpf_err(-EINVAL);
11874
		/* load btf_vmlinux, if not yet */
11876 err = bpf_object__load_vmlinux_btf(prog->obj, true);
11877 if (err)
11878 return libbpf_err(err);
11879 err = find_kernel_btf_id(prog->obj, attach_func_name,
11880 prog->expected_attach_type,
11881 &btf_obj_fd, &btf_id);
11882 if (err)
11883 return libbpf_err(err);
11884 }
11885
11886 prog->attach_btf_id = btf_id;
11887 prog->attach_btf_obj_fd = btf_obj_fd;
11888 prog->attach_prog_fd = attach_prog_fd;
11889 return 0;
11890}
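
/* Usage sketch (editor's illustration): re-target an fentry/fexit program
 * to a different kernel function before the object is loaded; "prog",
 * "obj", and the target function are assumptions.
 *
 *	int err;
 *
 *	err = bpf_program__set_attach_target(prog, 0, "tcp_v4_connect");
 *	if (err)
 *		return err;
 *	err = bpf_object__load(obj);
 */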
11891
11892int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz)
11893{
11894 int err = 0, n, len, start, end = -1;
11895 bool *tmp;
11896
11897 *mask = NULL;
11898 *mask_sz = 0;
11899
	/* Each sub string separated by ',' has format \d+-\d+ or \d+ */
11901 while (*s) {
11902 if (*s == ',' || *s == '\n') {
11903 s++;
11904 continue;
11905 }
11906 n = sscanf(s, "%d%n-%d%n", &start, &len, &end, &len);
11907 if (n <= 0 || n > 2) {
11908 pr_warn("Failed to get CPU range %s: %d\n", s, n);
11909 err = -EINVAL;
11910 goto cleanup;
11911 } else if (n == 1) {
11912 end = start;
11913 }
11914 if (start < 0 || start > end) {
11915 pr_warn("Invalid CPU range [%d,%d] in %s\n",
11916 start, end, s);
11917 err = -EINVAL;
11918 goto cleanup;
11919 }
11920 tmp = realloc(*mask, end + 1);
11921 if (!tmp) {
11922 err = -ENOMEM;
11923 goto cleanup;
11924 }
11925 *mask = tmp;
11926 memset(tmp + *mask_sz, 0, start - *mask_sz);
11927 memset(tmp + start, 1, end - start + 1);
11928 *mask_sz = end + 1;
11929 s += len;
11930 }
11931 if (!*mask_sz) {
11932 pr_warn("Empty CPU range\n");
11933 return -EINVAL;
11934 }
11935 return 0;
11936cleanup:
11937 free(*mask);
11938 *mask = NULL;
11939 return err;
11940}
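
/* Example (editor's illustration): the accepted format matches
 * /sys/devices/system/cpu/{possible,online}; parsing "0-2,4" yields
 * n == 5 and mask == {true, true, true, false, true}.
 *
 *	bool *mask;
 *	int n, err;
 *
 *	err = parse_cpu_mask_str("0-2,4", &mask, &n);
 *	if (!err)
 *		free(mask);
 */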
11941
11942int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz)
11943{
11944 int fd, err = 0, len;
11945 char buf[128];
11946
11947 fd = open(fcpu, O_RDONLY | O_CLOEXEC);
11948 if (fd < 0) {
11949 err = -errno;
11950 pr_warn("Failed to open cpu mask file %s: %d\n", fcpu, err);
11951 return err;
11952 }
11953 len = read(fd, buf, sizeof(buf));
11954 close(fd);
11955 if (len <= 0) {
11956 err = len ? -errno : -EINVAL;
11957 pr_warn("Failed to read cpu mask from %s: %d\n", fcpu, err);
11958 return err;
11959 }
11960 if (len >= sizeof(buf)) {
11961 pr_warn("CPU mask is too big in file %s\n", fcpu);
11962 return -E2BIG;
11963 }
11964 buf[len] = '\0';
11965
11966 return parse_cpu_mask_str(buf, mask, mask_sz);
11967}
11968
11969int libbpf_num_possible_cpus(void)
11970{
11971 static const char *fcpu = "/sys/devices/system/cpu/possible";
11972 static int cpus;
11973 int err, n, i, tmp_cpus;
11974 bool *mask;
11975
11976 tmp_cpus = READ_ONCE(cpus);
11977 if (tmp_cpus > 0)
11978 return tmp_cpus;
11979
11980 err = parse_cpu_mask_file(fcpu, &mask, &n);
11981 if (err)
11982 return libbpf_err(err);
11983
11984 tmp_cpus = 0;
11985 for (i = 0; i < n; i++) {
11986 if (mask[i])
11987 tmp_cpus++;
11988 }
11989 free(mask);
11990
11991 WRITE_ONCE(cpus, tmp_cpus);
11992 return tmp_cpus;
11993}
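
/* Usage sketch (editor's illustration): size a buffer for a per-CPU map
 * lookup, which returns one value per possible CPU; "percpu_map_fd" and
 * "key" are assumptions.
 *
 *	int ncpus = libbpf_num_possible_cpus();
 *
 *	if (ncpus < 0)
 *		return ncpus;
 *	long values[ncpus];
 *	if (bpf_map_lookup_elem(percpu_map_fd, &key, values))
 *		return -1;
 */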
11994
11995static int populate_skeleton_maps(const struct bpf_object *obj,
11996 struct bpf_map_skeleton *maps,
11997 size_t map_cnt)
11998{
11999 int i;
12000
12001 for (i = 0; i < map_cnt; i++) {
12002 struct bpf_map **map = maps[i].map;
12003 const char *name = maps[i].name;
12004 void **mmaped = maps[i].mmaped;
12005
12006 *map = bpf_object__find_map_by_name(obj, name);
12007 if (!*map) {
12008 pr_warn("failed to find skeleton map '%s'\n", name);
12009 return -ESRCH;
12010 }
12011
		/* externs shouldn't be pre-setup from user code */
12013 if (mmaped && (*map)->libbpf_type != LIBBPF_MAP_KCONFIG)
12014 *mmaped = (*map)->mmaped;
12015 }
12016 return 0;
12017}
12018
12019static int populate_skeleton_progs(const struct bpf_object *obj,
12020 struct bpf_prog_skeleton *progs,
12021 size_t prog_cnt)
12022{
12023 int i;
12024
12025 for (i = 0; i < prog_cnt; i++) {
12026 struct bpf_program **prog = progs[i].prog;
12027 const char *name = progs[i].name;
12028
12029 *prog = bpf_object__find_program_by_name(obj, name);
12030 if (!*prog) {
12031 pr_warn("failed to find skeleton program '%s'\n", name);
12032 return -ESRCH;
12033 }
12034 }
12035 return 0;
12036}
12037
12038int bpf_object__open_skeleton(struct bpf_object_skeleton *s,
12039 const struct bpf_object_open_opts *opts)
12040{
12041 DECLARE_LIBBPF_OPTS(bpf_object_open_opts, skel_opts,
12042 .object_name = s->name,
12043 );
12044 struct bpf_object *obj;
12045 int err;
12046
	/* Attempt to preserve opts->object_name, unless overridden by user
	 * explicitly. Overwriting object name for skeletons is discouraged,
	 * as it breaks global data maps, because they contain object name
	 * prefix as their own map name prefix. When skeleton is generated,
	 * bpftool is making an assumption that this name will stay the same.
	 */
12053 if (opts) {
12054 memcpy(&skel_opts, opts, sizeof(*opts));
12055 if (!opts->object_name)
12056 skel_opts.object_name = s->name;
12057 }
12058
12059 obj = bpf_object__open_mem(s->data, s->data_sz, &skel_opts);
12060 err = libbpf_get_error(obj);
12061 if (err) {
12062 pr_warn("failed to initialize skeleton BPF object '%s': %d\n",
12063 s->name, err);
12064 return libbpf_err(err);
12065 }
12066
12067 *s->obj = obj;
12068 err = populate_skeleton_maps(obj, s->maps, s->map_cnt);
12069 if (err) {
12070 pr_warn("failed to populate skeleton maps for '%s': %d\n", s->name, err);
12071 return libbpf_err(err);
12072 }
12073
12074 err = populate_skeleton_progs(obj, s->progs, s->prog_cnt);
12075 if (err) {
12076 pr_warn("failed to populate skeleton progs for '%s': %d\n", s->name, err);
12077 return libbpf_err(err);
12078 }
12079
12080 return 0;
12081}
12082
12083int bpf_object__open_subskeleton(struct bpf_object_subskeleton *s)
12084{
12085 int err, len, var_idx, i;
12086 const char *var_name;
12087 const struct bpf_map *map;
12088 struct btf *btf;
12089 __u32 map_type_id;
12090 const struct btf_type *map_type, *var_type;
12091 const struct bpf_var_skeleton *var_skel;
12092 struct btf_var_secinfo *var;
12093
12094 if (!s->obj)
12095 return libbpf_err(-EINVAL);
12096
12097 btf = bpf_object__btf(s->obj);
12098 if (!btf) {
12099 pr_warn("subskeletons require BTF at runtime (object %s)\n",
12100 bpf_object__name(s->obj));
		return libbpf_err(-EINVAL);
12102 }
12103
12104 err = populate_skeleton_maps(s->obj, s->maps, s->map_cnt);
12105 if (err) {
12106 pr_warn("failed to populate subskeleton maps: %d\n", err);
12107 return libbpf_err(err);
12108 }
12109
12110 err = populate_skeleton_progs(s->obj, s->progs, s->prog_cnt);
12111 if (err) {
		pr_warn("failed to populate subskeleton progs: %d\n", err);
12113 return libbpf_err(err);
12114 }
12115
12116 for (var_idx = 0; var_idx < s->var_cnt; var_idx++) {
12117 var_skel = &s->vars[var_idx];
12118 map = *var_skel->map;
12119 map_type_id = bpf_map__btf_value_type_id(map);
12120 map_type = btf__type_by_id(btf, map_type_id);
12121
12122 if (!btf_is_datasec(map_type)) {
			pr_warn("type for map '%1$s' is not a datasec: %2$s\n",
12124 bpf_map__name(map),
12125 __btf_kind_str(btf_kind(map_type)));
12126 return libbpf_err(-EINVAL);
12127 }
12128
12129 len = btf_vlen(map_type);
12130 var = btf_var_secinfos(map_type);
12131 for (i = 0; i < len; i++, var++) {
12132 var_type = btf__type_by_id(btf, var->type);
12133 var_name = btf__name_by_offset(btf, var_type->name_off);
12134 if (strcmp(var_name, var_skel->name) == 0) {
12135 *var_skel->addr = map->mmaped + var->offset;
12136 break;
12137 }
12138 }
12139 }
12140 return 0;
12141}
12142
12143void bpf_object__destroy_subskeleton(struct bpf_object_subskeleton *s)
12144{
12145 if (!s)
12146 return;
12147 free(s->maps);
12148 free(s->progs);
12149 free(s->vars);
12150 free(s);
12151}
12152
12153int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
12154{
12155 int i, err;
12156
12157 err = bpf_object__load(*s->obj);
12158 if (err) {
12159 pr_warn("failed to load BPF skeleton '%s': %d\n", s->name, err);
12160 return libbpf_err(err);
12161 }
12162
12163 for (i = 0; i < s->map_cnt; i++) {
12164 struct bpf_map *map = *s->maps[i].map;
12165 size_t mmap_sz = bpf_map_mmap_sz(map);
12166 int prot, map_fd = bpf_map__fd(map);
12167 void **mmaped = s->maps[i].mmaped;
12168
12169 if (!mmaped)
12170 continue;
12171
12172 if (!(map->def.map_flags & BPF_F_MMAPABLE)) {
12173 *mmaped = NULL;
12174 continue;
12175 }
12176
12177 if (map->def.map_flags & BPF_F_RDONLY_PROG)
12178 prot = PROT_READ;
12179 else
12180 prot = PROT_READ | PROT_WRITE;
12181
		/* Remap anonymous mmap()-ed "map initialization image" as
		 * a BPF map-backed mmap()-ed memory, but preserving the same
		 * memory address. This will cause kernel to change process'
		 * page table to point to a different piece of kernel memory,
		 * but from userspace point of view memory address (and its
		 * contents, being identical at this point) will stay the
		 * same. This mapping will be released by bpf_object__close()
		 * as per normal clean up procedure, so we don't need to
		 * worry about it from skeleton's clean up perspective.
		 */
12192 *mmaped = mmap(map->mmaped, mmap_sz, prot,
12193 MAP_SHARED | MAP_FIXED, map_fd, 0);
12194 if (*mmaped == MAP_FAILED) {
12195 err = -errno;
12196 *mmaped = NULL;
12197 pr_warn("failed to re-mmap() map '%s': %d\n",
12198 bpf_map__name(map), err);
12199 return libbpf_err(err);
12200 }
12201 }
12202
12203 return 0;
12204}
12205
12206int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
12207{
12208 int i, err;
12209
12210 for (i = 0; i < s->prog_cnt; i++) {
12211 struct bpf_program *prog = *s->progs[i].prog;
12212 struct bpf_link **link = s->progs[i].link;
12213
12214 if (!prog->load)
12215 continue;
12216
		/* auto-attaching not supported for this program */
12218 if (!prog->sec_def || !prog->sec_def->prog_attach_fn)
12219 continue;
12220
		/* if user already set the link manually, don't attempt auto-attach */
12222 if (*link)
12223 continue;
12224
12225 err = prog->sec_def->prog_attach_fn(prog, prog->sec_def->cookie, link);
12226 if (err) {
12227 pr_warn("prog '%s': failed to auto-attach: %d\n",
12228 bpf_program__name(prog), err);
12229 return libbpf_err(err);
12230 }
12231
		/* It's possible for an attach handler to succeed yet leave
		 * *link NULL: that means auto-attach is not applicable for
		 * this program and the user is expected to attach it
		 * manually. Unlike bpf_program__attach(), skeleton attach
		 * treats a NULL link as success; see the comment there.
		 */
12242 }
12243
12244 return 0;
12245}
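
/* Usage sketch (editor's illustration): these skeleton functions are
 * normally driven by bpftool-generated <name>.skel.h wrappers, where
 * my_obj__open_and_load() wraps bpf_object__open_skeleton() plus
 * bpf_object__load_skeleton(), my_obj__attach() wraps
 * bpf_object__attach_skeleton(), and so on. The "my_obj" names are
 * hypothetical generated identifiers.
 *
 *	struct my_obj *skel;
 *
 *	skel = my_obj__open_and_load();
 *	if (!skel)
 *		return -1;
 *	if (my_obj__attach(skel))
 *		goto cleanup;
 *	...
 * cleanup:
 *	my_obj__destroy(skel);
 */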
12246
12247void bpf_object__detach_skeleton(struct bpf_object_skeleton *s)
12248{
12249 int i;
12250
12251 for (i = 0; i < s->prog_cnt; i++) {
12252 struct bpf_link **link = s->progs[i].link;
12253
12254 bpf_link__destroy(*link);
12255 *link = NULL;
12256 }
12257}
12258
12259void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s)
12260{
12261 if (!s)
12262 return;
12263
12264 if (s->progs)
12265 bpf_object__detach_skeleton(s);
12266 if (s->obj)
12267 bpf_object__close(*s->obj);
12268 free(s->maps);
12269 free(s->progs);
12270 free(s);
12271}
12272